You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

282 lines
9.9 KiB

10 years ago
10 years ago
10 years ago
10 years ago
10 years ago
10 years ago
10 years ago
10 years ago
10 years ago
10 years ago
10 years ago
10 years ago
  1. import array
  2. import numpy as np
  3. import struct
  4. import re
  5. from StringIO import StringIO
  6. from datetime import datetime
class Nd2FileReader(object):
    """
    Reads .nd2 files, provides an interface to the metadata, and generates numpy arrays from the image data.

    Python 2 implementation: chunk labels and decoded values are byte strings,
    and `StringIO` is used to walk raw chunk data.
    """
    def __init__(self, filename):
        # Lazily-populated caches; the chunk map and the metadata dictionaries
        # are read eagerly so every other accessor can rely on them.
        self._absolute_start = None
        self._filename = filename
        self._file_handler = None  # opened on first access via the `fh` property
        # NOTE(review): never written again in the visible code — _read_map
        # uses a local variable instead of this attribute; confirm it is needed.
        self._chunk_map_start_location = None
        self._label_map = {}  # chunk label -> chunk(location, length) entries from the file map
        self._metadata = {}   # top-level label prefix -> decoded LV dictionary
        self._read_map()
        self._parse_dict_data()
        self.__dimensions = None  # cached "Dimensions: ..." line, see _dimensions

    @property
    def _dimensions(self):
        """
        The raw "Dimensions: ..." line from the textual image metadata, cached
        after the first lookup.

        :raises Exception: if no metadata entry contains a "Dimensions:" marker.
        """
        if self.__dimensions is None:
            # The particular slot that this data shows up in changes (seemingly) randomly
            for line in self._metadata['ImageTextInfo']['SLxImageTextInfo'].values():
                if "Dimensions:" in line:
                    metadata = line
                    break
            else:
                # for/else: only reached when no entry matched above.
                raise Exception("Could not parse metadata dimensions!")
            # The matching entry is a multi-line blob; keep only the single
            # line that actually starts with the "Dimensions:" prefix.
            for line in metadata.split("\r\n"):
                if line.startswith("Dimensions:"):
                    self.__dimensions = line
                    break
        return self.__dimensions

    @property
    def absolute_start(self):
        """
        The acquisition start time, found by trying to parse every textual
        metadata entry as a date.

        Returns None if no entry parses as a date in either format.
        """
        if self._absolute_start is None:
            for line in self._metadata['ImageTextInfo']['SLxImageTextInfo'].values():
                absolute_start_12 = None
                absolute_start_24 = None
                # ND2s seem to randomly switch between 12- and 24-hour representations.
                try:
                    absolute_start_24 = datetime.strptime(line, "%m/%d/%Y %H:%M:%S")
                except ValueError:
                    pass
                try:
                    absolute_start_12 = datetime.strptime(line, "%m/%d/%Y %I:%M:%S %p")
                except ValueError:
                    pass
                if not absolute_start_12 and not absolute_start_24:
                    continue
                # NOTE(review): no break here — if several entries parse as
                # dates, the last one examined wins. Confirm that is intended.
                self._absolute_start = absolute_start_12 if absolute_start_12 else absolute_start_24
        return self._absolute_start

    @property
    def fh(self):
        """Lazily-opened binary file handle for the .nd2 file."""
        if self._file_handler is None:
            self._file_handler = open(self._filename, "rb")
        return self._file_handler

    @property
    def time_index_count(self):
        """
        The number of images for a given field of view, channel, and z_level combination.
        Effectively the number of frames.
        :rtype: int
        """
        # "T'(n)" in the dimensions line; AttributeError from the failed
        # re.match (None.group) means the axis is absent, so default to 1.
        pattern = r""".*?T'\((\d+)\).*?"""
        try:
            count = int(re.match(pattern, self._dimensions).group(1))
        except AttributeError:
            return 1
        else:
            return count

    @property
    def z_level_count(self):
        """
        The number of z-levels, parsed from the "Z(n)" token of the
        dimensions line; defaults to 1 when the axis is absent.
        :rtype: int
        """
        pattern = r""".*?Z\((\d+)\).*?"""
        try:
            count = int(re.match(pattern, self._dimensions).group(1))
        except AttributeError:
            return 1
        else:
            return count

    @property
    def field_of_view_count(self):
        """
        The metadata contains information about fields of view, but it contains it even if some fields
        of view were cropped. We can't find anything that states which fields of view are actually
        in the image data, so we have to calculate it. There probably is something somewhere, since
        NIS Elements can figure it out, but we haven't found it yet.
        :rtype: int
        """
        # "XY(n)" token of the dimensions line; defaults to 1 when absent.
        pattern = r""".*?XY\((\d+)\).*?"""
        try:
            count = int(re.match(pattern, self._dimensions).group(1))
        except AttributeError:
            return 1
        else:
            return count

    @property
    def channel_count(self):
        """
        The number of channels, parsed from the Greek-lambda token
        of the dimensions line; defaults to 1 when absent.
        :rtype: int
        """
        pattern = r""".*?λ\((\d+)\).*?"""
        try:
            count = int(re.match(pattern, self._dimensions).group(1))
        except AttributeError:
            return 1
        else:
            return count

    def get_raw_image_data(self, image_set_number, channel_offset):
        """
        Returns the timestamp and the raw 16-bit pixel data for one channel of
        the given image set.

        :param image_set_number: index used to build the "ImageDataSeq|N!" label
        :param channel_offset: which channel's interleaved samples to extract
        :return: (timestamp, array.array of uint16 pixel values)
        """
        chunk = self._label_map["ImageDataSeq|%d!" % image_set_number]
        data = self._read_chunk(chunk.location)
        # The first eight bytes of the chunk are a double-precision timestamp.
        timestamp = struct.unpack("d", data[:8])[0]
        # The images for the various channels are interleaved within each other. Yes, this is an incredibly unintuitive and nonsensical way
        # to store data.
        image_data = array.array("H", data)
        # Skip the 8-byte timestamp (4 uint16 slots), then take every
        # channel_count-th sample starting at this channel's offset.
        image_data_start = 4 + channel_offset
        return timestamp, image_data[image_data_start::self.channel_count]

    def _parse_dict_data(self):
        # TODO: Don't like this name
        # Decodes every "LV"-flavored chunk and stores the resulting dictionary
        # under the part of the label that precedes "LV".
        for label in self._top_level_dict_labels:
            chunk_location = self._label_map[label].location
            data = self._read_chunk(chunk_location)
            stop = label.index("LV")
            self._metadata[label[:stop]] = self.read_lv_encoding(data, 1)

    @property
    def metadata(self):
        """The decoded top-level metadata dictionaries, keyed by label prefix."""
        return self._metadata

    @property
    def _top_level_dict_labels(self):
        # TODO: I don't like this name either
        # Yields every chunk label that looks like an LV-encoded dictionary.
        for label in self._label_map.keys():
            if label.endswith("LV!") or "LV|" in label:
                yield label

    def _read_map(self):
        """
        Every label ends with an exclamation point, however, we can't directly search for those to find all the labels
        as some of the bytes contain the value 33, which is the ASCII code for "!". So we iteratively find each label,
        grab the subsequent data (always 16 bytes long), advance to the next label and repeat.
        """
        # The last eight bytes of the file hold the absolute offset of the chunk map.
        self.fh.seek(-8, 2)
        chunk_map_start_location = struct.unpack("Q", self.fh.read(8))[0]
        self.fh.seek(chunk_map_start_location)
        raw_text = self.fh.read(-1)  # read through to end of file
        # The map proper begins right after its 32-byte signature.
        label_start = raw_text.index("ND2 FILEMAP SIGNATURE NAME 0001!") + 32
        while True:
            data_start = raw_text.index("!", label_start) + 1
            key = raw_text[label_start: data_start]  # label text, including the trailing "!"
            # Each label is followed by two uint64s: chunk offset and chunk length.
            location, length = struct.unpack("QQ", raw_text[data_start: data_start + 16])
            # NOTE(review): `chunk` is not defined in the visible code —
            # presumably a namedtuple-style factory defined elsewhere; confirm.
            label, value = key, chunk(location=location, length=length)
            if label == "ND2 CHUNK MAP SIGNATURE 0000001!":
                # We've reached the end of the chunk map
                break
            self._label_map[label] = value
            label_start = data_start + 16

    def _read_chunk(self, chunk_location):
        """
        Gets the data for a given chunk pointer
        """
        self.fh.seek(chunk_location)
        chunk_data = self._read_chunk_metadata()
        header, relative_offset, data_length = self._parse_chunk_metadata(chunk_data)
        return self._read_chunk_data(chunk_location, relative_offset, data_length)

    def _read_chunk_metadata(self):
        """
        Gets the chunks metadata, which is always 16 bytes
        """
        return self.fh.read(16)

    def _read_chunk_data(self, chunk_location, relative_offset, data_length):
        """
        Reads the actual data for a given chunk
        """
        # We start at the location of the chunk metadata, skip over the metadata, and then proceed to the
        # start of the actual data field, which is at some arbitrary place after the metadata.
        self.fh.seek(chunk_location + 16 + relative_offset)
        return self.fh.read(data_length)

    @staticmethod
    def _parse_chunk_metadata(chunk_data):
        """
        Finds out everything about a given chunk. Every chunk begins with the same value, so if that's ever
        different we can assume the file has suffered some kind of damage.

        :return: (header, relative_offset, data_length)
        :raises ValueError: if the magic header is not 0xabeceda.
        """
        header, relative_offset, data_length = struct.unpack("IIQ", chunk_data)
        if header != 0xabeceda:
            raise ValueError("The ND2 file seems to be corrupted.")
        return header, relative_offset, data_length

    def _get_raw_chunk_map_text(self):
        """
        Reads the entire chunk map and returns it as a string.
        """
        # NOTE(review): unimplemented stub — the body is only this docstring,
        # so calling it returns None. Confirm whether it should be removed.

    @staticmethod
    def as_numpy_array(arr):
        # NOTE(review): np.frombuffer with no dtype interprets the buffer as
        # float64, while the image data is uint16 ("H") — confirm callers
        # expect this.
        return np.frombuffer(arr)

    def _z_level_count(self):
        # Counts the 8-byte entries in the "CustomData|Z!" chunk.
        # NOTE(review): overlaps with the `z_level_count` property, which
        # derives the count from the dimensions line instead — confirm which
        # is authoritative.
        name = "CustomData|Z!"
        st = self._read_chunk(self._label_map[name].location)
        res = array.array("d", st)
        return len(res)

    def read_lv_encoding(self, data, count):
        """
        Decodes `count` entries of the ND2 "LV" tagged-value encoding from the
        byte string `data` into a dict.

        Each entry is a 2-byte header (type code, name length), a UTF-16 name,
        then a type-dependent payload. Repeated names collect into a list.

        :raises AssertionError: on an unknown type code, or if any bytes
            remain unconsumed after all entries are read.
        """
        data = StringIO(data)
        res = {}
        total_count = 0  # NOTE(review): incremented but never read — dead counter.
        for c in range(count):
            lastpos = data.tell()  # entry start; needed for type-11 length math
            total_count += 1
            hdr = data.read(2)
            if not hdr:
                # Ran out of data before reading `count` entries.
                break
            typ = ord(hdr[0])
            # Name is stored as UTF-16 (2 bytes per character), NUL-terminated.
            bname = data.read(2*ord(hdr[1]))
            name = bname.decode("utf16")[:-1].encode("utf8")
            if typ == 1:
                # unsigned byte
                value, = struct.unpack("B", data.read(1))
            elif typ in [2, 3]:
                # 32-bit unsigned integer
                value, = struct.unpack("I", data.read(4))
            elif typ == 5:
                # 64-bit unsigned integer
                value, = struct.unpack("Q", data.read(8))
            elif typ == 6:
                # double-precision float
                value, = struct.unpack("d", data.read(8))
            elif typ == 8:
                # NUL-terminated UTF-16 string, read two bytes at a time.
                value = data.read(2)
                while value[-2:] != "\x00\x00":
                    value += data.read(2)
                value = value.decode("utf16")[:-1].encode("utf8")
            elif typ == 9:
                # length-prefixed raw byte blob
                cnt, = struct.unpack("Q", data.read(8))
                value = array.array("B", data.read(cnt))
            elif typ == 11:
                # Nested LV dictionary: element count plus total byte length
                # (measured from the start of this entry).
                newcount, length = struct.unpack("<IQ", data.read(12))
                length -= data.tell()-lastpos
                nextdata = data.read(length)
                value = self.read_lv_encoding(nextdata, newcount)
                # Skip some offsets
                data.read(newcount * 8)
            else:
                assert 0, "%s hdr %x:%x unknown" % (name, ord(hdr[0]), ord(hdr[1]))
            # First occurrence stores the bare value; repeats promote it to a list.
            if not name in res:
                res[name] = value
            else:
                if not isinstance(res[name], list):
                    res[name] = [res[name]]
                res[name].append(value)
        # Everything should have been consumed; leftovers indicate a parse bug.
        x = data.read()
        assert not x, "skip %d %s" % (len(x), repr(x[:30]))
        return res