# -*- coding: utf-8 -*-
import array
from collections import namedtuple
from datetime import datetime
import numpy as np
import re
import struct
from StringIO import StringIO

field_of_view = namedtuple('FOV', ['number', 'x', 'y', 'z', 'pfs_offset'])


class Nd2Parser(object):
    """
    Reads .nd2 files, provides an interface to the metadata, and generates numpy arrays from the image data.
    You should not ever need to instantiate this class manually unless you're a developer.
    """
    CHUNK_HEADER = 0xabeceda
    CHUNK_MAP_START = "ND2 FILEMAP SIGNATURE NAME 0001!"
    CHUNK_MAP_END = "ND2 CHUNK MAP SIGNATURE 0000001!"

    def __init__(self, filename):
        self._filename = filename
        self._fh = None
        self._chunk_map_start_location = None
        self._cursor_position = 0
        self._dimension_text = None
        self._label_map = {}
        self.metadata = {}
        self._read_map()
        self._parse_metadata()

    @property
    def _file_handle(self):
        if self._fh is None:
            self._fh = open(self._filename, "rb")
        return self._fh

    def _get_raw_image_data(self, image_group_number, channel_offset):
        """
        Reads the raw bytes and the timestamp of an image.
        :param image_group_number: groups are made of images with the same time index, field of view and z-level.
        :type image_group_number: int
        :param channel_offset: the offset in the array where the bytes for this image are found.
        :type channel_offset: int
        :return: (int, array.array()) or None
        """
        chunk = self._label_map["ImageDataSeq|%d!" % image_group_number]
        data = self._read_chunk(chunk)
        # All images in the same image group share the same timestamp! So if you have complicated image data,
        # your timestamps may not be entirely accurate. Practically speaking though, they'll only be off by a few
        # seconds unless you're doing something super weird.
        timestamp = struct.unpack("d", data[:8])[0]
        image_group_data = array.array("H", data)
        image_data_start = 4 + channel_offset
        # The images for the various channels are interleaved within the same array. For example, the second image
        # of a four image group will be composed of bytes 2, 6, 10, etc. If you understand why someone would design
        # a data structure that way, please send the author of this library a message.
        image_data = image_group_data[image_data_start::self._channel_count]
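        # Illustrative example (assumed values): with self._channel_count == 4 and channel_offset == 1,
        # this slice is image_group_data[5::4], i.e. elements 5, 9, 13, ... of the 16-bit array.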
        # Skip images that are all zeros! This is important, since NIS Elements creates blank "gap" images if you
        # don't have the same number of images each cycle. We discovered this because we only took GFP images every
        # other cycle to reduce phototoxicity, but NIS Elements still allocated memory as if we were going to take
        # them every cycle.
        if np.any(image_data):
            return timestamp, image_data
        return None

    @property
    def _dimensions(self):
        """
        While there are metadata values that represent a lot of what we want to capture, they seem to be unreliable.
        Sometimes certain elements don't exist, or change their data type randomly. However, the human-readable text
        is always there and in the same exact format, so we just parse that instead.
        :rtype: str
        """
        if self._dimension_text is None:
            for line in self.metadata['ImageTextInfo']['SLxImageTextInfo'].values():
                if "Dimensions:" in line:
                    metadata = line
                    break
            else:
                raise ValueError("Could not parse metadata dimensions!")
            for line in metadata.split("\r\n"):
                if line.startswith("Dimensions:"):
                    self._dimension_text = line
                    break
            else:
                raise ValueError("Could not parse metadata dimensions!")
        return self._dimension_text
    @property
    def _channels(self):
        """
        These are labels created by the NIS Elements user. Typically they may be a short description of the filter cube
        used (e.g. "bright field", "GFP", etc.)
        :rtype: str
        """
        metadata = self.metadata['ImageMetadataSeq']['SLxPictureMetadata']['sPicturePlanes']
        try:
            validity = self.metadata['ImageMetadata']['SLxExperiment']['ppNextLevelEx'][''][0]['ppNextLevelEx'][''][0]['pItemValid']
        except KeyError:
            # If none of the channels have been deleted, there is no validity list, so we just make one
            validity = [True for _ in metadata]
        # Channel information is contained in dictionaries with the keys a0, a1...an where the number
        # indicates the order in which the channel is stored. So by sorting the dicts alphabetically
        # we get the correct order.
        for (label, chan), valid in zip(sorted(metadata['sPlaneNew'].items()), validity):
            if not valid:
                continue
            yield chan['sDescription']

    def _calculate_image_group_number(self, time_index, fov, z_level):
        """
        Images are grouped together if they share the same time index, field of view, and z-level.
        :type time_index: int
        :type fov: int
        :type z_level: int
        :rtype: int
        """
        return time_index * self._field_of_view_count * self._z_level_count + (fov * self._z_level_count + z_level)
    @property
    def _channel_offset(self):
        """
        Image data is interleaved for each image set. That is, if there are four images in a set, the first image
        will consist of pixels 1, 5, 9, etc., the second will be pixels 2, 6, 10, and so forth.
        :rtype: dict
        """
        channel_offset = {}
        for n, channel in enumerate(self._channels):
            channel_offset[channel] = n
        return channel_offset

    @property
    def _absolute_start(self):
        """
        The date and time when acquisition began.
        :rtype: datetime.datetime()
        """
        for line in self.metadata['ImageTextInfo']['SLxImageTextInfo'].values():
            absolute_start_12 = None
            absolute_start_24 = None
            # ND2s seem to randomly switch between 12- and 24-hour representations.
            try:
                absolute_start_24 = datetime.strptime(line, "%m/%d/%Y %H:%M:%S")
            except ValueError:
                pass
            try:
                absolute_start_12 = datetime.strptime(line, "%m/%d/%Y %I:%M:%S %p")
            except ValueError:
                pass
            if not absolute_start_12 and not absolute_start_24:
                continue
            return absolute_start_12 if absolute_start_12 else absolute_start_24
        raise ValueError("This ND2 has no recorded start time. This is probably a bug.")
    @property
    def _channel_count(self):
        """
        The number of different channels used, including bright field.
        :rtype: int
        """
        pattern = r""".*?λ\((\d+)\).*?"""
        try:
            count = int(re.match(pattern, self._dimensions).group(1))
        except AttributeError:
            return 1
        else:
            return count

    @property
    def _field_of_view_count(self):
        """
        The metadata contains information about fields of view, but it contains it even if some fields
        of view were cropped. We can't find anything that states which fields of view are actually
        in the image data, so we have to calculate it. There probably is something somewhere, since
        NIS Elements can figure it out, but we haven't found it yet.
        :rtype: int
        """
        pattern = r""".*?XY\((\d+)\).*?"""
        try:
            count = int(re.match(pattern, self._dimensions).group(1))
        except AttributeError:
            return 1
        else:
            return count

    @property
    def _time_index_count(self):
        """
        The number of cycles.
        :rtype: int
        """
        pattern = r""".*?T'\((\d+)\).*?"""
        try:
            count = int(re.match(pattern, self._dimensions).group(1))
        except AttributeError:
            return 1
        else:
            return count

    @property
    def _z_level_count(self):
        """
        The number of different levels in the Z-plane.
        :rtype: int
        """
        pattern = r""".*?Z\((\d+)\).*?"""
        try:
            count = int(re.match(pattern, self._dimensions).group(1))
        except AttributeError:
            return 1
        else:
            return count

    @property
    def _image_count(self):
        """
        The total number of images in the ND2. Warning: this may be inaccurate as it includes "gap" images.
        :rtype: int
        """
        return self.metadata['ImageAttributes']['SLxImageAttributes']['uiSequenceCount']

    def _parse_metadata(self):
        """
        Reads all metadata.
        """
        for label in self._label_map.keys():
            if label.endswith("LV!") or "LV|" in label:
                data = self._read_chunk(self._label_map[label])
                stop = label.index("LV")
                self.metadata[label[:stop]] = self._read_metadata(data, 1)
    def _read_map(self):
        """
        Every label ends with an exclamation point; however, we can't directly search for those to find all the labels,
        as some of the bytes contain the value 33, which is the ASCII code for "!". So we iteratively find each label,
        grab the subsequent data (always 16 bytes long), advance to the next label and repeat.
        """
        self._file_handle.seek(-8, 2)
        chunk_map_start_location = struct.unpack("Q", self._file_handle.read(8))[0]
        self._file_handle.seek(chunk_map_start_location)
        raw_text = self._file_handle.read(-1)
        label_start = raw_text.index(Nd2Parser.CHUNK_MAP_START) + 32
        while True:
            data_start = raw_text.index("!", label_start) + 1
            key = raw_text[label_start: data_start]
            location, length = struct.unpack("QQ", raw_text[data_start: data_start + 16])
            if key == Nd2Parser.CHUNK_MAP_END:
                # We've reached the end of the chunk map
                break
            self._label_map[key] = location
            label_start = data_start + 16
    def _read_chunk(self, chunk_location):
        """
        Gets the data for a given chunk pointer
        """
        self._file_handle.seek(chunk_location)
        # The chunk metadata is always 16 bytes long
        chunk_metadata = self._file_handle.read(16)
        header, relative_offset, data_length = struct.unpack("IIQ", chunk_metadata)
        if header != Nd2Parser.CHUNK_HEADER:
            raise ValueError("The ND2 file seems to be corrupted.")
        # We start at the location of the chunk metadata, skip over the metadata, and then proceed to the
        # start of the actual data field, which is at some arbitrary place after the metadata.
        self._file_handle.seek(chunk_location + 16 + relative_offset)
        return self._file_handle.read(data_length)
    def _parse_unsigned_char(self, data):
        return struct.unpack("B", data.read(1))[0]

    def _parse_unsigned_int(self, data):
        return struct.unpack("I", data.read(4))[0]

    def _parse_unsigned_long(self, data):
        return struct.unpack("Q", data.read(8))[0]

    def _parse_double(self, data):
        return struct.unpack("d", data.read(8))[0]

    def _parse_string(self, data):
        value = data.read(2)
        while not value.endswith("\x00\x00"):
            # the string ends at the first instance of \x00\x00
            value += data.read(2)
        return value.decode("utf16")[:-1].encode("utf8")

    def _parse_char_array(self, data):
        array_length = struct.unpack("Q", data.read(8))[0]
        return array.array("B", data.read(array_length))

    def _parse_metadata_item(self, data):
        """
        Reads hierarchical data, analogous to a Python dict.
        """
        new_count, length = struct.unpack("<IQ", data.read(12))
        length -= data.tell() - self._cursor_position
        next_data_length = data.read(length)
        value = self._read_metadata(next_data_length, new_count)
        # Skip some offsets
        data.read(new_count * 8)
        return value

    def _get_value(self, data, data_type):
        """
        ND2s use various codes to indicate different data types, which we translate here.
        """
        parser = {1: self._parse_unsigned_char,
                  2: self._parse_unsigned_int,
                  3: self._parse_unsigned_int,
                  5: self._parse_unsigned_long,
                  6: self._parse_double,
                  8: self._parse_string,
                  9: self._parse_char_array,
                  11: self._parse_metadata_item}
        return parser[data_type](data)
    def _read_metadata(self, data, count):
        """
        Iterates over each element of some section of the metadata and parses it.
        """
        data = StringIO(data)
        metadata = {}
        for _ in xrange(count):
            self._cursor_position = data.tell()
            header = data.read(2)
            if not header:
                # We've reached the end of some hierarchy of data
                break
            data_type, name_length = map(ord, header)
            name = data.read(name_length * 2).decode("utf16")[:-1].encode("utf8")
            value = self._get_value(data, data_type)
            if name not in metadata.keys():
                metadata[name] = value
            else:
                if not isinstance(metadata[name], list):
                    # We have encountered this key exactly once before. Since we're seeing it again, we know we
                    # need to convert it to a list before proceeding.
                    metadata[name] = [metadata[name]]
                # We've encountered this key before so we're guaranteed to be dealing with a list. Thus we append
                # the value to the already-existing list.
                metadata[name].append(value)
        return metadata
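

# Minimal usage sketch (illustrative only; "experiment.nd2" is a placeholder path and the attributes
# accessed here are private implementation details of this module):
if __name__ == "__main__":
    nd2 = Nd2Parser("experiment.nd2")
    print(nd2._dimensions)
    print("channels: %d, fields of view: %d, z-levels: %d, time indexes: %d" % (
        nd2._channel_count, nd2._field_of_view_count, nd2._z_level_count, nd2._time_index_count))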