You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

376 lines
14 KiB

10 years ago
10 years ago
10 years ago
10 years ago
10 years ago
10 years ago
10 years ago
10 years ago
10 years ago
10 years ago
10 years ago
10 years ago
10 years ago
10 years ago
10 years ago
10 years ago
10 years ago
10 years ago
10 years ago
10 years ago
  1. # -*- coding: utf-8 -*-
  2. import array
  3. import numpy as np
  4. import struct
  5. import re
  6. from StringIO import StringIO
  7. from collections import namedtuple
  8. import logging
  9. from nd2reader.model import Channel
# Package-wide logger; DEBUG is forced here at import time.
# NOTE(review): a library would normally leave the level to the application
# (and attach a NullHandler) — confirm before changing, since callers may
# rely on the verbose output.
log = logging.getLogger("nd2reader")
log.setLevel(logging.DEBUG)
# One entry of the file's chunk map: absolute byte offset of a data chunk
# within the .nd2 file and its length in bytes (see Nd2Reader._extract_map_key).
chunk = namedtuple('Chunk', ['location', 'length'])
# Stage position of a single field of view (index, x/y/z coordinates, and
# Perfect Focus System offset).
# NOTE(review): not referenced in this portion of the file — presumably used
# elsewhere; verify before removing.
field_of_view = namedtuple('FOV', ['number', 'x', 'y', 'z', 'pfs_offset'])
  14. class BaseNd2(object):
  15. def __init__(self, filename):
  16. self._reader = Nd2Reader(filename)
  17. self._channel_offset = None
  18. @property
  19. def height(self):
  20. """
  21. :return: height of each image, in pixels
  22. """
  23. return self._metadata['ImageAttributes']['SLxImageAttributes']['uiHeight']
  24. @property
  25. def width(self):
  26. """
  27. :return: width of each image, in pixels
  28. """
  29. return self._metadata['ImageAttributes']['SLxImageAttributes']['uiWidth']
  30. @property
  31. def channels(self):
  32. metadata = self._metadata['ImageMetadataSeq']['SLxPictureMetadata']['sPicturePlanes']
  33. try:
  34. validity = self._metadata['ImageMetadata']['SLxExperiment']['ppNextLevelEx'][''][0]['ppNextLevelEx'][''][0]['pItemValid']
  35. except KeyError:
  36. # If none of the channels have been deleted, there is no validity list, so we just make one
  37. validity = [True for i in metadata]
  38. # Channel information is contained in dictionaries with the keys a0, a1...an where the number
  39. # indicates the order in which the channel is stored. So by sorting the dicts alphabetically
  40. # we get the correct order.
  41. for (label, chan), valid in zip(sorted(metadata['sPlaneNew'].items()), validity):
  42. if not valid:
  43. continue
  44. name = chan['sDescription']
  45. exposure_time = metadata['sSampleSetting'][label]['dExposureTime']
  46. camera = metadata['sSampleSetting'][label]['pCameraSetting']['CameraUserName']
  47. yield Channel(name, camera, exposure_time)
  48. @property
  49. def _image_count(self):
  50. return self._metadata['ImageAttributes']['SLxImageAttributes']['uiSequenceCount']
  51. @property
  52. def _sequence_count(self):
  53. return self._metadata['ImageEvents']['RLxExperimentRecord']['uiCount']
  54. @property
  55. def time_index_count(self):
  56. """
  57. The number of images for a given field of view, channel, and z_level combination.
  58. Effectively the number of frames.
  59. :rtype: int
  60. """
  61. return self._reader.time_index_count
  62. @property
  63. def z_level_count(self):
  64. return self._reader.z_level_count
  65. @property
  66. def field_of_view_count(self):
  67. """
  68. The metadata contains information about fields of view, but it contains it even if some fields
  69. of view were cropped. We can't find anything that states which fields of view are actually
  70. in the image data, so we have to calculate it. There probably is something somewhere, since
  71. NIS Elements can figure it out, but we haven't found it yet.
  72. """
  73. return self._reader.field_of_view_count
  74. @property
  75. def channel_count(self):
  76. return self._reader.channel_count
  77. @property
  78. def channel_offset(self):
  79. if self._channel_offset is None:
  80. self._channel_offset = {}
  81. for n, channel in enumerate(self.channels):
  82. self._channel_offset[channel.name] = n
  83. return self._channel_offset
  84. @property
  85. def _metadata(self):
  86. return self._reader.metadata
  87. def _calculate_image_set_number(self, time_index, fov, z_level):
  88. return time_index * self.field_of_view_count * self.z_level_count + (fov * self.z_level_count + z_level)
class Nd2Reader(object):
    """
    Reads .nd2 files, provides an interface to the metadata, and generates numpy arrays from the image data.
    """
    def __init__(self, filename):
        """
        :param filename: path to the .nd2 file on disk

        Eagerly reads the chunk map and parses all top-level metadata
        dictionaries; the file handle itself is opened lazily (see fh).
        """
        self._filename = filename
        self._file_handler = None
        self._chunk_map_start_location = None
        # Maps string labels from the chunk map to Chunk(location, length) tuples.
        self._label_map = {}
        # Parsed metadata dictionaries, keyed by the label prefix before "LV".
        self._metadata = {}
        self._read_map()
        self._parse_dict_data()
        self.__dimensions = None

    @property
    def _dimensions(self):
        """
        The single "Dimensions: ..." line from the textual image info metadata,
        cached after the first lookup. The various *_count properties regex
        their answers out of this line.
        """
        if self.__dimensions is None:
            # TODO: Replace this with a single regex
            for line in self._metadata['ImageTextInfo']['SLxImageTextInfo'].values():
                if "Dimensions:" in line:
                    metadata = line
                    break
            else:
                raise Exception("Could not parse metadata dimensions!")
            # The matched value is a multi-line blob; isolate the one line that
            # actually starts with "Dimensions:".
            for line in metadata.split("\r\n"):
                if line.startswith("Dimensions:"):
                    self.__dimensions = line
                    break
        return self.__dimensions

    @property
    def fh(self):
        """
        Lazily-opened binary file handle for the .nd2 file. Shared by all
        readers below, which seek it freely.
        """
        if self._file_handler is None:
            self._file_handler = open(self._filename, "rb")
        return self._file_handler

    @property
    def time_index_count(self):
        """
        The number of images for a given field of view, channel, and z_level combination.
        Effectively the number of frames.

        :rtype: int
        """
        # Pulls n out of "T'(n)" in the Dimensions line.
        pattern = r""".*?T'\((\d+)\).*?"""
        return int(re.match(pattern, self._dimensions).group(1))

    @property
    def z_level_count(self):
        """
        :rtype: int

        Pulls n out of "Z(n)" in the Dimensions line.
        """
        pattern = r""".*?Z\((\d+)\).*?"""
        return int(re.match(pattern, self._dimensions).group(1))

    @property
    def field_of_view_count(self):
        """
        The metadata contains information about fields of view, but it contains it even if some fields
        of view were cropped. We can't find anything that states which fields of view are actually
        in the image data, so we have to calculate it. There probably is something somewhere, since
        NIS Elements can figure it out, but we haven't found it yet.
        """
        # Pulls n out of "XY(n)" in the Dimensions line.
        pattern = r""".*?XY\((\d+)\).*?"""
        return int(re.match(pattern, self._dimensions).group(1))

    @property
    def channel_count(self):
        """
        :rtype: int

        Pulls n out of "λ(n)" in the Dimensions line (the literal lambda
        character is how NIS Elements denotes the wavelength/channel axis).
        """
        pattern = r""".*?λ\((\d+)\).*?"""
        return int(re.match(pattern, self._dimensions).group(1))

    def get_raw_image_data(self, image_set_number, channel_offset):
        """
        :param image_set_number: flat index of the image set within the file
        :param channel_offset: interleave position of the desired channel
        :return: (timestamp, image_data) where image_data is an array of
                 unsigned 16-bit pixel values for the requested channel

        Note: this shadows the module-level `chunk` namedtuple with a local of
        the same name; the local here is a Chunk instance looked up by label.
        """
        chunk = self._label_map["ImageDataSeq|%d!" % image_set_number]
        data = self._read_chunk(chunk.location)
        # The first 8 bytes of the chunk are a double-precision timestamp.
        timestamp = struct.unpack("d", data[:8])[0]
        # The images for the various channels are interleaved within each other. Yes, this is an incredibly unintuitive and nonsensical way
        # to store data.
        image_data = array.array("H", data)
        # Skip the 4 uint16 entries (8 bytes) occupied by the timestamp, then
        # offset into the interleave; the stride below picks every
        # channel_count-th pixel, i.e. just this channel's pixels.
        image_data_start = 4 + channel_offset
        return timestamp, image_data[image_data_start::self.channel_count]

    def _parse_dict_data(self):
        """
        Decodes every top-level "LV"-encoded metadata chunk and stores the
        resulting dictionary in self._metadata under the label's prefix
        (the part before "LV").
        """
        # TODO: Don't like this name
        for label in self._top_level_dict_labels:
            chunk_location = self._label_map[label].location
            data = self._read_chunk(chunk_location)
            stop = label.index("LV")
            self._metadata[label[:stop]] = self.read_lv_encoding(data, 1)

    @property
    def metadata(self):
        """
        :return: the parsed metadata tree (dict of dicts), as built by
                 _parse_dict_data
        """
        return self._metadata

    @property
    def _top_level_dict_labels(self):
        """
        Yields the chunk-map labels that point at "LV"-encoded metadata
        dictionaries (labels ending in "LV!" or containing "LV|").
        """
        # TODO: I don't like this name either
        for label in self._label_map.keys():
            if label.endswith("LV!") or "LV|" in label:
                yield label

    def _read_map(self):
        """
        Every label ends with an exclamation point, however, we can't directly search for those to find all the labels
        as some of the bytes contain the value 33, which is the ASCII code for "!". So we iteratively find each label,
        grab the subsequent data (always 16 bytes long), advance to the next label and repeat.
        """
        raw_text = self._get_raw_chunk_map_text()
        label_start = self._find_first_label_offset(raw_text)
        while True:
            data_start = self._get_data_start(label_start, raw_text)
            label, value = self._extract_map_key(label_start, data_start, raw_text)
            if label == "ND2 CHUNK MAP SIGNATURE 0000001!":
                # We've reached the end of the chunk map
                break
            self._label_map[label] = value
            # Each entry's payload is exactly 16 bytes; the next label starts
            # immediately after it.
            label_start = data_start + 16

    @staticmethod
    def _find_first_label_offset(raw_text):
        """
        The chunk map starts with some number of (seemingly) useless bytes, followed
        by "ND2 FILEMAP SIGNATURE NAME 0001!". We return the location of the first character after this sequence,
        which is the actual beginning of the chunk map.
        """
        return raw_text.index("ND2 FILEMAP SIGNATURE NAME 0001!") + 32

    @staticmethod
    def _get_data_start(label_start, raw_text):
        """
        The data for a given label begins immediately after the first exclamation point
        """
        return raw_text.index("!", label_start) + 1

    @staticmethod
    def _extract_map_key(label_start, data_start, raw_text):
        """
        Chunk map entries are a string label of arbitrary length followed by 16 bytes of data, which represent
        the byte offset from the beginning of the file where that data can be found.
        """
        # The key keeps its trailing "!" delimiter.
        key = raw_text[label_start: data_start]
        # Two little-endian unsigned 64-bit values: absolute offset and length.
        location, length = struct.unpack("QQ", raw_text[data_start: data_start + 16])
        return key, chunk(location=location, length=length)

    @property
    def chunk_map_start_location(self):
        """
        The position in bytes from the beginning of the file where the chunk map begins.
        The chunk map is a series of string labels followed by the position (in bytes) of the respective data.
        """
        if self._chunk_map_start_location is None:
            # Put the cursor 8 bytes before the end of the file
            self.fh.seek(-8, 2)
            # Read the last 8 bytes of the file
            self._chunk_map_start_location = struct.unpack("Q", self.fh.read(8))[0]
        return self._chunk_map_start_location

    def _read_chunk(self, chunk_location):
        """
        Gets the data for a given chunk pointer
        """
        self.fh.seek(chunk_location)
        chunk_data = self._read_chunk_metadata()
        header, relative_offset, data_length = self._parse_chunk_metadata(chunk_data)
        return self._read_chunk_data(chunk_location, relative_offset, data_length)

    def _read_chunk_metadata(self):
        """
        Gets the chunks metadata, which is always 16 bytes
        """
        # Precondition: the file cursor is already positioned at the chunk
        # (see _read_chunk).
        return self.fh.read(16)

    def _read_chunk_data(self, chunk_location, relative_offset, data_length):
        """
        Reads the actual data for a given chunk
        """
        # We start at the location of the chunk metadata, skip over the metadata, and then proceed to the
        # start of the actual data field, which is at some arbitrary place after the metadata.
        self.fh.seek(chunk_location + 16 + relative_offset)
        return self.fh.read(data_length)

    @staticmethod
    def _parse_chunk_metadata(chunk_data):
        """
        Finds out everything about a given chunk. Every chunk begins with the same value, so if that's ever
        different we can assume the file has suffered some kind of damage.
        """
        header, relative_offset, data_length = struct.unpack("IIQ", chunk_data)
        if header != 0xabeceda:
            raise ValueError("The ND2 file seems to be corrupted.")
        return header, relative_offset, data_length

    def _get_raw_chunk_map_text(self):
        """
        Reads the entire chunk map and returns it as a string.
        """
        self.fh.seek(self.chunk_map_start_location)
        # read(-1) reads everything from the cursor to the end of the file.
        return self.fh.read(-1)

    @staticmethod
    def as_numpy_array(arr):
        """
        Wraps a buffer in a numpy array without copying.

        NOTE(review): no dtype is given, so numpy defaults to float64 — confirm
        this is intended for uint16 pixel data before relying on it.
        """
        return np.frombuffer(arr)

    def _z_level_count(self):
        """
        Counts z-levels by reading the "CustomData|Z!" chunk as an array of
        doubles and taking its length.

        NOTE(review): not called from this part of the file; z_level_count
        (the property) parses the Dimensions line instead.
        """
        name = "CustomData|Z!"
        st = self._read_chunk(self._label_map[name].location)
        res = array.array("d", st)
        return len(res)

    def read_lv_encoding(self, data, count):
        """
        Recursively decodes Nikon's proprietary "LV" metadata encoding into a
        dict. Each entry is a 2-byte header (type code, name length), a
        UTF-16 name, and a type-dependent value; type 11 is a nested
        dictionary decoded by recursion.

        :param data: raw chunk bytes (Python 2 str)
        :param count: number of entries to decode at this level
        :return: dict mapping entry names to values (repeated names collapse
                 into a list of values)

        Python 2 only: relies on `str` iteration yielding 1-char strings for
        ord(), on StringIO over str, and on str/bytes .decode/.encode round
        trips.
        """
        data = StringIO(data)
        res = {}
        # NOTE(review): total_count is incremented but never read — presumably
        # leftover debugging state.
        total_count = 0
        for c in range(count):
            lastpos = data.tell()
            total_count += 1
            hdr = data.read(2)
            if not hdr:
                # Ran out of data before decoding `count` entries.
                break
            typ = ord(hdr[0])
            # Name is stored as UTF-16 (2 bytes per character), terminated by
            # a null character that the [:-1] slice drops after decoding.
            bname = data.read(2*ord(hdr[1]))
            name = bname.decode("utf16")[:-1].encode("utf8")
            if typ == 1:
                # Single unsigned byte.
                value, = struct.unpack("B", data.read(1))
            elif typ in [2, 3]:
                # Unsigned 32-bit integer.
                value, = struct.unpack("I", data.read(4))
            elif typ == 5:
                # Unsigned 64-bit integer.
                value, = struct.unpack("Q", data.read(8))
            elif typ == 6:
                # 64-bit float.
                value, = struct.unpack("d", data.read(8))
            elif typ == 8:
                # UTF-16 string: read 2-byte units until the null terminator.
                value = data.read(2)
                while value[-2:] != "\x00\x00":
                    value += data.read(2)
                value = value.decode("utf16")[:-1].encode("utf8")
            elif typ == 9:
                # Length-prefixed raw byte array.
                cnt, = struct.unpack("Q", data.read(8))
                value = array.array("B", data.read(cnt))
            elif typ == 11:
                # Nested dictionary: entry count plus total byte length
                # (measured from the start of this entry), then the payload.
                newcount, length = struct.unpack("<IQ", data.read(12))
                length -= data.tell()-lastpos
                nextdata = data.read(length)
                value = self.read_lv_encoding(nextdata, newcount)
                # Skip some offsets
                data.read(newcount * 8)
            else:
                # NOTE(review): assert is stripped under `python -O`; an
                # explicit raise would survive optimization — confirm callers
                # don't depend on AssertionError before changing.
                assert 0, "%s hdr %x:%x unknown" % (name, ord(hdr[0]), ord(hdr[1]))
            if not name in res:
                res[name] = value
            else:
                # Repeated names accumulate into a list, preserving order.
                if not isinstance(res[name], list):
                    res[name] = [res[name]]
                res[name].append(value)
        # Sanity check: every byte of the chunk should have been consumed.
        x = data.read()
        assert not x, "skip %d %s" % (len(x), repr(x[:30]))
        return res