You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

527 lines
15 KiB

7 years ago
7 years ago
7 years ago
7 years ago
7 years ago
7 years ago
7 years ago
9 years ago
9 years ago
7 years ago
7 years ago
7 years ago
7 years ago
7 years ago
7 years ago
7 years ago
7 years ago
7 years ago
7 years ago
7 years ago
7 years ago
7 years ago
7 years ago
7 years ago
7 years ago
7 years ago
7 years ago
7 years ago
7 years ago
7 years ago
7 years ago
7 years ago
7 years ago
7 years ago
7 years ago
7 years ago
import functools
import re

import numpy as np
import six
import xmltodict

from nd2reader.common import read_chunk, read_array, read_metadata, parse_date
  6. def ignore_missing(func):
  7. """
  8. Ignore missing properties
  9. Args:
  10. func: function to decorate
  11. Returns:
  12. function: a wrapper function
  13. """
  14. def wrapper(*args, **kwargs):
  15. """
  16. Wrapper function to ignore missing class properties
  17. Args:
  18. *args:
  19. **kwargs:
  20. Returns:
  21. """
  22. try:
  23. return func(*args, **kwargs)
  24. except:
  25. return None
  26. return wrapper
  27. class RawMetadata(object):
  28. """
  29. RawMetadata class parses and stores the raw metadata that is read from the binary file in dict format
  30. """
  31. def __init__(self, fh, label_map):
  32. self._fh = fh
  33. self._label_map = label_map
  34. self._metadata_parsed = None
  35. @property
  36. def __dict__(self):
  37. """Returns the parsed metadata in dictionary form
  38. Returns:
  39. dict: the parsed metadata
  40. """
  41. return self.get_parsed_metadata()
  42. def get_parsed_metadata(self):
  43. """ Returns the parsed metadata in dictionary form
  44. Returns:
  45. dict: the parsed metadata
  46. """
  47. if self._metadata_parsed is not None:
  48. return self._metadata_parsed
  49. self._metadata_parsed = {
  50. "height": self.image_attributes[six.b('SLxImageAttributes')][six.b('uiHeight')],
  51. "width": self.image_attributes[six.b('SLxImageAttributes')][six.b('uiWidth')],
  52. "date": parse_date(self.image_text_info[six.b('SLxImageTextInfo')]),
  53. "fields_of_view": self._parse_fields_of_view(),
  54. "frames": self._parse_frames(),
  55. "z_levels": self._parse_z_levels(),
  56. "total_images_per_channel": self._parse_total_images_per_channel(),
  57. "channels": self._parse_channels(),
  58. "pixel_microns": self.image_calibration.get(six.b('SLxCalibration'), {}).get(six.b('dCalibration')),
  59. }
  60. self._metadata_parsed['num_frames'] = len(self._metadata_parsed['frames'])
  61. self._parse_roi_metadata()
  62. self._parse_experiment_metadata()
  63. return self._metadata_parsed
  64. def _parse_channels(self):
  65. """
  66. These are labels created by the NIS Elements user. Typically they may a short description of the filter cube
  67. used (e.g. "bright field", "GFP", etc.)
  68. Returns:
  69. list: the color channels
  70. """
  71. channels = []
  72. metadata = self.image_metadata_sequence[six.b('SLxPictureMetadata')][six.b('sPicturePlanes')]
  73. try:
  74. validity = self.image_metadata[six.b('SLxExperiment')][six.b('ppNextLevelEx')][six.b('')][0][
  75. six.b('ppNextLevelEx')][six.b('')][0][six.b('pItemValid')]
  76. except (KeyError, TypeError):
  77. # If none of the channels have been deleted, there is no validity list, so we just make one
  78. validity = [True for _ in metadata]
  79. # Channel information is contained in dictionaries with the keys a0, a1...an where the number
  80. # indicates the order in which the channel is stored. So by sorting the dicts alphabetically
  81. # we get the correct order.
  82. for (label, chan), valid in zip(sorted(metadata[six.b('sPlaneNew')].items()), validity):
  83. if not valid:
  84. continue
  85. channels.append(chan[six.b('sDescription')].decode("utf8"))
  86. return channels
  87. def _parse_fields_of_view(self):
  88. """
  89. The metadata contains information about fields of view, but it contains it even if some fields
  90. of view were cropped. We can't find anything that states which fields of view are actually
  91. in the image data, so we have to calculate it. There probably is something somewhere, since
  92. NIS Elements can figure it out, but we haven't found it yet.
  93. """
  94. return self._parse_dimension(r""".*?XY\((\d+)\).*?""")
  95. def _parse_frames(self):
  96. """The number of cycles.
  97. Returns:
  98. list: list of all the frame numbers
  99. """
  100. return self._parse_dimension(r""".*?T'?\((\d+)\).*?""")
  101. def _parse_z_levels(self):
  102. """The different levels in the Z-plane.
  103. Returns:
  104. list: the z levels, just a sequence from 0 to n.
  105. """
  106. return self._parse_dimension(r""".*?Z\((\d+)\).*?""")
  107. def _parse_dimension_text(self):
  108. """
  109. While there are metadata values that represent a lot of what we want to capture, they seem to be unreliable.
  110. Sometimes certain elements don't exist, or change their data type randomly. However, the human-readable text
  111. is always there and in the same exact format, so we just parse that instead.
  112. """
  113. dimension_text = six.b("")
  114. textinfo = self.image_text_info[six.b('SLxImageTextInfo')].values()
  115. for line in textinfo:
  116. if six.b("Dimensions:") in line:
  117. entries = line.split(six.b("\r\n"))
  118. for entry in entries:
  119. if entry.startswith(six.b("Dimensions:")):
  120. return entry
  121. return dimension_text
  122. def _parse_dimension(self, pattern):
  123. dimension_text = self._parse_dimension_text()
  124. if six.PY3:
  125. dimension_text = dimension_text.decode("utf8")
  126. match = re.match(pattern, dimension_text)
  127. if not match:
  128. return [0]
  129. count = int(match.group(1))
  130. return list(range(count))
  131. def _parse_total_images_per_channel(self):
  132. """The total number of images per channel.
  133. Warning: this may be inaccurate as it includes "gap" images.
  134. """
  135. return self.image_attributes[six.b('SLxImageAttributes')][six.b('uiSequenceCount')]
  136. def _parse_roi_metadata(self):
  137. """Parse the raw ROI metadata.
  138. """
  139. if self.roi_metadata is None or not six.b('RoiMetadata_v1') in self.roi_metadata:
  140. return
  141. raw_roi_data = self.roi_metadata[six.b('RoiMetadata_v1')]
  142. number_of_rois = raw_roi_data[six.b('m_vectGlobal_Size')]
  143. roi_objects = []
  144. for i in range(number_of_rois):
  145. current_roi = raw_roi_data[six.b('m_vectGlobal_%d' % i)]
  146. roi_objects.append(self._parse_roi(current_roi))
  147. self._metadata_parsed['rois'] = roi_objects
  148. def _parse_roi(self, raw_roi_dict):
  149. """Extract the vector animation parameters from the ROI.
  150. This includes the position and size at the given timepoints.
  151. Args:
  152. raw_roi_dict:
  153. Returns:
  154. """
  155. number_of_timepoints = raw_roi_dict[six.b('m_vectAnimParams_Size')]
  156. roi_dict = {
  157. "timepoints": [],
  158. "positions": [],
  159. "sizes": [],
  160. "shape": self._parse_roi_shape(raw_roi_dict[six.b('m_sInfo')][six.b('m_uiShapeType')]),
  161. "type": self._parse_roi_type(raw_roi_dict[six.b('m_sInfo')][six.b('m_uiInterpType')])
  162. }
  163. for i in range(number_of_timepoints):
  164. roi_dict = self._parse_vect_anim(roi_dict, raw_roi_dict[six.b('m_vectAnimParams_%d' % i)])
  165. # convert to NumPy arrays
  166. roi_dict["timepoints"] = np.array(roi_dict["timepoints"], dtype=np.float)
  167. roi_dict["positions"] = np.array(roi_dict["positions"], dtype=np.float)
  168. roi_dict["sizes"] = np.array(roi_dict["sizes"], dtype=np.float)
  169. return roi_dict
  170. @staticmethod
  171. def _parse_roi_shape(shape):
  172. if shape == 3:
  173. return 'rectangle'
  174. elif shape == 9:
  175. return 'circle'
  176. return None
  177. @staticmethod
  178. def _parse_roi_type(type_no):
  179. if type_no == 4:
  180. return 'stimulation'
  181. elif type_no == 3:
  182. return 'reference'
  183. elif type_no == 2:
  184. return 'background'
  185. return None
  186. def _parse_vect_anim(self, roi_dict, animation_dict):
  187. """
  188. Parses a ROI vector animation object and adds it to the global list of timepoints and positions.
  189. Args:
  190. roi_dict:
  191. animation_dict:
  192. Returns:
  193. """
  194. roi_dict["timepoints"].append(animation_dict[six.b('m_dTimeMs')])
  195. image_width = self._metadata_parsed["width"] * self._metadata_parsed["pixel_microns"]
  196. image_height = self._metadata_parsed["height"] * self._metadata_parsed["pixel_microns"]
  197. # positions are taken from the center of the image as a fraction of the half width/height of the image
  198. position = np.array((0.5 * image_width * (1 + animation_dict[six.b('m_dCenterX')]),
  199. 0.5 * image_height * (1 + animation_dict[six.b('m_dCenterY')]),
  200. animation_dict[six.b('m_dCenterZ')]))
  201. roi_dict["positions"].append(position)
  202. size_dict = animation_dict[six.b('m_sBoxShape')]
  203. # sizes are fractions of the half width/height of the image
  204. roi_dict["sizes"].append((size_dict[six.b('m_dSizeX')] * 0.25 * image_width,
  205. size_dict[six.b('m_dSizeY')] * 0.25 * image_height,
  206. size_dict[six.b('m_dSizeZ')]))
  207. return roi_dict
  208. def _parse_experiment_metadata(self):
  209. """Parse the metadata of the ND experiment
  210. """
  211. if not six.b('SLxExperiment') in self.image_metadata:
  212. return
  213. raw_data = self.image_metadata[six.b('SLxExperiment')]
  214. experimental_data = {
  215. 'description': 'unknown',
  216. 'loops': []
  217. }
  218. if six.b('wsApplicationDesc') in raw_data:
  219. experimental_data['description'] = raw_data[six.b('wsApplicationDesc')].decode('utf8')
  220. if six.b('uLoopPars') in raw_data:
  221. experimental_data['loops'] = self._parse_loop_data(raw_data[six.b('uLoopPars')])
  222. self._metadata_parsed['experiment'] = experimental_data
  223. @staticmethod
  224. def _parse_loop_data(loop_data):
  225. """
  226. Parse the experimental loop data
  227. Args:
  228. loop_data:
  229. Returns:
  230. """
  231. loops = [loop_data]
  232. if six.b('uiPeriodCount') in loop_data and loop_data[six.b('uiPeriodCount')] > 0:
  233. # special ND experiment
  234. if six.b('pPeriod') not in loop_data:
  235. return []
  236. # take the first dictionary element, it contains all loop data
  237. loops = loop_data[six.b('pPeriod')][list(loop_data[six.b('pPeriod')].keys())[0]]
  238. # take into account the absolute time in ms
  239. time_offset = 0
  240. parsed_loops = []
  241. for loop in loops:
  242. # duration of this loop
  243. duration = loop[six.b('dDuration')]
  244. # uiLoopType == 6 is a stimulation loop
  245. is_stimulation = False
  246. if six.b('uiLoopType') in loop:
  247. is_stimulation = loop[six.b('uiLoopType')] == 6
  248. # sampling interval in ms
  249. interval = loop[six.b('dAvgPeriodDiff')]
  250. parsed_loop = {
  251. 'start': time_offset,
  252. 'duration': duration,
  253. 'stimulation': is_stimulation,
  254. 'sampling_interval': interval
  255. }
  256. parsed_loops.append(parsed_loop)
  257. # increase the time offset
  258. time_offset += duration
  259. return parsed_loops
    @property
    @ignore_missing
    def image_text_info(self):
        """Parsed ImageTextInfo metadata chunk.

        Returns:
            dict: the parsed chunk contents, or None when the chunk cannot be
            read (via @ignore_missing).
        """
        return read_metadata(read_chunk(self._fh, self._label_map.image_text_info), 1)
    @property
    @ignore_missing
    def image_metadata_sequence(self):
        """Parsed ImageMetadataSequence chunk (per-picture plane info).

        Returns:
            dict: the parsed chunk contents, or None when the chunk cannot be
            read (via @ignore_missing).
        """
        return read_metadata(read_chunk(self._fh, self._label_map.image_metadata_sequence), 1)
    @property
    @ignore_missing
    def image_calibration(self):
        """Parsed ImageCalibration chunk; source of the 'pixel_microns' key.

        NOTE(review): the previous docstring claimed "float: pixels per
        micron", but this returns the parsed chunk dict — the scalar is read
        out of it via the dCalibration key in get_parsed_metadata().

        Returns:
            dict: the parsed calibration chunk, or None when it cannot be
            read (via @ignore_missing).
        """
        return read_metadata(read_chunk(self._fh, self._label_map.image_calibration), 1)
    @property
    @ignore_missing
    def image_attributes(self):
        """Parsed ImageAttributes chunk (image height/width, sequence count).

        Returns:
            dict: the parsed chunk contents, or None when the chunk cannot be
            read (via @ignore_missing).
        """
        return read_metadata(read_chunk(self._fh, self._label_map.image_attributes), 1)
    @property
    @ignore_missing
    def x_data(self):
        """Doubles read from the x_data chunk.

        Presumably per-image stage X coordinates — TODO confirm against the
        label map producer.

        Returns:
            array of doubles, or None when the chunk cannot be read.
        """
        return read_array(self._fh, 'double', self._label_map.x_data)
    @property
    @ignore_missing
    def y_data(self):
        """Doubles read from the y_data chunk.

        Presumably per-image stage Y coordinates — TODO confirm against the
        label map producer.

        Returns:
            array of doubles, or None when the chunk cannot be read.
        """
        return read_array(self._fh, 'double', self._label_map.y_data)
    @property
    @ignore_missing
    def z_data(self):
        """Doubles read from the z_data chunk.

        Presumably per-image stage Z coordinates — TODO confirm against the
        label map producer.

        Returns:
            array of doubles, or None when the chunk cannot be read.
        """
        return read_array(self._fh, 'double', self._label_map.z_data)
    @property
    @ignore_missing
    def roi_metadata(self):
        """Raw ROI metadata chunk.

        Contains information about the defined ROIs: shape, position and
        type (reference/background/stimulation).

        Returns:
            dict: ROI metadata dictionary, or None when the chunk cannot be
            read (via @ignore_missing).
        """
        return read_metadata(read_chunk(self._fh, self._label_map.roi_metadata), 1)
    @property
    @ignore_missing
    def pfs_status(self):
        """Ints read from the pfs_status chunk (Perfect Focus System status).

        Returns:
            array of ints, or None when the chunk cannot be read.
        """
        return read_array(self._fh, 'int', self._label_map.pfs_status)
    @property
    @ignore_missing
    def pfs_offset(self):
        """Ints read from the pfs_offset chunk (Perfect Focus System offset).

        Returns:
            array of ints, or None when the chunk cannot be read.
        """
        return read_array(self._fh, 'int', self._label_map.pfs_offset)
    @property
    @ignore_missing
    def camera_exposure_time(self):
        """Doubles read from the camera_exposure_time chunk.

        Units are not established by this code — TODO confirm (likely ms).

        Returns:
            array of doubles, or None when the chunk cannot be read.
        """
        return read_array(self._fh, 'double', self._label_map.camera_exposure_time)
    @property
    @ignore_missing
    def lut_data(self):
        """Look-up table data, parsed from the chunk's XML payload.

        Returns:
            dict: xmltodict parse of the chunk, or None when it cannot be
            read (via @ignore_missing).
        """
        return xmltodict.parse(read_chunk(self._fh, self._label_map.lut_data))
    @property
    @ignore_missing
    def grabber_settings(self):
        """Frame-grabber settings, parsed from the chunk's XML payload.

        Returns:
            dict: xmltodict parse of the chunk, or None when it cannot be
            read (via @ignore_missing).
        """
        return xmltodict.parse(read_chunk(self._fh, self._label_map.grabber_settings))
    @property
    @ignore_missing
    def custom_data(self):
        """Custom user data, parsed from the chunk's XML payload.

        Returns:
            dict: xmltodict parse of the chunk, or None when it cannot be
            read (via @ignore_missing).
        """
        return xmltodict.parse(read_chunk(self._fh, self._label_map.custom_data))
    @property
    @ignore_missing
    def app_info(self):
        """Application (NIS Elements) info, parsed from the chunk's XML payload.

        Returns:
            dict: xmltodict parse of the chunk, or None when it cannot be
            read (via @ignore_missing).
        """
        return xmltodict.parse(read_chunk(self._fh, self._label_map.app_info))
  369. @property
  370. @ignore_missing
  371. def camera_temp(self):
  372. """
  373. Yields:
  374. float: the temperature
  375. """
  376. camera_temp = read_array(self._fh, 'double', self._label_map.camera_temp)
  377. if camera_temp:
  378. for temp in map(lambda x: round(x * 100.0, 2), camera_temp):
  379. yield temp
  380. @property
  381. @ignore_missing
  382. def acquisition_times(self):
  383. """
  384. Yields:
  385. float: the acquisition time
  386. """
  387. acquisition_times = read_array(self._fh, 'double', self._label_map.acquisition_times)
  388. if acquisition_times:
  389. for acquisition_time in map(lambda x: x / 1000.0, acquisition_times):
  390. yield acquisition_time
    @property
    @ignore_missing
    def image_metadata(self):
        """Parsed global ImageMetadata chunk.

        Returns:
            dict: the parsed chunk, or None — implicitly when the label map
            has no image_metadata entry, or via @ignore_missing when the
            read fails.
        """
        if self._label_map.image_metadata:
            return read_metadata(read_chunk(self._fh, self._label_map.image_metadata), 1)