import re
import xmltodict
import six
import numpy as np
import warnings
from nd2reader2.common import read_chunk, read_array, read_metadata, parse_date, get_from_dict_if_exists
from nd2reader2.common_raw_metadata import parse_dimension_text_line, parse_if_not_none, parse_roi_shape, parse_roi_type, get_loops_from_data, determine_sampling_interval


class RawMetadata(object):
    """RawMetadata class parses and stores the raw metadata that is read from the binary file in dict format.
    """

    def __init__(self, fh, label_map):
        self._fh = fh
        self._label_map = label_map
        self._metadata_parsed = None

    @property
    def __dict__(self):
        """Returns the parsed metadata in dictionary form.

        Returns:
            dict: the parsed metadata
        """
        return self.get_parsed_metadata()
    def get_parsed_metadata(self):
        """Returns the parsed metadata in dictionary form.

        Returns:
            dict: the parsed metadata
        """
        if self._metadata_parsed is not None:
            return self._metadata_parsed

        frames_per_channel = self._parse_total_images_per_channel()
        self._metadata_parsed = {
            "height": parse_if_not_none(self.image_attributes, self._parse_height),
            "width": parse_if_not_none(self.image_attributes, self._parse_width),
            "date": parse_if_not_none(self.image_text_info, self._parse_date),
            "fields_of_view": self._parse_fields_of_view(),
            "frames": self._parse_frames(),
            "z_levels": self._parse_z_levels(),
            "z_coordinates": parse_if_not_none(self.z_data, self._parse_z_coordinates),
            "x_coordinates": parse_if_not_none(self.x_data, self._parse_x_coordinates),
            "y_coordinates": parse_if_not_none(self.y_data, self._parse_y_coordinates),
            "total_images_per_channel": frames_per_channel,
            "channels": self._parse_channels(),
            "pixel_microns": parse_if_not_none(self.image_calibration, self._parse_calibration),
            "camera_stage_angle": parse_if_not_none(self.image_metadata_sequence, self._parse_camera_angle),
            "camera_stage_matrix": parse_if_not_none(self.image_metadata_sequence, self._parse_camera_matrix)
        }

        self._set_default_if_not_empty('fields_of_view')
        self._set_default_if_not_empty('frames')
        self._metadata_parsed['num_frames'] = len(self._metadata_parsed['frames'])

        self._parse_roi_metadata()
        self._parse_experiment_metadata()
        self._parse_events()

        return self._metadata_parsed
    def _set_default_if_not_empty(self, entry):
        total_images = self._metadata_parsed['total_images_per_channel'] \
            if self._metadata_parsed['total_images_per_channel'] is not None else 0
        if len(self._metadata_parsed[entry]) == 0 and total_images > 0:
            # if the file is not empty, there is always at least one entry of this kind
            self._metadata_parsed[entry] = [0]
    def _parse_width_or_height(self, key):
        try:
            length = self.image_attributes[six.b('SLxImageAttributes')][six.b(key)]
        except KeyError:
            length = None

        return length

    def _parse_height(self):
        return self._parse_width_or_height('uiHeight')

    def _parse_width(self):
        return self._parse_width_or_height('uiWidth')

    def _parse_date(self):
        try:
            return parse_date(self.image_text_info[six.b('SLxImageTextInfo')])
        except KeyError:
            return None

    def _parse_calibration(self):
        try:
            return self.image_calibration.get(six.b('SLxCalibration'), {}).get(six.b('dCalibration'))
        except KeyError:
            return None

    def _parse_frames(self):
        """The number of cycles.

        Returns:
            list: list of all the frame numbers
        """
        return self._parse_dimension(r""".*?T'?\((\d+)\).*?""")
    def _parse_channels(self):
        """These are labels created by the NIS Elements user. Typically they are a short description of the
        filter cube used (e.g. 'bright field', 'GFP', etc.)

        Returns:
            list: the color channels
        """
        if self.image_metadata_sequence is None:
            return []

        try:
            metadata = self.image_metadata_sequence[six.b('SLxPictureMetadata')][six.b('sPicturePlanes')]
        except KeyError:
            return []

        channels = self._process_channels_metadata(metadata)

        return channels
    def _process_channels_metadata(self, metadata):
        validity = self._get_channel_validity_list(metadata)

        # Channel information is contained in dictionaries with the keys a0, a1...an where the number
        # indicates the order in which the channel is stored. So by sorting the dicts alphabetically
        # we get the correct order.
        channels = []
        for valid, (label, chan) in zip(validity, sorted(metadata[six.b('sPlaneNew')].items())):
            if not valid:
                continue
            if chan[six.b('sDescription')] is not None:
                channels.append(chan[six.b('sDescription')].decode("utf8"))
            else:
                channels.append('Unknown')
        return channels

    def _get_channel_validity_list(self, metadata):
        try:
            validity = self.image_metadata[six.b('SLxExperiment')][six.b('ppNextLevelEx')][six.b('')][0][
                six.b('ppNextLevelEx')][six.b('')][0][six.b('pItemValid')]
        except (KeyError, TypeError):
            # If none of the channels have been deleted, there is no validity list, so we just make one
            validity = [True for _ in metadata]
        return validity
    def _parse_fields_of_view(self):
        """The metadata contains information about fields of view, but it contains it even if some fields
        of view were cropped. We can't find anything that states which fields of view are actually
        in the image data, so we have to calculate it. There probably is something somewhere, since
        NIS Elements can figure it out, but we haven't found it yet.
        """
        return self._parse_dimension(r""".*?XY\((\d+)\).*?""")

    def _parse_z_levels(self):
        """The different levels in the Z-plane.

        If they are not available from the _parse_dimension function AND there
        is NO 'Dimensions: ' textinfo item in the file, we return a range with
        the length of z_coordinates if available, otherwise an empty list.

        Returns:
            list: the z levels, just a sequence from 0 to n.
        """
        # get the dimension text to check if we should apply the fallback or not
        dimension_text = self._parse_dimension_text()

        # this returns range(len(z_levels))
        z_levels = self._parse_dimension(r""".*?Z\((\d+)\).*?""", dimension_text)

        if len(z_levels) > 0 or len(dimension_text) > 0:
            # Either we have found the z_levels (first condition) so return, or
            # don't fallback, because Z is apparently not in Dimensions, so
            # there should be no z_levels
            return z_levels

        # Not available from dimension, get from z_coordinates
        z_levels = parse_if_not_none(self.z_data, self._parse_z_coordinates)

        if z_levels is None:
            # No z coordinates, return empty list
            return []

        warnings.warn("Z-levels details missing in metadata. Using Z-coordinates instead.")
        return range(len(z_levels))
    def _parse_z_coordinates(self):
        """The z coordinate in microns for each z plane.

        Returns:
            list: the z coordinates in microns
        """
        return self.z_data.tolist()

    def _parse_x_coordinates(self):
        """The x coordinate in microns for each frame.

        Returns:
            list: the x coordinates in microns
        """
        return self.x_data.tolist()

    def _parse_y_coordinates(self):
        """The y coordinate in microns for each frame.

        Returns:
            list: the y coordinates in microns
        """
        return self.y_data.tolist()
    def _parse_camera_angle(self):
        if self.image_metadata_sequence is None:
            return []

        try:
            metadata = self.image_metadata_sequence[six.b('SLxPictureMetadata')]
        except KeyError:
            return []

        try:
            return metadata[b'dAngle']
        except KeyError:
            return None

    def _parse_camera_matrix(self):
        if self.image_metadata_sequence is None:
            return []

        try:
            metadata = self.image_metadata_sequence[six.b('SLxPictureMetadata')][b'sPicturePlanes']
        except KeyError:
            return []

        validity = self._get_channel_validity_list(metadata)

        channels = []
        for valid, (label, chan) in zip(validity, sorted(metadata[b'sSampleSetting'].items())):
            if not valid:
                continue
            if chan[b'matCameraToStage'] is not None:
                mat_data = chan[b'matCameraToStage'][b'Data']
                mat_rows = chan[b'matCameraToStage'][b'Rows']
                mat_columns = chan[b'matCameraToStage'][b'Columns']
                mat = np.frombuffer(mat_data, dtype=np.float64).reshape([mat_rows, mat_columns])
                channels.append(mat)
            else:
                channels.append(None)
        return channels
    def _parse_dimension_text(self):
        """While there are metadata values that represent a lot of what we want to capture, they seem to be unreliable.
        Sometimes certain elements don't exist, or change their data type randomly. However, the human-readable text
        is always there and in the same exact format, so we just parse that instead.
        """
        dimension_text = six.b("")
        if self.image_text_info is None:
            return dimension_text

        try:
            textinfo = self.image_text_info[six.b('SLxImageTextInfo')].values()
        except KeyError:
            return dimension_text

        for line in textinfo:
            entry = parse_dimension_text_line(line)
            if entry is not None:
                return entry

        return dimension_text
    def _parse_dimension(self, pattern, dimension_text=None):
        dimension_text = self._parse_dimension_text() if dimension_text is None else dimension_text
        if dimension_text is None:
            return []

        if six.PY3:
            dimension_text = dimension_text.decode("utf8")
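        # The dimension line from the text info typically reads something like
        # "Dimensions: T(10) x XY(4) x Z(5)" (the exact fields depend on the acquisition),
        # so each pattern passed in pulls the count out of one of those terms,
        # e.g. r".*?XY\((\d+)\).*?" extracts 4 from "XY(4)".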
        match = re.match(pattern, dimension_text)
        if not match:
            return []

        count = int(match.group(1))
        return range(count)
    def _parse_total_images_per_channel(self):
        """The total number of images per channel.

        Warning: this may be inaccurate as it includes 'gap' images.
        """
        if self.image_attributes is None:
            return 0
        try:
            total_images = self.image_attributes[six.b('SLxImageAttributes')][six.b('uiSequenceCount')]
        except KeyError:
            total_images = None

        return total_images

    def _parse_roi_metadata(self):
        """Parse the raw ROI metadata.
        """
        if self.roi_metadata is None or not six.b('RoiMetadata_v1') in self.roi_metadata:
            return

        raw_roi_data = self.roi_metadata[six.b('RoiMetadata_v1')]

        if not six.b('m_vectGlobal_Size') in raw_roi_data:
            return

        number_of_rois = raw_roi_data[six.b('m_vectGlobal_Size')]

        roi_objects = []
        for i in range(number_of_rois):
            current_roi = raw_roi_data[six.b('m_vectGlobal_%d' % i)]
            roi_objects.append(self._parse_roi(current_roi))

        self._metadata_parsed['rois'] = roi_objects
    def _parse_roi(self, raw_roi_dict):
        """Extract the vector animation parameters from the ROI.

        This includes the position and size at the given timepoints.

        Args:
            raw_roi_dict: dictionary of raw roi metadata

        Returns:
            dict: the parsed ROI metadata
        """
        number_of_timepoints = raw_roi_dict[six.b('m_vectAnimParams_Size')]

        roi_dict = {
            "timepoints": [],
            "positions": [],
            "sizes": [],
            "shape": parse_roi_shape(raw_roi_dict[six.b('m_sInfo')][six.b('m_uiShapeType')]),
            "type": parse_roi_type(raw_roi_dict[six.b('m_sInfo')][six.b('m_uiInterpType')])
        }
        for i in range(number_of_timepoints):
            roi_dict = self._parse_vect_anim(roi_dict, raw_roi_dict[six.b('m_vectAnimParams_%d' % i)])

        # convert to NumPy arrays
  274. roi_dict["timepoints"] = np.array(roi_dict["timepoints"], dtype=np.float)
  275. roi_dict["positions"] = np.array(roi_dict["positions"], dtype=np.float)
  276. roi_dict["sizes"] = np.array(roi_dict["sizes"], dtype=np.float)
  277. return roi_dict
    def _parse_vect_anim(self, roi_dict, animation_dict):
        """
        Parses a ROI vector animation object and adds it to the global list of timepoints and positions.

        Args:
            roi_dict: the raw roi dictionary
            animation_dict: the raw animation dictionary

        Returns:
            dict: the parsed metadata
        """
        roi_dict["timepoints"].append(animation_dict[six.b('m_dTimeMs')])

        image_width = self._metadata_parsed["width"] * self._metadata_parsed["pixel_microns"]
        image_height = self._metadata_parsed["height"] * self._metadata_parsed["pixel_microns"]

        # positions are taken from the center of the image as a fraction of the half width/height of the image
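        # e.g. m_dCenterX == 0 places the ROI at the image centre (0.5 * image_width),
        # while +1 / -1 place it at the right / left edge of the image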
        position = np.array((0.5 * image_width * (1 + animation_dict[six.b('m_dCenterX')]),
                             0.5 * image_height * (1 + animation_dict[six.b('m_dCenterY')]),
                             animation_dict[six.b('m_dCenterZ')]))
        roi_dict["positions"].append(position)

        size_dict = animation_dict[six.b('m_sBoxShape')]

        # sizes are fractions of the half width/height of the image
        roi_dict["sizes"].append((size_dict[six.b('m_dSizeX')] * 0.25 * image_width,
                                  size_dict[six.b('m_dSizeY')] * 0.25 * image_height,
                                  size_dict[six.b('m_dSizeZ')]))
        return roi_dict
    def _parse_experiment_metadata(self):
        """Parse the metadata of the ND experiment
        """
        self._metadata_parsed['experiment'] = {
            'description': 'unknown',
            'loops': []
        }

        if self.image_metadata is None or six.b('SLxExperiment') not in self.image_metadata:
            return

        raw_data = self.image_metadata[six.b('SLxExperiment')]

        if six.b('wsApplicationDesc') in raw_data:
            self._metadata_parsed['experiment']['description'] = raw_data[six.b('wsApplicationDesc')].decode('utf8')

        if six.b('uLoopPars') in raw_data:
            self._metadata_parsed['experiment']['loops'] = self._parse_loop_data(raw_data[six.b('uLoopPars')])

    def _parse_loop_data(self, loop_data):
        """Parse the experimental loop data

        Args:
            loop_data: dictionary of experiment loops

        Returns:
            list: list of the parsed loops
        """
        loops = get_loops_from_data(loop_data)

        # take into account the absolute time in ms
        time_offset = 0

        parsed_loops = []

        for loop in loops:
            # duration of this loop
            duration = get_from_dict_if_exists('dDuration', loop) or 0
            interval = determine_sampling_interval(duration, loop)

            # if duration is not saved, infer it
            duration = self.get_duration_from_interval_and_loops(duration, interval, loop)

            # uiLoopType == 6 is a stimulation loop
            is_stimulation = get_from_dict_if_exists('uiLoopType', loop) == 6

            parsed_loop = {
                'start': time_offset,
                'duration': duration,
                'stimulation': is_stimulation,
                'sampling_interval': interval
            }

            parsed_loops.append(parsed_loop)

            # increase the time offset
            time_offset += duration

        return parsed_loops
    def get_duration_from_interval_and_loops(self, duration, interval, loop):
        """Infers the duration of the loop from the number of measurements and the interval

        Args:
            duration: loop duration in milliseconds
            interval: measurement interval in milliseconds
            loop: loop dictionary

        Returns:
            float: the loop duration in milliseconds
        """
        if duration == 0 and interval > 0:
            number_of_loops = get_from_dict_if_exists('uiCount', loop)
            number_of_loops = number_of_loops if number_of_loops is not None and number_of_loops > 0 else 1
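            # e.g. 20 measurements (uiCount == 20) taken every 500 ms give an inferred duration of 10000 ms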
            duration = interval * number_of_loops
        return duration
    def _parse_events(self):
        """Extract events
        """
        # list of event names manually extracted from an ND2 file that contains all manually
        # insertable events from NIS-Elements software (4.60.00 (Build 1171) Patch 02)
        event_names = {
            1: 'Autofocus',
            7: 'Command Executed',
            9: 'Experiment Paused',
            10: 'Experiment Resumed',
            11: 'Experiment Stopped by User',
            13: 'Next Phase Moved by User',
            14: 'Experiment Paused for Refocusing',
            16: 'External Stimulation',
            33: 'User 1',
            34: 'User 2',
            35: 'User 3',
            36: 'User 4',
            37: 'User 5',
            38: 'User 6',
            39: 'User 7',
            40: 'User 8',
            44: 'No Acquisition Phase Start',
            45: 'No Acquisition Phase End',
            46: 'Hardware Error',
            47: 'N-STORM',
            48: 'Incubation Info',
            49: 'Incubation Error'
        }

        self._metadata_parsed['events'] = []

        events = read_metadata(read_chunk(self._fh, self._label_map.image_events), 1)

        if events is None or six.b('RLxExperimentRecord') not in events:
            return

        events = events[six.b('RLxExperimentRecord')][six.b('pEvents')]

        if len(events) == 0:
            return

        for event in events[six.b('')]:
            event_info = {
                'index': event[six.b('I')],
                'time': event[six.b('T')],
                'type': event[six.b('M')],
            }
            if event_info['type'] in event_names.keys():
                event_info['name'] = event_names[event_info['type']]

            self._metadata_parsed['events'].append(event_info)
    @property
    def image_text_info(self):
        """Textual image information

        Returns:
            dict: containing the textual image info
        """
        return read_metadata(read_chunk(self._fh, self._label_map.image_text_info), 1)

    @property
    def image_metadata_sequence(self):
        """Image metadata of the sequence

        Returns:
            dict: containing the metadata
        """
        return read_metadata(read_chunk(self._fh, self._label_map.image_metadata_sequence), 1)

    @property
    def image_calibration(self):
        """The amount of pixels per micron.

        Returns:
            dict: pixels per micron
        """
        return read_metadata(read_chunk(self._fh, self._label_map.image_calibration), 1)

    @property
    def image_attributes(self):
        """Image attributes

        Returns:
            dict: containing the image attributes
        """
        return read_metadata(read_chunk(self._fh, self._label_map.image_attributes), 1)

    @property
    def x_data(self):
        """X data

        Returns:
            dict: x_data
        """
        return read_array(self._fh, 'double', self._label_map.x_data)

    @property
    def y_data(self):
        """Y data

        Returns:
            dict: y_data
        """
        return read_array(self._fh, 'double', self._label_map.y_data)

    @property
    def z_data(self):
        """Z data

        Returns:
            dict: z_data
        """
        try:
            return read_array(self._fh, 'double', self._label_map.z_data)
        except ValueError:
            # Depending on the file format/exact settings, this value is
            # sometimes saved as float instead of double
            return read_array(self._fh, 'float', self._label_map.z_data)

    @property
    def roi_metadata(self):
        """Contains information about the defined ROIs: shape, position and type (reference/background/stimulation).

        Returns:
            dict: ROI metadata dictionary
        """
        return read_metadata(read_chunk(self._fh, self._label_map.roi_metadata), 1)

    @property
    def pfs_status(self):
        """Perfect focus system (PFS) status

        Returns:
            dict: Perfect focus system (PFS) status
        """
        return read_array(self._fh, 'int', self._label_map.pfs_status)

    @property
    def pfs_offset(self):
        """Perfect focus system (PFS) offset

        Returns:
            dict: Perfect focus system (PFS) offset
        """
        return read_array(self._fh, 'int', self._label_map.pfs_offset)

    @property
    def camera_exposure_time(self):
        """Exposure time information

        Returns:
            dict: Camera exposure time
        """
        return read_array(self._fh, 'double', self._label_map.camera_exposure_time)

    @property
    def lut_data(self):
        """LUT information

        Returns:
            dict: LUT information
        """
        return xmltodict.parse(read_chunk(self._fh, self._label_map.lut_data))

    @property
    def grabber_settings(self):
        """Grabber settings

        Returns:
            dict: Acquisition settings
        """
        return xmltodict.parse(read_chunk(self._fh, self._label_map.grabber_settings))

    @property
    def custom_data(self):
        """Custom user data

        Returns:
            dict: custom user data
        """
        return xmltodict.parse(read_chunk(self._fh, self._label_map.custom_data))

    @property
    def app_info(self):
        """NIS Elements application info

        Returns:
            dict: (Version) information of the NIS Elements application
        """
        return xmltodict.parse(read_chunk(self._fh, self._label_map.app_info))

    @property
    def camera_temp(self):
        """Camera temperature

        Yields:
            float: the temperature
        """
        camera_temp = read_array(self._fh, 'double', self._label_map.camera_temp)
        if camera_temp:
            for temp in map(lambda x: round(x * 100.0, 2), camera_temp):
                yield temp

    @property
    def acquisition_times(self):
        """Acquisition times

        Yields:
            float: the acquisition time
        """
        acquisition_times = read_array(self._fh, 'double', self._label_map.acquisition_times)
        if acquisition_times:
            for acquisition_time in map(lambda x: x / 1000.0, acquisition_times):
                yield acquisition_time

    @property
    def image_metadata(self):
        """Image metadata

        Returns:
            dict: Extra image metadata
        """
        if self._label_map.image_metadata:
            return read_metadata(read_chunk(self._fh, self._label_map.image_metadata), 1)

    @property
    def image_events(self):
        """Image events

        Returns:
            dict: Image events
        """
        if self._label_map.image_metadata:
            for event in self._metadata_parsed["events"]:
                yield event
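
# Typical use (an illustrative sketch only, not part of the parser): a RawMetadata
# instance is normally constructed by the package's parser, which supplies the open
# binary file handle and the label map; the parsed dictionary is then consumed as:
#
#     parsed = raw_metadata.get_parsed_metadata()
#     print(parsed['width'], parsed['height'], parsed['channels'])
#     for loop in parsed['experiment']['loops']:
#         print(loop['start'], loop['duration'], loop['sampling_interval'])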