import re
import xmltodict
import six
import numpy as np
from nd2reader.common import read_chunk, read_array, read_metadata, parse_date, get_from_dict_if_exists
from nd2reader.common_raw_metadata import parse_dimension_text_line, parse_if_not_none, parse_roi_shape, \
    parse_roi_type, get_loops_from_data, determine_sampling_interval


class RawMetadata(object):
    """RawMetadata class parses and stores the raw metadata that is read from the binary file in dict format.
    """

    def __init__(self, fh, label_map):
        self._fh = fh
        self._label_map = label_map
        self._metadata_parsed = None

    @property
    def __dict__(self):
        """Returns the parsed metadata in dictionary form.

        Returns:
            dict: the parsed metadata
        """
        return self.get_parsed_metadata()

    def get_parsed_metadata(self):
        """Returns the parsed metadata in dictionary form.

        Returns:
            dict: the parsed metadata
        """
        if self._metadata_parsed is not None:
            return self._metadata_parsed

        frames_per_channel = self._parse_total_images_per_channel()
        self._metadata_parsed = {
            "height": parse_if_not_none(self.image_attributes, self._parse_height),
            "width": parse_if_not_none(self.image_attributes, self._parse_width),
            "date": parse_if_not_none(self.image_text_info, self._parse_date),
            "fields_of_view": self._parse_fields_of_view(),
            "frames": self._parse_frames(),
            "z_levels": self._parse_z_levels(),
            "total_images_per_channel": frames_per_channel,
            "channels": self._parse_channels(),
            "pixel_microns": parse_if_not_none(self.image_calibration, self._parse_calibration),
        }

        self._set_default_if_not_empty('fields_of_view')
        self._set_default_if_not_empty('frames')
        self._metadata_parsed['num_frames'] = len(self._metadata_parsed['frames'])

        self._parse_roi_metadata()
        self._parse_experiment_metadata()

        return self._metadata_parsed
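
    # Shape of the parsed dictionary for a hypothetical two-channel time lapse
    # (illustrative values only, not taken from a real file):
    #   {'height': 512, 'width': 512, 'date': datetime(...), 'fields_of_view': [0, 1],
    #    'frames': [0, 1, 2], 'z_levels': [0], 'total_images_per_channel': 6,
    #    'channels': ['BF', 'GFP'], 'pixel_microns': 0.22, 'num_frames': 3,
    #    'rois': [...], 'experiment': {'description': ..., 'loops': [...]}}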

    def _set_default_if_not_empty(self, entry):
        total_images = self._metadata_parsed['total_images_per_channel'] \
            if self._metadata_parsed['total_images_per_channel'] is not None else 0

        if len(self._metadata_parsed[entry]) == 0 and total_images > 0:
            # if the file is not empty, there is always at least one entry of this kind
            self._metadata_parsed[entry] = [0]

    def _parse_width_or_height(self, key):
        try:
            length = self.image_attributes[six.b('SLxImageAttributes')][six.b(key)]
        except KeyError:
            length = None

        return length

    def _parse_height(self):
        return self._parse_width_or_height('uiHeight')

    def _parse_width(self):
        return self._parse_width_or_height('uiWidth')

    def _parse_date(self):
        try:
            return parse_date(self.image_text_info[six.b('SLxImageTextInfo')])
        except KeyError:
            return None

    def _parse_calibration(self):
        try:
            return self.image_calibration.get(six.b('SLxCalibration'), {}).get(six.b('dCalibration'))
        except KeyError:
            return None

    def _parse_frames(self):
        """The number of cycles.

        Returns:
            list: list of all the frame numbers
        """
        return self._parse_dimension(r""".*?T'?\((\d+)\).*?""")

    def _parse_channels(self):
        """These are labels created by the NIS Elements user. Typically they may be a short description of the
        filter cube used (e.g. 'bright field', 'GFP', etc.)

        Returns:
            list: the color channels
        """
        if self.image_metadata_sequence is None:
            return []

        try:
            metadata = self.image_metadata_sequence[six.b('SLxPictureMetadata')][six.b('sPicturePlanes')]
        except KeyError:
            return []

        channels = self._process_channels_metadata(metadata)

        return channels

    def _process_channels_metadata(self, metadata):
        validity = self._get_channel_validity_list(metadata)

        # Channel information is contained in dictionaries with the keys a0, a1...an where the number
        # indicates the order in which the channel is stored. So by sorting the dicts alphabetically
        # we get the correct order.
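        # Illustrative (hypothetical) layout:
        #   metadata[b'sPlaneNew'] == {b'a0': {b'sDescription': b'BF', ...},
        #                              b'a1': {b'sDescription': b'GFP', ...}}
        # so sorted(...) below yields the channels in the order a0, a1, ...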
        channels = []
        for valid, (label, chan) in zip(validity, sorted(metadata[six.b('sPlaneNew')].items())):
            if not valid:
                continue
            if chan[six.b('sDescription')] is not None:
                channels.append(chan[six.b('sDescription')].decode("utf8"))
            else:
                channels.append('Unknown')

        return channels

    def _get_channel_validity_list(self, metadata):
        try:
            validity = self.image_metadata[six.b('SLxExperiment')][six.b('ppNextLevelEx')][six.b('')][0][
                six.b('ppNextLevelEx')][six.b('')][0][six.b('pItemValid')]
        except (KeyError, TypeError):
            # If none of the channels have been deleted, there is no validity list, so we just make one
            validity = [True for _ in metadata]

        return validity

    def _parse_fields_of_view(self):
        """The metadata contains information about fields of view, but it contains it even if some fields
        of view were cropped. We can't find anything that states which fields of view are actually
        in the image data, so we have to calculate it. There probably is something somewhere, since
        NIS Elements can figure it out, but we haven't found it yet.
        """
        return self._parse_dimension(r""".*?XY\((\d+)\).*?""")

    def _parse_z_levels(self):
        """The different levels in the Z-plane.

        Returns:
            list: the z levels, just a sequence from 0 to n.
        """
        return self._parse_dimension(r""".*?Z\((\d+)\).*?""")

    def _parse_dimension_text(self):
        """While there are metadata values that represent a lot of what we want to capture, they seem to be unreliable.
        Sometimes certain elements don't exist, or change their data type randomly. However, the human-readable text
        is always there and in the same exact format, so we just parse that instead.
        """
        dimension_text = six.b("")
        if self.image_text_info is None:
            return dimension_text

        try:
            textinfo = self.image_text_info[six.b('SLxImageTextInfo')].values()
        except KeyError:
            return dimension_text

        for line in textinfo:
            entry = parse_dimension_text_line(line)
            if entry is not None:
                return entry

        return dimension_text

    def _parse_dimension(self, pattern):
        dimension_text = self._parse_dimension_text()
        if dimension_text is None:
            return []

        if six.PY3:
            dimension_text = dimension_text.decode("utf8")

        match = re.match(pattern, dimension_text)
        if not match:
            return []

        count = int(match.group(1))

        return list(range(count))
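
    # Example (illustrative values): if the dimension text found above is
    # b"Dimensions: T(3) x XY(4) x Z(5)", then _parse_frames() returns [0, 1, 2],
    # _parse_fields_of_view() returns [0, 1, 2, 3] and _parse_z_levels() returns [0, 1, 2, 3, 4].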

    def _parse_total_images_per_channel(self):
        """The total number of images per channel.

        Warning: this may be inaccurate as it includes 'gap' images.
        """
        if self.image_attributes is None:
            return 0
        try:
            total_images = self.image_attributes[six.b('SLxImageAttributes')][six.b('uiSequenceCount')]
        except KeyError:
            total_images = None

        return total_images

    def _parse_roi_metadata(self):
        """Parse the raw ROI metadata.
        """
        if self.roi_metadata is None or not six.b('RoiMetadata_v1') in self.roi_metadata:
            return

        raw_roi_data = self.roi_metadata[six.b('RoiMetadata_v1')]

        if not six.b('m_vectGlobal_Size') in raw_roi_data:
            return

        number_of_rois = raw_roi_data[six.b('m_vectGlobal_Size')]

        roi_objects = []
        for i in range(number_of_rois):
            current_roi = raw_roi_data[six.b('m_vectGlobal_%d' % i)]
            roi_objects.append(self._parse_roi(current_roi))

        self._metadata_parsed['rois'] = roi_objects
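
    # Illustrative (hypothetical) layout of the raw ROI chunk: RoiMetadata_v1 holds
    #   b'm_vectGlobal_Size' == 2
    # plus one dictionary per ROI under b'm_vectGlobal_0' and b'm_vectGlobal_1',
    # each of which is handed to _parse_roi() below.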

    def _parse_roi(self, raw_roi_dict):
        """Extract the vector animation parameters from the ROI.

        This includes the position and size at the given timepoints.

        Args:
            raw_roi_dict: dictionary of raw roi metadata

        Returns:
            dict: the parsed ROI metadata
        """
        number_of_timepoints = raw_roi_dict[six.b('m_vectAnimParams_Size')]

        roi_dict = {
            "timepoints": [],
            "positions": [],
            "sizes": [],
            "shape": parse_roi_shape(raw_roi_dict[six.b('m_sInfo')][six.b('m_uiShapeType')]),
            "type": parse_roi_type(raw_roi_dict[six.b('m_sInfo')][six.b('m_uiInterpType')])
        }
        for i in range(number_of_timepoints):
            roi_dict = self._parse_vect_anim(roi_dict, raw_roi_dict[six.b('m_vectAnimParams_%d' % i)])

        # convert to NumPy arrays
        roi_dict["timepoints"] = np.array(roi_dict["timepoints"], dtype=float)
        roi_dict["positions"] = np.array(roi_dict["positions"], dtype=float)
        roi_dict["sizes"] = np.array(roi_dict["sizes"], dtype=float)

        return roi_dict

    def _parse_vect_anim(self, roi_dict, animation_dict):
        """
        Parses a ROI vector animation object and adds it to the global list of timepoints and positions.

        Args:
            roi_dict: the raw roi dictionary
            animation_dict: the raw animation dictionary

        Returns:
            dict: the parsed metadata
        """
        roi_dict["timepoints"].append(animation_dict[six.b('m_dTimeMs')])

        image_width = self._metadata_parsed["width"] * self._metadata_parsed["pixel_microns"]
        image_height = self._metadata_parsed["height"] * self._metadata_parsed["pixel_microns"]

        # positions are taken from the center of the image as a fraction of the half width/height of the image
        position = np.array((0.5 * image_width * (1 + animation_dict[six.b('m_dCenterX')]),
                             0.5 * image_height * (1 + animation_dict[six.b('m_dCenterY')]),
                             animation_dict[six.b('m_dCenterZ')]))
        roi_dict["positions"].append(position)

        size_dict = animation_dict[six.b('m_sBoxShape')]

        # sizes are fractions of the half width/height of the image
        roi_dict["sizes"].append((size_dict[six.b('m_dSizeX')] * 0.25 * image_width,
                                  size_dict[six.b('m_dSizeY')] * 0.25 * image_height,
                                  size_dict[six.b('m_dSizeZ')]))

        return roi_dict
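
    # Worked example with hypothetical numbers: for a 512 x 512 px image at
    # pixel_microns == 0.5, image_width == 256 um. An ROI with m_dCenterX == 0.0
    # is then placed at x == 0.5 * 256 * (1 + 0.0) == 128 um (the image centre),
    # and m_dSizeX == 1.0 gives a box width of 1.0 * 0.25 * 256 == 64 um.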

    def _parse_experiment_metadata(self):
        """Parse the metadata of the ND experiment
        """
        self._metadata_parsed['experiment'] = {
            'description': 'unknown',
            'loops': []
        }

        if self.image_metadata is None or six.b('SLxExperiment') not in self.image_metadata:
            return

        raw_data = self.image_metadata[six.b('SLxExperiment')]

        if six.b('wsApplicationDesc') in raw_data:
            self._metadata_parsed['experiment']['description'] = raw_data[six.b('wsApplicationDesc')].decode('utf8')

        if six.b('uLoopPars') in raw_data:
            self._metadata_parsed['experiment']['loops'] = self._parse_loop_data(raw_data[six.b('uLoopPars')])

    def _parse_loop_data(self, loop_data):
        """Parse the experimental loop data

        Args:
            loop_data: dictionary of experiment loops

        Returns:
            list: list of the parsed loops
        """
        loops = get_loops_from_data(loop_data)

        # take into account the absolute time in ms
        time_offset = 0

        parsed_loops = []

        for loop in loops:
            # duration of this loop
            duration = get_from_dict_if_exists('dDuration', loop) or 0
            interval = determine_sampling_interval(duration, loop)

            # if duration is not saved, infer it
            duration = self.get_duration_from_interval_and_loops(duration, interval, loop)

            # uiLoopType == 6 is a stimulation loop
            is_stimulation = get_from_dict_if_exists('uiLoopType', loop) == 6

            parsed_loop = {
                'start': time_offset,
                'duration': duration,
                'stimulation': is_stimulation,
                'sampling_interval': interval
            }

            parsed_loops.append(parsed_loop)

            # increase the time offset
            time_offset += duration

        return parsed_loops

    def get_duration_from_interval_and_loops(self, duration, interval, loop):
        """Infers the duration of the loop from the number of measurements and the interval

        Args:
            duration: loop duration in milliseconds
            interval: measurement interval in milliseconds
            loop: loop dictionary

        Returns:
            float: the loop duration in milliseconds
        """
        if duration == 0 and interval > 0:
            number_of_loops = get_from_dict_if_exists('uiCount', loop)
            number_of_loops = number_of_loops if number_of_loops is not None and number_of_loops > 0 else 1
            duration = interval * number_of_loops

        return duration
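
    # Example (illustrative values): a loop with dDuration missing (duration == 0),
    # a sampling interval of 500 ms and uiCount == 10 is assigned an inferred
    # duration of 500 * 10 == 5000 ms.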

    @property
    def image_text_info(self):
        """Textual image information

        Returns:
            dict: containing the textual image info
        """
        return read_metadata(read_chunk(self._fh, self._label_map.image_text_info), 1)

    @property
    def image_metadata_sequence(self):
        """Image metadata of the sequence

        Returns:
            dict: containing the metadata
        """
        return read_metadata(read_chunk(self._fh, self._label_map.image_metadata_sequence), 1)

    @property
    def image_calibration(self):
        """The number of microns per pixel.

        Returns:
            dict: microns per pixel
        """
        return read_metadata(read_chunk(self._fh, self._label_map.image_calibration), 1)

    @property
    def image_attributes(self):
        """Image attributes

        Returns:
            dict: containing the image attributes
        """
        return read_metadata(read_chunk(self._fh, self._label_map.image_attributes), 1)

    @property
    def x_data(self):
        """X data

        Returns:
            dict: x_data
        """
        return read_array(self._fh, 'double', self._label_map.x_data)

    @property
    def y_data(self):
        """Y data

        Returns:
            dict: y_data
        """
        return read_array(self._fh, 'double', self._label_map.y_data)

    @property
    def z_data(self):
        """Z data

        Returns:
            dict: z_data
        """
        return read_array(self._fh, 'double', self._label_map.z_data)

    @property
    def roi_metadata(self):
        """Contains information about the defined ROIs: shape, position and type (reference/background/stimulation).

        Returns:
            dict: ROI metadata dictionary
        """
        return read_metadata(read_chunk(self._fh, self._label_map.roi_metadata), 1)

    @property
    def pfs_status(self):
        """Perfect focus system (PFS) status

        Returns:
            dict: Perfect focus system (PFS) status
        """
        return read_array(self._fh, 'int', self._label_map.pfs_status)

    @property
    def pfs_offset(self):
        """Perfect focus system (PFS) offset

        Returns:
            dict: Perfect focus system (PFS) offset
        """
        return read_array(self._fh, 'int', self._label_map.pfs_offset)

    @property
    def camera_exposure_time(self):
        """Exposure time information

        Returns:
            dict: Camera exposure time
        """
        return read_array(self._fh, 'double', self._label_map.camera_exposure_time)

    @property
    def lut_data(self):
        """LUT information

        Returns:
            dict: LUT information
        """
        return xmltodict.parse(read_chunk(self._fh, self._label_map.lut_data))

    @property
    def grabber_settings(self):
        """Grabber settings

        Returns:
            dict: Acquisition settings
        """
        return xmltodict.parse(read_chunk(self._fh, self._label_map.grabber_settings))

    @property
    def custom_data(self):
        """Custom user data

        Returns:
            dict: custom user data
        """
        return xmltodict.parse(read_chunk(self._fh, self._label_map.custom_data))

    @property
    def app_info(self):
        """NIS Elements application info

        Returns:
            dict: (version) information of the NIS Elements application
        """
        return xmltodict.parse(read_chunk(self._fh, self._label_map.app_info))

    @property
    def camera_temp(self):
        """Camera temperature

        Yields:
            float: the temperature
        """
        camera_temp = read_array(self._fh, 'double', self._label_map.camera_temp)
        if camera_temp:
            for temp in map(lambda x: round(x * 100.0, 2), camera_temp):
                yield temp

    @property
    def acquisition_times(self):
        """Acquisition times

        Yields:
            float: the acquisition time
        """
        acquisition_times = read_array(self._fh, 'double', self._label_map.acquisition_times)
        if acquisition_times:
            for acquisition_time in map(lambda x: x / 1000.0, acquisition_times):
                yield acquisition_time

    @property
    def image_metadata(self):
        """Image metadata

        Returns:
            dict: Extra image metadata
        """
        if self._label_map.image_metadata:
            return read_metadata(read_chunk(self._fh, self._label_map.image_metadata), 1)
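

# Minimal usage sketch (assumptions: `fh` is a binary file handle to an ND2 file and
# `label_map` is the label map that the surrounding nd2reader code builds for that
# file, e.g. in the Parser; neither is created by this module):
#
#     raw = RawMetadata(fh, label_map)
#     meta = raw.get_parsed_metadata()
#     print(meta["width"], meta["height"], meta["channels"], meta["pixel_microns"])
#     for roi in meta.get("rois", []):
#         print(roi["shape"], roi["type"], roi["positions"])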