import re

from nd2reader.common import read_chunk, read_array, read_metadata, parse_date, get_from_dict_if_exists
import xmltodict
import six
import numpy as np


class RawMetadata(object):
    """RawMetadata class parses and stores the raw metadata that is read from the binary file in dict format.
    """

    def __init__(self, fh, label_map):
        self._fh = fh
        self._label_map = label_map
        self._metadata_parsed = None

    @property
    def __dict__(self):
        """Returns the parsed metadata in dictionary form.

        Returns:
            dict: the parsed metadata
        """
        return self.get_parsed_metadata()

    def get_parsed_metadata(self):
        """Returns the parsed metadata in dictionary form.

        Returns:
            dict: the parsed metadata
        """
        if self._metadata_parsed is not None:
            return self._metadata_parsed

        frames_per_channel = self._parse_total_images_per_channel()
        self._metadata_parsed = {
            "height": self._parse_if_not_none(self.image_attributes, self._parse_height),
            "width": self._parse_if_not_none(self.image_attributes, self._parse_width),
            "date": self._parse_if_not_none(self.image_text_info, self._parse_date),
            "fields_of_view": self._parse_fields_of_view(),
            "frames": self._parse_frames(),
            "z_levels": self._parse_z_levels(),
            "total_images_per_channel": frames_per_channel,
            "channels": self._parse_channels(),
            "pixel_microns": self._parse_if_not_none(self.image_calibration, self._parse_calibration),
        }

        self._set_default_if_not_empty('fields_of_view')
        self._set_default_if_not_empty('frames')
        self._metadata_parsed['num_frames'] = len(self._metadata_parsed['frames'])

        self._parse_roi_metadata()
        self._parse_experiment_metadata()

        return self._metadata_parsed

    def _set_default_if_not_empty(self, entry):
        total_images = self._metadata_parsed['total_images_per_channel'] \
            if self._metadata_parsed['total_images_per_channel'] is not None else 0

        if len(self._metadata_parsed[entry]) == 0 and total_images > 0:
            # if the file is not empty, there is always at least one of this entry
            self._metadata_parsed[entry] = [0]

    @staticmethod
    def _parse_if_not_none(to_check, callback):
        if to_check is not None:
            return callback()
        return None

    def _parse_width_or_height(self, key):
        try:
            length = self.image_attributes[six.b('SLxImageAttributes')][six.b(key)]
        except KeyError:
            length = None

        return length

    def _parse_height(self):
        return self._parse_width_or_height('uiHeight')

    def _parse_width(self):
        return self._parse_width_or_height('uiWidth')

    def _parse_date(self):
        try:
            return parse_date(self.image_text_info[six.b('SLxImageTextInfo')])
        except KeyError:
            return None

    def _parse_calibration(self):
        try:
            return self.image_calibration.get(six.b('SLxCalibration'), {}).get(six.b('dCalibration'))
        except KeyError:
            return None

    def _parse_frames(self):
        """The number of cycles.

        Returns:
            list: list of all the frame numbers
        """
        return self._parse_dimension(r""".*?T'?\((\d+)\).*?""")

    def _parse_channels(self):
        """These are labels created by the NIS Elements user. Typically they may be a short description of the
        filter cube used (e.g. 'bright field', 'GFP', etc.).

        Returns:
            list: the color channels
        """
        if self.image_metadata_sequence is None:
            return []

        try:
            metadata = self.image_metadata_sequence[six.b('SLxPictureMetadata')][six.b('sPicturePlanes')]
        except KeyError:
            return []

        channels = self._process_channels_metadata(metadata)

        return channels

    def _process_channels_metadata(self, metadata):
        try:
            validity = self.image_metadata[six.b('SLxExperiment')][six.b('ppNextLevelEx')][six.b('')][0][
                six.b('ppNextLevelEx')][six.b('')][0][six.b('pItemValid')]
        except (KeyError, TypeError):
            # If none of the channels have been deleted, there is no validity list, so we just make one
            validity = [True for _ in metadata]

        # Channel information is contained in dictionaries with the keys a0, a1...an where the number
        # indicates the order in which the channel is stored. So by sorting the dicts alphabetically
        # we get the correct order.
        channels = []
        for (label, chan), valid in zip(sorted(metadata[six.b('sPlaneNew')].items()), validity):
            if not valid:
                continue
            channels.append(chan[six.b('sDescription')].decode("utf8"))

        return channels

    def _parse_fields_of_view(self):
        """The metadata contains information about fields of view, but it contains it even if some fields
        of view were cropped. We can't find anything that states which fields of view are actually
        in the image data, so we have to calculate it. There probably is something somewhere, since
        NIS Elements can figure it out, but we haven't found it yet.
        """
        return self._parse_dimension(r""".*?XY\((\d+)\).*?""")

    def _parse_z_levels(self):
        """The different levels in the Z-plane.

        Returns:
            list: the z levels, just a sequence from 0 to n.
        """
        return self._parse_dimension(r""".*?Z\((\d+)\).*?""")

    def _parse_dimension_text(self):
        """While there are metadata values that represent a lot of what we want to capture, they seem to be unreliable.
        Sometimes certain elements don't exist, or change their data type randomly. However, the human-readable text
        is always there and in the same exact format, so we just parse that instead.
        """
        dimension_text = six.b("")
        if self.image_text_info is None:
            return dimension_text

        try:
            textinfo = self.image_text_info[six.b('SLxImageTextInfo')].values()
        except KeyError:
            return dimension_text

        for line in textinfo:
            entry = self._parse_dimension_text_line(line)
            if entry is not None:
                return entry

        return dimension_text

    @staticmethod
    def _parse_dimension_text_line(line):
        if six.b("Dimensions:") in line:
            entries = line.split(six.b("\r\n"))
            for entry in entries:
                if entry.startswith(six.b("Dimensions:")):
                    return entry
        return None

    def _parse_dimension(self, pattern):
        dimension_text = self._parse_dimension_text()
        if dimension_text is None:
            return []

        if six.PY3:
            dimension_text = dimension_text.decode("utf8")

        match = re.match(pattern, dimension_text)
        if not match:
            return []

        count = int(match.group(1))

        return list(range(count))
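
    # Illustration of the dimension parsing above (the dimension line is hypothetical, not taken
    # from a real file): given a text-info line containing "Dimensions: T(3) x XY(4) x Z(5)",
    # _parse_frames() would return [0, 1, 2], _parse_fields_of_view() would return [0, 1, 2, 3]
    # and _parse_z_levels() would return [0, 1, 2, 3, 4].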

    def _parse_total_images_per_channel(self):
        """The total number of images per channel.

        Warning: this may be inaccurate as it includes 'gap' images.
        """
        if self.image_attributes is None:
            return 0
        try:
            total_images = self.image_attributes[six.b('SLxImageAttributes')][six.b('uiSequenceCount')]
        except KeyError:
            total_images = None

        return total_images

    def _parse_roi_metadata(self):
        """Parse the raw ROI metadata.
        """
        if self.roi_metadata is None or six.b('RoiMetadata_v1') not in self.roi_metadata:
            return

        raw_roi_data = self.roi_metadata[six.b('RoiMetadata_v1')]

        if six.b('m_vectGlobal_Size') not in raw_roi_data:
            return

        number_of_rois = raw_roi_data[six.b('m_vectGlobal_Size')]

        roi_objects = []
        for i in range(number_of_rois):
            current_roi = raw_roi_data[six.b('m_vectGlobal_%d' % i)]
            roi_objects.append(self._parse_roi(current_roi))

        self._metadata_parsed['rois'] = roi_objects

    def _parse_roi(self, raw_roi_dict):
        """Extract the vector animation parameters from the ROI.

        This includes the position and size at the given timepoints.

        Args:
            raw_roi_dict: dictionary of raw roi metadata

        Returns:
            dict: the parsed ROI metadata
        """
        number_of_timepoints = raw_roi_dict[six.b('m_vectAnimParams_Size')]

        roi_dict = {
            "timepoints": [],
            "positions": [],
            "sizes": [],
            "shape": self._parse_roi_shape(raw_roi_dict[six.b('m_sInfo')][six.b('m_uiShapeType')]),
            "type": self._parse_roi_type(raw_roi_dict[six.b('m_sInfo')][six.b('m_uiInterpType')])
        }
        for i in range(number_of_timepoints):
            roi_dict = self._parse_vect_anim(roi_dict, raw_roi_dict[six.b('m_vectAnimParams_%d' % i)])

        # convert to NumPy arrays
        roi_dict["timepoints"] = np.array(roi_dict["timepoints"], dtype=float)
        roi_dict["positions"] = np.array(roi_dict["positions"], dtype=float)
        roi_dict["sizes"] = np.array(roi_dict["sizes"], dtype=float)

        return roi_dict

    @staticmethod
    def _parse_roi_shape(shape):
        if shape == 3:
            return 'rectangle'
        elif shape == 9:
            return 'circle'

        return None

    @staticmethod
    def _parse_roi_type(type_no):
        if type_no == 4:
            return 'stimulation'
        elif type_no == 3:
            return 'reference'
        elif type_no == 2:
            return 'background'

        return None

    def _parse_vect_anim(self, roi_dict, animation_dict):
        """
        Parses a ROI vector animation object and adds it to the global list of timepoints and positions.

        Args:
            roi_dict: the raw roi dictionary
            animation_dict: the raw animation dictionary

        Returns:
            dict: the parsed metadata
        """
        roi_dict["timepoints"].append(animation_dict[six.b('m_dTimeMs')])

        image_width = self._metadata_parsed["width"] * self._metadata_parsed["pixel_microns"]
        image_height = self._metadata_parsed["height"] * self._metadata_parsed["pixel_microns"]

        # positions are taken from the center of the image as a fraction of the half width/height of the image
        position = np.array((0.5 * image_width * (1 + animation_dict[six.b('m_dCenterX')]),
                             0.5 * image_height * (1 + animation_dict[six.b('m_dCenterY')]),
                             animation_dict[six.b('m_dCenterZ')]))
        roi_dict["positions"].append(position)

        size_dict = animation_dict[six.b('m_sBoxShape')]

        # sizes are fractions of the half width/height of the image
        roi_dict["sizes"].append((size_dict[six.b('m_dSizeX')] * 0.25 * image_width,
                                  size_dict[six.b('m_dSizeY')] * 0.25 * image_height,
                                  size_dict[six.b('m_dSizeZ')]))

        return roi_dict
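
    # Worked example of the conversion above, with hypothetical numbers: for a 512 x 512 px image
    # at 0.5 microns per pixel, image_width == image_height == 256 microns. A centre of
    # m_dCenterX == m_dCenterY == 0 then maps to (128, 128) microns (the image centre), and
    # m_dSizeX == 1.0 gives an ROI width of 0.25 * 256 == 64 microns.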

    def _parse_experiment_metadata(self):
        """Parse the metadata of the ND experiment
        """
        if self.image_metadata is None or six.b('SLxExperiment') not in self.image_metadata:
            return

        raw_data = self.image_metadata[six.b('SLxExperiment')]

        experimental_data = {
            'description': 'unknown',
            'loops': []
        }

        if six.b('wsApplicationDesc') in raw_data:
            experimental_data['description'] = raw_data[six.b('wsApplicationDesc')].decode('utf8')

        if six.b('uLoopPars') in raw_data:
            experimental_data['loops'] = self._parse_loop_data(raw_data[six.b('uLoopPars')])

        self._metadata_parsed['experiment'] = experimental_data

    @staticmethod
    def _get_loops_from_data(loop_data):
        loops = [loop_data]
        if six.b('uiPeriodCount') in loop_data and loop_data[six.b('uiPeriodCount')] > 0:
            # special ND experiment
            if six.b('pPeriod') not in loop_data:
                return []

            # take the first dictionary element, it contains all loop data
            loops = loop_data[six.b('pPeriod')][list(loop_data[six.b('pPeriod')].keys())[0]]

        return loops

    def _parse_loop_data(self, loop_data):
        """Parse the experimental loop data

        Args:
            loop_data: dictionary of experiment loops

        Returns:
            list: list of the parsed loops
        """
        loops = self._get_loops_from_data(loop_data)

        # take into account the absolute time in ms
        time_offset = 0

        parsed_loops = []

        for loop in loops:
            # duration of this loop
            duration = get_from_dict_if_exists('dDuration', loop) or 0

            # uiLoopType == 6 is a stimulation loop
            is_stimulation = get_from_dict_if_exists('uiLoopType', loop) == 6

            # sampling interval in ms
            interval = self._determine_sampling_interval(duration, loop)

            parsed_loop = {
                'start': time_offset,
                'duration': duration,
                'stimulation': is_stimulation,
                'sampling_interval': interval
            }

            parsed_loops.append(parsed_loop)

            # increase the time offset
            time_offset += duration

        return parsed_loops

    @staticmethod
    def _determine_sampling_interval(duration, loop):
        """Determines the loop sampling interval in milliseconds

        Args:
            duration: loop duration in milliseconds
            loop: loop dictionary

        Returns:
            float: the sampling interval in milliseconds
        """
        interval = get_from_dict_if_exists('dPeriod', loop)

        if interval is None or interval <= 0:
            # fall back to the average period difference if dPeriod is missing or invalid
            interval = get_from_dict_if_exists('dAvgPeriodDiff', loop)

        if interval is None or interval <= 0:
            # in some cases neither key is saved, so try to calculate the interval instead
            interval = RawMetadata._guess_sampling_from_loops(duration, loop)

        return interval

    @staticmethod
    def _guess_sampling_from_loops(duration, loop):
        """Guesses the sampling interval when neither period key is saved, by dividing the loop
        duration by the number of loops.

        Args:
            duration: the total duration of the loop
            loop: the raw loop data

        Returns:
            float: the guessed sampling interval in milliseconds
        """
        number_of_loops = get_from_dict_if_exists('uiCount', loop)
        number_of_loops = number_of_loops if number_of_loops is not None and number_of_loops > 0 else 1
        interval = duration / number_of_loops

        return interval
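
    # For instance (hypothetical numbers), a loop with dDuration == 10000 ms and uiCount == 20,
    # but no usable dPeriod or dAvgPeriodDiff entry, would be assigned a guessed sampling
    # interval of 10000 / 20 == 500 ms.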

    @property
    def image_text_info(self):
        """Textual image information

        Returns:
            dict: containing the textual image info
        """
        return read_metadata(read_chunk(self._fh, self._label_map.image_text_info), 1)

    @property
    def image_metadata_sequence(self):
        """Image metadata of the sequence

        Returns:
            dict: containing the metadata
        """
        return read_metadata(read_chunk(self._fh, self._label_map.image_metadata_sequence), 1)

    @property
    def image_calibration(self):
        """The pixel calibration, in microns per pixel.

        Returns:
            dict: the calibration metadata (microns per pixel)
        """
        return read_metadata(read_chunk(self._fh, self._label_map.image_calibration), 1)

    @property
    def image_attributes(self):
        """Image attributes

        Returns:
            dict: containing the image attributes
        """
        return read_metadata(read_chunk(self._fh, self._label_map.image_attributes), 1)

    @property
    def x_data(self):
        """X data

        Returns:
            dict: x_data
        """
        return read_array(self._fh, 'double', self._label_map.x_data)

    @property
    def y_data(self):
        """Y data

        Returns:
            dict: y_data
        """
        return read_array(self._fh, 'double', self._label_map.y_data)

    @property
    def z_data(self):
        """Z data

        Returns:
            dict: z_data
        """
        return read_array(self._fh, 'double', self._label_map.z_data)

    @property
    def roi_metadata(self):
        """Contains information about the defined ROIs: shape, position and type (reference/background/stimulation).

        Returns:
            dict: ROI metadata dictionary
        """
        return read_metadata(read_chunk(self._fh, self._label_map.roi_metadata), 1)

    @property
    def pfs_status(self):
        """Perfect focus system (PFS) status

        Returns:
            dict: Perfect focus system (PFS) status
        """
        return read_array(self._fh, 'int', self._label_map.pfs_status)

    @property
    def pfs_offset(self):
        """Perfect focus system (PFS) offset

        Returns:
            dict: Perfect focus system (PFS) offset
        """
        return read_array(self._fh, 'int', self._label_map.pfs_offset)

    @property
    def camera_exposure_time(self):
        """Exposure time information

        Returns:
            dict: Camera exposure time
        """
        return read_array(self._fh, 'double', self._label_map.camera_exposure_time)

    @property
    def lut_data(self):
        """LUT information

        Returns:
            dict: LUT information
        """
        return xmltodict.parse(read_chunk(self._fh, self._label_map.lut_data))

    @property
    def grabber_settings(self):
        """Grabber settings

        Returns:
            dict: Acquisition settings
        """
        return xmltodict.parse(read_chunk(self._fh, self._label_map.grabber_settings))

    @property
    def custom_data(self):
        """Custom user data

        Returns:
            dict: custom user data
        """
        return xmltodict.parse(read_chunk(self._fh, self._label_map.custom_data))

    @property
    def app_info(self):
        """NIS Elements application info

        Returns:
            dict: (version) information of the NIS Elements application
        """
        return xmltodict.parse(read_chunk(self._fh, self._label_map.app_info))

    @property
    def camera_temp(self):
        """Camera temperature

        Yields:
            float: the temperature
        """
        camera_temp = read_array(self._fh, 'double', self._label_map.camera_temp)
        if camera_temp:
            for temp in map(lambda x: round(x * 100.0, 2), camera_temp):
                yield temp

    @property
    def acquisition_times(self):
        """Acquisition times

        Yields:
            float: the acquisition time
        """
        acquisition_times = read_array(self._fh, 'double', self._label_map.acquisition_times)
        if acquisition_times:
            for acquisition_time in map(lambda x: x / 1000.0, acquisition_times):
                yield acquisition_time

    @property
    def image_metadata(self):
        """Image metadata

        Returns:
            dict: Extra image metadata
        """
        if self._label_map.image_metadata:
            return read_metadata(read_chunk(self._fh, self._label_map.image_metadata), 1)
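

# Usage sketch (illustrative only, not part of the original module). RawMetadata expects an open
# binary file handle and a label map; in nd2reader both normally come from the surrounding parser,
# so the label-map construction is elided here and simply assumed to exist.
#
#   with open("experiment.nd2", "rb") as fh:
#       label_map = ...  # built by nd2reader's label-map parsing (elided)
#       raw = RawMetadata(fh, label_map)
#       parsed = raw.get_parsed_metadata()
#       print(parsed["channels"], parsed["fields_of_view"], parsed["num_frames"])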