import re
from nd2reader.common import read_chunk, read_array, read_metadata, parse_date
import xmltodict
import six
import numpy as np


class RawMetadata(object):
    """RawMetadata class parses and stores the raw metadata that is read from the binary file in dict format.
    """

    def __init__(self, fh, label_map):
        self._fh = fh
        self._label_map = label_map
        self._metadata_parsed = None

    @property
    def __dict__(self):
        """Returns the parsed metadata in dictionary form.

        Returns:
            dict: the parsed metadata
        """
        return self.get_parsed_metadata()

    def get_parsed_metadata(self):
        """Returns the parsed metadata in dictionary form.

        Returns:
            dict: the parsed metadata
        """
        if self._metadata_parsed is not None:
            return self._metadata_parsed

        self._metadata_parsed = {
            "height": self.image_attributes[six.b('SLxImageAttributes')][six.b('uiHeight')],
            "width": self.image_attributes[six.b('SLxImageAttributes')][six.b('uiWidth')],
            "date": parse_date(self.image_text_info[six.b('SLxImageTextInfo')]),
            "fields_of_view": self._parse_fields_of_view(),
            "frames": self._parse_frames(),
            "z_levels": self._parse_z_levels(),
            "total_images_per_channel": self._parse_total_images_per_channel(),
            "channels": self._parse_channels(),
            "pixel_microns": self.image_calibration.get(six.b('SLxCalibration'), {}).get(six.b('dCalibration')),
        }

        self._metadata_parsed['num_frames'] = len(self._metadata_parsed['frames'])

        self._parse_roi_metadata()
        self._parse_experiment_metadata()

        return self._metadata_parsed
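    # A minimal usage sketch, not part of the original module: in nd2reader the file handle and
    # label map are normally supplied by the surrounding parser, so the construction below is an
    # assumption for illustration only ('experiment.nd2' and `label_map` are hypothetical).
    #
    #     with open('experiment.nd2', 'rb') as fh:
    #         raw = RawMetadata(fh, label_map)
    #         parsed = raw.get_parsed_metadata()
    #         print(parsed['width'], parsed['height'], parsed['channels'])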
    def _parse_channels(self):
        """These are labels created by the NIS Elements user. Typically they are a short description of the
        filter cube used (e.g. 'bright field', 'GFP', etc.)

        Returns:
            list: the color channels
        """
        channels = []
        metadata = self.image_metadata_sequence[six.b('SLxPictureMetadata')][six.b('sPicturePlanes')]
        try:
            validity = self.image_metadata[six.b('SLxExperiment')][six.b('ppNextLevelEx')][six.b('')][0][
                six.b('ppNextLevelEx')][six.b('')][0][six.b('pItemValid')]
        except (KeyError, TypeError):
            # If none of the channels have been deleted, there is no validity list, so we just make one
            validity = [True for _ in metadata]

        # Channel information is contained in dictionaries with the keys a0, a1...an where the number
        # indicates the order in which the channel is stored. So by sorting the dicts alphabetically
        # we get the correct order.
        for (label, chan), valid in zip(sorted(metadata[six.b('sPlaneNew')].items()), validity):
            if not valid:
                continue
            channels.append(chan[six.b('sDescription')].decode("utf8"))

        return channels
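    # Illustrative note (assumed key layout): with sPlaneNew entries keyed b'a0', b'a1', b'a2',
    # one per channel, the alphabetical sort above visits the channels in storage order, and any
    # channel whose validity flag is False is skipped rather than appended.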
    def _parse_fields_of_view(self):
        """The metadata contains information about fields of view, but it contains it even if some fields
        of view were cropped. We can't find anything that states which fields of view are actually
        in the image data, so we have to calculate it. There probably is something somewhere, since
        NIS Elements can figure it out, but we haven't found it yet.
        """
        return self._parse_dimension(r""".*?XY\((\d+)\).*?""")

    def _parse_frames(self):
        """The number of cycles.

        Returns:
            list: list of all the frame numbers
        """
        return self._parse_dimension(r""".*?T'?\((\d+)\).*?""")

    def _parse_z_levels(self):
        """The different levels in the Z-plane.

        Returns:
            list: the z levels, just a sequence from 0 to n.
        """
        return self._parse_dimension(r""".*?Z\((\d+)\).*?""")

    def _parse_dimension_text(self):
        """While there are metadata values that represent a lot of what we want to capture, they seem to be unreliable.
        Sometimes certain elements don't exist, or change their data type randomly. However, the human-readable text
        is always there and in the same exact format, so we just parse that instead.
        """
        dimension_text = six.b("")
        textinfo = self.image_text_info[six.b('SLxImageTextInfo')].values()

        for line in textinfo:
            if six.b("Dimensions:") in line:
                entries = line.split(six.b("\r\n"))
                for entry in entries:
                    if entry.startswith(six.b("Dimensions:")):
                        return entry

        return dimension_text

    def _parse_dimension(self, pattern):
        dimension_text = self._parse_dimension_text()
        if six.PY3:
            dimension_text = dimension_text.decode("utf8")
        match = re.match(pattern, dimension_text)
        if not match:
            return [0]
        count = int(match.group(1))
        return list(range(count))
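    # Illustrative sketch (the exact dimension string is an assumption): given text such as
    #   "Dimensions: T(10) x XY(4) x Z(5)"
    # _parse_dimension(r".*?XY\((\d+)\).*?") returns list(range(4)) == [0, 1, 2, 3], while an
    # axis that does not appear in the text falls back to [0].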
    def _parse_total_images_per_channel(self):
        """The total number of images per channel.

        Warning: this may be inaccurate as it includes 'gap' images.
        """
        return self.image_attributes[six.b('SLxImageAttributes')][six.b('uiSequenceCount')]

    def _parse_roi_metadata(self):
        """Parse the raw ROI metadata.
        """
        if self.roi_metadata is None or six.b('RoiMetadata_v1') not in self.roi_metadata:
            return

        raw_roi_data = self.roi_metadata[six.b('RoiMetadata_v1')]
        number_of_rois = raw_roi_data[six.b('m_vectGlobal_Size')]

        roi_objects = []
        for i in range(number_of_rois):
            current_roi = raw_roi_data[six.b('m_vectGlobal_%d' % i)]
            roi_objects.append(self._parse_roi(current_roi))

        self._metadata_parsed['rois'] = roi_objects

    def _parse_roi(self, raw_roi_dict):
        """Extract the vector animation parameters from the ROI.

        This includes the position and size at the given timepoints.

        Args:
            raw_roi_dict: dictionary of raw roi metadata

        Returns:
            dict: the parsed ROI metadata
        """
        number_of_timepoints = raw_roi_dict[six.b('m_vectAnimParams_Size')]

        roi_dict = {
            "timepoints": [],
            "positions": [],
            "sizes": [],
            "shape": self._parse_roi_shape(raw_roi_dict[six.b('m_sInfo')][six.b('m_uiShapeType')]),
            "type": self._parse_roi_type(raw_roi_dict[six.b('m_sInfo')][six.b('m_uiInterpType')])
        }
        for i in range(number_of_timepoints):
            roi_dict = self._parse_vect_anim(roi_dict, raw_roi_dict[six.b('m_vectAnimParams_%d' % i)])

        # convert to NumPy arrays (the np.float alias is gone in recent NumPy, so use the builtin float dtype)
        roi_dict["timepoints"] = np.array(roi_dict["timepoints"], dtype=float)
        roi_dict["positions"] = np.array(roi_dict["positions"], dtype=float)
        roi_dict["sizes"] = np.array(roi_dict["sizes"], dtype=float)

        return roi_dict
    @staticmethod
    def _parse_roi_shape(shape):
        if shape == 3:
            return 'rectangle'
        elif shape == 9:
            return 'circle'

        return None

    @staticmethod
    def _parse_roi_type(type_no):
        if type_no == 4:
            return 'stimulation'
        elif type_no == 3:
            return 'reference'
        elif type_no == 2:
            return 'background'

        return None

    def _parse_vect_anim(self, roi_dict, animation_dict):
        """
        Parses a ROI vector animation object and adds it to the global list of timepoints and positions.

        Args:
            roi_dict: the raw roi dictionary
            animation_dict: the raw animation dictionary

        Returns:
            dict: the parsed metadata
        """
        roi_dict["timepoints"].append(animation_dict[six.b('m_dTimeMs')])

        image_width = self._metadata_parsed["width"] * self._metadata_parsed["pixel_microns"]
        image_height = self._metadata_parsed["height"] * self._metadata_parsed["pixel_microns"]

        # positions are taken from the center of the image as a fraction of the half width/height of the image
        position = np.array((0.5 * image_width * (1 + animation_dict[six.b('m_dCenterX')]),
                             0.5 * image_height * (1 + animation_dict[six.b('m_dCenterY')]),
                             animation_dict[six.b('m_dCenterZ')]))
        roi_dict["positions"].append(position)

        size_dict = animation_dict[six.b('m_sBoxShape')]

        # sizes are fractions of the half width/height of the image
        roi_dict["sizes"].append((size_dict[six.b('m_dSizeX')] * 0.25 * image_width,
                                  size_dict[six.b('m_dSizeY')] * 0.25 * image_height,
                                  size_dict[six.b('m_dSizeZ')]))
        return roi_dict
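    # Worked example of the position conversion above (numbers are illustrative, not from a real
    # file): for a 512 px wide image at 0.5 pixel_microns, image_width is 256 microns; a stored
    # m_dCenterX of -0.5 (centre-relative fractions run from -1 at the left edge to +1 at the
    # right edge) maps to 0.5 * 256 * (1 - 0.5) = 64 microns from the left edge.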
    def _parse_experiment_metadata(self):
        """Parse the metadata of the ND experiment
        """
        if six.b('SLxExperiment') not in self.image_metadata:
            return

        raw_data = self.image_metadata[six.b('SLxExperiment')]

        experimental_data = {
            'description': 'unknown',
            'loops': []
        }

        if six.b('wsApplicationDesc') in raw_data:
            experimental_data['description'] = raw_data[six.b('wsApplicationDesc')].decode('utf8')

        if six.b('uLoopPars') in raw_data:
            experimental_data['loops'] = self._parse_loop_data(raw_data[six.b('uLoopPars')])

        self._metadata_parsed['experiment'] = experimental_data

    @staticmethod
    def _parse_loop_data(loop_data):
        """Parse the experimental loop data

        Args:
            loop_data: dictionary of experiment loops

        Returns:
            list: list of the parsed loops
        """
        loops = [loop_data]
        if six.b('uiPeriodCount') in loop_data and loop_data[six.b('uiPeriodCount')] > 0:
            # special ND experiment
            if six.b('pPeriod') not in loop_data:
                return []

            # take the first dictionary element, it contains all loop data
            loops = loop_data[six.b('pPeriod')][list(loop_data[six.b('pPeriod')].keys())[0]]

        # take into account the absolute time in ms
        time_offset = 0

        parsed_loops = []

        for loop in loops:
            # duration of this loop
            duration = loop[six.b('dDuration')]

            # uiLoopType == 6 is a stimulation loop
            is_stimulation = False
            if six.b('uiLoopType') in loop:
                is_stimulation = loop[six.b('uiLoopType')] == 6

            # sampling interval in ms
            interval = loop[six.b('dAvgPeriodDiff')]

            parsed_loop = {
                'start': time_offset,
                'duration': duration,
                'stimulation': is_stimulation,
                'sampling_interval': interval
            }

            parsed_loops.append(parsed_loop)

            # increase the time offset
            time_offset += duration

        return parsed_loops
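    # Illustrative sketch (values are made up): a loop dictionary such as
    #   {six.b('dDuration'): 60000.0, six.b('uiLoopType'): 6, six.b('dAvgPeriodDiff'): 500.0}
    # is reported as {'start': 0, 'duration': 60000.0, 'stimulation': True, 'sampling_interval': 500.0},
    # and the following loop's 'start' picks up at the accumulated time_offset of 60000.0 ms.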
    @property
    def image_text_info(self):
        """Textual image information

        Returns:
            dict: containing the textual image info
        """
        return read_metadata(read_chunk(self._fh, self._label_map.image_text_info), 1)

    @property
    def image_metadata_sequence(self):
        """Image metadata of the sequence

        Returns:
            dict: containing the metadata
        """
        return read_metadata(read_chunk(self._fh, self._label_map.image_metadata_sequence), 1)
    @property
    def image_calibration(self):
        """The camera calibration, i.e. the number of microns per pixel.

        Returns:
            dict: microns per pixel
        """
        return read_metadata(read_chunk(self._fh, self._label_map.image_calibration), 1)
    @property
    def image_attributes(self):
        """Image attributes

        Returns:
            dict: containing the image attributes
        """
        return read_metadata(read_chunk(self._fh, self._label_map.image_attributes), 1)

    @property
    def x_data(self):
        """X data

        Returns:
            dict: x_data
        """
        return read_array(self._fh, 'double', self._label_map.x_data)

    @property
    def y_data(self):
        """Y data

        Returns:
            dict: y_data
        """
        return read_array(self._fh, 'double', self._label_map.y_data)

    @property
    def z_data(self):
        """Z data

        Returns:
            dict: z_data
        """
        return read_array(self._fh, 'double', self._label_map.z_data)

    @property
    def roi_metadata(self):
        """Contains information about the defined ROIs: shape, position and type (reference/background/stimulation).

        Returns:
            dict: ROI metadata dictionary
        """
        return read_metadata(read_chunk(self._fh, self._label_map.roi_metadata), 1)
    @property
    def pfs_status(self):
        """Perfect focus system (PFS) status

        Returns:
            dict: Perfect focus system (PFS) status
        """
        return read_array(self._fh, 'int', self._label_map.pfs_status)

    @property
    def pfs_offset(self):
        """Perfect focus system (PFS) offset

        Returns:
            dict: Perfect focus system (PFS) offset
        """
        return read_array(self._fh, 'int', self._label_map.pfs_offset)

    @property
    def camera_exposure_time(self):
        """Exposure time information

        Returns:
            dict: Camera exposure time
        """
        return read_array(self._fh, 'double', self._label_map.camera_exposure_time)

    @property
    def lut_data(self):
        """LUT information

        Returns:
            dict: LUT information
        """
        return xmltodict.parse(read_chunk(self._fh, self._label_map.lut_data))

    @property
    def grabber_settings(self):
        """Grabber settings

        Returns:
            dict: Acquisition settings
        """
        return xmltodict.parse(read_chunk(self._fh, self._label_map.grabber_settings))

    @property
    def custom_data(self):
        """Custom user data

        Returns:
            dict: custom user data
        """
        return xmltodict.parse(read_chunk(self._fh, self._label_map.custom_data))
    @property
    def app_info(self):
        """NIS Elements application info

        Returns:
            dict: (version) information of the NIS Elements application
        """
        return xmltodict.parse(read_chunk(self._fh, self._label_map.app_info))
    @property
    def camera_temp(self):
        """Camera temperature

        Yields:
            float: the temperature
        """
        camera_temp = read_array(self._fh, 'double', self._label_map.camera_temp)
        if camera_temp:
            for temp in map(lambda x: round(x * 100.0, 2), camera_temp):
                yield temp

    @property
    def acquisition_times(self):
        """Acquisition times

        Yields:
            float: the acquisition time
        """
        acquisition_times = read_array(self._fh, 'double', self._label_map.acquisition_times)
        if acquisition_times:
            for acquisition_time in map(lambda x: x / 1000.0, acquisition_times):
                yield acquisition_time
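    # Illustrative note: the raw acquisition times appear to be stored in milliseconds (hence the
    # division by 1000 above), so a stored value of 1500.0 is yielded as 1.5 seconds.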
    @property
    def image_metadata(self):
        """Image metadata

        Returns:
            dict: Extra image metadata
        """
        if self._label_map.image_metadata:
            return read_metadata(read_chunk(self._fh, self._label_map.image_metadata), 1)