import struct
import array
from datetime import datetime
import re

import six

from nd2reader.exceptions import InvalidVersionError


def get_version(fh):
    """Determines what version the ND2 is.

    Args:
        fh: File handle of the .nd2 file

    Returns:
        tuple: Major and minor version

    """
    # the first 16 bytes seem to have no meaning, so we skip them
    fh.seek(16)

    # the next 38 bytes contain the string that we want to parse. Unlike most of the ND2, this is in UTF-8
    data = fh.read(38).decode("utf8")

    return parse_version(data)


def parse_version(data):
    """Parses a string with the version data in it.

    Args:
        data (unicode): the 19th through 54th byte of the ND2, representing the version

    Returns:
        tuple: Major and minor version

    """
    match = re.search(r"""^ND2 FILE SIGNATURE CHUNK NAME01!Ver(?P<major>\d)\.(?P<minor>\d)$""", data)

    if match:
        # We haven't seen a lot of ND2s but the ones we have seen conform to this
        return int(match.group('major')), int(match.group('minor'))

    raise InvalidVersionError("The version of the ND2 you specified is not supported.")

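# A hypothetical usage sketch (the file name and version number are invented for
# illustration): get_version() reads the 38-byte signature string, which looks
# like "ND2 FILE SIGNATURE CHUNK NAME01!Ver3.0", and parse_version() extracts
# the major and minor version from it.
#
#     >>> parse_version(u"ND2 FILE SIGNATURE CHUNK NAME01!Ver3.0")
#     (3, 0)
#     >>> with open("example.nd2", "rb") as fh:  # hypothetical file
#     ...     major, minor = get_version(fh)
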
def read_chunk(fh, chunk_location):
    """Reads a piece of data given the location of its pointer.

    Args:
        fh: an open file handle to the ND2
        chunk_location (int): location to read

    Returns:
        bytes: the data at the chunk location

    """
    if chunk_location is None:
        return None

    fh.seek(chunk_location)

    # The chunk metadata is always 16 bytes long
    chunk_metadata = fh.read(16)
    header, relative_offset, data_length = struct.unpack("IIQ", chunk_metadata)

    if header != 0xabeceda:
        raise ValueError("The ND2 file seems to be corrupted.")

    # We start at the location of the chunk metadata, skip over the metadata, and then proceed to the
    # start of the actual data field, which is at some arbitrary place after the metadata.
    fh.seek(chunk_location + 16 + relative_offset)

    return fh.read(data_length)

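# A minimal sketch of the 16-byte chunk header that read_chunk() validates,
# built with struct.pack purely for illustration (the relative offset of 8 and
# data length of 100 are made up):
#
#     >>> len(struct.pack("IIQ", 0xabeceda, 8, 100))  # magic, offset, length
#     16
#
# Given such a header, read_chunk() would seek past the 16 header bytes plus the
# 8-byte relative offset and return the following 100 bytes of data.
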
def read_array(fh, kind, chunk_location):
    """
    Args:
        fh: File handle of the nd2 file
        kind: data type, can be one of 'double', 'int' or 'float'
        chunk_location: the location of the array chunk in the binary nd2 file

    Returns:
        array.array: an array of the data

    """
    kinds = {'double': 'd',
             'int': 'i',
             'float': 'f'}

    if kind not in kinds:
        raise ValueError('You attempted to read an array of an unknown type.')

    raw_data = read_chunk(fh, chunk_location)

    if raw_data is None:
        return None

    return array.array(kinds[kind], raw_data)

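# An illustrative sketch of the final step in read_array() (the values are
# invented): raw chunk bytes containing two native-endian doubles come back as
# an array.array of Python floats.
#
#     >>> raw = struct.pack("d", 1.5) + struct.pack("d", 2.5)
#     >>> array.array("d", raw).tolist()
#     [1.5, 2.5]
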
def _parse_unsigned_char(data):
    """
    Args:
        data: binary data

    Returns:
        char: the data converted to unsigned char

    """
    return struct.unpack("B", data.read(1))[0]


def _parse_unsigned_int(data):
    """
    Args:
        data: binary data

    Returns:
        int: the data converted to unsigned int

    """
    return struct.unpack("I", data.read(4))[0]


def _parse_unsigned_long(data):
    """
    Args:
        data: binary data

    Returns:
        long: the data converted to unsigned long

    """
    return struct.unpack("Q", data.read(8))[0]


def _parse_double(data):
    """
    Args:
        data: binary data

    Returns:
        double: the data converted to double

    """
    return struct.unpack("d", data.read(8))[0]


def _parse_string(data):
    """
    Args:
        data: binary data

    Returns:
        string: the data converted to string

    """
    value = data.read(2)
    while not value.endswith(six.b("\x00\x00")):
        # the string ends at the first instance of \x00\x00
        value += data.read(2)

    return value.decode("utf16")[:-1].encode("utf8")


def _parse_char_array(data):
    """
    Args:
        data: binary data

    Returns:
        array.array: the data converted to an array

    """
    array_length = struct.unpack("Q", data.read(8))[0]
    return array.array("B", data.read(array_length))

def parse_date(text_info):
    """
    The date and time when acquisition began.

    Args:
        text_info: the text that contains the date and time information

    Returns:
        datetime: the date and time of the acquisition

    """
    for line in text_info.values():
        line = line.decode("utf8")

        # ND2s seem to randomly switch between 12- and 24-hour representations.
        try:
            absolute_start = datetime.strptime(line, "%m/%d/%Y %H:%M:%S")
        except (TypeError, ValueError):
            try:
                absolute_start = datetime.strptime(line, "%m/%d/%Y %I:%M:%S %p")
            except (TypeError, ValueError):
                absolute_start = None

        # Only stop once a line actually parses as a timestamp; otherwise keep looking.
        if absolute_start is not None:
            return absolute_start

    return None

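# A hedged illustration of the two timestamp formats parse_date() tries; the
# dates themselves are made up:
#
#     >>> datetime.strptime("10/27/2015 13:04:05", "%m/%d/%Y %H:%M:%S")
#     datetime.datetime(2015, 10, 27, 13, 4, 5)
#     >>> datetime.strptime("10/27/2015 1:04:05 PM", "%m/%d/%Y %I:%M:%S %p")
#     datetime.datetime(2015, 10, 27, 13, 4, 5)
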
def _parse_metadata_item(data, cursor_position):
    """Reads hierarchical data, analogous to a Python dict.

    Args:
        data: the binary data that needs to be parsed
        cursor_position: the position in the binary nd2 file

    Returns:
        dict: a dictionary containing the metadata item

    """
    new_count, length = struct.unpack("<IQ", data.read(12))
    length -= data.tell() - cursor_position
    next_data_length = data.read(length)
    value = read_metadata(next_data_length, new_count)

    # Skip some offsets
    data.read(new_count * 8)

    return value


def _get_value(data, data_type, cursor_position):
    """ND2s use various codes to indicate different data types, which we translate here.

    Args:
        data: the binary data
        data_type: the data type (unsigned char = 1, unsigned int = 2 or 3, unsigned long = 5, double = 6,
            string = 8, char array = 9, metadata item = 11)
        cursor_position: the cursor position in the binary nd2 file

    Returns:
        mixed: the parsed value

    """
    parser = {1: _parse_unsigned_char,
              2: _parse_unsigned_int,
              3: _parse_unsigned_int,
              5: _parse_unsigned_long,
              6: _parse_double,
              8: _parse_string,
              9: _parse_char_array,
              11: _parse_metadata_item}
    return parser[data_type](data) if data_type < 11 else parser[data_type](data, cursor_position)


def read_metadata(data, count):
    """
    Iterates over each element of some section of the metadata and parses it.

    Args:
        data: the metadata in binary form
        count: the number of metadata elements

    Returns:
        dict: a dictionary containing the parsed metadata

    """
    if data is None:
        return None

    data = six.BytesIO(data)
    metadata = {}

    for _ in range(count):
        cursor_position = data.tell()
        header = data.read(2)

        if not header:
            # We've reached the end of some hierarchy of data
            break

        if six.PY3:
            header = header.decode("utf8")

        data_type, name_length = map(ord, header)
        name = data.read(name_length * 2).decode("utf16")[:-1].encode("utf8")
        value = _get_value(data, data_type, cursor_position)
        metadata = _add_to_metadata(metadata, name, value)

    return metadata

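# A minimal, hand-built metadata element to illustrate the layout read_metadata()
# expects, assuming Python 3 on a little-endian machine (the name "w" and the
# value 512 are invented): a two-byte header (type code, name length in
# characters), a null-terminated UTF-16 name, then the value encoded according
# to the type code (2 = unsigned int).
#
#     >>> element = b"\x02\x02" + b"w\x00\x00\x00" + b"\x00\x02\x00\x00"
#     >>> read_metadata(element, 1)
#     {b'w': 512}
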
def _add_to_metadata(metadata, name, value):
    """
    Add the name value pair to the metadata dict

    Args:
        metadata (dict): a dictionary containing the metadata
        name (string): the dictionary key
        value: the value to add

    Returns:
        dict: the new metadata dictionary

    """
    if name not in metadata:
        metadata[name] = value
    else:
        if not isinstance(metadata[name], list):
            # We have encountered this key exactly once before. Since we're seeing it again, we know we
            # need to convert it to a list before proceeding.
            metadata[name] = [metadata[name]]

        # We've encountered this key before so we're guaranteed to be dealing with a list. Thus we append
        # the value to the already-existing list.
        metadata[name].append(value)

    return metadata

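# Illustrative behavior of _add_to_metadata() when the same key appears more
# than once (the key and values are made up): the first occurrence is stored as
# a scalar, and later occurrences convert it to a list.
#
#     >>> m = _add_to_metadata({}, "channel", 1)
#     >>> m
#     {'channel': 1}
#     >>> _add_to_metadata(m, "channel", 2)
#     {'channel': [1, 2]}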