|
|
@@ -14,6 +14,7 @@ field_of_view = namedtuple('FOV', ['number', 'x', 'y', 'z', 'pfs_offset'])
|
|
|
class Nd2Parser(object): |
|
|
|
""" |
|
|
|
Reads .nd2 files, provides an interface to the metadata, and generates numpy arrays from the image data. |
|
|
|
You should never need to instantiate this class manually unless you're a developer.
|
|
|
|
|
|
|
""" |
|
|
|
CHUNK_HEADER = 0xabeceda |
|
|
@@ -37,20 +38,48 @@ class Nd2Parser(object):
|
|
|
self._fh = open(self._filename, "rb") |
|
|
|
return self._fh |
|
|
|
|
|
|
|
def _get_raw_image_data(self, image_set_number, channel_offset): |
|
|
|
chunk = self._label_map["ImageDataSeq|%d!" % image_set_number] |
|
|
|
def _get_raw_image_data(self, image_group_number, channel_offset): |
|
|
|
""" |
|
|
|
Reads the raw bytes and the timestamp of an image. |
|
|
|
|
|
|
|
:param image_group_number: groups are made of images with the same time index, field of view and z-level. |
|
|
|
:type image_group_number: int |
|
|
|
:param channel_offset: the offset in the array where the bytes for this image are found. |
|
|
|
:type channel_offset: int |
|
|
|
|
|
|
|
:return: (float, array.array()) or None
|
|
|
|
|
|
|
""" |
|
|
|
chunk = self._label_map["ImageDataSeq|%d!" % image_group_number] |
|
|
|
data = self._read_chunk(chunk) |
|
|
|
# All images in the same image group share the same timestamp! So if you have complicated image data, |
|
|
|
# your timestamps may not be entirely accurate. Practically speaking though, they'll only be off by a few |
|
|
|
# seconds unless you're doing something super weird. |
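# The timestamp is stored as a double in the first eight bytes of the chunk; the interleaved pixel data follows.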
|
|
|
timestamp = struct.unpack("d", data[:8])[0] |
|
|
|
# The images for the various channels are interleaved within each other. |
|
|
|
image_set_data = array.array("H", data) |
|
|
|
image_group_data = array.array("H", data) |
|
|
|
image_data_start = 4 + channel_offset |
|
|
|
image_data = image_set_data[image_data_start::self._channel_count] |
|
|
|
# The images for the various channels are interleaved within the same array. For example, the second image |
|
|
|
# of a four-image group will be composed of bytes 2, 6, 10, etc. If you understand why someone would design
|
|
|
# a data structure that way, please send the author of this library a message. |
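# Concretely: with four channels and a channel_offset of 1, image_data_start is 5 and the slice below picks up
# array elements 5, 9, 13, and so on (the first four 16-bit words hold the eight-byte timestamp).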
|
|
|
image_data = image_group_data[image_data_start::self._channel_count] |
|
|
|
# Skip images that are all zeros! This is important, since NIS Elements creates blank "gap" images if you |
|
|
|
# don't have the same number of images each cycle. We discovered this because we only took GFP images every |
|
|
|
# other cycle to reduce phototoxicity, but NIS Elements still allocated memory as if we were going to take them |
|
|
|
# every cycle.
|
|
|
if np.any(image_data): |
|
|
|
return timestamp, image_data |
|
|
|
return None |
|
|
|
|
|
|
|
@property |
|
|
|
def _dimensions(self): |
|
|
|
""" |
|
|
|
While there are metadata values that represent a lot of what we want to capture, they seem to be unreliable. |
|
|
|
Sometimes certain elements don't exist, or change their data type randomly. However, the human-readable text |
|
|
|
is always there and in the same exact format, so we just parse that instead. |
|
|
|
|
|
|
|
:rtype: str |
|
|
|
|
|
|
|
""" |
|
|
|
if self._dimension_text is None: |
|
|
|
for line in self.metadata['ImageTextInfo']['SLxImageTextInfo'].values(): |
|
|
|
if "Dimensions:" in line: |
|
|
@@ -68,6 +97,13 @@ class Nd2Parser(object):
|
|
|
|
|
|
|
@property |
|
|
|
def _channels(self): |
|
|
|
""" |
|
|
|
These are labels created by the NIS Elements user. Typically they are a short description of the filter cube
|
|
|
used (e.g. "bright field", "GFP", etc.) |
|
|
|
|
|
|
|
:rtype: str |
|
|
|
|
|
|
|
""" |
|
|
|
metadata = self.metadata['ImageMetadataSeq']['SLxPictureMetadata']['sPicturePlanes'] |
|
|
|
try: |
|
|
|
validity = self.metadata['ImageMetadata']['SLxExperiment']['ppNextLevelEx'][''][0]['ppNextLevelEx'][''][0]['pItemValid'] |
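# pItemValid appears to flag which of the configured channels are actually enabled.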
|
|
@@ -82,15 +118,26 @@ class Nd2Parser(object):
|
|
|
continue |
|
|
|
yield chan['sDescription'] |
|
|
|
|
|
|
|
def _calculate_image_set_number(self, time_index, fov, z_level): |
|
|
|
def _calculate_image_group_number(self, time_index, fov, z_level): |
|
|
|
""" |
|
|
|
Images are grouped together if they share the same time index, field of view, and z-level. |
|
|
|
|
|
|
|
:type time_index: int |
|
|
|
:type fov: int |
|
|
|
:type z_level: int |
|
|
|
|
|
|
|
:rtype: int |
|
|
|
|
|
|
|
""" |
|
|
|
return time_index * self._field_of_view_count * self._z_level_count + (fov * self._z_level_count + z_level) |
|
|
|
|
|
|
|
@property |
|
|
|
def _channel_offset(self): |
|
|
|
""" |
|
|
|
Image data is interleaved for each image set. That is, if there are four images in a set, the first image |
|
|
|
will consist of pixels 1, 5, 9, etc, the second will be pixels 2, 6, 10, and so forth. Why this would be the |
|
|
|
case is beyond me, but that's how it works. |
|
|
|
will consist of pixels 1, 5, 9, etc, the second will be pixels 2, 6, 10, and so forth. |
|
|
|
|
|
|
|
:rtype: dict
|
|
|
|
|
|
|
""" |
|
|
|
channel_offset = {} |
|
|
@@ -100,6 +147,12 @@ class Nd2Parser(object):
|
|
|
|
|
|
|
@property |
|
|
|
def _absolute_start(self): |
|
|
|
""" |
|
|
|
The date and time when acquisition began. |
|
|
|
|
|
|
|
:rtype: datetime.datetime() |
|
|
|
|
|
|
|
""" |
|
|
|
for line in self.metadata['ImageTextInfo']['SLxImageTextInfo'].values(): |
|
|
|
absolute_start_12 = None |
|
|
|
absolute_start_24 = None |
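# ND2s seem to record the start time in either a 12-hour or a 24-hour format, so both are tried.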
|
|
@@ -119,6 +172,12 @@ class Nd2Parser(object):
|
|
|
|
|
|
|
@property |
|
|
|
def _channel_count(self): |
|
|
|
""" |
|
|
|
The number of different channels used, including bright field. |
|
|
|
|
|
|
|
:rtype: int |
|
|
|
|
|
|
|
""" |
|
|
|
pattern = r""".*?λ\((\d+)\).*?""" |
|
|
|
try: |
|
|
|
count = int(re.match(pattern, self._dimensions).group(1)) |
|
|
@@ -135,6 +194,8 @@ class Nd2Parser(object):
|
|
|
in the image data, so we have to calculate it. There probably is something somewhere, since |
|
|
|
NIS Elements can figure it out, but we haven't found it yet. |
|
|
|
|
|
|
|
:rtype: int |
|
|
|
|
|
|
|
""" |
|
|
|
pattern = r""".*?XY\((\d+)\).*?""" |
|
|
|
try: |
|
|
@@ -162,6 +223,12 @@ class Nd2Parser(object):
|
|
|
|
|
|
|
@property |
|
|
|
def _z_level_count(self): |
|
|
|
""" |
|
|
|
The number of different levels in the Z-plane. |
|
|
|
|
|
|
|
:rtype: int |
|
|
|
|
|
|
|
""" |
|
|
|
pattern = r""".*?Z\((\d+)\).*?""" |
|
|
|
try: |
|
|
|
count = int(re.match(pattern, self._dimensions).group(1)) |
|
|
@@ -172,13 +239,19 @@ class Nd2Parser(object):
|
|
|
|
|
|
|
@property |
|
|
|
def _image_count(self): |
|
|
|
return self.metadata['ImageAttributes']['SLxImageAttributes']['uiSequenceCount'] |
|
|
|
""" |
|
|
|
The total number of images in the ND2. Warning: this may be inaccurate as it includes "gap" images. |
|
|
|
|
|
|
|
@property |
|
|
|
def _sequence_count(self): |
|
|
|
return self.metadata['ImageEvents']['RLxExperimentRecord']['uiCount'] |
|
|
|
:rtype: int |
|
|
|
|
|
|
|
""" |
|
|
|
return self.metadata['ImageAttributes']['SLxImageAttributes']['uiSequenceCount'] |
|
|
|
|
|
|
|
def _parse_metadata(self): |
|
|
|
""" |
|
|
|
Reads all metadata. |
|
|
|
|
|
|
|
""" |
|
|
|
for label in self._label_map.keys(): |
|
|
|
if label.endswith("LV!") or "LV|" in label: |
|
|
|
data = self._read_chunk(self._label_map[label]) |
|
|
@@ -248,6 +321,10 @@ class Nd2Parser(object):
|
|
|
return array.array("B", data.read(array_length)) |
|
|
|
|
|
|
|
def _parse_metadata_item(self, data): |
|
|
|
""" |
|
|
|
Reads hierarchical data, analogous to a Python dict. |
|
|
|
|
|
|
|
""" |
|
|
|
new_count, length = struct.unpack("<IQ", data.read(12)) |
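# The recorded length is measured from the start of the element, so subtract the bytes consumed since the
# cursor position was noted.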
|
|
|
length -= data.tell() - self._cursor_position |
|
|
|
next_data_length = data.read(length) |
|
|
@@ -257,6 +334,10 @@ class Nd2Parser(object):
|
|
|
return value |
|
|
|
|
|
|
|
def _get_value(self, data, data_type): |
|
|
|
""" |
|
|
|
ND2s use various codes to indicate different data types, which we translate here. |
|
|
|
|
|
|
|
""" |
|
|
|
parser = {1: self._parse_unsigned_char, |
|
|
|
2: self._parse_unsigned_int, |
|
|
|
3: self._parse_unsigned_int, |
|
|
@@ -268,12 +349,17 @@ class Nd2Parser(object):
|
|
|
return parser[data_type](data) |
|
|
|
|
|
|
|
def _read_metadata(self, data, count): |
|
|
|
""" |
|
|
|
Iterates over each element of some section of the metadata and parses it.
|
|
|
|
|
|
|
""" |
|
|
|
data = StringIO(data) |
|
|
|
metadata = {} |
|
|
|
for _ in xrange(count): |
|
|
|
self._cursor_position = data.tell() |
|
|
|
header = data.read(2) |
|
|
|
if not header: |
|
|
|
# We've reached the end of some hierarchy of data |
|
|
|
break |
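# The two header bytes hold the data type code and the length of the element's name; the name itself is
# stored as UTF-16 (two bytes per character), with the trailing character stripped before re-encoding as UTF-8.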
|
|
|
data_type, name_length = map(ord, header) |
|
|
|
name = data.read(name_length * 2).decode("utf16")[:-1].encode("utf8") |
|
|
|