
simplified even more

zolfa-add_slices_loading
Jim Rybarski 10 years ago
commit de8915fd6b
2 changed files with 98 additions and 128 deletions
  1. nd2reader/__init__.py  +22 -42
  2. nd2reader/reader.py  +76 -86

nd2reader/__init__.py  +22 -42

@@ -1,16 +1,14 @@
# -*- coding: utf-8 -*-
from collections import namedtuple
from nd2reader.model import Channel
from datetime import datetime
import logging
from nd2reader.model import Image, ImageSet
from nd2reader.reader import Nd2FileReader
chunk = namedtuple('Chunk', ['location', 'length'])
field_of_view = namedtuple('FOV', ['number', 'x', 'y', 'z', 'pfs_offset'])
print(__name__)
log = logging.getLogger(__name__)
log.addHandler(logging.StreamHandler())
log.setLevel(logging.WARN)
@@ -18,11 +16,6 @@ class Nd2(Nd2FileReader):
    def __init__(self, filename):
        super(Nd2, self).__init__(filename)
    def get_image(self, time_index, fov, channel_name, z_level):
        image_set_number = self._calculate_image_set_number(time_index, fov, z_level)
        timestamp, raw_image_data = self.get_raw_image_data(image_set_number, self.channel_offset[channel_name])
        return Image(timestamp, raw_image_data, fov, channel_name, z_level, self.height, self.width)
    def __iter__(self):
        """
        Just return every image in order (might not be exactly the order that the images were physically taken, but it will
@@ -57,20 +50,8 @@ class Nd2(Nd2FileReader):
        self._channel_offset = None
    @property
    def height(self):
        """
        :return: height of each image, in pixels
        """
        return self._metadata['ImageAttributes']['SLxImageAttributes']['uiHeight']
    @property
    def width(self):
        """
        :return: width of each image, in pixels
        """
        return self._metadata['ImageAttributes']['SLxImageAttributes']['uiWidth']
    def metadata(self):
        return self._metadata
    @property
    def channels(self):
@@ -103,22 +84,21 @@ class Nd2(Nd2FileReader):
            yield channel.name
    @property
    def _image_count(self):
        return self._metadata['ImageAttributes']['SLxImageAttributes']['uiSequenceCount']
    @property
    def _sequence_count(self):
        return self._metadata['ImageEvents']['RLxExperimentRecord']['uiCount']
    @property
    def channel_offset(self):
        if self._channel_offset is None:
            self._channel_offset = {}
            for n, channel in enumerate(self.channels):
                self._channel_offset[channel.name] = n
        return self._channel_offset
    def _calculate_image_set_number(self, time_index, fov, z_level):
        return time_index * self.field_of_view_count * self.z_level_count + (fov * self.z_level_count + z_level)
    def absolute_start(self):
        if self._absolute_start is None:
            for line in self._metadata['ImageTextInfo']['SLxImageTextInfo'].values():
                absolute_start_12 = None
                absolute_start_24 = None
                # ND2s seem to randomly switch between 12- and 24-hour representations.
                try:
                    absolute_start_24 = datetime.strptime(line, "%m/%d/%Y %H:%M:%S")
                except ValueError:
                    pass
                try:
                    absolute_start_12 = datetime.strptime(line, "%m/%d/%Y %I:%M:%S %p")
                except ValueError:
                    pass
                if not absolute_start_12 and not absolute_start_24:
                    continue
                self._absolute_start = absolute_start_12 if absolute_start_12 else absolute_start_24
        return self._absolute_start
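A quick illustration of the two timestamp formats the absolute_start loop above tries; the date strings here are made up.

from datetime import datetime

# 24-hour representation
datetime.strptime("01/31/2015 14:02:33", "%m/%d/%Y %H:%M:%S")
# 12-hour representation with an AM/PM marker
datetime.strptime("01/31/2015 02:02:33 PM", "%m/%d/%Y %I:%M:%S %p")
# Both parse to datetime(2015, 1, 31, 14, 2, 33); any line in SLxImageTextInfo
# that matches neither format raises ValueError and is skipped.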

nd2reader/reader.py  +76 -86

@@ -1,11 +1,15 @@
# -*- coding: utf-8 -*-
from abc import abstractproperty
import array
from collections import namedtuple
import numpy as np
import struct
import re
from StringIO import StringIO
from datetime import datetime
from nd2reader.model import Image
field_of_view = namedtuple('FOV', ['number', 'x', 'y', 'z', 'pfs_offset'])
class Nd2FileReader(object):
@@ -17,6 +21,7 @@ class Nd2FileReader(object):
        self._absolute_start = None
        self._filename = filename
        self._file_handler = None
        self._channel_offset = None
        self._chunk_map_start_location = None
        self._label_map = {}
        self._metadata = {}
@@ -24,6 +29,31 @@ class Nd2FileReader(object):
        self._parse_dict_data()
        self.__dimensions = None
    def get_image(self, time_index, fov, channel_name, z_level):
        image_set_number = self._calculate_image_set_number(time_index, fov, z_level)
        timestamp, raw_image_data = self.get_raw_image_data(image_set_number, self.channel_offset[channel_name])
        return Image(timestamp, raw_image_data, fov, channel_name, z_level, self.height, self.width)
    @abstractproperty
    def channels(self):
        raise NotImplemented
    @property
    def height(self):
        """
        :return: height of each image, in pixels
        """
        return self._metadata['ImageAttributes']['SLxImageAttributes']['uiHeight']
    @property
    def width(self):
        """
        :return: width of each image, in pixels
        """
        return self._metadata['ImageAttributes']['SLxImageAttributes']['uiWidth']
    @property
    def _dimensions(self):
        if self.__dimensions is None:
@@ -40,30 +70,6 @@ class Nd2FileReader(object):
                    break
        return self.__dimensions
    @property
    def absolute_start(self):
        if self._absolute_start is None:
            for line in self._metadata['ImageTextInfo']['SLxImageTextInfo'].values():
                absolute_start_12 = None
                absolute_start_24 = None
                # ND2s seem to randomly switch between 12- and 24-hour representations.
                try:
                    absolute_start_24 = datetime.strptime(line, "%m/%d/%Y %H:%M:%S")
                except ValueError:
                    pass
                try:
                    absolute_start_12 = datetime.strptime(line, "%m/%d/%Y %I:%M:%S %p")
                except ValueError:
                    pass
                if not absolute_start_12 and not absolute_start_24:
                    continue
                self._absolute_start = absolute_start_12 if absolute_start_12 else absolute_start_24
        return self._absolute_start
    @property
    def fh(self):
@@ -74,8 +80,11 @@ class Nd2FileReader(object):
    @property
    def time_index_count(self):
        """
        The number of images for a given field of view, channel, and z_level combination.
        Effectively the number of frames.
        The number of image sets. If images were acquired using some kind of cycle, all images at each step in the
        program will have the same timestamp (even though they may have varied by a few seconds in reality). For example,
        if you have four fields of view that you're constantly monitoring, and you take a bright field and GFP image of
        each, and you repeat that process 100 times, you'll have 800 individual images. But there will only be 400
        time indexes.
        :rtype: int
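Working through the example in the new docstring: four fields of view imaged once per pass through the acquisition program gives 4 × 100 = 400 image sets (time indexes), and with a bright field plus a GFP image in each set, 400 × 2 = 800 individual images.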
@@ -125,35 +134,49 @@ class Nd2FileReader(object):
        else:
            return count
    @property
    def _image_count(self):
        return self._metadata['ImageAttributes']['SLxImageAttributes']['uiSequenceCount']
    @property
    def _sequence_count(self):
        return self._metadata['ImageEvents']['RLxExperimentRecord']['uiCount']
    @property
    def channel_offset(self):
        """
        Image data is interleaved for each image set. That is, if there are four images in a set, the first image
        will consist of pixels 1, 5, 9, etc, the second will be pixels 2, 6, 10, and so forth. Why this would be the
        case is beyond me, but that's how it works.
        """
        if self._channel_offset is None:
            self._channel_offset = {}
            for n, channel in enumerate(self.channels):
                self._channel_offset[channel.name] = n
        return self._channel_offset
    def _calculate_image_set_number(self, time_index, fov, z_level):
        return time_index * self.field_of_view_count * self.z_level_count + (fov * self.z_level_count + z_level)
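A quick numeric check of the indexing formula above, using hypothetical counts of 4 fields of view and 1 z-level: time_index=3, fov=2, z_level=0 gives 3 * 4 * 1 + (2 * 1 + 0) = 14, so image sets are ordered by time index first, then field of view, then z-level.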
    def get_raw_image_data(self, image_set_number, channel_offset):
        chunk = self._label_map["ImageDataSeq|%d!" % image_set_number]
        data = self._read_chunk(chunk.location)
        data = self._read_chunk(chunk)
        timestamp = struct.unpack("d", data[:8])[0]
        # The images for the various channels are interleaved within each other. Yes, this is an incredibly unintuitive and nonsensical way
        # to store data.
        # The images for the various channels are interleaved within each other.
        image_data = array.array("H", data)
        image_data_start = 4 + channel_offset
        return timestamp, image_data[image_data_start::self.channel_count]
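A toy sketch of the de-interleaving slice used above, with made-up pixel values; the +4 in image_data_start presumably skips the four 16-bit entries taken up by the 8-byte timestamp at the front of the chunk.

import array

channel_count = 2
# Timestamp occupies the first four uint16 slots; pixel values then alternate channels.
data = array.array("H", [0, 0, 0, 0, 10, 90, 11, 91, 12, 92, 13, 93])
first_channel = data[4 + 0::channel_count]   # array('H', [10, 11, 12, 13])
second_channel = data[4 + 1::channel_count]  # array('H', [90, 91, 92, 93])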
    def _parse_dict_data(self):
        # TODO: Don't like this name
        for label in self._top_level_dict_labels:
            chunk_location = self._label_map[label].location
            data = self._read_chunk(chunk_location)
        for label in self._label_map.keys():
            if not label.endswith("LV!") or "LV|" in label:
                continue
            data = self._read_chunk(self._label_map[label])
            stop = label.index("LV")
            self._metadata[label[:stop]] = self.read_lv_encoding(data, 1)
    @property
    def metadata(self):
        return self._metadata
    @property
    def _top_level_dict_labels(self):
        # TODO: I don't like this name either
        for label in self._label_map.keys():
            if label.endswith("LV!") or "LV|" in label:
                yield label
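To make the new filter in _parse_dict_data concrete, with label names presumed from the metadata keys used elsewhere in this file:

label = "ImageAttributesLV!"                   # presumed top-level label
label.endswith("LV!") and "LV|" not in label   # True: this label is parsed
label[:label.index("LV")]                      # "ImageAttributes", the metadata key
"ImageMetadataSeqLV|0!"                        # hypothetical per-sequence label: now skipped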
    def _read_map(self):
        """
        Every label ends with an exclamation point, however, we can't directly search for those to find all the labels
@@ -171,13 +194,10 @@ class Nd2FileReader(object):
            data_start = raw_text.index("!", label_start) + 1
            key = raw_text[label_start: data_start]
            location, length = struct.unpack("QQ", raw_text[data_start: data_start + 16])
            label, value = key, chunk(location=location, length=length)
            if label == "ND2 CHUNK MAP SIGNATURE 0000001!":
            if key == "ND2 CHUNK MAP SIGNATURE 0000001!":
                # We've reached the end of the chunk map
                break
            self._label_map[label] = value
            self._label_map[key] = location
            label_start = data_start + 16
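A hand-built chunk map entry matching the parsing above (the label and numbers are made up): each key is a label terminated by "!", followed by two unsigned 64-bit integers giving the chunk's location and length; the new code keeps only the location.

import struct

raw_text = b"ImageDataSeq|0!" + struct.pack("QQ", 4096, 524288)
data_start = raw_text.index(b"!") + 1
key = raw_text[:data_start]                                       # b"ImageDataSeq|0!"
location, length = struct.unpack("QQ", raw_text[data_start:data_start + 16])
# location == 4096, length == 524288; _label_map[key] now stores just the location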
    def _read_chunk(self, chunk_location):
@@ -186,53 +206,23 @@ class Nd2FileReader(object):
        """
        self.fh.seek(chunk_location)
        chunk_data = self._read_chunk_metadata()
        header, relative_offset, data_length = self._parse_chunk_metadata(chunk_data)
        return self._read_chunk_data(chunk_location, relative_offset, data_length)
    def _read_chunk_metadata(self):
        """
        Gets the chunks metadata, which is always 16 bytes
        """
        return self.fh.read(16)
    def _read_chunk_data(self, chunk_location, relative_offset, data_length):
        """
        Reads the actual data for a given chunk
        """
        # The chunk metadata is always 16 bytes long
        chunk_metadata = self.fh.read(16)
        header, relative_offset, data_length = struct.unpack("IIQ", chunk_metadata)
        if header != 0xabeceda:
            raise ValueError("The ND2 file seems to be corrupted.")
        # We start at the location of the chunk metadata, skip over the metadata, and then proceed to the
        # start of the actual data field, which is at some arbitrary place after the metadata.
        self.fh.seek(chunk_location + 16 + relative_offset)
        return self.fh.read(data_length)
    @staticmethod
    def _parse_chunk_metadata(chunk_data):
        """
        Finds out everything about a given chunk. Every chunk begins with the same value, so if that's ever
        different we can assume the file has suffered some kind of damage.
        """
        header, relative_offset, data_length = struct.unpack("IIQ", chunk_data)
        if header != 0xabeceda:
            raise ValueError("The ND2 file seems to be corrupted.")
        return header, relative_offset, data_length
    def _get_raw_chunk_map_text(self):
        """
        Reads the entire chunk map and returns it as a string.
        """
    @staticmethod
    def as_numpy_array(arr):
        return np.frombuffer(arr)
    def _z_level_count(self):
        name = "CustomData|Z!"
        st = self._read_chunk(self._label_map[name].location)
        st = self._read_chunk(self._label_map[name])
        res = array.array("d", st)
        return len(res)
@@ -282,4 +272,4 @@ class Nd2FileReader(object):
                res[name].append(value)
        x = data.read()
        assert not x, "skip %d %s" % (len(x), repr(x[:30]))
        return res
        return res
