
Merge pull request #20 from jimrybarski/11-refactor

11 refactor
zolfa-add_slices_loading
Jim Rybarski 10 years ago
parent commit 2c0981f3cb
7 changed files with 370 additions and 662 deletions
  1. Dockerfile (+24, -0)
  2. nd2reader/__init__.py (+163, -26)
  3. nd2reader/model/__init__.py (+1, -192)
  4. nd2reader/parser.py (+179, -0)
  5. nd2reader/service/__init__.py (+0, -439)
  6. requirements.txt (+1, -0)
  7. setup.py (+2, -5)

Dockerfile (+24, -0)

@@ -0,0 +1,24 @@
FROM ubuntu
MAINTAINER Jim Rybarski <jim@rybarski.com>
RUN apt-get update && apt-get install -y \
gcc \
gfortran \
libblas-dev \
liblapack-dev \
libatlas-dev \
tk \
tk-dev \
libpng12-dev \
python \
python-dev \
python-pip \
libfreetype6-dev \
python-skimage
RUN pip install numpy
RUN pip install --upgrade scikit-image
COPY . /opt/nd2reader
WORKDIR /opt/nd2reader
RUN python setup.py install

nd2reader/__init__.py (+163, -26)

@@ -1,26 +1,30 @@
# -*- coding: utf-8 -*-
import array
from datetime import datetime
import logging
from nd2reader.service import BaseNd2
from nd2reader.model import Image, ImageSet
from nd2reader.parser import Nd2Parser
import re
import struct
log = logging.getLogger(__name__)
log.setLevel(logging.DEBUG)
log.addHandler(logging.StreamHandler())
log.setLevel(logging.WARN)
class Nd2(BaseNd2):
def __init__(self, filename):
class Nd2(Nd2Parser):
def __init__(self, filename, image_sets=False):
super(Nd2, self).__init__(filename)
self._use_image_sets = image_sets
def get_image(self, time_index, fov, channel_name, z_level):
image_set_number = self._calculate_image_set_number(time_index, fov, z_level)
timestamp, raw_image_data = self._reader.get_raw_image_data(image_set_number, self.channel_offset[channel_name])
return Image(timestamp, raw_image_data, fov, channel_name, z_level, self.height, self.width)
def __iter__(self):
"""
Just return every image in order (it might not be exactly the order in which the images were physically taken, but it will
be within a few seconds). A better explanation is probably needed here.
"""
if self._use_image_sets:
return self.image_sets()
else:
return self.images()
def images(self):
for i in range(self._image_count):
for fov in range(self.field_of_view_count):
for z_level in range(self.z_level_count):
@@ -29,19 +33,152 @@ class Nd2(BaseNd2):
if image.is_valid:
yield image
def image_sets(self, field_of_view, time_indices=None, channels=None, z_levels=None):
def image_sets(self):
for time_index in xrange(self.time_index_count):
image_set = ImageSet()
for fov in range(self.field_of_view_count):
for channel_name in self.channels:
for z_level in xrange(self.z_level_count):
image = self.get_image(time_index, fov, channel_name, z_level)
if image.is_valid:
image_set.add(image)
yield image_set
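
A short usage sketch of the new constructor flag and the __iter__ dispatch above; the file path is hypothetical, and only attributes that appear elsewhere in this diff are used.

from nd2reader import Nd2

# Default: iteration yields individual valid images.
nd2 = Nd2("/path/to/experiment.nd2")  # hypothetical path
first_image = next(iter(nd2))
first_image.show()  # Image.show() is defined in nd2reader/model/__init__.py

# Passing image_sets=True makes iteration yield ImageSet groups, one per time index.
for image_set in Nd2("/path/to/experiment.nd2", image_sets=True):
    pass  # each ImageSet holds the valid images captured at that time point
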
def get_image(self, time_index, fov, channel_name, z_level):
image_set_number = self._calculate_image_set_number(time_index, fov, z_level)
timestamp, raw_image_data = self._get_raw_image_data(image_set_number, self._channel_offset[channel_name])
return Image(timestamp, raw_image_data, fov, channel_name, z_level, self.height, self.width)
@property
def channels(self):
metadata = self.metadata['ImageMetadataSeq']['SLxPictureMetadata']['sPicturePlanes']
try:
validity = self.metadata['ImageMetadata']['SLxExperiment']['ppNextLevelEx'][''][0]['ppNextLevelEx'][''][0]['pItemValid']
except KeyError:
# If none of the channels have been deleted, there is no validity list, so we just make one
validity = [True for _ in metadata]
# Channel information is contained in dictionaries with the keys a0, a1...an where the number
# indicates the order in which the channel is stored. So by sorting the dicts alphabetically
# we get the correct order.
for (label, chan), valid in zip(sorted(metadata['sPlaneNew'].items()), validity):
if not valid:
continue
yield chan['sDescription']
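
A toy illustration of the sorting and validity masking performed by the channels property above; the channel names and layout are invented, not taken from a real file.

metadata = {"sPlaneNew": {"a1": {"sDescription": "GFP"},
                          "a0": {"sDescription": "BrightField"},
                          "a2": {"sDescription": "mCherry"}}}
validity = [True, False, True]  # the second channel was deleted in NIS Elements

# Sorting the a0, a1, ... keys alphabetically restores acquisition order.
names = [chan["sDescription"]
         for (label, chan), valid in zip(sorted(metadata["sPlaneNew"].items()), validity)
         if valid]
# names == ["BrightField", "mCherry"]
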
@property
def height(self):
"""
Gets all the images for a given field of view and
:return: height of each image, in pixels
"""
timepoint_set = xrange(self.time_index_count) if time_indices is None else time_indices
channel_set = [channel.name for channel in self.channels] if channels is None else channels
z_level_set = xrange(self.z_level_count) if z_levels is None else z_levels
return self.metadata['ImageAttributes']['SLxImageAttributes']['uiHeight']
for timepoint in timepoint_set:
image_set = ImageSet()
for channel_name in channel_set:
for z_level in z_level_set:
image = self.get_image(timepoint, field_of_view, channel_name, z_level)
if image.is_valid:
image_set.add(image)
yield image_set
@property
def width(self):
"""
:return: width of each image, in pixels
"""
return self.metadata['ImageAttributes']['SLxImageAttributes']['uiWidth']
@property
def absolute_start(self):
if self._absolute_start is None:
for line in self.metadata['ImageTextInfo']['SLxImageTextInfo'].values():
absolute_start_12 = None
absolute_start_24 = None
# ND2s seem to randomly switch between 12- and 24-hour representations.
try:
absolute_start_24 = datetime.strptime(line, "%m/%d/%Y %H:%M:%S")
except ValueError:
pass
try:
absolute_start_12 = datetime.strptime(line, "%m/%d/%Y %I:%M:%S %p")
except ValueError:
pass
if not absolute_start_12 and not absolute_start_24:
continue
self._absolute_start = absolute_start_12 if absolute_start_12 else absolute_start_24
return self._absolute_start
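
The two timestamp layouts that absolute_start tries, applied to an invented acquisition time:

from datetime import datetime

datetime.strptime("10/27/2014 16:30:45", "%m/%d/%Y %H:%M:%S")        # 24-hour form
datetime.strptime("10/27/2014 04:30:45 PM", "%m/%d/%Y %I:%M:%S %p")  # 12-hour form
# Both produce datetime(2014, 10, 27, 16, 30, 45).
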
@property
def channel_count(self):
pattern = r""".*?λ\((\d+)\).*?"""
try:
count = int(re.match(pattern, self._dimensions).group(1))
except AttributeError:
return 1
else:
return count
@property
def field_of_view_count(self):
"""
The metadata contains information about fields of view, but it contains it even if some fields
of view were cropped. We can't find anything that states which fields of view are actually
in the image data, so we have to calculate it. There probably is something somewhere, since
NIS Elements can figure it out, but we haven't found it yet.
"""
pattern = r""".*?XY\((\d+)\).*?"""
try:
count = int(re.match(pattern, self._dimensions).group(1))
except AttributeError:
return 1
else:
return count
@property
def time_index_count(self):
"""
The number of image sets. If images were acquired using some kind of cycle, all images at each step in the
program will have the same timestamp (even though they may have varied by a few seconds in reality). For example,
if you have four fields of view that you're constantly monitoring, and you take a bright field and GFP image of
each, and you repeat that process 100 times, you'll have 800 individual images. But there will only be 400
time indexes.
:rtype: int
"""
pattern = r""".*?T'\((\d+)\).*?"""
try:
count = int(re.match(pattern, self._dimensions).group(1))
except AttributeError:
return 1
else:
return count
@property
def z_level_count(self):
pattern = r""".*?Z\((\d+)\).*?"""
try:
count = int(re.match(pattern, self._dimensions).group(1))
except AttributeError:
return 1
else:
return count
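
The four count properties above all run their regexes against the same "Dimensions:" line from the text info metadata. A sketch with an invented dimension string (real files vary, and any missing axis falls back to a count of 1):

# -*- coding: utf-8 -*-
import re

dimensions = "Dimensions: T'(100) x XY(4) x λ(2) x Z(3)"  # invented example

time_indexes = int(re.match(r""".*?T'\((\d+)\).*?""", dimensions).group(1))    # 100
fields_of_view = int(re.match(r""".*?XY\((\d+)\).*?""", dimensions).group(1))  # 4
channels = int(re.match(r""".*?λ\((\d+)\).*?""", dimensions).group(1))         # 2
z_levels = int(re.match(r""".*?Z\((\d+)\).*?""", dimensions).group(1))         # 3
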
@property
def _channel_offset(self):
"""
Image data is interleaved for each image set. That is, if there are four images in a set, the first image
will consist of pixels 1, 5, 9, etc, the second will be pixels 2, 6, 10, and so forth. Why this would be the
case is beyond me, but that's how it works.
"""
channel_offset = {}
for n, channel in enumerate(self.channels):
channel_offset[channel] = n
return channel_offset
def _get_raw_image_data(self, image_set_number, channel_offset):
chunk = self._label_map["ImageDataSeq|%d!" % image_set_number]
data = self._read_chunk(chunk)
timestamp = struct.unpack("d", data[:8])[0]
# The images for the various channels are interleaved within each other.
image_data = array.array("H", data)
image_data_start = 4 + channel_offset
return timestamp, image_data[image_data_start::self.channel_count]
def _calculate_image_set_number(self, time_index, fov, z_level):
return time_index * self.field_of_view_count * self.z_level_count + (fov * self.z_level_count + z_level)
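
A minimal sketch of the interleaving and numbering described in _channel_offset, _get_raw_image_data and _calculate_image_set_number, with invented pixel values and counts:

import array

channel_count = 2
# The first four uint16 words (8 bytes) of a chunk hold the double timestamp; after that,
# pixels of all channels are interleaved: 10, 11, 12 belong to channel 0 and 90, 91, 92 to channel 1.
raw = array.array("H", [0, 0, 0, 0, 10, 90, 11, 91, 12, 92])

channel_0 = raw[4 + 0::channel_count]  # array('H', [10, 11, 12])
channel_1 = raw[4 + 1::channel_count]  # array('H', [90, 91, 92])

# Image set numbering with 4 fields of view and 3 z-levels:
# time_index=2, fov=1, z_level=0  ->  2 * 4 * 3 + 1 * 3 + 0 == 27
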

nd2reader/model/__init__.py (+1, -192)

@@ -1,33 +1,10 @@
import numpy as np
import skimage.io
import logging
from io import BytesIO
import array
import struct
log = logging.getLogger(__name__)
class Channel(object):
def __init__(self, name, camera, exposure_time):
self._name = name
self._camera = camera
self._exposure_time = exposure_time
@property
def name(self):
return self._name
@property
def camera(self):
return self._camera
@property
def exposure_time(self):
return self._exposure_time
class ImageSet(object):
"""
A group of images that share the same timestamp. NIS Elements doesn't store a unique timestamp for every
@@ -96,172 +73,4 @@ class Image(object):
def show(self):
skimage.io.imshow(self.data)
skimage.io.show()
class MetadataItem(object):
def __init__(self, start, data):
self._datatype = ord(data[start])
self._label_length = 2 * ord(data[start + 1])
self._data = data
@property
def is_valid(self):
return self._datatype > 0
@property
def key(self):
return self._data[2:self._label_length].decode("utf16").encode("utf8")
@property
def length(self):
return self._length
@property
def data_start(self):
return self._label_length + 2
@property
def _body(self):
"""
All data after the header.
"""
return self._data[self.data_start:]
def _get_bytes(self, count):
return self._data[self.data_start: self.data_start + count]
@property
def value(self):
parser = {1: self._parse_unsigned_char,
2: self._parse_unsigned_int,
3: self._parse_unsigned_int,
5: self._parse_unsigned_long,
6: self._parse_double,
8: self._parse_string,
9: self._parse_char_array,
11: self._parse_metadata_item
}
return parser[self._datatype]()
def _parse_unsigned_char(self):
self._length = 1
return self._unpack("B", self._get_bytes(self._length))
def _parse_unsigned_int(self):
self._length = 4
return self._unpack("I", self._get_bytes(self._length))
def _parse_unsigned_long(self):
self._length = 8
return self._unpack("Q", self._get_bytes(self._length))
def _parse_double(self):
self._length = 8
return self._unpack("d", self._get_bytes(self._length))
def _parse_string(self):
# the string is of unknown length but ends at the first instance of \x00\x00
stop = self._body.index("\x00\x00")
self._length = stop
return self._body[:stop - 1].decode("utf16").encode("utf8")
def _parse_char_array(self):
array_length = self._unpack("Q", self._get_bytes(8))
self._length = array_length + 8
return array.array("B", self._body[8:array_length])
def _parse_metadata_item(self):
count, length = struct.unpack("<IQ", self._get_bytes(12))
metadata_set = MetadataSet(self._body, 0, count)
def _unpack(self, kind, data):
"""
:param kind: the datatype to interpret the bytes as (see: https://docs.python.org/2/library/struct.html#struct-format-strings)
:type kind: str
:param data: the bytes to be converted
:type data: bytes
Parses a sequence of bytes and converts them to a Python data type.
struct.unpack() returns a tuple but we only want the first element.
"""
return struct.unpack(kind, data)[0]
class MetadataSet(object):
"""
A container of metadata items. Can contain other MetadataSet objects.
"""
def __init__(self, data, start, item_count):
self._items = []
self._parse(data, start, item_count)
def _parse(self, data, start, item_count):
for item in range(item_count):
metadata_item = MetadataItem(start, data)
if not metadata_item.is_valid:
break
start += metadata_item.length
class Chunkmap(object):
def __init__(self):
pass
def read(self, filename):
with open(filename, "rb") as f:
data = f.read(-1)
self.parse(data, 1)
def parse(self, data, count):
data = BytesIO(data)
res = {}
total_count = 0
for c in range(count):
lastpos = data.tell()
total_count += 1
hdr = data.read(2)
if not hdr:
break
typ = ord(hdr[0])
bname = data.read(2*ord(hdr[1]))
name = bname.decode("utf16")[:-1].encode("utf8")
if typ == 1:
value, = struct.unpack("B", data.read(1))
elif typ in [2, 3]:
value, = struct.unpack("I", data.read(4))
elif typ == 5:
value, = struct.unpack("Q", data.read(8))
elif typ == 6:
value, = struct.unpack("d", data.read(8))
elif typ == 8:
value = data.read(2)
while value[-2:] != "\x00\x00":
value += data.read(2)
value = value.decode("utf16")[:-1].encode("utf8")
elif typ == 9:
cnt, = struct.unpack("Q", data.read(8))
value = array.array("B", data.read(cnt))
elif typ == 11:
curpos = data.tell()
newcount, length = struct.unpack("<IQ", data.read(12))
curpos = data.tell()
length -= data.tell()-lastpos
nextdata = data.read(length)
value = self.parse(nextdata, newcount)
# Skip some offsets
data.read(newcount * 8)
else:
assert 0, "%s hdr %x:%x unknown" % (name, ord(hdr[0]), ord(hdr[1]))
if not name in res:
res[name] = value
else:
if not isinstance(res[name], list):
res[name] = [res[name]]
res[name].append(value)
x = data.read()
assert not x, "skip %d %s" % (len(x), repr(x[:30]))
return res
skimage.io.show()

nd2reader/parser.py (+179, -0)

@@ -0,0 +1,179 @@
# -*- coding: utf-8 -*-
import array
from collections import namedtuple
import struct
from StringIO import StringIO
field_of_view = namedtuple('FOV', ['number', 'x', 'y', 'z', 'pfs_offset'])
class Nd2Parser(object):
CHUNK_HEADER = 0xabeceda
CHUNK_MAP_START = "ND2 FILEMAP SIGNATURE NAME 0001!"
CHUNK_MAP_END = "ND2 CHUNK MAP SIGNATURE 0000001!"
"""
Reads .nd2 files, provides an interface to the metadata, and generates numpy arrays from the image data.
"""
def __init__(self, filename):
self._absolute_start = None
self._filename = filename
self._fh = None
self._chunk_map_start_location = None
self._cursor_position = None
self._dimension_text = None
self._label_map = {}
self.metadata = {}
self._read_map()
self._parse_metadata()
@property
def _file_handle(self):
if self._fh is None:
self._fh = open(self._filename, "rb")
return self._fh
@property
def _dimensions(self):
if self._dimension_text is None:
for line in self.metadata['ImageTextInfo']['SLxImageTextInfo'].values():
if "Dimensions:" in line:
metadata = line
break
else:
raise ValueError("Could not parse metadata dimensions!")
for line in metadata.split("\r\n"):
if line.startswith("Dimensions:"):
self._dimension_text = line
break
else:
raise ValueError("Could not parse metadata dimensions!")
return self._dimension_text
@property
def _image_count(self):
return self.metadata['ImageAttributes']['SLxImageAttributes']['uiSequenceCount']
@property
def _sequence_count(self):
return self.metadata['ImageEvents']['RLxExperimentRecord']['uiCount']
def _parse_metadata(self):
for label in self._label_map.keys():
if not label.endswith("LV!") or "LV|" in label:
continue
data = self._read_chunk(self._label_map[label])
stop = label.index("LV")
self.metadata[label[:stop]] = self._read_metadata(data, 1)
def _read_map(self):
"""
Every label ends with an exclamation point; however, we can't simply search for exclamation points to find all the
labels, because some data bytes also happen to contain the value 33, the ASCII code for "!". So we iteratively find
each label, grab the subsequent data (always 16 bytes long), advance to the next label, and repeat.
"""
self._file_handle.seek(-8, 2)
chunk_map_start_location = struct.unpack("Q", self._file_handle.read(8))[0]
self._file_handle.seek(chunk_map_start_location)
raw_text = self._file_handle.read(-1)
label_start = raw_text.index(Nd2Parser.CHUNK_MAP_START) + 32
while True:
data_start = raw_text.index("!", label_start) + 1
key = raw_text[label_start: data_start]
location, length = struct.unpack("QQ", raw_text[data_start: data_start + 16])
if key == Nd2Parser.CHUNK_MAP_END:
# We've reached the end of the chunk map
break
self._label_map[key] = location
label_start = data_start + 16
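
A self-contained sketch (Python 2, matching the module) of the chunk map layout that _read_map walks: each label ends with "!" and is followed by an 8-byte file offset and an 8-byte length. The entries below are invented.

import struct

raw_text = ("ImageDataSeq|0!" + struct.pack("QQ", 4096, 123456) +
            "ND2 CHUNK MAP SIGNATURE 0000001!" + struct.pack("QQ", 0, 0))

label_map = {}
label_start = 0
while True:
    data_start = raw_text.index("!", label_start) + 1
    key = raw_text[label_start: data_start]
    location, length = struct.unpack("QQ", raw_text[data_start: data_start + 16])
    if key == "ND2 CHUNK MAP SIGNATURE 0000001!":
        break
    label_map[key] = location
    label_start = data_start + 16

# label_map == {"ImageDataSeq|0!": 4096}
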
def _read_chunk(self, chunk_location):
"""
Gets the data for a given chunk pointer
"""
self._file_handle.seek(chunk_location)
# The chunk metadata is always 16 bytes long
chunk_metadata = self._file_handle.read(16)
header, relative_offset, data_length = struct.unpack("IIQ", chunk_metadata)
if header != Nd2Parser.CHUNK_HEADER:
raise ValueError("The ND2 file seems to be corrupted.")
# We start at the location of the chunk metadata, skip over the metadata, and then proceed to the
# start of the actual data field, which is at some arbitrary place after the metadata.
self._file_handle.seek(chunk_location + 16 + relative_offset)
return self._file_handle.read(data_length)
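
The 16-byte chunk header layout checked above, packed and unpacked on invented values:

import struct

chunk_metadata = struct.pack("IIQ", 0xabeceda, 64, 2048)  # magic, relative offset, data length
header, relative_offset, data_length = struct.unpack("IIQ", chunk_metadata)
assert header == 0xabeceda  # anything else is treated as file corruption
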
def _z_level_count(self):
st = self._read_chunk(self._label_map["CustomData|Z!"])
return len(array.array("d", st))
def _parse_unsigned_char(self, data):
return struct.unpack("B", data.read(1))[0]
def _parse_unsigned_int(self, data):
return struct.unpack("I", data.read(4))[0]
def _parse_unsigned_long(self, data):
return struct.unpack("Q", data.read(8))[0]
def _parse_double(self, data):
return struct.unpack("d", data.read(8))[0]
def _parse_string(self, data):
value = data.read(2)
while not value.endswith("\x00\x00"):
# the string ends at the first instance of \x00\x00
value += data.read(2)
return value.decode("utf16")[:-1].encode("utf8")
def _parse_char_array(self, data):
array_length = struct.unpack("Q", data.read(8))[0]
return array.array("B", data.read(array_length))
def _parse_metadata_item(self, args):
data, cursor_position = args
new_count, length = struct.unpack("<IQ", data.read(12))
length -= data.tell() - cursor_position
next_data_length = data.read(length)
value = self._read_metadata(next_data_length, new_count)
# Skip some offsets
data.read(new_count * 8)
return value
def _get_value(self, data, data_type):
parser = {1: {'method': self._parse_unsigned_char, 'args': data},
2: {'method': self._parse_unsigned_int, 'args': data},
3: {'method': self._parse_unsigned_int, 'args': data},
5: {'method': self._parse_unsigned_long, 'args': data},
6: {'method': self._parse_double, 'args': data},
8: {'method': self._parse_string, 'args': data},
9: {'method': self._parse_char_array, 'args': data},
11: {'method': self._parse_metadata_item, 'args': (data, self._cursor_position)}}
return parser[data_type]['method'](parser[data_type]['args'])
def _read_metadata(self, data, count):
data = StringIO(data)
metadata = {}
for _ in xrange(count):
self._cursor_position = data.tell()
header = data.read(2)
if not header:
break
data_type, name_length = map(ord, header)
name = data.read(name_length * 2).decode("utf16")[:-1].encode("utf8")
value = self._get_value(data, data_type)
if name not in metadata:
metadata[name] = value
else:
if not isinstance(metadata[name], list):
# We have encountered this key exactly once before. Since we're seeing it again, we know we
# need to convert it to a list before proceeding.
metadata[name] = [metadata[name]]
# We've encountered this key before so we're guaranteed to be dealing with a list. Thus we append
# the value to the already-existing list.
metadata[name].append(value)
return metadata
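
The duplicate-key rule in _read_metadata, restated on invented names and values:

metadata = {}
for name, value in [("uiWidth", 512), ("pPoint", 1), ("pPoint", 2), ("pPoint", 3)]:
    if name not in metadata:
        metadata[name] = value
    else:
        if not isinstance(metadata[name], list):
            metadata[name] = [metadata[name]]
        metadata[name].append(value)

# metadata == {"uiWidth": 512, "pPoint": [1, 2, 3]}
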

nd2reader/service/__init__.py (+0, -439)

@@ -1,439 +0,0 @@
# -*- coding: utf-8 -*-
import array
import numpy as np
import struct
import re
from StringIO import StringIO
from collections import namedtuple
import logging
from nd2reader.model import Channel
from datetime import datetime
log = logging.getLogger(__name__)
log.setLevel(logging.DEBUG)
chunk = namedtuple('Chunk', ['location', 'length'])
field_of_view = namedtuple('FOV', ['number', 'x', 'y', 'z', 'pfs_offset'])
class BaseNd2(object):
def __init__(self, filename):
self._reader = Nd2Reader(filename)
self._channel_offset = None
@property
def height(self):
"""
:return: height of each image, in pixels
"""
return self._metadata['ImageAttributes']['SLxImageAttributes']['uiHeight']
@property
def width(self):
"""
:return: width of each image, in pixels
"""
return self._metadata['ImageAttributes']['SLxImageAttributes']['uiWidth']
@property
def absolute_start(self):
return self._reader.absolute_start
@property
def channels(self):
metadata = self._metadata['ImageMetadataSeq']['SLxPictureMetadata']['sPicturePlanes']
try:
validity = self._metadata['ImageMetadata']['SLxExperiment']['ppNextLevelEx'][''][0]['ppNextLevelEx'][''][0]['pItemValid']
except KeyError:
# If none of the channels have been deleted, there is no validity list, so we just make one
validity = [True for i in metadata]
# Channel information is contained in dictionaries with the keys a0, a1...an where the number
# indicates the order in which the channel is stored. So by sorting the dicts alphabetically
# we get the correct order.
for (label, chan), valid in zip(sorted(metadata['sPlaneNew'].items()), validity):
if not valid:
continue
name = chan['sDescription']
exposure_time = metadata['sSampleSetting'][label]['dExposureTime']
camera = metadata['sSampleSetting'][label]['pCameraSetting']['CameraUserName']
yield Channel(name, camera, exposure_time)
@property
def channel_names(self):
"""
A convenience method for getting an alphabetized list of channel names.
:return: list[str]
"""
for channel in sorted(self.channels, key=lambda x: x.name):
yield channel.name
@property
def _image_count(self):
return self._metadata['ImageAttributes']['SLxImageAttributes']['uiSequenceCount']
@property
def _sequence_count(self):
return self._metadata['ImageEvents']['RLxExperimentRecord']['uiCount']
@property
def time_index_count(self):
"""
The number of images for a given field of view, channel, and z_level combination.
Effectively the number of frames.
:rtype: int
"""
return self._reader.time_index_count
@property
def z_level_count(self):
return self._reader.z_level_count
@property
def field_of_view_count(self):
"""
The metadata contains information about fields of view, but it contains it even if some fields
of view were cropped. We can't find anything that states which fields of view are actually
in the image data, so we have to calculate it. There probably is something somewhere, since
NIS Elements can figure it out, but we haven't found it yet.
"""
return self._reader.field_of_view_count
@property
def channel_count(self):
return self._reader.channel_count
@property
def channel_offset(self):
if self._channel_offset is None:
self._channel_offset = {}
for n, channel in enumerate(self.channels):
self._channel_offset[channel.name] = n
return self._channel_offset
@property
def _metadata(self):
return self._reader.metadata
def _calculate_image_set_number(self, time_index, fov, z_level):
return time_index * self.field_of_view_count * self.z_level_count + (fov * self.z_level_count + z_level)
class Nd2Reader(object):
"""
Reads .nd2 files, provides an interface to the metadata, and generates numpy arrays from the image data.
"""
def __init__(self, filename):
self._absolute_start = None
self._filename = filename
self._file_handler = None
self._chunk_map_start_location = None
self._label_map = {}
self._metadata = {}
self._read_map()
self._parse_dict_data()
self.__dimensions = None
@property
def _dimensions(self):
if self.__dimensions is None:
# The particular slot that this data shows up in changes (seemingly) randomly
for line in self._metadata['ImageTextInfo']['SLxImageTextInfo'].values():
if "Dimensions:" in line:
metadata = line
break
else:
raise Exception("Could not parse metadata dimensions!")
for line in metadata.split("\r\n"):
if line.startswith("Dimensions:"):
self.__dimensions = line
break
return self.__dimensions
@property
def absolute_start(self):
if self._absolute_start is None:
for line in self._metadata['ImageTextInfo']['SLxImageTextInfo'].values():
absolute_start_12 = None
absolute_start_24 = None
# ND2s seem to randomly switch between 12- and 24-hour representations.
try:
absolute_start_24 = datetime.strptime(line, "%m/%d/%Y %H:%M:%S")
except ValueError:
pass
try:
absolute_start_12 = datetime.strptime(line, "%m/%d/%Y %I:%M:%S %p")
except ValueError:
pass
if not absolute_start_12 and not absolute_start_24:
continue
self._absolute_start = absolute_start_12 if absolute_start_12 else absolute_start_24
return self._absolute_start
@property
def fh(self):
if self._file_handler is None:
self._file_handler = open(self._filename, "rb")
return self._file_handler
@property
def time_index_count(self):
"""
The number of images for a given field of view, channel, and z_level combination.
Effectively the number of frames.
:rtype: int
"""
pattern = r""".*?T'\((\d+)\).*?"""
try:
count = int(re.match(pattern, self._dimensions).group(1))
except AttributeError:
return 1
else:
return count
@property
def z_level_count(self):
pattern = r""".*?Z\((\d+)\).*?"""
try:
count = int(re.match(pattern, self._dimensions).group(1))
except AttributeError:
return 1
else:
return count
@property
def field_of_view_count(self):
"""
The metadata contains information about fields of view, but it contains it even if some fields
of view were cropped. We can't find anything that states which fields of view are actually
in the image data, so we have to calculate it. There probably is something somewhere, since
NIS Elements can figure it out, but we haven't found it yet.
"""
pattern = r""".*?XY\((\d+)\).*?"""
try:
count = int(re.match(pattern, self._dimensions).group(1))
except AttributeError:
return 1
else:
return count
@property
def channel_count(self):
pattern = r""".*?λ\((\d+)\).*?"""
try:
count = int(re.match(pattern, self._dimensions).group(1))
except AttributeError:
return 1
else:
return count
def get_raw_image_data(self, image_set_number, channel_offset):
chunk = self._label_map["ImageDataSeq|%d!" % image_set_number]
data = self._read_chunk(chunk.location)
timestamp = struct.unpack("d", data[:8])[0]
# The images for the various channels are interleaved within each other. Yes, this is an incredibly unintuitive and nonsensical way
# to store data.
image_data = array.array("H", data)
image_data_start = 4 + channel_offset
return timestamp, image_data[image_data_start::self.channel_count]
def _parse_dict_data(self):
# TODO: Don't like this name
for label in self._top_level_dict_labels:
chunk_location = self._label_map[label].location
data = self._read_chunk(chunk_location)
stop = label.index("LV")
self._metadata[label[:stop]] = self.read_lv_encoding(data, 1)
@property
def metadata(self):
return self._metadata
@property
def _top_level_dict_labels(self):
# TODO: I don't like this name either
for label in self._label_map.keys():
if label.endswith("LV!") or "LV|" in label:
yield label
def _read_map(self):
"""
Every label ends with an exclamation point, however, we can't directly search for those to find all the labels
as some of the bytes contain the value 33, which is the ASCII code for "!". So we iteratively find each label,
grab the subsequent data (always 16 bytes long), advance to the next label and repeat.
"""
raw_text = self._get_raw_chunk_map_text()
label_start = self._find_first_label_offset(raw_text)
while True:
data_start = self._get_data_start(label_start, raw_text)
label, value = self._extract_map_key(label_start, data_start, raw_text)
if label == "ND2 CHUNK MAP SIGNATURE 0000001!":
# We've reached the end of the chunk map
break
self._label_map[label] = value
label_start = data_start + 16
@staticmethod
def _find_first_label_offset(raw_text):
"""
The chunk map starts with some number of (seemingly) useless bytes, followed
by "ND2 FILEMAP SIGNATURE NAME 0001!". We return the location of the first character after this sequence,
which is the actual beginning of the chunk map.
"""
return raw_text.index("ND2 FILEMAP SIGNATURE NAME 0001!") + 32
@staticmethod
def _get_data_start(label_start, raw_text):
"""
The data for a given label begins immediately after the first exclamation point
"""
return raw_text.index("!", label_start) + 1
@staticmethod
def _extract_map_key(label_start, data_start, raw_text):
"""
Chunk map entries are a string label of arbitrary length followed by 16 bytes of data, which represent
the byte offset from the beginning of the file where that data can be found.
"""
key = raw_text[label_start: data_start]
location, length = struct.unpack("QQ", raw_text[data_start: data_start + 16])
return key, chunk(location=location, length=length)
@property
def chunk_map_start_location(self):
"""
The position in bytes from the beginning of the file where the chunk map begins.
The chunk map is a series of string labels followed by the position (in bytes) of the respective data.
"""
if self._chunk_map_start_location is None:
# Put the cursor 8 bytes before the end of the file
self.fh.seek(-8, 2)
# Read the last 8 bytes of the file
self._chunk_map_start_location = struct.unpack("Q", self.fh.read(8))[0]
return self._chunk_map_start_location
def _read_chunk(self, chunk_location):
"""
Gets the data for a given chunk pointer
"""
self.fh.seek(chunk_location)
chunk_data = self._read_chunk_metadata()
header, relative_offset, data_length = self._parse_chunk_metadata(chunk_data)
return self._read_chunk_data(chunk_location, relative_offset, data_length)
def _read_chunk_metadata(self):
"""
Gets the chunk's metadata, which is always 16 bytes
"""
return self.fh.read(16)
def _read_chunk_data(self, chunk_location, relative_offset, data_length):
"""
Reads the actual data for a given chunk
"""
# We start at the location of the chunk metadata, skip over the metadata, and then proceed to the
# start of the actual data field, which is at some arbitrary place after the metadata.
self.fh.seek(chunk_location + 16 + relative_offset)
return self.fh.read(data_length)
@staticmethod
def _parse_chunk_metadata(chunk_data):
"""
Finds out everything about a given chunk. Every chunk begins with the same value, so if that's ever
different we can assume the file has suffered some kind of damage.
"""
header, relative_offset, data_length = struct.unpack("IIQ", chunk_data)
if header != 0xabeceda:
raise ValueError("The ND2 file seems to be corrupted.")
return header, relative_offset, data_length
def _get_raw_chunk_map_text(self):
"""
Reads the entire chunk map and returns it as a string.
"""
self.fh.seek(self.chunk_map_start_location)
return self.fh.read(-1)
@staticmethod
def as_numpy_array(arr):
return np.frombuffer(arr)
def _z_level_count(self):
name = "CustomData|Z!"
st = self._read_chunk(self._label_map[name].location)
res = array.array("d", st)
return len(res)
def read_lv_encoding(self, data, count):
data = StringIO(data)
res = {}
total_count = 0
for c in range(count):
lastpos = data.tell()
total_count += 1
hdr = data.read(2)
if not hdr:
break
typ = ord(hdr[0])
bname = data.read(2*ord(hdr[1]))
name = bname.decode("utf16")[:-1].encode("utf8")
if typ == 1:
value, = struct.unpack("B", data.read(1))
elif typ in [2, 3]:
value, = struct.unpack("I", data.read(4))
elif typ == 5:
value, = struct.unpack("Q", data.read(8))
elif typ == 6:
value, = struct.unpack("d", data.read(8))
elif typ == 8:
value = data.read(2)
while value[-2:] != "\x00\x00":
value += data.read(2)
value = value.decode("utf16")[:-1].encode("utf8")
elif typ == 9:
cnt, = struct.unpack("Q", data.read(8))
value = array.array("B", data.read(cnt))
elif typ == 11:
newcount, length = struct.unpack("<IQ", data.read(12))
length -= data.tell()-lastpos
nextdata = data.read(length)
value = self.read_lv_encoding(nextdata, newcount)
# Skip some offsets
data.read(newcount * 8)
else:
assert 0, "%s hdr %x:%x unknown" % (name, ord(hdr[0]), ord(hdr[1]))
if not name in res:
res[name] = value
else:
if not isinstance(res[name], list):
res[name] = [res[name]]
res[name].append(value)
x = data.read()
assert not x, "skip %d %s" % (len(x), repr(x[:30]))
return res

requirements.txt (+1, -0)

@@ -0,0 +1 @@
numpy

setup.py (+2, -5)

@@ -3,8 +3,5 @@ from setuptools import setup, find_packages
setup(
name="nd2reader",
packages=find_packages(),
version="0.9.7",
install_requires=[
'numpy',
],
)
version="0.9.7"
)
