- use only one parser for v3 files
- merge some classes
- return metadata as raw metadata and dictionary
@@ -1,236 +0,0 @@
# -*- coding: utf-8 -*-
from nd2reader.parser import get_parser, get_version
import six


class Nd2(object):
    """ Allows easy access to NIS Elements .nd2 image files. """

    def __init__(self, filename):
        self._filename = filename
        self._fh = open(filename, "rb")
        major_version, minor_version = get_version(self._fh)
        self._parser = get_parser(self._fh, major_version, minor_version)
        self._metadata = self._parser.metadata
        self._roi_metadata = self._parser.roi_metadata

    def __repr__(self):
        return "\n".join(["<ND2 %s>" % self._filename,
                          "Created: %s" % (self.date if self.date is not None else "Unknown"),
                          "Image size: %sx%s (HxW)" % (self.height, self.width),
                          "Frames: %s" % len(self.frames),
                          "Channels: %s" % ", ".join(["%s" % str(channel) for channel in self.channels]),
                          "Fields of View: %s" % len(self.fields_of_view),
                          "Z-Levels: %s" % len(self.z_levels)
                          ])

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        if self._fh is not None:
            self._fh.close()

    def __len__(self):
        """
        The total number of images in the ND2. This may be inaccurate: if the ND2 contains a different number of
        images per cycle (i.e. there are "gap" images), it will be higher than the real count.
        :rtype: int
        """
        return self._metadata.total_images_per_channel * len(self.channels)

    def __getitem__(self, item):
        """
        Allows slicing ND2s.
        :type item: int or slice
        :rtype: nd2reader.model.Image() or generator
        """
        if isinstance(item, int):
            try:
                image = self._parser.driver.get_image(item)
            except KeyError:
                raise IndexError
            else:
                return image
        elif isinstance(item, slice):
            return self._slice(item.start, item.stop, item.step)
        raise IndexError

    def select(self, fields_of_view=None, channels=None, z_levels=None, start=0, stop=None):
        """
        Iterates over images matching the given criteria. This can be 2-10 times faster than manually iterating over
        the Nd2 and checking the attributes of each image, as this method skips disk reads for any images that don't
        meet the criteria.
        :type fields_of_view: int or tuple or list
        :type channels: str or tuple or list
        :type z_levels: int or tuple or list
        :type start: int
        :type stop: int
        """
        fields_of_view = self._to_tuple(fields_of_view, self.fields_of_view)
        channels = self._to_tuple(channels, self.channels)
        z_levels = self._to_tuple(z_levels, self.z_levels)

        # By default, we stop after the last image. Otherwise we make sure the user-provided value is valid
        stop = len(self) if stop is None else max(0, min(stop, len(self)))
        for frame in range(start, stop):
            field_of_view, channel, z_level = self._parser.driver.calculate_image_properties(frame)
            if field_of_view in fields_of_view and channel in channels and z_level in z_levels:
                image = self._parser.driver.get_image(frame)
                if image is not None:
                    yield image

    @property
    def height(self):
        """
        The height of each image in pixels.
        :rtype: int
        """
        return self._metadata.height

    @property
    def width(self):
        """
        The width of each image in pixels.
        :rtype: int
        """
        return self._metadata.width

    @property
    def z_levels(self):
        """
        A list of integers representing the different Z-axis levels at which images were taken. Currently this is
        just a list of numbers from 0 to N. For example, an ND2 where images were taken at -3µm, 0µm, and +5µm from a
        set position would be represented by 0, 1 and 2, respectively. ND2s do store the actual offset of each image
        in micrometers and in the future this will hopefully be available. For now, however, you will have to match up
        the order yourself.
        :return: list of int
        """
        return self._metadata.z_levels

    @property
    def fields_of_view(self):
        """
        A list of integers representing the various stage locations, in the order they were taken in the first round
        of acquisition.
        :return: list of int
        """
        return self._metadata.fields_of_view

    @property
    def channels(self):
        """
        A list of channel (i.e. wavelength) names. These are set by the user in NIS Elements.
        :return: list of str
        """
        return self._metadata.channels

    @property
    def frames(self):
        """
        A list of integers representing groups of images. ND2s consider images to be part of the same frame if they
        are in the same field of view but differ in channel. So if you take a bright field and a GFP image at
        four different fields of view over and over again, you'll have 8 images and 4 frames per cycle.
        :return: list of int
        """
        return self._metadata.frames

    @property
    def date(self):
        """
        The date and time that the acquisition began. Not guaranteed to have been recorded.
        :rtype: datetime.datetime() or None
        """
        return self._metadata.date

    @property
    def pixel_microns(self):
        """
        The width of a pixel in microns. Note that the user can override this in NIS Elements, so it may not reflect
        reality.
        :rtype: float
        """
        return self._metadata.pixel_microns

    def get_image(self, frame_number, field_of_view, channel_name, z_level):
        """
        Attempts to return the image with the unique combination of given attributes. None will be returned if a match
        is not found.
        :type frame_number: int
        :param field_of_view: the label for the place in the XY-plane where this image was taken.
        :type field_of_view: int
        :param channel_name: the name of the color of this image
        :type channel_name: str
        :param z_level: the label for the location in the Z-plane where this image was taken.
        :type z_level: int
        :rtype: nd2reader.model.Image() or None
        """
        return self._parser.driver.get_image_by_attributes(frame_number,
                                                           field_of_view,
                                                           channel_name,
                                                           z_level,
                                                           self.height,
                                                           self.width)

    def close(self):
        """
        Closes the file handle to the image. Closing the handle can prevent problems, so it's good to call this
        explicitly or to use Nd2 as a context manager.
        """
        self._fh.close()

    def _slice(self, start, stop, step):
        """
        Allows for iteration over a selection of the entire dataset.
        :type start: int
        :type stop: int
        :type step: int
        :rtype: nd2reader.model.Image()
        """
        start = start if start is not None else 0
        step = step if step is not None else 1
        stop = stop if stop is not None else len(self)
        # This weird thing with the step allows you to iterate backwards over the images
        for i in range(start, stop)[::step]:
            yield self[i]

    def _to_tuple(self, value, default):
        """
        Idempotently converts a value to a tuple. This allows users to pass in scalar values and iterables to
        select(), which is more ergonomic than having to remember to pass in single-member lists.
        :type value: int or str or tuple or list
        :type default: tuple or list
        :rtype: tuple
        """
        value = default if value is None else value
        return (value,) if isinstance(value, int) or isinstance(value, six.string_types) else tuple(value)
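As a usage reference for the class removed above, here is a minimal sketch. The file path, channel name, and field-of-view indices are made up, and it assumes Nd2 is importable from the top-level nd2reader package:

from nd2reader import Nd2  # assumes the package exports Nd2 at the top level

# "experiment.nd2" is a hypothetical file path
with Nd2("experiment.nd2") as nd2:
    print(nd2)                # summary via __repr__
    first_image = nd2[0]      # integer indexing through __getitem__
    # iterate only over matching images; non-matching frames are never read from disk
    for image in nd2.select(channels="GFP", fields_of_view=(1, 2), z_levels=0):
        print(image.frame_number, image.channel, image.timestamp)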
@@ -1 +0,0 @@
from nd2reader.model.image import Image
@@ -1,135 +0,0 @@
# -*- coding: utf-8 -*-
import numpy as np


class Image(np.ndarray):
    """
    Holds the raw pixel data of an image and provides access to some metadata.
    """
    def __new__(cls, array):
        return np.asarray(array).view(cls)

    def __init__(self, array):
        self._index = None
        self._timestamp = None
        self._frame_number = None
        self._field_of_view = None
        self._channel = None
        self._z_level = None

    def __array_wrap__(self, obj, *args):
        if len(obj.shape) == 0:
            return obj[()]
        else:
            return obj

    def add_params(self, index, timestamp, frame_number, field_of_view, channel, z_level):
        """
        :param index: The integer that can be used to directly index this image.
        :type index: int
        :param timestamp: The number of milliseconds after the beginning of the acquisition that this image was taken.
        :type timestamp: float
        :param frame_number: The order in which this image was taken, with images of different channels/z-levels
        at the same field of view treated as being in the same frame.
        :type frame_number: int
        :param field_of_view: The label for the place in the XY-plane where this image was taken.
        :type field_of_view: int
        :param channel: The name of the color of this image.
        :type channel: str
        :param z_level: The label for the location in the Z-plane where this image was taken.
        :type z_level: int
        """
        self._index = index
        self._timestamp = timestamp
        self._frame_number = int(frame_number)
        self._field_of_view = field_of_view
        self._channel = channel
        self._z_level = z_level

    @property
    def index(self):
        return self._index

    @property
    def height(self):
        """
        The height in pixels.
        :rtype: int
        """
        return self.shape[0]

    @property
    def width(self):
        """
        The width in pixels.
        :rtype: int
        """
        return self.shape[1]

    @property
    def field_of_view(self):
        """
        The index of the stage location where this image was acquired.
        :rtype: int
        """
        return self._field_of_view

    @property
    def timestamp(self):
        """
        The number of seconds after the beginning of the acquisition that the image was taken. Note that for a given
        field of view and z-level offset, if you have images of multiple channels, they will all be given the same
        timestamp. That's just how ND2s are structured, so if your experiment depends on millisecond accuracy,
        you need to find an alternative imaging system.
        :rtype: float
        """
        # data is actually stored in milliseconds
        return self._timestamp / 1000.0

    @property
    def frame_number(self):
        """
        The index of the group of images that were acquired sequentially at the same field of view.
        :rtype: int
        """
        return self._frame_number

    @property
    def channel(self):
        """
        The name of the filter used to acquire this image. These are user-supplied in NIS Elements.
        :rtype: str
        """
        return self._channel

    @property
    def z_level(self):
        """
        The vertical offset of the image. These are simple integers starting from 0, where 0 is the lowest
        z-level and each subsequent level is incremented by 1.

        For example, if you acquired images at -3 µm, 0 µm, and +3 µm, your z-levels would be:

        -3 µm: 0
        0 µm: 1
        +3 µm: 2

        :rtype: int
        """
        return self._z_level
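A short sketch of how the Image class above is meant to be used: it behaves like a plain NumPy array while carrying acquisition metadata. All values below are made up:

import numpy as np

from nd2reader.model.image import Image  # import path as used elsewhere in this diff

pixels = np.random.randint(0, 2 ** 14, size=(512, 512))   # made-up 14-bit pixel data
image = Image(pixels)
# index, timestamp (ms), frame_number, field_of_view, channel, z_level -- all hypothetical values
image.add_params(42, 1500.0, 3, 0, 'BF', 2)

print(image.height, image.width)   # 512 512, taken from the array shape
print(image.timestamp)             # 1.5 -- converted from milliseconds to seconds
print(image.mean(), image.max())   # ordinary NumPy operations still work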
@@ -1,106 +0,0 @@
class Metadata(object):
    """ A simple container for ND2 metadata. """
    def __init__(self, height, width, channels, date, fields_of_view, frames, z_levels, total_images_per_channel, pixel_microns):
        self._height = height
        self._width = width
        self._channels = channels
        self._date = date
        self._fields_of_view = fields_of_view
        self._frames = frames
        self._z_levels = z_levels
        self._total_images_per_channel = total_images_per_channel
        self._pixel_microns = pixel_microns

    @property
    def height(self):
        """
        The image height in pixels.
        :rtype: int
        """
        return self._height

    @property
    def width(self):
        """
        The image width in pixels.
        :rtype: int
        """
        return self._width

    @property
    def date(self):
        """
        The date and time when acquisition began.
        :rtype: datetime.datetime() or None
        """
        return self._date

    @property
    def channels(self):
        """
        These are labels created by the NIS Elements user. Typically they are a short description of the filter cube
        used (e.g. "bright field", "GFP", etc.)
        :rtype: list
        """
        return self._channels

    @property
    def fields_of_view(self):
        """
        The metadata contains information about fields of view, even for fields of view that were cropped out.
        We can't find anything that states which fields of view are actually present in the image data, so we have
        to calculate it. There probably is something somewhere, since NIS Elements can figure it out, but we haven't
        found it yet.
        :rtype: list
        """
        return self._fields_of_view

    @property
    def frames(self):
        """
        The acquisition cycles, as a list of integers.
        :rtype: list
        """
        return self._frames

    @property
    def z_levels(self):
        """
        The different levels in the Z-plane. Just a sequence from 0 to n.
        :rtype: list
        """
        return self._z_levels

    @property
    def total_images_per_channel(self):
        """
        The total number of images of a particular channel (wavelength, filter, etc.) in the entire ND2.
        :rtype: int
        """
        return self._total_images_per_channel

    @property
    def pixel_microns(self):
        """
        The width of a pixel in microns.
        :rtype: float
        """
        return self._pixel_microns
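A sketch of how this container might be populated by a parser. The import path and every value here are assumptions for illustration only:

from datetime import datetime

from nd2reader.model.metadata import Metadata  # hypothetical import path for the class above

metadata = Metadata(height=512, width=512,
                    channels=['BF', 'GFP'],
                    date=datetime(2015, 10, 2, 14, 30),
                    fields_of_view=[0, 1, 2, 3],
                    frames=list(range(100)),
                    z_levels=[0, 1, 2],
                    total_images_per_channel=1200,   # 100 cycles * 4 fields of view * 3 z-levels
                    pixel_microns=0.22)

print(metadata.channels)       # ['BF', 'GFP']
print(metadata.pixel_microns)  # 0.22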
@@ -1,81 +0,0 @@
import six
import numpy as np


class Roi(object):
    """
    An ND2 ROI representation.

    Coordinates are the center coordinates of the ROI in (x, y, z) order, in microns.
    Sizes are the sizes of the ROI in (x, y, z) order, in microns.
    Shapes are represented by numbers, defined by constants in this class.
    All of these properties can be set for multiple timepoints (in ms).
    """
    SHAPE_RECTANGLE = 3
    SHAPE_CIRCLE = 9

    TYPE_BACKGROUND = 2

    def __init__(self, raw_roi_dict, metadata):
        """
        :param raw_roi_dict: the raw ROI dictionary parsed from the ND2
        :param metadata: the file's metadata object, used to convert pixels to microns
        """
        self.timepoints = []
        self.positions = []
        self.sizes = []
        self.shape = self.SHAPE_CIRCLE
        self.type = self.TYPE_BACKGROUND

        self._img_width_micron = metadata.width * metadata.pixel_microns
        self._img_height_micron = metadata.height * metadata.pixel_microns
        self._pixel_microns = metadata.pixel_microns

        self._extract_vect_anims(raw_roi_dict)

    def _extract_vect_anims(self, raw_roi_dict):
        """
        Extracts the vector animation parameters from the ROI, i.e. the position and size at each timepoint.
        :param raw_roi_dict: the raw ROI dictionary parsed from the ND2
        """
        number_of_timepoints = raw_roi_dict[six.b('m_vectAnimParams_Size')]

        for i in range(number_of_timepoints):
            self._parse_vect_anim(raw_roi_dict[six.b('m_vectAnimParams_%d') % i])

        self.shape = raw_roi_dict[six.b('m_sInfo')][six.b('m_uiShapeType')]
        self.type = raw_roi_dict[six.b('m_sInfo')][six.b('m_uiInterpType')]

        # convert to NumPy arrays
        self.timepoints = np.array(self.timepoints, dtype=np.float)
        self.positions = np.array(self.positions, dtype=np.float)
        self.sizes = np.array(self.sizes, dtype=np.float)

    def _parse_vect_anim(self, animation_dict):
        """
        Parses an ROI vector animation object and appends its timepoint, position and size to the global lists.
        :param animation_dict: one m_vectAnimParams_* entry of the raw ROI dictionary
        """
        self.timepoints.append(animation_dict[six.b('m_dTimeMs')])

        # positions are taken from the center of the image as a fraction of the half width/height of the image
        position = np.array((0.5 * self._img_width_micron * (1 + animation_dict[six.b('m_dCenterX')]),
                             0.5 * self._img_height_micron * (1 + animation_dict[six.b('m_dCenterY')]),
                             animation_dict[six.b('m_dCenterZ')]))
        self.positions.append(position)

        size_dict = animation_dict[six.b('m_sBoxShape')]
        # sizes are fractions of the half width/height of the image
        self.sizes.append((size_dict[six.b('m_dSizeX')] * 0.25 * self._img_width_micron,
                           size_dict[six.b('m_dSizeY')] * 0.25 * self._img_height_micron,
                           size_dict[six.b('m_dSizeZ')]))

    def is_circle(self):
        return self.shape == self.SHAPE_CIRCLE

    def is_rectangle(self):
        return self.shape == self.SHAPE_RECTANGLE
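To illustrate the coordinate conversion in _parse_vect_anim above, here is a standalone recomputation with made-up numbers; it replicates the formulas rather than calling the Roi class:

# Hypothetical 512 x 512 image at 0.5 um/pixel -> 256 um per side
img_width_micron = 512 * 0.5
img_height_micron = 512 * 0.5

center_x_fraction = -0.5   # as would be stored in m_dCenterX
center_y_fraction = 0.0    # as would be stored in m_dCenterY

# centers are stored as fractions of the half width/height, measured from the image center
x_um = 0.5 * img_width_micron * (1 + center_x_fraction)    # 64.0 um from the left edge
y_um = 0.5 * img_height_micron * (1 + center_y_fraction)   # 128.0 um, i.e. the vertical center

size_x_fraction = 0.25     # as would be stored in m_sBoxShape['m_dSizeX']
width_um = size_x_fraction * 0.25 * img_width_micron        # 16.0 um, same formula as the sizes list

print(x_um, y_um, width_um)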
@@ -1 +0,0 @@
from . parser import get_parser, get_version, parse_version
@@ -1,17 +0,0 @@
from abc import abstractproperty


class BaseParser(object):
    def __init__(self, fh):
        self._fh = fh
        self.camera_metadata = None
        self.metadata = None
        self.roi_metadata = None

    @abstractproperty
    def driver(self):
        """
        Must return an object that can look up and read images.
        """
        raise NotImplementedError
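A minimal sketch of the contract a concrete parser is expected to fulfil; the subclass below is made up purely for illustration (the real implementation in this codebase is V3Parser):

class DummyParser(BaseParser):
    """ A made-up parser: populate the metadata attributes and expose a driver. """
    def __init__(self, fh):
        super(DummyParser, self).__init__(fh)
        self.metadata = None       # a real parser would build a metadata object from the file here
        self.roi_metadata = None

    @property
    def driver(self):
        # must return an object that can look up and read images (e.g. get_image(), get_image_by_attributes())
        raise NotImplementedError("illustration only")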
@@ -1,55 +0,0 @@
from nd2reader.parser.v3 import V3Parser
import re
from nd2reader.exc import InvalidVersionError


def get_parser(fh):
    """
    Picks the appropriate parser based on the ND2 version.
    :type fh: file
    :rtype: a parser object
    """
    major_version, minor_version = get_version(fh)
    parsers = {(3, None): V3Parser}
    parser = parsers.get((major_version, minor_version)) or parsers.get((major_version, None))
    if not parser:
        raise InvalidVersionError("No parser is available for that version.")
    return parser(fh)


def get_version(fh):
    """
    Determines what version the ND2 is.
    :param fh: an open file handle to the ND2
    :type fh: file
    """
    # the first 16 bytes seem to have no meaning, so we skip them
    fh.seek(16)

    # the next 38 bytes contain the string that we want to parse. Unlike most of the ND2, this is in UTF-8
    data = fh.read(38).decode("utf8")
    return parse_version(data)


def parse_version(data):
    """
    Parses a string with the version data in it.
    :param data: the 19th through 54th byte of the ND2, representing the version
    :type data: unicode
    """
    match = re.search(r"""^ND2 FILE SIGNATURE CHUNK NAME01!Ver(?P<major>\d)\.(?P<minor>\d)$""", data)
    if match:
        # We haven't seen a lot of ND2s, but the ones we have seen conform to this
        return int(match.group('major')), int(match.group('minor'))
    raise InvalidVersionError("The version of the ND2 you specified is not supported.")
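For reference, a sketch of the version check in action. The signature string and file path are illustrative only, and the import path is assumed from the package __init__ shown earlier in this diff:

from nd2reader.parser import get_version, parse_version  # import path assumed from the package __init__ above

# A string shaped like the 38-byte signature that get_version() reads (illustrative only)
signature = u"ND2 FILE SIGNATURE CHUNK NAME01!Ver3.0"
print(parse_version(signature))   # -> (3, 0)

# get_version() performs the same parse against an open file handle:
# with open("experiment.nd2", "rb") as fh:   # hypothetical path
#     major, minor = get_version(fh)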
@@ -1,5 +1,5 @@
import unittest
loader = unittest.TestLoader()
-tests = loader.discover('tests', pattern='*.py', top_level_dir='.')
+tests = loader.discover('tests', pattern='test_*.py', top_level_dir='.')
testRunner = unittest.TextTestRunner()
testRunner.run(tests)
@@ -1,42 +0,0 @@
from nd2reader.model.image import Image
import numpy as np
import unittest


class ImageTests(unittest.TestCase):
    """
    Basically just tests that the Image API works and that Images act as Numpy arrays. There's very little going on
    here other than simply storing data.
    """
    def setUp(self):
        array = np.array([[0, 1, 254],
                          [45, 12, 9],
                          [12, 12, 99]])
        self.image = Image(array)
        self.image.add_params(1, 1200.314, 17, 2, 'GFP', 1)

    def test_size(self):
        self.assertEqual(self.image.height, 3)
        self.assertEqual(self.image.width, 3)

    def test_timestamp(self):
        self.assertEqual(self.image.timestamp, 1.200314)

    def test_frame_number(self):
        self.assertEqual(self.image.frame_number, 17)

    def test_fov(self):
        self.assertEqual(self.image.field_of_view, 2)

    def test_channel(self):
        self.assertEqual(self.image.channel, 'GFP')

    def test_z_level(self):
        self.assertEqual(self.image.z_level, 1)

    def test_slice(self):
        subimage = self.image[:2, :2]
        expected = np.array([[0, 1],
                             [45, 12]])
        self.assertTrue(np.array_equal(subimage, expected))