
Merge pull request #137 from jimrybarski/2.0

2.0 → master
Jim Rybarski, 9 years ago
parent commit 68b987c31a
26 changed files with 1087 additions and 634 deletions
  1. .gitignore (+1, -0)
  2. CHANGELOG.md (+21, -4)
  3. Dockerfile (+10, -4)
  4. Makefile (+13, -10)
  5. README.md (+52, -3)
  6. ftest.py (+0, -0)
  7. functional_tests/FYLM141111001.py (+95, -6)
  8. functional_tests/single.py (+7, -0)
  9. nd2reader/__init__.py (+3, -1)
  10. nd2reader/common/v3.py (+113, -1)
  11. nd2reader/driver/v3.py (+112, -49)
  12. nd2reader/interface.py (+0, -165)
  13. nd2reader/main.py (+232, -0)
  14. nd2reader/model/__init__.py (+0, -1)
  15. nd2reader/model/group.py (+0, -37)
  16. nd2reader/model/image.py (+39, -16)
  17. nd2reader/model/label.py (+125, -0)
  18. nd2reader/model/metadata.py (+30, -0)
  19. nd2reader/parser/base.py (+8, -3)
  20. nd2reader/parser/parser.py (+10, -0)
  21. nd2reader/parser/v3.py (+210, -147)
  22. requirements.txt (+1, -0)
  23. setup.py (+3, -2)
  24. test.py (+1, -1)
  25. tests/__init__.py (+0, -183)
  26. tests/model/image.py (+1, -1)

.gitignore (+1, -0)

@ -11,6 +11,7 @@ __pycache__/
# Distribution / packaging
.Python
env/
env27/
bin/
build/
develop-eggs/


CHANGELOG.md (+21, -4)

@ -1,3 +1,20 @@
## [2.0.0] - 2015-12-20
### ADDED
- `select()` method to rapidly iterate over a subset of images matching certain criteria
- We parse metadata relating to the physical camera used to produce the images
- Raw metadata can be accessed conveniently, to allow contributors to find more interesting things to add
- An XML parsing library was added since the raw metadata contains some XML blocks
- The version number is now available in the nd2reader module
- Created a DOI to allow citation of the code
### FIXED
- Channel names were not always being parsed properly
### REMOVED
- The `ImageGroup` container object
- The `data` attribute on Images. Images now inherit from ndarray, making this redundant
- The `image_sets` iterator
## [1.1.4] - 2015-10-27
### FIXED
- Implemented missing get_image_by_attributes method
@ -9,16 +26,16 @@
## [1.1.2] - 2015-10-09
### ADDED
- `Image` objects now have a `frame_number` attribute.
- `Nd2` can be used as a context manager.
- `Nd2` can be used as a context manager
- More unit tests and functional tests
### CHANGED
- `Image` objects now directly subclass Numpy arrays.
- `Image` objects now directly subclass Numpy arrays
- Refactored code to permit parsing of different versions of ND2s, which will allow us to add support for NIS Elements 3.x.
### DEPRECATED
- The `data` attribute is no longer needed since `Image` is now a Numpy array.
- The `image_sets` iterator will be removed in the near future. You should implement this yourself.
- The `data` attribute is no longer needed since `Image` is now a Numpy array
- The `image_sets` iterator will be removed in the near future. You should implement this yourself
## [1.1.1] - 2015-09-02
### FIXED


Dockerfile (+10, -4)

@ -19,20 +19,26 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
python-numpy \
python3-numpy \
libfreetype6-dev \
python-matplotlib \
python3-matplotlib \
libfreetype6-dev \
libpng-dev \
libjpeg-dev \
pkg-config \
python-skimage \
python3-skimage \
tk \
tk-dev \
python-tk \
python3-tk \
&& pip3 install -U cython \
&& pip install -U \
cython \
scikit-image \
xmltodict \
&& pip3 install -U \
cython \
scikit-image \
xmltodict \
&& rm -rf /var/lib/apt/lists/*
COPY . /opt/nd2reader
WORKDIR /opt/nd2reader
RUN python setup.py install
RUN python3 setup.py install

Makefile (+13, -10)

@ -1,34 +1,37 @@
.PHONY: info build shell py2 py3 test
.PHONY: info build shell py2 py3 test ftest publish
info:
@echo ""
@echo "Available Make Commands"
@echo ""
@echo "build: builds the image"
@echo "build: builds the image"
@echo "shell: starts a bash shell in the container"
@echo "py2: maps ~/Documents/nd2s to /var/nd2s and runs a Python 2.7 interpreter"
@echo "py3: maps ~/Documents/nd2s to /var/nd2s and runs a Python 3.4 interpreter"
@echo "test: runs all unit tests (in Python 3.4)"
@echo "test: runs all unit tests (in Python 3.4)"
@echo "ftest: runs all functional tests (requires specific ND2 files that are not publicly available)"
@echo "publish: publishes the code base to PyPI (maintainers only)"
@echo ""
build:
docker build -t jimrybarski/nd2reader .
shell:
xhost local:root; docker run --rm -v ~/nd2s:/var/nd2s -v /tmp/.X11-unix:/tmp/.X11-unix -e DISPLAY=unix$(DISPLAY) -it jimrybarski/nd2reader bash
xhost local:root; docker run --rm -v $(CURDIR):/opt/nd2reader -v ~/nd2s:/var/nd2s -v /tmp/.X11-unix:/tmp/.X11-unix -e DISPLAY=unix$(DISPLAY) -it jimrybarski/nd2reader bash
py2:
xhost local:root; docker run --rm -v ~/nd2s:/var/nd2s -v /tmp/.X11-unix:/tmp/.X11-unix -e DISPLAY=unix$(DISPLAY) -it jimrybarski/nd2reader python2.7
xhost local:root; docker run --rm -v $(CURDIR):/opt/nd2reader -v ~/nd2s:/var/nd2s -v /tmp/.X11-unix:/tmp/.X11-unix -e DISPLAY=unix$(DISPLAY) -it jimrybarski/nd2reader python2.7
py3:
xhost local:root; docker run --rm -v ~/nd2s:/var/nd2s -v /tmp/.X11-unix:/tmp/.X11-unix -e DISPLAY=unix$(DISPLAY) -it jimrybarski/nd2reader python3.4
xhost local:root; docker run --rm -v $(CURDIR):/opt/nd2reader -v ~/nd2s:/var/nd2s -v /tmp/.X11-unix:/tmp/.X11-unix -e DISPLAY=unix$(DISPLAY) -it jimrybarski/nd2reader python3.4
test: build
docker run --rm -it jimrybarski/nd2reader python3.4 /opt/nd2reader/tests.py
docker run --rm -it jimrybarski/nd2reader python2.7 /opt/nd2reader/tests.py
docker run --rm -v $(CURDIR):/opt/nd2reader -it jimrybarski/nd2reader python3.4 test.py
docker run --rm -v $(CURDIR):/opt/nd2reader -it jimrybarski/nd2reader python2.7 test.py
ftest: build
docker run --rm -v ~/nd2s:/var/nd2s -it jimrybarski/nd2reader python3.4 /opt/nd2reader/ftests.py
docker run --rm -v ~/nd2s:/var/nd2s -it jimrybarski/nd2reader python2.7 /opt/nd2reader/ftests.py
xhost local:root; docker run --rm -v /tmp/.X11-unix:/tmp/.X11-unix -e DISPLAY=unix$(DISPLAY) -v $(CURDIR):/opt/nd2reader -v ~/nd2s:/var/nd2s -it jimrybarski/nd2reader python3.4 /opt/nd2reader/ftest.py
docker run --rm -v $(CURDIR):/opt/nd2reader -v ~/nd2s:/var/nd2s -it jimrybarski/nd2reader python2.7 /opt/nd2reader/ftest.py
publish:
python setup.py sdist upload -r pypi

README.md (+52, -3)

@ -10,7 +10,7 @@
### Installation
If you don't already have the packages `numpy` and `six`, they will be installed automatically:
If you don't already have the packages `numpy`, `six` and `xmltodict`, they will be installed automatically:
`pip3 install nd2reader` for Python 3.x
@ -68,6 +68,15 @@ array([[1894, 1949, 1941, ..., 2104, 2135, 2114],
0
```
If you only want to view images that meet certain criteria, you can use `select()`. It's much faster than iterating
and checking attributes of images manually. You can specify scalars or lists of values. Criteria that aren't specified
default to every possible value. Currently, slicing and selecting can't be done at the same time:
```python
for image in nd2.select(channels="GFP", fields_of_view=(1, 2, 7)):
do_something(image)
```
Slicing is also supported and is extremely memory efficient, as images are only read when directly accessed:
```python
@ -101,10 +110,50 @@ The `Nd2` object has some programmatically-accessible metadata:
30528
```
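The slicing behaviour described above is implemented by `__getitem__` and `_slice` in `nd2reader/main.py` (shown later in this commit); a hedged sketch of what that usage looks like, assuming `nd2` is an open `Nd2` object:
```python
# Sketch only: images are read lazily, one at a time, as the slice is consumed
for image in nd2[:20:2]:
    # every other image among the first twenty; gap images come back as None
    if image is not None:
        do_something(image)

for image in nd2[::-1]:
    # a negative step iterates backwards over the whole file
    if image is not None:
        do_something(image)
```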
Each camera has its own settings. If you image multiple wavelengths with one camera, each channel will appear as its
own camera:
```python
>>> nd2.camera_settings
{'GFP': <Camera Settings: GFP>
Camera: Andor Zyla VSC-00461
Camera ID: VSC-00461
Exposure Time (ms): 100.0
Binning: 2x2, 'BF': <Camera Settings: BF>
Camera: Andor Zyla VSC-00461
Camera ID: VSC-00461
Exposure Time (ms): 100.0
Binning: 2x2}
```
Camera information can be accessed programmatically:
```python
>>> nd2.camera_settings['GFP'].id
'VSC-00461'
>>> nd2.camera_settings['GFP'].name
'Andor Zyla VSC-00461'
>>> nd2.camera_settings['GFP'].exposure
100.0
>>> nd2.camera_settings['GFP'].x_binning
2
>>> nd2.camera_settings['GFP'].y_binning
2
```
### Citation
You can cite nd2reader in your research if you want:
```
Rybarski, Jim (2015): nd2reader. figshare.
http://dx.doi.org/10.6084/m9.figshare.1619960
```
### Bug Reports and Features
If this fails to work exactly as expected, please open a Github issue. If you get an unhandled exception, please
paste the entire stack trace into the issue as well.
If this fails to work exactly as expected, please open an [issue](https://github.com/jimrybarski/nd2reader/issues).
If you get an unhandled exception, please paste the entire stack trace into the issue as well.
### Contributing


ftests.py → ftest.py


functional_tests/FYLM141111001.py (+95, -6)

@ -4,6 +4,7 @@ run them unless you're Jim Rybarski.
"""
from nd2reader import Nd2
import numpy as np
from datetime import datetime
import unittest
@ -22,8 +23,9 @@ class FunctionalTests(unittest.TestCase):
def test_date(self):
self.assertEqual(self.nd2.date, datetime(2014, 11, 11, 15, 59, 19))
def test_length(self):
self.assertEqual(len(self.nd2), 30528)
# def test_length(self):
# # This will fail until we address issue #59
# self.assertEqual(len(self.nd2), 17808)
def test_frames(self):
self.assertEqual(len(self.nd2.frames), 636)
@ -32,7 +34,7 @@ class FunctionalTests(unittest.TestCase):
self.assertEqual(len(self.nd2.fields_of_view), 8)
def test_channels(self):
self.assertTupleEqual(tuple(sorted(self.nd2.channels)), ('', 'GFP'))
self.assertTupleEqual(tuple(sorted(self.nd2.channels)), ('BF', 'GFP'))
def test_z_levels(self):
self.assertTupleEqual(tuple(self.nd2.z_levels), (0, 1, 2))
@ -42,7 +44,7 @@ class FunctionalTests(unittest.TestCase):
self.assertEqual(image.field_of_view, 2)
self.assertEqual(image.frame_number, 0)
self.assertAlmostEqual(image.timestamp, 19.0340758)
self.assertEqual(image.channel, '')
self.assertEqual(image.channel, 'BF')
self.assertEqual(image.z_level, 1)
self.assertEqual(image.height, self.nd2.height)
self.assertEqual(image.width, self.nd2.width)
@ -70,11 +72,98 @@ class FunctionalTests(unittest.TestCase):
def test_get_image_by_attribute_ok(self):
image = self.nd2.get_image(4, 0, "GFP", 1)
self.assertIsNotNone(image)
image = self.nd2.get_image(4, 0, "", 0)
image = self.nd2.get_image(4, 0, "BF", 0)
self.assertIsNotNone(image)
image = self.nd2.get_image(4, 0, "", 1)
image = self.nd2.get_image(4, 0, "BF", 1)
self.assertIsNotNone(image)
def test_images(self):
self.assertTupleEqual((self.nd2[0].z_level, self.nd2[0].channel), (0, 'BF'))
self.assertIsNone(self.nd2[1])
self.assertTupleEqual((self.nd2[2].z_level, self.nd2[2].channel), (1, 'BF'))
self.assertTupleEqual((self.nd2[3].z_level, self.nd2[3].channel), (1, 'GFP'))
self.assertTupleEqual((self.nd2[4].z_level, self.nd2[4].channel), (2, 'BF'))
self.assertIsNone(self.nd2[5])
self.assertTupleEqual((self.nd2[6].z_level, self.nd2[6].channel), (0, 'BF'))
self.assertIsNone(self.nd2[7])
self.assertTupleEqual((self.nd2[8].z_level, self.nd2[8].channel), (1, 'BF'))
self.assertTupleEqual((self.nd2[9].z_level, self.nd2[9].channel), (1, 'GFP'))
self.assertTupleEqual((self.nd2[10].z_level, self.nd2[10].channel), (2, 'BF'))
self.assertIsNone(self.nd2[11])
self.assertTupleEqual((self.nd2[12].z_level, self.nd2[12].channel), (0, 'BF'))
self.assertIsNone(self.nd2[13])
self.assertTupleEqual((self.nd2[14].z_level, self.nd2[14].channel), (1, 'BF'))
self.assertTupleEqual((self.nd2[15].z_level, self.nd2[15].channel), (1, 'GFP'))
self.assertTupleEqual((self.nd2[16].z_level, self.nd2[16].channel), (2, 'BF'))
self.assertIsNone(self.nd2[17])
self.assertTupleEqual((self.nd2[18].z_level, self.nd2[18].channel), (0, 'BF'))
self.assertIsNone(self.nd2[19])
self.assertIsNone(self.nd2[47])
self.assertTupleEqual((self.nd2[48].z_level, self.nd2[48].channel), (0, 'BF'))
self.assertIsNone(self.nd2[49])
self.assertTupleEqual((self.nd2[50].z_level, self.nd2[50].channel), (1, 'BF'))
self.assertIsNone(self.nd2[51])
self.assertTupleEqual((self.nd2[52].z_level, self.nd2[52].channel), (2, 'BF'))
self.assertIsNone(self.nd2[53])
self.assertTupleEqual((self.nd2[54].z_level, self.nd2[54].channel), (0, 'BF'))
def test_get_image_by_attribute_none(self):
# Should handle missing images without an exception
image = self.nd2.get_image(4, 0, "GFP", 0)
self.assertIsNone(image)
def test_index(self):
# Do indexes get added to images properly?
for n, image in enumerate(self.nd2):
if image is not None:
self.assertEqual(n, image.index)
if n > 50:
break
def test_select(self):
# If we take the first 20 GFP images, they should be identical to the first 20 items iterated from select()
# if we set our criteria to just "GFP"
manual_images = []
for _, image in zip(range(20), self.nd2):
if image is not None and image.channel == 'GFP':
manual_images.append(image)
filter_images = []
for image in self.nd2.select(channels='GFP'):
filter_images.append(image)
if len(filter_images) == len(manual_images):
break
self.assertEqual(len(manual_images), len(filter_images))
self.assertGreater(len(manual_images), 0)
for a, b in zip(manual_images, filter_images):
self.assertTrue(np.array_equal(a, b))
self.assertEqual(a.index, b.index)
self.assertEqual(a.field_of_view, b.field_of_view)
self.assertEqual(a.channel, b.channel)
def test_filter_order_all(self):
# If we select every possible image using select(), we should just get every image in order
n = 0
for image in self.nd2.select(channels=['BF', 'GFP'], z_levels=[0, 1, 2], fields_of_view=list(range(8))):
while True:
indexed_image = self.nd2[n]
if indexed_image is not None:
break
n += 1
self.assertTrue(np.array_equal(image, indexed_image))
n += 1
if n > 100:
break
def test_filter_order_subset(self):
# Test that images are always yielded in increasing order. This guarantees that no matter what subset of images
# we're filtering, we still get them in the chronological order they were acquired
n = -1
for image in self.nd2.select(channels='BF', z_levels=[0, 1], fields_of_view=[1, 2, 4]):
self.assertGreater(image.index, n)
self.assertEqual(image.channel, 'BF')
self.assertIn(image.field_of_view, (1, 2, 4))
self.assertIn(image.z_level, (0, 1))
n = image.index
if n > 100:
break

functional_tests/single.py (+7, -0)

@ -25,6 +25,13 @@ class FunctionalTests(unittest.TestCase):
def test_length(self):
self.assertEqual(len(self.nd2), 1)
def test_actual_length(self):
count = 0
for image in self.nd2:
if image is not None:
count += 1
self.assertEqual(len(self.nd2), count)
def test_frames(self):
self.assertEqual(len(self.nd2.frames), 1)


nd2reader/__init__.py (+3, -1)

@ -1 +1,3 @@
from nd2reader.interface import Nd2
from nd2reader.main import Nd2
__version__ = '2.0.0'

nd2reader/common/v3.py (+113, -1)

@ -1,13 +1,21 @@
import struct
import array
import six
def read_chunk(fh, chunk_location):
"""
Gets the data for a given chunk pointer
Reads a piece of data given the location of its pointer.
:param fh: an open file handle to the ND2
:param chunk_location: a pointer
:type chunk_location: int
:rtype: bytes
"""
if chunk_location is None:
return None
fh.seek(chunk_location)
# The chunk metadata is always 16 bytes long
chunk_metadata = fh.read(16)
@ -18,3 +26,107 @@ def read_chunk(fh, chunk_location):
# start of the actual data field, which is at some arbitrary place after the metadata.
fh.seek(chunk_location + 16 + relative_offset)
return fh.read(data_length)
def read_array(fh, kind, chunk_location):
kinds = {'double': 'd',
'int': 'i',
'float': 'f'}
if kind not in kinds:
raise ValueError('You attempted to read an array of an unknown type.')
raw_data = read_chunk(fh, chunk_location)
if raw_data is None:
return None
return array.array(kinds[kind], raw_data)
def _parse_unsigned_char(data):
return struct.unpack("B", data.read(1))[0]
def _parse_unsigned_int(data):
return struct.unpack("I", data.read(4))[0]
def _parse_unsigned_long(data):
return struct.unpack("Q", data.read(8))[0]
def _parse_double(data):
return struct.unpack("d", data.read(8))[0]
def _parse_string(data):
value = data.read(2)
while not value.endswith(six.b("\x00\x00")):
# the string ends at the first instance of \x00\x00
value += data.read(2)
return value.decode("utf16")[:-1].encode("utf8")
def _parse_char_array(data):
array_length = struct.unpack("Q", data.read(8))[0]
return array.array("B", data.read(array_length))
def _parse_metadata_item(data, cursor_position):
"""
Reads hierarchical data, analogous to a Python dict.
"""
new_count, length = struct.unpack("<IQ", data.read(12))
length -= data.tell() - cursor_position
next_data_length = data.read(length)
value = read_metadata(next_data_length, new_count)
# Skip some offsets
data.read(new_count * 8)
return value
def _get_value(data, data_type, cursor_position):
"""
ND2s use various codes to indicate different data types, which we translate here.
"""
parser = {1: _parse_unsigned_char,
2: _parse_unsigned_int,
3: _parse_unsigned_int,
5: _parse_unsigned_long,
6: _parse_double,
8: _parse_string,
9: _parse_char_array,
11: _parse_metadata_item}
return parser[data_type](data) if data_type < 11 else parser[data_type](data, cursor_position)
def read_metadata(data, count):
"""
Iterates over each element of some section of the metadata and parses it.
"""
if data is None:
return None
data = six.BytesIO(data)
metadata = {}
for _ in range(count):
cursor_position = data.tell()
header = data.read(2)
if not header:
# We've reached the end of some hierarchy of data
break
if six.PY3:
header = header.decode("utf8")
data_type, name_length = map(ord, header)
name = data.read(name_length * 2).decode("utf16")[:-1].encode("utf8")
value = _get_value(data, data_type, cursor_position)
if name not in metadata.keys():
metadata[name] = value
else:
if not isinstance(metadata[name], list):
# We have encountered this key exactly once before. Since we're seeing it again, we know we
# need to convert it to a list before proceeding.
metadata[name] = [metadata[name]]
# We've encountered this key before so we're guaranteed to be dealing with a list. Thus we append
# the value to the already-existing list.
metadata[name].append(value)
return metadata
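
The subtle part of `read_metadata` is the duplicate-key handling: the first repeat of a key converts its value into a list, and later repeats append to it. A minimal sketch of that merge logic in plain Python, using made-up key/value pairs rather than bytes from a real ND2:
```python
# Synthetic key/value pairs standing in for parsed (name, value) items
metadata = {}
for name, value in [("channel", "BF"), ("channel", "GFP"), ("height", 512)]:
    if name not in metadata:
        metadata[name] = value
    else:
        if not isinstance(metadata[name], list):
            # Second occurrence of a key: wrap the existing scalar in a list
            metadata[name] = [metadata[name]]
        metadata[name].append(value)

assert metadata == {"channel": ["BF", "GFP"], "height": 512}
```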

nd2reader/driver/v3.py (+112, -49)

@ -3,58 +3,137 @@
import array
import numpy as np
import struct
import six
from nd2reader.model.image import Image
from nd2reader.common.v3 import read_chunk
from nd2reader.exc import NoImageError
class V3Driver(object):
"""
Accesses images from ND2 files made with NIS Elements 4.x. Confusingly, files of this type have a version number of 3.0+.
"""
def __init__(self, metadata, label_map, file_handle):
"""
:param metadata: a Metadata object
:param label_map: a raw dictionary of pointers to image locations
:param file_handle: an open file handle to the ND2
"""
self._metadata = metadata
self._label_map = label_map
self._file_handle = file_handle
def _calculate_field_of_view(self, frame_number):
def calculate_image_properties(self, index):
field_of_view = self._calculate_field_of_view(index)
channel = self._calculate_channel(index)
z_level = self._calculate_z_level(index)
return field_of_view, channel, z_level
def get_image(self, index):
"""
Creates an Image object and adds its metadata, based on the index (which is simply the order in which the image was acquired). May return None if the ND2 contains
multiple channels and not all of them were acquired in every cycle (for example, if you take bright field images every minute and GFP images every five minutes, some
indexes will not contain an image). The reason for this is complicated, but suffice it to say that we hope to eliminate this possibility in future releases. For now,
you'll need to check whether your image is None if you're doing anything out of the ordinary.
:type index: int
:rtype: Image or None
"""
field_of_view, channel, z_level = self.calculate_image_properties(index)
channel_offset = index % len(self._metadata.channels)
image_group_number = int(index / len(self._metadata.channels))
frame_number = self._calculate_frame_number(image_group_number, field_of_view, z_level)
try:
timestamp, image = self._get_raw_image_data(image_group_number, channel_offset, self._metadata.height, self._metadata.width)
except NoImageError:
return None
else:
image.add_params(index, timestamp, frame_number, field_of_view, channel, z_level)
return image
def get_image_by_attributes(self, frame_number, field_of_view, channel_name, z_level, height, width):
"""
Attempts to get Image based on attributes alone.
:type frame_number: int
:type field_of_view: int
:type channel_name: str
:type z_level: int
:type height: int
:type width: int
:rtype: Image or None
"""
image_group_number = self._calculate_image_group_number(frame_number, field_of_view, z_level)
try:
timestamp, raw_image_data = self._get_raw_image_data(image_group_number,
self._channel_offset[channel_name],
height,
width)
image = Image(raw_image_data)
image.add_params(image_group_number, timestamp, frame_number, field_of_view, channel_name, z_level)
except (TypeError, NoImageError):
return None
else:
return image
def _calculate_field_of_view(self, index):
"""
Determines which field of view a given image was taken in.
:type index: int
:rtype: int
"""
images_per_cycle = len(self._metadata.z_levels) * len(self._metadata.channels)
return int((frame_number - (frame_number % images_per_cycle)) / images_per_cycle) % len(self._metadata.fields_of_view)
return int((index - (index % images_per_cycle)) / images_per_cycle) % len(self._metadata.fields_of_view)
def _calculate_channel(self, index):
"""
Determines which channel a particular image belongs to.
:type index: int
:rtype: str
def _calculate_channel(self, frame_number):
return self._metadata.channels[frame_number % len(self._metadata.channels)]
"""
return self._metadata.channels[index % len(self._metadata.channels)]
def _calculate_z_level(self, index):
"""
Determines which plane along the z-axis a given image was taken at. In the future, this will be replaced with the actual offset in micrometers.
def _calculate_z_level(self, frame_number):
return self._metadata.z_levels[int(((frame_number - (frame_number % len(self._metadata.channels))) / len(self._metadata.channels)) % len(self._metadata.z_levels))]
:type index: int
:rtype: int
"""
return self._metadata.z_levels[int(((index - (index % len(self._metadata.channels))) / len(self._metadata.channels)) % len(self._metadata.z_levels))]
def _calculate_image_group_number(self, time_index, fov, z_level):
def _calculate_image_group_number(self, frame_number, fov, z_level):
"""
Images are grouped together if they share the same time index, field of view, and z-level.
:type time_index: int
:type frame_number: int
:type fov: int
:type z_level: int
:rtype: int
"""
return time_index * len(self._metadata.fields_of_view) * len(self._metadata.z_levels) + (fov * len(self._metadata.z_levels) + z_level)
return frame_number * len(self._metadata.fields_of_view) * len(self._metadata.z_levels) + (fov * len(self._metadata.z_levels) + z_level)
def _calculate_frame_number(self, image_group_number, fov, z_level):
return (image_group_number - (fov * len(self._metadata.z_levels) + z_level)) / (len(self._metadata.fields_of_view) * len(self._metadata.z_levels))
def _calculate_frame_number(self, image_group_number, field_of_view, z_level):
"""
Images are in the same frame if they share the same group number and field of view and are taken sequentially.
def get_image(self, index):
channel_offset = index % len(self._metadata.channels)
fov = self._calculate_field_of_view(index)
channel = self._calculate_channel(index)
z_level = self._calculate_z_level(index)
image_group_number = int(index / len(self._metadata.channels))
frame_number = self._calculate_frame_number(image_group_number, fov, z_level)
try:
timestamp, image = self._get_raw_image_data(image_group_number, channel_offset, self._metadata.height, self._metadata.width)
except NoImageError:
return None
else:
image.add_params(timestamp, frame_number, fov, channel, z_level)
return image
:type image_group_number: int
:type field_of_view: int
:type z_level: int
:rtype: int
"""
return (image_group_number - (field_of_view * len(self._metadata.z_levels) + z_level)) / (len(self._metadata.fields_of_view) * len(self._metadata.z_levels))
@property
def _channel_offset(self):
@ -65,24 +144,22 @@ class V3Driver(object):
:rtype: dict
"""
channel_offset = {}
for n, channel in enumerate(self._metadata.channels):
channel_offset[channel] = n
return channel_offset
return {channel: n for n, channel in enumerate(self._metadata.channels)}
def _get_raw_image_data(self, image_group_number, channel_offset, height, width):
"""
Reads the raw bytes and the timestamp of an image.
:param image_group_number: groups are made of images with the same time index, field of view and z-level.
:param image_group_number: groups are made of images with the same time index, field of view and z-level
:type image_group_number: int
:param channel_offset: the offset in the array where the bytes for this image are found.
:param channel_offset: the offset in the array where the bytes for this image are found
:type channel_offset: int
:return: (int, array.array()) or None
:rtype: (int, Image)
:raises: NoImageError
"""
chunk = self._label_map[six.b("ImageDataSeq|%d!" % image_group_number)]
chunk = self._label_map.get_image_data_location(image_group_number)
data = read_chunk(self._file_handle, chunk)
# All images in the same image group share the same timestamp! So if you have complicated image data,
# your timestamps may not be entirely accurate. Practically speaking though, they'll only be off by a few
@ -101,17 +178,3 @@ class V3Driver(object):
if np.any(image_data):
return timestamp, Image(image_data)
raise NoImageError
def get_image_by_attributes(self, frame_number, field_of_view, channel_name, z_level, height, width):
image_group_number = self._calculate_image_group_number(frame_number, field_of_view, z_level)
try:
timestamp, raw_image_data = self._get_raw_image_data(image_group_number,
self._channel_offset[channel_name],
height,
width)
image = Image(raw_image_data)
image.add_params(timestamp, frame_number, field_of_view, channel_name, z_level)
except (TypeError, NoImageError):
return None
else:
return image
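
The index arithmetic in `calculate_image_properties` is easier to follow with concrete numbers. A hedged, standalone sketch that mirrors the driver's formulas for a hypothetical ND2 with channels `['BF', 'GFP']`, z-levels `[0, 1, 2]` and eight fields of view (the same shape as the functional test file):
```python
channels = ['BF', 'GFP']          # hypothetical metadata
z_levels = [0, 1, 2]
fields_of_view = list(range(8))

def calculate_image_properties(index):
    # Mirrors V3Driver's arithmetic: channels cycle fastest, then z-levels,
    # within each field of view, before moving on to the next field of view.
    images_per_cycle = len(z_levels) * len(channels)
    field_of_view = int((index - (index % images_per_cycle)) / images_per_cycle) % len(fields_of_view)
    channel = channels[index % len(channels)]
    z_level = z_levels[int(((index - (index % len(channels))) / len(channels)) % len(z_levels))]
    return field_of_view, channel, z_level

# The same expectations the functional tests check for nd2[3] and nd2[6]
assert calculate_image_properties(3) == (0, 'GFP', 1)
assert calculate_image_properties(6) == (1, 'BF', 0)
```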

nd2reader/interface.py (+0, -165)

@ -1,165 +0,0 @@
# -*- coding: utf-8 -*-
from nd2reader.model import ImageGroup
from nd2reader.parser import get_parser
from nd2reader.version import get_version
import warnings
class Nd2(object):
"""
Allows easy access to NIS Elements .nd2 image files.
"""
def __init__(self, filename):
self._filename = filename
self._fh = open(filename, "rb")
major_version, minor_version = get_version(self._fh)
parser = get_parser(self._fh, major_version, minor_version)
self._driver = parser.driver
self._metadata = parser.metadata
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if self._fh is not None:
self._fh.close()
def __repr__(self):
return "\n".join(["<ND2 %s>" % self._filename,
"Created: %s" % (self.date if self.date is not None else "Unknown"),
"Image size: %sx%s (HxW)" % (self.height, self.width),
"Frames: %s" % len(self.frames),
"Channels: %s" % ", ".join(["'%s'" % str(channel) for channel in self.channels]),
"Fields of View: %s" % len(self.fields_of_view),
"Z-Levels: %s" % len(self.z_levels)
])
def __len__(self):
"""
This should be the total number of images in the ND2, but it may be inaccurate. If the ND2 contains a
different number of images in a cycle (i.e. there are "gap" images) it will be higher than reality.
:rtype: int
"""
return self._metadata.total_images_per_channel * len(self.channels)
def __getitem__(self, item):
"""
Allows slicing ND2s.
:type item: int or slice
:rtype: nd2reader.model.Image() or generator
"""
if isinstance(item, int):
try:
image = self._driver.get_image(item)
except KeyError:
raise IndexError
else:
return image
elif isinstance(item, slice):
return self._slice(item.start, item.stop, item.step)
raise IndexError
def _slice(self, start, stop, step):
"""
Allows for iteration over a selection of the entire dataset.
:type start: int
:type stop: int
:type step: int
:rtype: nd2reader.model.Image()
"""
start = start if start is not None else 0
step = step if step is not None else 1
stop = stop if stop is not None else len(self)
# This weird thing with the step allows you to iterate backwards over the images
for i in range(start, stop)[::step]:
yield self[i]
@property
def image_sets(self):
"""
Iterates over groups of related images. This is useful if your ND2 contains multiple fields of view.
A typical use case might be that you have, say, four areas of interest that you're monitoring, and every
minute you take a bright field and GFP image of each one. For each cycle, this method would produce four
ImageSet objects, each containing one bright field and one GFP image.
:return: model.ImageSet()
"""
warnings.warn("Nd2.image_sets will be removed from the nd2reader library in the near future.",
DeprecationWarning)
for frame in self.frames:
image_group = ImageGroup()
for fov in self.fields_of_view:
for channel_name in self.channels:
for z_level in self.z_levels:
image = self.get_image(frame, fov, channel_name, z_level)
if image is not None:
image_group.add(image)
yield image_group
@property
def date(self):
return self._metadata.date
@property
def z_levels(self):
return self._metadata.z_levels
@property
def fields_of_view(self):
return self._metadata.fields_of_view
@property
def channels(self):
return self._metadata.channels
@property
def frames(self):
return self._metadata.frames
@property
def height(self):
"""
:return: height of each image, in pixels
:rtype: int
"""
return self._metadata.height
@property
def width(self):
"""
:return: width of each image, in pixels
:rtype: int
"""
return self._metadata.width
def get_image(self, frame_number, field_of_view, channel_name, z_level):
"""
Returns an Image if data exists for the given parameters, otherwise returns None.
:type frame_number: int
:param field_of_view: the label for the place in the XY-plane where this image was taken.
:type field_of_view: int
:param channel_name: the name of the color of this image
:type channel_name: str
:param z_level: the label for the location in the Z-plane where this image was taken.
:type z_level: int
:rtype: nd2reader.model.Image()
"""
return self._driver.get_image_by_attributes(frame_number, field_of_view, channel_name, z_level, self.height, self.width)
def close(self):
self._fh.close()

nd2reader/main.py (+232, -0)

@ -0,0 +1,232 @@
# -*- coding: utf-8 -*-
from nd2reader.parser import get_parser
from nd2reader.version import get_version
import six
class Nd2(object):
""" Allows easy access to NIS Elements .nd2 image files. """
def __init__(self, filename):
self._filename = filename
self._fh = open(filename, "rb")
major_version, minor_version = get_version(self._fh)
self._parser = get_parser(self._fh, major_version, minor_version)
self._metadata = self._parser.metadata
def __repr__(self):
return "\n".join(["<ND2 %s>" % self._filename,
"Created: %s" % (self.date if self.date is not None else "Unknown"),
"Image size: %sx%s (HxW)" % (self.height, self.width),
"Frames: %s" % len(self.frames),
"Channels: %s" % ", ".join(["%s" % str(channel) for channel in self.channels]),
"Fields of View: %s" % len(self.fields_of_view),
"Z-Levels: %s" % len(self.z_levels)
])
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if self._fh is not None:
self._fh.close()
def __len__(self):
"""
This should be the total number of images in the ND2, but it may be inaccurate. If the ND2 does not contain
the same number of images in every cycle (i.e. there are "gap" images), it will be higher than the actual count.
:rtype: int
"""
return self._metadata.total_images_per_channel * len(self.channels)
def __getitem__(self, item):
"""
Allows slicing ND2s.
:type item: int or slice
:rtype: nd2reader.model.Image() or generator
"""
if isinstance(item, int):
try:
image = self._parser.driver.get_image(item)
except KeyError:
raise IndexError
else:
return image
elif isinstance(item, slice):
return self._slice(item.start, item.stop, item.step)
raise IndexError
def select(self, fields_of_view=None, channels=None, z_levels=None):
"""
Iterates over images matching the given criteria. This can be 2-10 times faster than manually iterating over
the Nd2 and checking the attributes of each image, as this method skips disk reads for any images that don't
meet the criteria.
:type fields_of_view: int or tuple or list
:type channels: str or tuple or list
:type z_levels: int or tuple or list
"""
fields_of_view = self._to_tuple(fields_of_view, self.fields_of_view)
channels = self._to_tuple(channels, self.channels)
z_levels = self._to_tuple(z_levels, self.z_levels)
for frame in self.frames:
field_of_view, channel, z_level = self._parser.driver.calculate_image_properties(frame)
if field_of_view in fields_of_view and channel in channels and z_level in z_levels:
image = self._parser.driver.get_image(frame)
if image is not None:
yield image
@property
def height(self):
"""
The height of each image in pixels.
:rtype: int
"""
return self._metadata.height
@property
def width(self):
"""
The width of each image in pixels.
:rtype: int
"""
return self._metadata.width
@property
def z_levels(self):
"""
A list of integers that represent the different levels on the Z-axis at which images were taken. Currently this is
just a list of numbers from 0 to N. For example, an ND2 where images were taken at -3µm, 0µm, and +5µm from a
set position would be represented by 0, 1 and 2, respectively. ND2s do store the actual offset of each image
in micrometers and in the future this will hopefully be available. For now, however, you will have to match up
the order yourself.
:return: list of int
"""
return self._metadata.z_levels
@property
def fields_of_view(self):
"""
A list of integers representing the various stage locations, in the order they were taken in the first round
of acquisition.
:return: list of int
"""
return self._metadata.fields_of_view
@property
def channels(self):
"""
A list of channel (i.e. wavelength) names. These are set by the user in NIS Elements.
:return: list of str
"""
return self._metadata.channels
@property
def frames(self):
"""
A list of integers representing groups of images. ND2s consider images to be part of the same frame if they
are in the same field of view and don't have the same channel. So if you take a bright field and GFP image at
four different fields of view over and over again, you'll have 8 images and 4 frames per cycle.
:return: list of int
"""
return self._metadata.frames
@property
def camera_settings(self):
"""
Basic information about the physical cameras used.
:return: dict of {channel_name: model.metadata.CameraSettings}
"""
return self._parser.camera_metadata
@property
def date(self):
"""
The date and time that the acquisition began. Not guaranteed to have been recorded.
:rtype: datetime.datetime() or None
"""
return self._metadata.date
def get_image(self, frame_number, field_of_view, channel_name, z_level):
"""
Attempts to return the image with the unique combination of given attributes. None will be returned if a match
is not found.
:type frame_number: int
:param field_of_view: the label for the place in the XY-plane where this image was taken.
:type field_of_view: int
:param channel_name: the name of the color of this image
:type channel_name: str
:param z_level: the label for the location in the Z-plane where this image was taken.
:type z_level: int
:rtype: nd2reader.model.Image() or None
"""
return self._parser.driver.get_image_by_attributes(frame_number,
field_of_view,
channel_name,
z_level,
self.height,
self.width)
def close(self):
"""
Closes the file handle to the image. This can sometimes prevent problems, so it's good to do this explicitly or
to use Nd2 as a context manager.
"""
self._fh.close()
def _slice(self, start, stop, step):
"""
Allows for iteration over a selection of the entire dataset.
:type start: int
:type stop: int
:type step: int
:rtype: nd2reader.model.Image()
"""
start = start if start is not None else 0
step = step if step is not None else 1
stop = stop if stop is not None else len(self)
# This weird thing with the step allows you to iterate backwards over the images
for i in range(start, stop)[::step]:
yield self[i]
def _to_tuple(self, value, default):
"""
Idempotently converts a value to a tuple. This allows users to pass in scalar values and iterables to
select(), which is more ergonomic than having to remember to pass in single-member lists
:type value: int or str or tuple or list
:type default: tuple or list
:rtype: tuple
"""
value = default if value is None else value
return (value,) if isinstance(value, int) or isinstance(value, six.string_types) else tuple(value)
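
A minimal usage sketch of the `Nd2` API defined above, assuming a hypothetical file at `/var/nd2s/example.nd2`; it exercises the context manager, `select()`, and the `None` check that direct indexing requires for "gap" images:
```python
from nd2reader import Nd2

# Hypothetical path; substitute a real ND2 file
with Nd2("/var/nd2s/example.nd2") as nd2:
    # select() skips disk reads for images that don't match the criteria
    for image in nd2.select(channels="GFP", z_levels=0):
        print(image.index, image.field_of_view, image.timestamp)

    # Direct indexing may return None for "gap" images, so check before use
    first = nd2[0]
    if first is not None:
        print(first.shape, first.channel)
```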

nd2reader/model/__init__.py (+0, -1)

@ -1,2 +1 @@
from nd2reader.model.image import Image
from nd2reader.model.group import ImageGroup

nd2reader/model/group.py (+0, -37)

@ -1,37 +0,0 @@
import collections
class ImageGroup(object):
"""
A group of images that were taken at roughly the same time and in the same field of view.
"""
def __init__(self):
self._images = collections.defaultdict(dict)
def __len__(self):
""" The number of images in the image set. """
return sum([len(channel) for channel in self._images.values()])
def __repr__(self):
return "\n".join(["<ND2 Image Group>",
"Image count: %s" % len(self)])
def get(self, channel, z_level=0):
"""
Retrieve an image with a given channel and z-level. For most users, z_level will always be 0.
:type channel: str
:type z_level: int
"""
return self._images.get(channel).get(z_level)
def add(self, image):
"""
Stores an image.
:type image: nd2reader.model.Image()
"""
self._images[image.channel][image.z_level] = image

nd2reader/model/image.py (+39, -16)

@ -1,24 +1,28 @@
# -*- coding: utf-8 -*-
import numpy as np
import warnings
class Image(np.ndarray):
"""
Holds the raw pixel data of an image and provides access to some metadata.
"""
def __new__(cls, array):
return np.asarray(array).view(cls)
def __init__(self, array):
self._index = None
self._timestamp = None
self._frame_number = None
self._field_of_view = None
self._channel = None
self._z_level = None
def add_params(self, timestamp, frame_number, field_of_view, channel, z_level):
def add_params(self, index, timestamp, frame_number, field_of_view, channel, z_level):
"""
Sets the metadata that was recorded when this image was acquired.
:param index: The integer that can be used to directly index this image
:type index: int
:param timestamp: The number of milliseconds after the beginning of the acquisition that this image was taken.
:type timestamp: float
:param frame_number: The order in which this image was taken, with images of different channels/z-levels
@ -32,26 +36,43 @@ class Image(np.ndarray):
:type z_level: int
"""
self._index = index
self._timestamp = timestamp
self._frame_number = int(frame_number)
self._field_of_view = field_of_view
self._channel = channel
self._z_level = z_level
@property
def index(self):
return self._index
@property
def height(self):
"""
The height in pixels.
:rtype: int
"""
return self.shape[0]
@property
def width(self):
"""
The width in pixels.
:rtype: int
"""
return self.shape[1]
@property
def field_of_view(self):
"""
Which of the fixed locations this image was taken at.
The index of the stage location where this image was acquired.
:rtype int:
:rtype: int
"""
return self._field_of_view
@ -61,16 +82,23 @@ class Image(np.ndarray):
"""
The number of seconds after the beginning of the acquisition that the image was taken. Note that for a given
field of view and z-level offset, if you have images of multiple channels, they will all be given the same
timestamp. No, this doesn't make much sense. But that's how ND2s are structured, so if your experiment depends
on millisecond accuracy, you need to find an alternative imaging system.
timestamp. That's just how ND2s are structured, so if your experiment depends on millisecond accuracy,
you need to find an alternative imaging system.
:rtype float:
:rtype: float
"""
# data is actually stored in milliseconds
return self._timestamp / 1000.0
@property
def frame_number(self):
"""
The index of the group of images taken sequentially that all have the same group number and field of view.
:rtype: int
"""
return self._frame_number
@property
@ -78,7 +106,7 @@ class Image(np.ndarray):
"""
The name of the filter used to acquire this image. These are user-supplied in NIS Elements.
:rtype str:
:rtype: str
"""
return self._channel
@ -95,12 +123,7 @@ class Image(np.ndarray):
0 µm: 1
+3 µm: 2
:rtype int:
:rtype: int
"""
return self._z_level
@property
def data(self):
warnings.warn("Image objects now directly subclass Numpy arrays, so using the data attribute will be removed in the near future.", DeprecationWarning)
return self
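
Because `Image` subclasses `numpy.ndarray` directly, pixel data and acquisition metadata live on the same object. A hedged sketch with synthetic pixels and made-up metadata values:
```python
import numpy as np
from nd2reader.model.image import Image

# Synthetic 16-bit frame standing in for real ND2 pixel data
image = Image(np.zeros((512, 512), dtype=np.uint16))
# Made-up metadata, in the order add_params expects
image.add_params(index=0, timestamp=19034.0758, frame_number=0,
                 field_of_view=2, channel='BF', z_level=1)

print(image.mean())               # ordinary ndarray operations work directly
print(image.channel)              # 'BF'
print(image.timestamp)            # 19.0340758 -- stored in ms, exposed in seconds
print(image.height, image.width)  # 512 512
```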

nd2reader/model/label.py (+125, -0)

@ -0,0 +1,125 @@
import six
import struct
import re
class LabelMap(object):
"""
Contains pointers to metadata. This might only be valid for V3 files.
"""
def __init__(self, raw_binary_data):
self._data = raw_binary_data
self._image_data = {}
def image_attributes(self):
return self._get_location(six.b("ImageAttributesLV!"))
def _get_location(self, label):
try:
label_location = self._data.index(label) + len(label)
return self._parse_data_location(label_location)
except ValueError:
return None
def _parse_data_location(self, label_location):
location, length = struct.unpack("QQ", self._data[label_location: label_location + 16])
return location
@property
def image_text_info(self):
return self._get_location(six.b("ImageTextInfoLV!"))
@property
def image_metadata(self):
return self._get_location(six.b("ImageMetadataLV!"))
@property
def image_metadata_sequence(self):
# there is always only one of these, even though it has a pipe followed by a zero, which is how they do indexes
return self._get_location(six.b("ImageMetadataSeqLV|0!"))
def get_image_data_location(self, index):
if not self._image_data:
regex = re.compile(six.b("""ImageDataSeq\|(\d+)!"""))
for match in regex.finditer(self._data):
if match:
location = self._parse_data_location(match.end())
self._image_data[int(match.group(1))] = location
return self._image_data[index]
@property
def image_calibration(self):
return self._get_location(six.b("ImageCalibrationLV|0!"))
@property
def image_attributes(self):
return self._get_location(six.b("ImageAttributesLV!"))
@property
def x_data(self):
return self._get_location(six.b("CustomData|X!"))
@property
def y_data(self):
return self._get_location(six.b("CustomData|Y!"))
@property
def z_data(self):
return self._get_location(six.b("CustomData|Z!"))
@property
def roi_metadata(self):
return self._get_location(six.b("CustomData|RoiMetadata_v1!"))
@property
def pfs_status(self):
return self._get_location(six.b("CustomData|PFS_STATUS!"))
@property
def pfs_offset(self):
return self._get_location(six.b("CustomData|PFS_OFFSET!"))
@property
def guid(self):
return self._get_location(six.b("CustomData|GUIDStore!"))
@property
def description(self):
return self._get_location(six.b("CustomData|CustomDescriptionV1_0!"))
@property
def camera_exposure_time(self):
return self._get_location(six.b("CustomData|Camera_ExposureTime1!"))
@property
def camera_temp(self):
return self._get_location(six.b("CustomData|CameraTemp1!"))
@property
def acquisition_times(self):
return self._get_location(six.b("CustomData|AcqTimesCache!"))
@property
def acquisition_times_2(self):
return self._get_location(six.b("CustomData|AcqTimes2Cache!"))
@property
def acquisition_frames(self):
return self._get_location(six.b("CustomData|AcqFramesCache!"))
@property
def lut_data(self):
return self._get_location(six.b("CustomDataVar|LUTDataV1_0!"))
@property
def grabber_settings(self):
return self._get_location(six.b("CustomDataVar|GrabberCameraSettingsV1_0!"))
@property
def custom_data(self):
return self._get_location(six.b("CustomDataVar|CustomDataV2_0!"))
@property
def app_info(self):
return self._get_location(six.b("CustomDataVar|AppInfo_V1_0!"))
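
Every `LabelMap` lookup relies on the same layout: a label string terminated by `!`, followed immediately by a 64-bit location and a 64-bit length. A hedged sketch against a synthetic blob (real label maps come out of the ND2's chunk map):
```python
import struct
import six

# Synthetic stand-in for the raw label map bytes
label = six.b("ImageDataSeq|7!")
raw_data = six.b("junk") + label + struct.pack("QQ", 4096, 512)

# What _get_location / _parse_data_location do for a given label
offset = raw_data.index(label) + len(label)
location, length = struct.unpack("QQ", raw_data[offset: offset + 16])
assert (location, length) == (4096, 512)
```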

nd2reader/model/metadata.py (+30, -0)

@ -1,3 +1,6 @@
import six
class Metadata(object):
""" A simple container for ND2 metadata. """
def __init__(self, height, width, channels, date, fields_of_view, frames, z_levels, total_images_per_channel):
@ -86,4 +89,31 @@ class Metadata(object):
@property
def total_images_per_channel(self):
"""
The total number of images of a particular channel (wavelength, filter, etc) in the entire ND2.
:rtype: int
"""
return self._total_images_per_channel
class CameraSettings(object):
""" Contains some basic information about a physical camera and its settings. """
def __init__(self, name, id, exposure, x_binning, y_binning, channel_name):
self.name = name.decode("utf8")
self.id = id.decode("utf8")
self.exposure = exposure
self.x_binning = int(x_binning)
self.y_binning = int(y_binning)
self.channel_name = channel_name
if six.PY3:
self.channel_name = self.channel_name.decode("utf8") if channel_name is not None else None
def __repr__(self):
return "\n".join(["<Camera Settings: %s>" % self.channel_name,
"Camera: %s" % self.name,
"Camera ID: %s" % self.id,
"Exposure Time (ms): %s" % self.exposure,
"Binning: %sx%s" % (self.x_binning, self.y_binning)
])
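
A hedged illustration of constructing `CameraSettings` by hand, reusing the values shown in the README section above; in real use the parser builds these from raw metadata, which is why the names arrive as bytes:
```python
from nd2reader.model.metadata import CameraSettings

settings = CameraSettings(name=b"Andor Zyla VSC-00461", id=b"VSC-00461",
                          exposure=100.0, x_binning=2.0, y_binning=2.0,
                          channel_name=b"GFP")

print(settings.id)         # 'VSC-00461'
print(settings.x_binning)  # 2
print(settings)            # the multi-line repr shown in the README
```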

nd2reader/parser/base.py (+8, -3)

@ -2,10 +2,15 @@ from abc import abstractproperty
class BaseParser(object):
@abstractproperty
def metadata(self):
raise NotImplementedError
def __init__(self, fh):
self._fh = fh
self.camera_metadata = None
self.metadata = None
@abstractproperty
def driver(self):
"""
Must return an object that can look up and read images.
"""
raise NotImplementedError

nd2reader/parser/parser.py (+10, -0)

@ -3,6 +3,16 @@ from nd2reader.exc import InvalidVersionError
def get_parser(fh, major_version, minor_version):
"""
Picks the appropriate parser based on the ND2 version.
:type fh: file
:type major_version: int
:type minor_version: int
:rtype: a parser object
"""
parsers = {(3, None): V3Parser}
parser = parsers.get((major_version, minor_version)) or parsers.get((major_version, None))
if not parser:
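
The dispatch in `get_parser` tries an exact `(major, minor)` match first, then falls back to whatever is registered for the major version alone. A hedged sketch of that lookup, with a placeholder string standing in for the real `V3Parser` class:
```python
# Placeholder string instead of the real V3Parser class, to show the lookup only
parsers = {(3, None): "V3Parser"}

def pick(major_version, minor_version):
    # Exact (major, minor) match first, then the major-version fallback
    return parsers.get((major_version, minor_version)) or parsers.get((major_version, None))

assert pick(3, 0) == "V3Parser"   # no exact entry, falls back to (3, None)
assert pick(4, 0) is None         # nothing registered for this version
```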


nd2reader/parser/v3.py (+210, -147)

@ -1,68 +1,210 @@
# -*- coding: utf-8 -*-
import array
from datetime import datetime
from nd2reader.model.metadata import Metadata
from nd2reader.model.metadata import Metadata, CameraSettings
from nd2reader.model.label import LabelMap
from nd2reader.parser.base import BaseParser
from nd2reader.driver.v3 import V3Driver
from nd2reader.common.v3 import read_chunk
from nd2reader.common.v3 import read_chunk, read_array, read_metadata
import re
import six
import struct
import xmltodict
def ignore_missing(func):
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except:
return None
return wrapper
class V3RawMetadata(object):
def __init__(self, fh, label_map):
self._fh = fh
self._label_map = label_map
@property
@ignore_missing
def image_text_info(self):
return read_metadata(read_chunk(self._fh, self._label_map.image_text_info), 1)
@property
@ignore_missing
def image_metadata_sequence(self):
return read_metadata(read_chunk(self._fh, self._label_map.image_metadata_sequence), 1)
@property
@ignore_missing
def image_calibration(self):
return read_metadata(read_chunk(self._fh, self._label_map.image_calibration), 1)
@property
@ignore_missing
def image_attributes(self):
return read_metadata(read_chunk(self._fh, self._label_map.image_attributes), 1)
@property
@ignore_missing
def x_data(self):
return read_array(self._fh, 'double', self._label_map.x_data)
@property
@ignore_missing
def y_data(self):
return read_array(self._fh, 'double', self._label_map.y_data)
@property
@ignore_missing
def z_data(self):
return read_array(self._fh, 'double', self._label_map.z_data)
@property
@ignore_missing
def roi_metadata(self):
return read_metadata(read_chunk(self._fh, self._label_map.roi_metadata), 1)
@property
@ignore_missing
def pfs_status(self):
return read_array(self._fh, 'int', self._label_map.pfs_status)
@property
@ignore_missing
def pfs_offset(self):
return read_array(self._fh, 'int', self._label_map.pfs_offset)
@property
@ignore_missing
def camera_exposure_time(self):
return read_array(self._fh, 'double', self._label_map.camera_exposure_time)
@property
@ignore_missing
def lut_data(self):
return xmltodict.parse(read_chunk(self._fh, self._label_map.lut_data))
@property
@ignore_missing
def grabber_settings(self):
return xmltodict.parse(read_chunk(self._fh, self._label_map.grabber_settings))
@property
@ignore_missing
def custom_data(self):
return xmltodict.parse(read_chunk(self._fh, self._label_map.custom_data))
@property
@ignore_missing
def app_info(self):
return xmltodict.parse(read_chunk(self._fh, self._label_map.app_info))
@property
@ignore_missing
def camera_temp(self):
camera_temp = read_array(self._fh, 'double', self._label_map.camera_temp)
if camera_temp:
for temp in map(lambda x: round(x * 100.0, 2), camera_temp):
yield temp
@property
@ignore_missing
def acquisition_times(self):
acquisition_times = read_array(self._fh, 'double', self._label_map.acquisition_times)
if acquisition_times:
for acquisition_time in map(lambda x: x / 1000.0, acquisition_times):
yield acquisition_time
@property
@ignore_missing
def image_metadata(self):
if self._label_map.image_metadata:
return read_metadata(read_chunk(self._fh, self._label_map.image_metadata), 1)
class V3Parser(BaseParser):
""" Parses ND2 files and creates a Metadata and ImageReader object. """
""" Parses ND2 files and creates a Metadata and driver object. """
CHUNK_HEADER = 0xabeceda
CHUNK_MAP_START = six.b("ND2 FILEMAP SIGNATURE NAME 0001!")
CHUNK_MAP_END = six.b("ND2 CHUNK MAP SIGNATURE 0000001!")
def __init__(self, fh):
self._fh = fh
self._metadata = None
self._label_map = None
"""
:type fh: file
@property
def metadata(self):
if not self._metadata:
self._parse_metadata()
return self._metadata
"""
if six.PY3:
super().__init__(fh)
else:
super(V3Parser, self).__init__(fh)
self._label_map = self._build_label_map()
self.raw_metadata = V3RawMetadata(self._fh, self._label_map)
self._parse_camera_metadata()
self._parse_metadata()
@property
def driver(self):
"""
Provides an object that knows how to look up and read images based on an index.
"""
return V3Driver(self.metadata, self._label_map, self._fh)
def _parse_camera_metadata(self):
"""
Gets parsed data about the physical cameras used to produce images and stores it in a dictionary keyed by channel name.
"""
self.camera_metadata = {}
for camera_setting in self._parse_camera_settings():
self.camera_metadata[camera_setting.channel_name] = camera_setting
def _parse_metadata(self):
"""
Reads all metadata.
Reads all metadata and instantiates the Metadata object.
"""
metadata_dict = {}
self._label_map = self._build_label_map()
for label in self._label_map.keys():
if label.endswith(six.b("LV!")) or six.b("LV|") in label:
data = read_chunk(self._fh, self._label_map[label])
stop = label.index(six.b("LV"))
metadata_dict[label[:stop]] = self._read_metadata(data, 1)
height = metadata_dict[six.b('ImageAttributes')][six.b('SLxImageAttributes')][six.b('uiHeight')]
width = metadata_dict[six.b('ImageAttributes')][six.b('SLxImageAttributes')][six.b('uiWidth')]
channels = self._parse_channels(metadata_dict)
date = self._parse_date(metadata_dict)
fields_of_view = self._parse_fields_of_view(metadata_dict)
frames = self._parse_frames(metadata_dict)
z_levels = self._parse_z_levels(metadata_dict)
total_images_per_channel = self._parse_total_images_per_channel(metadata_dict)
self._metadata = Metadata(height, width, channels, date, fields_of_view, frames, z_levels, total_images_per_channel)
def _parse_date(self, metadata_dict):
height = self.raw_metadata.image_attributes[six.b('SLxImageAttributes')][six.b('uiHeight')]
width = self.raw_metadata.image_attributes[six.b('SLxImageAttributes')][six.b('uiWidth')]
date = self._parse_date(self.raw_metadata)
fields_of_view = self._parse_fields_of_view(self.raw_metadata)
frames = self._parse_frames(self.raw_metadata)
z_levels = self._parse_z_levels(self.raw_metadata)
total_images_per_channel = self._parse_total_images_per_channel(self.raw_metadata)
channels = sorted([key for key in self.camera_metadata.keys()])
self.metadata = Metadata(height, width, channels, date, fields_of_view, frames, z_levels, total_images_per_channel)
def _parse_camera_settings(self):
"""
Looks up information in the raw metadata about the camera(s) and puts it into a CameraSettings object.
Duplicate cameras can be returned if the same one was used for multiple channels.
:return:
"""
for camera in self.raw_metadata.image_metadata_sequence[six.b('SLxPictureMetadata')][six.b('sPicturePlanes')][six.b('sSampleSetting')].values():
name = camera[six.b('pCameraSetting')][six.b('CameraUserName')]
id = camera[six.b('pCameraSetting')][six.b('CameraUniqueName')]
exposure = camera[six.b('dExposureTime')]
x_binning = camera[six.b('pCameraSetting')][six.b('FormatFast')][six.b('fmtDesc')][six.b('dBinningX')]
y_binning = camera[six.b('pCameraSetting')][six.b('FormatFast')][six.b('fmtDesc')][six.b('dBinningY')]
optical_configs = camera[six.b('sOpticalConfigs')]
if six.b('') in optical_configs.keys():
channel_name = optical_configs[six.b('')][six.b('sOpticalConfigName')]
else:
channel_name = None
yield CameraSettings(name, id, exposure, x_binning, y_binning, channel_name)
def _parse_date(self, raw_metadata):
"""
The date and time when acquisition began.
:type raw_metadata: V3RawMetadata
:rtype: datetime.datetime() or None
"""
for line in metadata_dict[six.b('ImageTextInfo')][six.b('SLxImageTextInfo')].values():
for line in raw_metadata.image_text_info[six.b('SLxImageTextInfo')].values():
line = line.decode("utf8")
absolute_start_12 = None
absolute_start_24 = None
@ -80,19 +222,20 @@ class V3Parser(BaseParser):
return absolute_start_12 if absolute_start_12 else absolute_start_24
return None
def _parse_channels(self, metadata_dict):
def _parse_channels(self, raw_metadata):
"""
These are labels created by the NIS Elements user. Typically they may be a short description of the filter cube
used (e.g. "bright field", "GFP", etc.)
:type raw_metadata: V3RawMetadata
:rtype: list
"""
channels = []
metadata = metadata_dict[six.b('ImageMetadataSeq')][six.b('SLxPictureMetadata')][six.b('sPicturePlanes')]
metadata = raw_metadata.image_metadata_sequence[six.b('SLxPictureMetadata')][six.b('sPicturePlanes')]
try:
validity = metadata_dict[six.b('ImageMetadata')][six.b('SLxExperiment')][six.b('ppNextLevelEx')][six.b('')][0][six.b('ppNextLevelEx')][six.b('')][0][six.b('pItemValid')]
except KeyError:
validity = raw_metadata.image_metadata[six.b('SLxExperiment')][six.b('ppNextLevelEx')][six.b('')][0][six.b('ppNextLevelEx')][six.b('')][0][six.b('pItemValid')]
except (KeyError, TypeError):
# If none of the channels have been deleted, there is no validity list, so we just make one
validity = [True for _ in metadata]
# Channel information is contained in dictionaries with the keys a0, a1...an where the number
@ -104,46 +247,50 @@ class V3Parser(BaseParser):
channels.append(chan[six.b('sDescription')].decode("utf8"))
return channels
def _parse_fields_of_view(self, metadata_dict):
def _parse_fields_of_view(self, raw_metadata):
"""
The metadata contains information about fields of view, but it contains it even if some fields
of view were cropped. We can't find anything that states which fields of view are actually
in the image data, so we have to calculate it. There probably is something somewhere, since
NIS Elements can figure it out, but we haven't found it yet.
:rtype: list
:type raw_metadata: V3RawMetadata
:rtype: list
"""
return self._parse_dimension(r""".*?XY\((\d+)\).*?""", metadata_dict)
return self._parse_dimension(r""".*?XY\((\d+)\).*?""", raw_metadata)
def _parse_frames(self, metadata_dict):
def _parse_frames(self, raw_metadata):
"""
The number of cycles.
:type raw_metadata: V3RawMetadata
:rtype: list
"""
return self._parse_dimension(r""".*?T'?\((\d+)\).*?""", metadata_dict)
return self._parse_dimension(r""".*?T'?\((\d+)\).*?""", raw_metadata)
def _parse_z_levels(self, metadata_dict):
def _parse_z_levels(self, raw_metadata):
"""
The different levels in the Z-plane. Just a sequence from 0 to n.
:rtype: list
:type raw_metadata: V3RawMetadata
:rtype: list
"""
return self._parse_dimension(r""".*?Z\((\d+)\).*?""", metadata_dict)
return self._parse_dimension(r""".*?Z\((\d+)\).*?""", raw_metadata)
def _parse_dimension_text(self, metadata_dict):
def _parse_dimension_text(self, raw_metadata):
"""
While there are metadata values that represent a lot of what we want to capture, they seem to be unreliable.
Sometimes certain elements don't exist, or change their data type randomly. However, the human-readable text
is always there and in the same exact format, so we just parse that instead.
:rtype: str
:type raw_metadata: V3RawMetadata
:rtype: str
"""
for line in metadata_dict[six.b('ImageTextInfo')][six.b('SLxImageTextInfo')].values():
for line in raw_metadata.image_text_info[six.b('SLxImageTextInfo')].values():
if six.b("Dimensions:") in line:
metadata = line
break
@@ -157,8 +304,16 @@ class V3Parser(BaseParser):
return six.b("")
return dimension_text
def _parse_dimension(self, pattern, metadata_dict):
dimension_text = self._parse_dimension_text(metadata_dict)
def _parse_dimension(self, pattern, raw_metadata):
"""
:param pattern: a valid regex pattern
:type pattern: str
:type raw_metadata: V3RawMetadata
:rtype: list of int
"""
dimension_text = self._parse_dimension_text(raw_metadata)
if six.PY3:
dimension_text = dimension_text.decode("utf8")
match = re.match(pattern, dimension_text)
@@ -167,14 +322,15 @@ class V3Parser(BaseParser):
count = int(match.group(1))
return list(range(count))
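To make this concrete, here is a hypothetical dimension line of the kind _parse_dimension_text returns (shown as a plain string for simplicity; the exact wording varies between files and this one is invented for illustration):

    import re

    dimension_text = "Dimensions: T(30) x XY(4) x Z(5)"  # invented example
    fields_of_view = list(range(int(re.match(r""".*?XY\((\d+)\).*?""", dimension_text).group(1))))
    frames = list(range(int(re.match(r""".*?T'?\((\d+)\).*?""", dimension_text).group(1))))
    z_levels = list(range(int(re.match(r""".*?Z\((\d+)\).*?""", dimension_text).group(1))))
    # fields_of_view == [0, 1, 2, 3], frames == list(range(30)), z_levels == [0, 1, 2, 3, 4]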
def _parse_total_images_per_channel(self, metadata_dict):
def _parse_total_images_per_channel(self, raw_metadata):
"""
The total number of images per channel. Warning: this may be inaccurate as it includes "gap" images.
:type raw_metadata: V3RawMetadata
:rtype: int
"""
return metadata_dict[six.b('ImageAttributes')][six.b('SLxImageAttributes')][six.b('uiSequenceCount')]
return raw_metadata.image_attributes[six.b('SLxImageAttributes')][six.b('uiSequenceCount')]
def _build_label_map(self):
"""
@@ -182,104 +338,11 @@ class V3Parser(BaseParser):
as some of the bytes contain the value 33, which is the ASCII code for "!". So we iteratively find each label,
grab the subsequent data (always 16 bytes long), advance to the next label and repeat.
:rtype: dict
:rtype: LabelMap
"""
label_map = {}
self._fh.seek(-8, 2)
chunk_map_start_location = struct.unpack("Q", self._fh.read(8))[0]
self._fh.seek(chunk_map_start_location)
raw_text = self._fh.read(-1)
label_start = raw_text.index(V3Parser.CHUNK_MAP_START) + 32
while True:
data_start = raw_text.index(six.b("!"), label_start) + 1
key = raw_text[label_start: data_start]
location, length = struct.unpack("QQ", raw_text[data_start: data_start + 16])
if key == V3Parser.CHUNK_MAP_END:
# We've reached the end of the chunk map
break
label_map[key] = location
label_start = data_start + 16
return label_map
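For reference, a fabricated two-entry chunk map shows the byte layout the loop above walks over (the label names and offsets are only illustrative, and the CHUNK_MAP_START/END markers are omitted for brevity):

    import struct

    # Each entry is a label terminated by "!", followed by a (location, length) pair
    # packed as two unsigned 64-bit integers.
    raw_text = (b"ImageTextInfoLV!" + struct.pack("QQ", 4096, 512)
                + b"ImageMetadataSeqLV|0!" + struct.pack("QQ", 8192, 1024))
    # Running the loop above over this buffer would produce:
    # {b'ImageTextInfoLV!': 4096, b'ImageMetadataSeqLV|0!': 8192}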
def _parse_unsigned_char(self, data):
return struct.unpack("B", data.read(1))[0]
def _parse_unsigned_int(self, data):
return struct.unpack("I", data.read(4))[0]
def _parse_unsigned_long(self, data):
return struct.unpack("Q", data.read(8))[0]
def _parse_double(self, data):
return struct.unpack("d", data.read(8))[0]
def _parse_string(self, data):
value = data.read(2)
while not value.endswith(six.b("\x00\x00")):
# the string ends at the first instance of \x00\x00
value += data.read(2)
return value.decode("utf16")[:-1].encode("utf8")
def _parse_char_array(self, data):
array_length = struct.unpack("Q", data.read(8))[0]
return array.array("B", data.read(array_length))
def _parse_metadata_item(self, data):
"""
Reads hierarchical data, analogous to a Python dict.
"""
new_count, length = struct.unpack("<IQ", data.read(12))
length -= data.tell() - self._cursor_position
next_data_length = data.read(length)
value = self._read_metadata(next_data_length, new_count)
# Skip some offsets
data.read(new_count * 8)
return value
def _get_value(self, data, data_type):
"""
ND2s use various codes to indicate different data types, which we translate here.
"""
parser = {1: self._parse_unsigned_char,
2: self._parse_unsigned_int,
3: self._parse_unsigned_int,
5: self._parse_unsigned_long,
6: self._parse_double,
8: self._parse_string,
9: self._parse_char_array,
11: self._parse_metadata_item}
return parser[data_type](data)
def _read_metadata(self, data, count):
"""
Iterates over each element of some section of the metadata and parses it.
"""
data = six.BytesIO(data)
metadata = {}
for _ in range(count):
self._cursor_position = data.tell()
header = data.read(2)
if not header:
# We've reached the end of some hierarchy of data
break
if six.PY3:
header = header.decode("utf8")
data_type, name_length = map(ord, header)
name = data.read(name_length * 2).decode("utf16")[:-1].encode("utf8")
value = self._get_value(data, data_type)
if name not in metadata.keys():
metadata[name] = value
else:
if not isinstance(metadata[name], list):
# We have encountered this key exactly once before. Since we're seeing it again, we know we
# need to convert it to a list before proceeding.
metadata[name] = [metadata[name]]
# We've encountered this key before so we're guaranteed to be dealing with a list. Thus we append
# the value to the already-existing list.
metadata[name].append(value)
return metadata
return LabelMap(raw_text)
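To illustrate the entry layout that the removed _read_metadata helper walks through (a two-byte header holding the type code and name length, a UTF-16 name with a trailing null, then the value), here is a single hand-built entry; the bytes are invented and the parsing is a simplified standalone version, not the library code:

    import struct
    import six

    # One invented entry: type code 3 (unsigned int), name "ab" (2 chars + null terminator = 3),
    # the name encoded as UTF-16LE, then the 4-byte value 42.
    entry = struct.pack("BB", 3, 3) + "ab\x00".encode("utf-16-le") + struct.pack("I", 42)

    data = six.BytesIO(entry)
    data_type, name_length = struct.unpack("BB", data.read(2))
    name = data.read(name_length * 2).decode("utf-16-le").rstrip("\x00")
    value = struct.unpack("I", data.read(4))[0]
    # name == 'ab', value == 42, data_type == 3 (dispatched to _parse_unsigned_int above)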

+ 1
- 0
requirements.txt View File

@@ -1,2 +1,3 @@
numpy>=1.9.2
six>=1.4
xmltodict>=0.9.2

+ 3
- 2
setup.py View File

@@ -1,13 +1,14 @@
from setuptools import setup
VERSION = "1.1.4"
VERSION = "2.0.0"
setup(
name="nd2reader",
packages=['nd2reader', 'nd2reader.model', 'nd2reader.driver', 'nd2reader.parser', 'nd2reader.common'],
install_requires=[
'numpy>=1.6.2, <2.0',
'six>=1.4, <2.0'
'six>=1.4, <2.0',
'xmltodict>=0.9.2, <1.0'
],
version=VERSION,
description='A tool for reading ND2 files produced by NIS Elements',


tests.py → test.py View File


+ 0
- 183
tests/__init__.py View File

@@ -1,183 +0,0 @@
# from nd2reader.parser import Nd2Parser
# import unittest
#
#
# class MockNd2Parser(object):
# def __init__(self, channels, fields_of_view, z_levels):
# self.channels = channels
# self.fields_of_view = fields_of_view
# self.z_levels = z_levels
#
#
# class TestNd2Parser(unittest.TestCase):
# def test_calculate_field_of_view_simple(self):
# """ With a single field of view, the field of view should always be the same number (0). """
# nd2 = MockNd2Parser([''], [0], [0])
# for frame_number in range(1000):
# result = Nd2Parser._calculate_field_of_view(nd2, frame_number)
# self.assertEqual(result, 0)
#
# def test_calculate_field_of_view_two_channels(self):
# nd2 = MockNd2Parser(['', 'GFP'], [0], [0])
# for frame_number in range(1000):
# result = Nd2Parser._calculate_field_of_view(nd2, frame_number)
# self.assertEqual(result, 0)
#
# def test_calculate_field_of_view_three_channels(self):
# nd2 = MockNd2Parser(['', 'GFP', 'dsRed'], [0], [0])
# for frame_number in range(1000):
# result = Nd2Parser._calculate_field_of_view(nd2, frame_number)
# self.assertEqual(result, 0)
#
# def test_calculate_field_of_view_two_fovs(self):
# nd2 = MockNd2Parser([''], [0, 1], [0])
# for frame_number in range(1000):
# result = Nd2Parser._calculate_field_of_view(nd2, frame_number)
# self.assertEqual(result, frame_number % 2)
#
# def test_calculate_field_of_view_two_fovs_two_zlevels(self):
# nd2 = MockNd2Parser([''], [0, 1], [0, 1])
# self.assertEqual(Nd2Parser._calculate_field_of_view(nd2, 0), 0)
# self.assertEqual(Nd2Parser._calculate_field_of_view(nd2, 1), 0)
# self.assertEqual(Nd2Parser._calculate_field_of_view(nd2, 2), 1)
# self.assertEqual(Nd2Parser._calculate_field_of_view(nd2, 3), 1)
# self.assertEqual(Nd2Parser._calculate_field_of_view(nd2, 4), 0)
# self.assertEqual(Nd2Parser._calculate_field_of_view(nd2, 5), 0)
# self.assertEqual(Nd2Parser._calculate_field_of_view(nd2, 6), 1)
# self.assertEqual(Nd2Parser._calculate_field_of_view(nd2, 7), 1)
#
# def test_calculate_field_of_view_two_everything(self):
# nd2 = MockNd2Parser(['', 'GFP'], [0, 1], [0, 1])
# self.assertEqual(Nd2Parser._calculate_field_of_view(nd2, 0), 0)
# self.assertEqual(Nd2Parser._calculate_field_of_view(nd2, 1), 0)
# self.assertEqual(Nd2Parser._calculate_field_of_view(nd2, 2), 0)
# self.assertEqual(Nd2Parser._calculate_field_of_view(nd2, 3), 0)
# self.assertEqual(Nd2Parser._calculate_field_of_view(nd2, 4), 1)
# self.assertEqual(Nd2Parser._calculate_field_of_view(nd2, 5), 1)
# self.assertEqual(Nd2Parser._calculate_field_of_view(nd2, 6), 1)
# self.assertEqual(Nd2Parser._calculate_field_of_view(nd2, 7), 1)
#
# def test_calculate_field_of_view_7c2f2z(self):
# nd2 = MockNd2Parser(['', 'GFP', 'dsRed', 'dTomato', 'lulzBlue', 'jimbotronPurple', 'orange'], [0, 1], [0, 1])
# for i in range(14):
# self.assertEqual(Nd2Parser._calculate_field_of_view(nd2, i), 0)
# for i in range(14, 28):
# self.assertEqual(Nd2Parser._calculate_field_of_view(nd2, i), 1)
# for i in range(28, 42):
# self.assertEqual(Nd2Parser._calculate_field_of_view(nd2, i), 0)
# for i in range(42, 56):
# self.assertEqual(Nd2Parser._calculate_field_of_view(nd2, i), 1)
# for i in range(56, 70):
# self.assertEqual(Nd2Parser._calculate_field_of_view(nd2, i), 0)
# for i in range(70, 84):
# self.assertEqual(Nd2Parser._calculate_field_of_view(nd2, i), 1)
#
# def test_calculate_field_of_view_2c3f5z(self):
# """ All prime numbers to elucidate any errors that won't show up when numbers are multiples of each other """
# nd2 = MockNd2Parser(['', 'GFP'], [0, 1, 2], [0, 1, 2, 3, 4])
# for i in range(10):
# self.assertEqual(Nd2Parser._calculate_field_of_view(nd2, i), 0)
# for i in range(10, 20):
# self.assertEqual(Nd2Parser._calculate_field_of_view(nd2, i), 1)
# for i in range(20, 30):
# self.assertEqual(Nd2Parser._calculate_field_of_view(nd2, i), 2)
# for i in range(30, 40):
# self.assertEqual(Nd2Parser._calculate_field_of_view(nd2, i), 0)
# for i in range(40, 50):
# self.assertEqual(Nd2Parser._calculate_field_of_view(nd2, i), 1)
# for i in range(50, 60):
# self.assertEqual(Nd2Parser._calculate_field_of_view(nd2, i), 2)
#
# def test_calculate_channel_simple(self):
# nd2 = MockNd2Parser(['GFP'], [0], [0])
# for i in range(1000):
# self.assertEqual(Nd2Parser._calculate_channel(nd2, i), 'GFP')
#
# def test_calculate_channel(self):
# nd2 = MockNd2Parser(['', 'GFP', 'dsRed', 'dTomato', 'lulzBlue', 'jimbotronPurple', 'orange'], [0], [0])
# for i in range(1000):
# for n, channel in enumerate(['', 'GFP', 'dsRed', 'dTomato', 'lulzBlue', 'jimbotronPurple', 'orange'], start=i*7):
# self.assertEqual(Nd2Parser._calculate_channel(nd2, n), channel)
#
# def test_calculate_channel_7c2fov1z(self):
# nd2 = MockNd2Parser(['', 'GFP', 'dsRed', 'dTomato', 'lulzBlue', 'jimbotronPurple', 'orange'], [0, 1], [0])
# for i in range(1000):
# for n, channel in enumerate(['', 'GFP', 'dsRed', 'dTomato', 'lulzBlue', 'jimbotronPurple', 'orange'], start=i*7):
# self.assertEqual(Nd2Parser._calculate_channel(nd2, n), channel)
#
# def test_calculate_channel_ludicrous_values(self):
# nd2 = MockNd2Parser(['', 'GFP', 'dsRed', 'dTomato', 'lulzBlue', 'jimbotronPurple', 'orange'], list(range(31)), list(range(17)))
# for i in range(10000):
# for n, channel in enumerate(['', 'GFP', 'dsRed', 'dTomato', 'lulzBlue', 'jimbotronPurple', 'orange'], start=i*7):
# self.assertEqual(Nd2Parser._calculate_channel(nd2, n), channel)
#
# def test_calculate_z_level(self):
# nd2 = MockNd2Parser([''], [0], [0])
# for frame_number in range(1000):
# result = Nd2Parser._calculate_z_level(nd2, frame_number)
# self.assertEqual(result, 0)
#
# def test_calculate_z_level_1c1f2z(self):
# nd2 = MockNd2Parser([''], [0], [0, 1])
# for frame_number in range(1000):
# result = Nd2Parser._calculate_z_level(nd2, frame_number)
# self.assertEqual(result, frame_number % 2)
#
# def test_calculate_z_level_31c17f1z(self):
# nd2 = MockNd2Parser(list(range(31)), list(range(17)), [0])
# for frame_number in range(1000):
# result = Nd2Parser._calculate_z_level(nd2, frame_number)
# self.assertEqual(result, 0)
#
# def test_calculate_z_level_2c1f2z(self):
# nd2 = MockNd2Parser(['', 'GFP'], [0], [0, 1])
# self.assertEqual(Nd2Parser._calculate_z_level(nd2, 0), 0)
# self.assertEqual(Nd2Parser._calculate_z_level(nd2, 1), 0)
# self.assertEqual(Nd2Parser._calculate_z_level(nd2, 2), 1)
# self.assertEqual(Nd2Parser._calculate_z_level(nd2, 3), 1)
# self.assertEqual(Nd2Parser._calculate_z_level(nd2, 4), 0)
# self.assertEqual(Nd2Parser._calculate_z_level(nd2, 5), 0)
# self.assertEqual(Nd2Parser._calculate_z_level(nd2, 6), 1)
# self.assertEqual(Nd2Parser._calculate_z_level(nd2, 7), 1)
# self.assertEqual(Nd2Parser._calculate_z_level(nd2, 8), 0)
# self.assertEqual(Nd2Parser._calculate_z_level(nd2, 9), 0)
# self.assertEqual(Nd2Parser._calculate_z_level(nd2, 10), 1)
# self.assertEqual(Nd2Parser._calculate_z_level(nd2, 11), 1)
# self.assertEqual(Nd2Parser._calculate_z_level(nd2, 12), 0)
# self.assertEqual(Nd2Parser._calculate_z_level(nd2, 13), 0)
# self.assertEqual(Nd2Parser._calculate_z_level(nd2, 14), 1)
# self.assertEqual(Nd2Parser._calculate_z_level(nd2, 15), 1)
#
# def test_calculate_z_level_2c3f5z(self):
# nd2 = MockNd2Parser(['', 'GFP'], [0, 1, 2], [0, 1, 2, 3, 4])
# self.assertEqual(Nd2Parser._calculate_z_level(nd2, 0), 0)
# self.assertEqual(Nd2Parser._calculate_z_level(nd2, 1), 0)
# self.assertEqual(Nd2Parser._calculate_z_level(nd2, 2), 1)
# self.assertEqual(Nd2Parser._calculate_z_level(nd2, 3), 1)
# self.assertEqual(Nd2Parser._calculate_z_level(nd2, 4), 2)
# self.assertEqual(Nd2Parser._calculate_z_level(nd2, 5), 2)
# self.assertEqual(Nd2Parser._calculate_z_level(nd2, 6), 3)
# self.assertEqual(Nd2Parser._calculate_z_level(nd2, 7), 3)
# self.assertEqual(Nd2Parser._calculate_z_level(nd2, 8), 4)
# self.assertEqual(Nd2Parser._calculate_z_level(nd2, 9), 4)
# self.assertEqual(Nd2Parser._calculate_z_level(nd2, 10), 0)
# self.assertEqual(Nd2Parser._calculate_z_level(nd2, 11), 0)
# self.assertEqual(Nd2Parser._calculate_z_level(nd2, 12), 1)
# self.assertEqual(Nd2Parser._calculate_z_level(nd2, 13), 1)
# self.assertEqual(Nd2Parser._calculate_z_level(nd2, 14), 2)
# self.assertEqual(Nd2Parser._calculate_z_level(nd2, 15), 2)
# self.assertEqual(Nd2Parser._calculate_z_level(nd2, 16), 3)
# self.assertEqual(Nd2Parser._calculate_z_level(nd2, 17), 3)
# self.assertEqual(Nd2Parser._calculate_z_level(nd2, 18), 4)
# self.assertEqual(Nd2Parser._calculate_z_level(nd2, 19), 4)
# self.assertEqual(Nd2Parser._calculate_z_level(nd2, 20), 0)
# self.assertEqual(Nd2Parser._calculate_z_level(nd2, 21), 0)
# self.assertEqual(Nd2Parser._calculate_z_level(nd2, 22), 1)
# self.assertEqual(Nd2Parser._calculate_z_level(nd2, 23), 1)
# self.assertEqual(Nd2Parser._calculate_z_level(nd2, 24), 2)
# self.assertEqual(Nd2Parser._calculate_z_level(nd2, 25), 2)
# self.assertEqual(Nd2Parser._calculate_z_level(nd2, 26), 3)
# self.assertEqual(Nd2Parser._calculate_z_level(nd2, 27), 3)
# self.assertEqual(Nd2Parser._calculate_z_level(nd2, 28), 4)
# self.assertEqual(Nd2Parser._calculate_z_level(nd2, 29), 4)
# self.assertEqual(Nd2Parser._calculate_z_level(nd2, 30), 0)

+ 1
- 1
tests/model/image.py View File

@@ -14,7 +14,7 @@ class ImageTests(unittest.TestCase):
[45, 12, 9],
[12, 12, 99]])
self.image = Image(array)
self.image.add_params(1200.314, 17, 2, 'GFP', 1)
self.image.add_params(1, 1200.314, 17, 2, 'GFP', 1)
def test_size(self):
self.assertEqual(self.image.height, 3)

