Browse Source

Merge pull request #142 from jimrybarski/140-some-files-unreadable

resolves #140
zolfa-add_slices_loading
Jim Rybarski 9 years ago
parent
commit
092eba2a9c
6 changed files with 148 additions and 16 deletions
  1. +3
    -3
      ftest.py
  2. +8
    -7
      functional_tests/FYLM141111001.py
  3. +123
    -0
      functional_tests/monocycle.py
  4. +1
    -1
      functional_tests/single.py
  5. +4
    -0
      nd2reader/driver/v3.py
  6. +9
    -5
      nd2reader/parser/v3.py

+ 3
- 3
ftest.py View File

@ -1,7 +1,7 @@
import unittest
from functional_tests.FYLM141111001 import FunctionalTests as FYLM141111Tests
from functional_tests.single import FunctionalTests as SingleTests
from functional_tests.FYLM141111001 import FYLM141111Tests
from functional_tests.single import SingleTests
from functional_tests.monocycle import Monocycle1Tests, Monocycle2Tests
if __name__ == '__main__':
unittest.main()

+ 8
- 7
functional_tests/FYLM141111001.py View File

@ -9,7 +9,7 @@ from datetime import datetime
import unittest
class FunctionalTests(unittest.TestCase):
class FYLM141111Tests(unittest.TestCase):
def setUp(self):
self.nd2 = Nd2("/var/nd2s/FYLM-141111-001.nd2")
@ -23,9 +23,9 @@ class FunctionalTests(unittest.TestCase):
def test_date(self):
self.assertEqual(self.nd2.date, datetime(2014, 11, 11, 15, 59, 19))
# def test_length(self):
# # This will fail until we address issue #59
# self.assertEqual(len(self.nd2), 17808)
@unittest.skip("This will fail until we address issue #59")
def test_length(self):
self.assertEqual(len(self.nd2), 17808)
def test_frames(self):
self.assertEqual(len(self.nd2.frames), 636)
@ -127,12 +127,13 @@ class FunctionalTests(unittest.TestCase):
for _, image in zip(range(20), self.nd2):
if image is not None and image.channel == 'GFP':
manual_images.append(image)
filter_images = []
filter_images = []
for image in self.nd2.select(channels='GFP'):
filter_images.append(image)
if len(filter_images) == len(manual_images):
break
self.assertEqual(len(manual_images), len(filter_images))
self.assertGreater(len(manual_images), 0)
for a, b in zip(manual_images, filter_images):
@ -141,7 +142,7 @@ class FunctionalTests(unittest.TestCase):
self.assertEqual(a.field_of_view, b.field_of_view)
self.assertEqual(a.channel, b.channel)
def test_filter_order_all(self):
def test_select_order_all(self):
# If we select every possible image using select(), we should just get every image in order
n = 0
for image in self.nd2.select(channels=['', 'GFP'], z_levels=[0, 1, 2], fields_of_view=list(range(8))):
@ -155,7 +156,7 @@ class FunctionalTests(unittest.TestCase):
if n > 100:
break
def test_filter_order_subset(self):
def test_select_order_subset(self):
# Test that images are always yielded in increasing order. This guarantees that no matter what subset of images
# we're filtering, we still get them in the chronological order they were acquired
n = -1


+ 123
- 0
functional_tests/monocycle.py View File

@ -0,0 +1,123 @@
"""
Tests on ND2s that have 1 or 2 cycles only. This is unlike the ND2s I work with typically, which are all done over very long periods of time.
"""
from nd2reader import Nd2
import numpy as np
import unittest
class Monocycle1Tests(unittest.TestCase):
    """Functional tests against a single-cycle ND2 (/var/nd2s/simone1.nd2)."""

    def setUp(self):
        self.nd2 = Nd2("/var/nd2s/simone1.nd2")

    def tearDown(self):
        self.nd2.close()

    def test_select(self):
        # Hand-collect the FITC images found among the first 20 frames, then
        # confirm that select(channels='FITC') yields exactly the same images.
        expected = []
        for _, image in zip(range(20), self.nd2):
            if image is not None and image.channel == 'FITC':
                expected.append(image)
        selected = []
        for image in self.nd2.select(channels='FITC'):
            selected.append(image)
            if len(selected) == len(expected):
                break
        self.assertEqual(len(expected), len(selected))
        self.assertGreater(len(expected), 0)
        for wanted, got in zip(expected, selected):
            self.assertTrue(np.array_equal(wanted, got))
            self.assertEqual(wanted.index, got.index)
            self.assertEqual(wanted.field_of_view, got.field_of_view)
            self.assertEqual(wanted.channel, got.channel)

    def test_select_order_all(self):
        # Selecting every possible image via select() should reproduce the
        # full image sequence, in order, matching direct indexing.
        position = 0
        every_channel = ['Cy3Narrow', 'DAPI', 'FITC', 'TxRed-modified']
        for image in self.nd2.select(channels=every_channel,
                                     z_levels=list(range(35)),
                                     fields_of_view=list(range(5))):
            # Skip over gap frames (indexing can return None).
            while True:
                direct = self.nd2[position]
                if direct is not None:
                    break
                position += 1
            self.assertTrue(np.array_equal(image, direct))
            position += 1
            if position > 100:
                # Only check the first ~100 images to keep the test fast; any
                # ordering problem would have shown up by now.
                break

    def test_select_order_subset(self):
        # Images must always be yielded with strictly increasing indices, so
        # any filtered subset still comes out in acquisition order.
        last_index = -1
        for image in self.nd2.select(channels='FITC', z_levels=[0, 1], fields_of_view=[1, 2, 4]):
            self.assertGreater(image.index, last_index)
            self.assertEqual(image.channel, 'FITC')
            self.assertIn(image.field_of_view, (1, 2, 4))
            self.assertIn(image.z_level, (0, 1))
            last_index = image.index
            if last_index > 100:
                break
class Monocycle2Tests(unittest.TestCase):
    """Functional tests against a second short-cycle ND2 (/var/nd2s/hawkjo.nd2)."""

    def setUp(self):
        self.nd2 = Nd2("/var/nd2s/hawkjo.nd2")

    def tearDown(self):
        self.nd2.close()

    def test_select(self):
        # Hand-collect the 'HHQ 500 LP 1' images found among the first 20
        # frames, then confirm select() with that channel yields the same images.
        expected = []
        for _, image in zip(range(20), self.nd2):
            if image is not None and image.channel == 'HHQ 500 LP 1':
                expected.append(image)
        selected = []
        for image in self.nd2.select(channels='HHQ 500 LP 1'):
            selected.append(image)
            if len(selected) == len(expected):
                break
        self.assertEqual(len(expected), len(selected))
        self.assertGreater(len(expected), 0)
        for wanted, got in zip(expected, selected):
            self.assertTrue(np.array_equal(wanted, got))
            self.assertEqual(wanted.index, got.index)
            self.assertEqual(wanted.field_of_view, got.field_of_view)
            self.assertEqual(wanted.channel, got.channel)

    def test_select_order_all(self):
        # Selecting every possible image via select() should reproduce the
        # full image sequence, in order, matching direct indexing.
        position = 0
        for image in self.nd2.select(channels=['HHQ 500 LP 1', 'HHQ 500 LP 2'],
                                     z_levels=[0],
                                     fields_of_view=list(range(100))):
            # Skip over gap frames (indexing can return None).
            while True:
                direct = self.nd2[position]
                if direct is not None:
                    break
                position += 1
            self.assertTrue(np.array_equal(image, direct))
            position += 1
            if position > 100:
                # Only check the first ~100 images to keep the test fast; any
                # ordering problem would have shown up by now.
                break

    def test_select_order_subset(self):
        # Images must always be yielded with strictly increasing indices, so
        # any filtered subset still comes out in acquisition order.
        last_index = -1
        for image in self.nd2.select(channels='HHQ 500 LP 2', z_levels=[0], fields_of_view=[1, 2, 4]):
            self.assertGreater(image.index, last_index)
            self.assertEqual(image.channel, 'HHQ 500 LP 2')
            self.assertIn(image.field_of_view, (1, 2, 4))
            self.assertEqual(image.z_level, 0)
            last_index = image.index
            if last_index > 100:
                break

+ 1
- 1
functional_tests/single.py View File

@ -8,7 +8,7 @@ from datetime import datetime
import unittest
class FunctionalTests(unittest.TestCase):
class SingleTests(unittest.TestCase):
def setUp(self):
self.nd2 = Nd2("/var/nd2s/single.nd2")


+ 4
- 0
nd2reader/driver/v3.py View File

@ -161,16 +161,20 @@ class V3Driver(object):
"""
chunk = self._label_map.get_image_data_location(image_group_number)
data = read_chunk(self._file_handle, chunk)
# print("data", data, "that was data")
# All images in the same image group share the same timestamp! So if you have complicated image data,
# your timestamps may not be entirely accurate. Practically speaking though, they'll only be off by a few
# seconds unless you're doing something super weird.
timestamp = struct.unpack("d", data[:8])[0]
image_group_data = array.array("H", data)
image_data_start = 4 + channel_offset
# The images for the various channels are interleaved within the same array. For example, the second image
# of a four image group will be composed of bytes 2, 6, 10, etc. If you understand why someone would design
# a data structure that way, please send the author of this library a message.
image_data = np.reshape(image_group_data[image_data_start::len(self._metadata.channels)], (height, width))
# Skip images that are all zeros! This is important, since NIS Elements creates blank "gap" images if you
# don't have the same number of images each cycle. We discovered this because we only took GFP images every
# other cycle to reduce phototoxicity, but NIS Elements still allocated memory as if we were going to take


+ 9
- 5
nd2reader/parser/v3.py View File

@ -173,7 +173,7 @@ class V3Parser(BaseParser):
frames = self._parse_frames(self.raw_metadata)
z_levels = self._parse_z_levels(self.raw_metadata)
total_images_per_channel = self._parse_total_images_per_channel(self.raw_metadata)
channels = sorted([key for key in self.camera_metadata.keys()])
channels = self._parse_channels(self.raw_metadata)
self.metadata = Metadata(height, width, channels, date, fields_of_view, frames, z_levels, total_images_per_channel)
def _parse_camera_settings(self):
@ -181,20 +181,24 @@ class V3Parser(BaseParser):
Looks up information in the raw metadata about the camera(s) and puts it into a CameraSettings object.
Duplicate cameras can be returned if the same one was used for multiple channels.
:return:
"""
for camera in self.raw_metadata.image_metadata_sequence[six.b('SLxPictureMetadata')][six.b('sPicturePlanes')][six.b('sSampleSetting')].values():
for n, camera in enumerate(self.raw_metadata.image_metadata_sequence[six.b('SLxPictureMetadata')][six.b('sPicturePlanes')][six.b('sSampleSetting')].values()):
name = camera[six.b('pCameraSetting')][six.b('CameraUserName')]
id = camera[six.b('pCameraSetting')][six.b('CameraUniqueName')]
exposure = camera[six.b('dExposureTime')]
x_binning = camera[six.b('pCameraSetting')][six.b('FormatFast')][six.b('fmtDesc')][six.b('dBinningX')]
y_binning = camera[six.b('pCameraSetting')][six.b('FormatFast')][six.b('fmtDesc')][six.b('dBinningY')]
optical_configs = camera[six.b('sOpticalConfigs')]
# This definitely is not working right. It seems to be totally inconsistent in each of the sample ND2s that I have.
# Fixing one breaks another.
if six.b('') in optical_configs.keys():
channel_name = optical_configs[six.b('')][six.b('sOpticalConfigName')]
yield CameraSettings(name, id, exposure, x_binning, y_binning, channel_name)
else:
channel_name = None
yield CameraSettings(name, id, exposure, x_binning, y_binning, channel_name)
channel_names = [channel[six.b('Name')] for key, channel in camera[six.b('pCameraSetting')][six.b('Metadata')][six.b('Channels')].items()]
for channel_name in channel_names:
yield CameraSettings(name, id, exposure, x_binning, y_binning, channel_name)
def _parse_date(self, raw_metadata):
"""


Loading…
Cancel
Save