
Refactored: moved the unwanted-byte cleanup for stitched ND2 files out of Parser into a new nd2reader/stitched.py module.

Branch: zolfa-add_slices_loading
Gabriele Girelli committed 4 years ago, commit a27ce7745c
3 changed files with 61 additions and 68 deletions
  1. nd2reader/parser.py    +7  −67
  2. nd2reader/stitched.py  +54 −0
  3. tests/test_parser.py   +0  −1

nd2reader/parser.py (+7 −67)

@@ -8,9 +8,9 @@ from pims.base_frames import Frame
 import numpy as np
 from nd2reader.common import get_version, read_chunk
 from nd2reader.exceptions import InvalidVersionError
 from nd2reader.label_map import LabelMap
 from nd2reader.raw_metadata import RawMetadata
+from nd2reader import stitched

 class Parser(object):
@@ -232,8 +232,7 @@ class Parser(object):
         Returns:

         """
-        return (image_group_number - (field_of_view * len(self.metadata["z_levels"]) + z_level)) / (
-            len(self.metadata["fields_of_view"]) * len(self.metadata["z_levels"]))
+        return (image_group_number - (field_of_view * len(self.metadata["z_levels"]) + z_level)) / (len(self.metadata["fields_of_view"]) * len(self.metadata["z_levels"]))

     @property
     def _channel_offset(self):
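
As a quick sanity check of the frame-number arithmetic above, here is a small sketch with made-up metadata counts (illustration only, not code from this commit). Image groups are laid out frame-major, then by field of view, then by z-level, so the division recovers the frame index:

    # Hypothetical acquisition: 2 fields of view, 3 z-levels.
    z_levels = [0, 1, 2]
    fields_of_view = [0, 1]
    # image_group_number = frame * (n_fov * n_z) + field_of_view * n_z + z_level
    image_group_number = 2 * (2 * 3) + 1 * 3 + 2  # frame 2, fov 1, z-level 2 -> 17
    frame = (image_group_number - (1 * len(z_levels) + 2)) / (
        len(fields_of_view) * len(z_levels)
    )
    assert frame == 2.0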
@@ -247,38 +246,6 @@ class Parser(object):
         """
         return {channel: n for n, channel in enumerate(self.metadata["channels"])}

-    def _get_unwanted_bytes_ids(
-        self, image_group_data, image_data_start, height, width
-    ):
-        # Check if the byte array size conforms to the image axes size. If not, check
-        # that the number of unexpected (unwanted) bytes is a multiple of the number of
-        # rows (height), as the same number of unwanted bytes is expected to be
-        # appended at the end of each row. Then, return the indexes of the unwanted
-        # bytes.
-        number_of_true_channels = int(len(image_group_data[4:]) / (height * width))
-        n_unwanted_bytes = (len(image_group_data[image_data_start:])) % (height * width)
-        if not n_unwanted_bytes:
-            return np.arange(0)
-        assert 0 == n_unwanted_bytes % height, (
-            "An unexpected number of extra bytes was encountered based on the expected"
-            + " frame size, therefore the file could not be parsed."
-        )
-        return np.arange(
-            image_data_start + height * number_of_true_channels,
-            len(image_group_data) - n_unwanted_bytes + 1,
-            height * number_of_true_channels,
-        )
-
-    def _remove_bytes_by_id(self, byte_ids, image_group_data, height):
-        # Remove bytes by ID.
-        bytes_per_row = len(byte_ids) // height
-        warnings.warn(
-            f"{len(byte_ids)} ({bytes_per_row}*{height}) unexpected zero "
-            + "bytes were found in the ND2 file and removed to allow further parsing."
-        )
-        for i in range(len(byte_ids)):
-            del image_group_data[byte_ids[i] : (byte_ids[i] + bytes_per_row)]
-
     def _get_raw_image_data(self, image_group_number, channel_offset, height, width):
         """Reads the raw bytes and the timestamp of an image.
@@ -300,42 +267,17 @@ class Parser(object):
         timestamp = struct.unpack("d", data[:8])[0]
         image_group_data = array.array("H", data)
         image_data_start = 4 + channel_offset

-        # Stitched ND2 files have been reported to contain unexpected (according to
-        # image shape) zero bytes at the end of each image data row. This hinders
-        # proper reshaping of the data. Hence, here the unwanted zero bytes are
-        # identified and removed.
-        unwanted_byte_ids = self._get_unwanted_bytes_ids(
-            image_group_data, image_data_start, height, width
-        )
-        if 0 != len(unwanted_byte_ids):
-            assert np.all(
-                image_group_data[unwanted_byte_ids + np.arange(len(unwanted_byte_ids))]
-                == 0
-            ), (
-                f"{len(unwanted_byte_ids)} unexpected non-zero bytes were found"
-                + " in the ND2 file, the file could not be parsed."
-            )
-            self._remove_bytes_by_id(unwanted_byte_ids, image_group_data, height)
+        image_group_data = stitched.remove_parsed_unwanted_bytes(image_group_data, image_data_start, height, width)

         # The images for the various channels are interleaved within the same array. For example, the second image
         # of a four image group will be composed of bytes 2, 6, 10, etc. If you understand why someone would design
         # a data structure that way, please send the author of this library a message.
         number_of_true_channels = int(len(image_group_data[4:]) / (height * width))
         try:
-            image_data = np.reshape(
-                image_group_data[image_data_start::number_of_true_channels],
-                (height, width),
-            )
+            image_data = np.reshape(image_group_data[image_data_start::number_of_true_channels], (height, width))
         except ValueError:
-            image_data = np.reshape(
-                image_group_data[image_data_start::number_of_true_channels],
-                (
-                    height,
-                    len(image_group_data[image_data_start::number_of_true_channels])
-                    // height,
-                ),
-            )
+            new_width = len(image_group_data[image_data_start::number_of_true_channels]) // height
+            image_data = np.reshape(image_group_data[image_data_start::number_of_true_channels], (height, new_width))

         # Skip images that are all zeros! This is important, since NIS Elements creates blank "gap" images if you
         # don't have the same number of images each cycle. We discovered this because we only took GFP images every
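
A toy illustration of the interleaved channel layout described in the comment above (synthetic values; the real array also carries a few header words before the pixel data, hence image_data_start):

    import numpy as np

    height, width, n_channels = 2, 3, 2
    # Channel 0 holds 0..5 and channel 1 holds 10..15, interleaved sample by sample.
    interleaved = np.array([0, 10, 1, 11, 2, 12, 3, 13, 4, 14, 5, 15])
    # Stride by the channel count to pull out one channel, as the parser does.
    channel_1 = np.reshape(interleaved[1::n_channels], (height, width))
    assert np.array_equal(channel_1, [[10, 11, 12], [13, 14, 15]])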
@@ -349,9 +291,7 @@ class Parser(object):
         else:
             empty_frame = np.full((height, width), np.nan)
             warnings.warn(
-                "ND2 file contains gap frames which are represented by np.nan-filled"
-                + " arrays; to convert to zeros use e.g. np.nan_to_num(array)"
-            )
+                "ND2 file contains gap frames which are represented by np.nan-filled arrays; to convert to zeros use e.g. np.nan_to_num(array)")

         return timestamp, image_data

     def _get_frame_metadata(self):
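
A minimal sketch of the caller-side conversion the warning above suggests (hypothetical shapes):

    import numpy as np

    gap_frame = np.full((4, 5), np.nan)  # what the parser yields for a gap image
    filled = np.nan_to_num(gap_frame)    # convert the np.nan fill to zeros
    assert filled.sum() == 0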


nd2reader/stitched.py (+54 −0, new file)

@@ -0,0 +1,54 @@
+# -*- coding: utf-8 -*-
+import numpy as np  # type: ignore
+import warnings
+
+
+def get_unwanted_bytes_ids(image_group_data, image_data_start, height, width):
+    # Check if the byte array size conforms to the image axes size. If not, check
+    # that the number of unexpected (unwanted) bytes is a multiple of the number of
+    # rows (height), as the same number of unwanted bytes is expected to be
+    # appended at the end of each row. Then, return the indexes of the unwanted
+    # bytes.
+    number_of_true_channels = int(len(image_group_data[4:]) / (height * width))
+    n_unwanted_bytes = (len(image_group_data[image_data_start:])) % (height * width)
+    if not n_unwanted_bytes:
+        return np.arange(0)
+    assert 0 == n_unwanted_bytes % height, (
+        "An unexpected number of extra bytes was encountered based on the expected"
+        + " frame size, therefore the file could not be parsed."
+    )
+    return np.arange(
+        image_data_start + height * number_of_true_channels,
+        len(image_group_data) - n_unwanted_bytes + 1,
+        height * number_of_true_channels,
+    )
+
+
+def remove_bytes_by_id(byte_ids, image_group_data, height):
+    # Remove the unwanted bytes in place, one row-sized chunk per ID.
+    bytes_per_row = len(byte_ids) // height
+    warnings.warn(
+        f"{len(byte_ids)} ({bytes_per_row}*{height}) unexpected zero "
+        + "bytes were found in the ND2 file and removed to allow further parsing."
+    )
+    for i in range(len(byte_ids)):
+        del image_group_data[byte_ids[i] : (byte_ids[i] + bytes_per_row)]
+
+
+def remove_parsed_unwanted_bytes(image_group_data, image_data_start, height, width):
+    # Stitched ND2 files have been reported to contain unexpected (according to
+    # image shape) zero bytes at the end of each image data row. This hinders
+    # proper reshaping of the data. Hence, here the unwanted zero bytes are
+    # identified and removed.
+    unwanted_byte_ids = get_unwanted_bytes_ids(
+        image_group_data, image_data_start, height, width
+    )
+    if 0 != len(unwanted_byte_ids):
+        # array.array does not support fancy indexing, hence the np.asarray view.
+        assert np.all(
+            np.asarray(image_group_data)[
+                unwanted_byte_ids + np.arange(len(unwanted_byte_ids))
+            ] == 0
+        ), (
+            f"{len(unwanted_byte_ids)} unexpected non-zero bytes were found"
+            + " in the ND2 file, the file could not be parsed."
+        )
+        remove_bytes_by_id(unwanted_byte_ids, image_group_data, height)
+    return image_group_data
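
A hedged, self-contained sketch of how the new module is meant to be used, with synthetic data standing in for a real ND2 chunk (the two trailing zero words mimic the per-row padding seen in stitched files):

    import array

    import numpy as np

    from nd2reader import stitched

    height, width = 2, 2
    # 4 header words, then two 2-pixel rows, each followed by one zero pad word.
    data = array.array("H", [9, 9, 9, 9, 1, 2, 0, 3, 4, 0])
    cleaned = stitched.remove_parsed_unwanted_bytes(data, 4, height, width)
    # With the padding gone, the payload reshapes cleanly to (height, width).
    assert np.array_equal(np.reshape(cleaned[4:], (height, width)), [[1, 2], [3, 4]])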

tests/test_parser.py (+0 −1)

@@ -2,7 +2,6 @@ import unittest
 from os import path
 from nd2reader.artificial import ArtificialND2
-from nd2reader.common import check_or_make_dir
 from nd2reader.exceptions import InvalidVersionError
 from nd2reader.parser import Parser
 import urllib.request

