202 Commits

Author SHA1 Message Date
  Zolfa ffac15e083 added x coordinates and rotation matrix readout 3 years ago
  Zolfa 25ec773d1d Minor changes for dev branched version release 3 years ago
  Zolfa f331c8fbcd Add functions to read only rectangular regions. 4 years ago
  Ruben Verweij 728a425a25 Merge pull request #43 from ggirelli/ggirelli-filename-patch 4 years ago
  Gabriele Girelli 0acc42eb66 Fixed missing filename path. 4 years ago
  Ruben Verweij f73cd93674 Merge pull request #40 from ggirelli/ggirelli-patch-parser-for-stitched-images 4 years ago
  Gabriele Girelli 5a2d9ac764 Update stitched.py 4 years ago
  Ruben Verweij c0abe63fe9 Merge pull request #39 from ggirelli/ggirelli-patch-nd2reader-from-file 4 years ago
  Gabriele Girelli e029f3c0b7 Fixed func def 4 years ago
  Gabriele Girelli 165b8095c7 Refactored. 4 years ago
  Gabriele Girelli 6a4ffcdc73 Added get_image test of stitched sample 4 years ago
  Gabriele Girelli 7cee1a70cc Using numpy assets when possible, and formatted with black 4 years ago
  Gabriele Girelli 2cd36638b1 Switched from warning to raising exception and improved message 4 years ago
  Gabriele Girelli e969424a63 Improved assert and warning messages 4 years ago
  Gabriele Girelli fdc10ca56e Fixed new init method 4 years ago
  Gabriele Girelli 481ce67e34 Implemented test. 4 years ago
  Gabriele Girelli 0b8ae27dde Allowed for fh to be either str or IO, and formatted with black. 4 years ago
  Gabriele Girelli 1dd4827a6b Changed structure to check/remove method pair 4 years ago
  Gabriele Girelli ee4b5ade59 Allowing case of multiple unwanted bytes per step 4 years ago
  Gabriele Girelli 5763229496 Complexity 4 years ago
  Gabriele Girelli 62c86f34e4 Update parser.py 4 years ago
  Gabriele Girelli e5d4eebd17 Complexity 4 years ago
  Gabriele Girelli 460583ba14 Fixed complexity 4 years ago
  Gabriele Girelli b84b52d93d Regenerated Reader 4 years ago
  Gabriele Girelli 77016ccbaf Fixed complexity 4 years ago
  Gabriele Girelli 9b0b4b137a Removed empty bytes at channel subchunk ends 4 years ago
  Gabriele Girelli ce29f90f20 Update reader.py 4 years ago
  Ruben Verweij a2385164a1 update Travis settings 5 years ago
  Ruben Verweij d0e4abcf55 Fix issue #34 5 years ago
  Ruben Verweij b496774361 Use timesteps as fallback to NDExperiment loops to determine total file duration, and thus framerate 5 years ago
  Ruben Verweij 0a16ce9003 Fix error in loop handling 5 years ago
  Ruben Verweij c0f289f4b5 Merge pull request #32 from ulugbekna/handle-invalid-file-extension 5 years ago
  Ulugbek Abdullaev a051a70350 Merge pull request #1 from ggirelli/patch-1 5 years ago
  Gabriele Girelli 2508bef3e0 f-strings not supported by Python3.6- 5 years ago
  Ruben Verweij c54d37a606 Fix typo in z-level check 5 years ago
  Ulugbek Abdullaev 1fc0389059 add a check that filename passed to nd2reader has an extension .nd2 because the current implementation raises InvalidVersionError if the passed file is non-nd2 type, which is misleading 5 years ago
  Ruben Verweij c038c5b24d Merge pull request #33 from ggirelli/master 5 years ago
  Ruben Verweij 17606e0a47 Fix codeclimate integration 5 years ago
  Ruben Verweij ffef793c83 Update README.md 5 years ago
  Ruben Verweij 7c636ac002 Remove support for Python < 3.5 5 years ago
  Gabriele Girelli e011b578e6 Removed self.__class__ from super() call 5 years ago
  Ruben Verweij 58d60ebf88 Version 3.2.3 due to build error of new version number storage system 5 years ago
  Ruben Verweij 9297cc86da Merge branch 'master' of github.com:rbnvrw/nd2reader 5 years ago
  Ruben Verweij a92617a567 New version 3.2.2 5 years ago
  Ruben Verweij b868fcabc6 Update release.txt 5 years ago
  Ruben Verweij 66361332cd Merge pull request #30 from rbnvrw/add-code-of-conduct-1 5 years ago
  Ruben Verweij 0d87fe32bc Create CODE_OF_CONDUCT.md 5 years ago
  Ruben Verweij 61d6f99548 Merge pull request #28 from ggirelli/master 5 years ago
  Gabriele Girelli 5fd18220ef Update raw_metadata.py 5 years ago
  Gabriele Girelli 60dedbe71b Update raw_metadata.py 5 years ago
  Gabriele Girelli 6f27d14339 Update raw_metadata.py 5 years ago
  Gabriele Girelli 96695d50ab Using Z slice coordinates from Z-data instead of Z-levels and show warning in case of mismatch 5 years ago
  Ruben Verweij 0fd78a4175 Resolve issue #24 5 years ago
  Ruben Verweij 9b205ca1da Issue #24: only return a Frame when bundle_axes is only y,x 5 years ago
  Ruben Verweij c6a184ec2e Merge branch 'master' of github.com:rbnvrw/nd2reader 5 years ago
  Ruben Verweij 8b2f506990 Update get_frame_vczyx function based on feedback in issue #24 5 years ago
  Ruben Verweij 84caa2b52c Update release.txt 5 years ago
  Ruben Verweij 20dd8e0d2a Make unit test less specific to accommodate for different versions of struct 5 years ago
  Ruben Verweij d98e7acb71 Fixes issue #25, adds unit tests and release 3.2.1 5 years ago
  Ruben Verweij afa38ea0e7 Fixes issue 25 5 years ago
  Ruben Verweij c5e293463a Merge branch 'master' of github.com:rbnvrw/nd2reader 5 years ago
  Ruben Verweij 010073d24c Fix issue with bundle_axes and get_frame 5 years ago
  Ruben Verweij 8d081751f1 Update release.txt 5 years ago
  Ruben Verweij 14fb182e33 Update version 3.2.0 5 years ago
  Ruben Verweij c9a1e13936 Merge branch 'master' of github.com:rbnvrw/nd2reader 5 years ago
  Ruben Verweij da0238577c Version 3.2.0 5 years ago
  Ruben Verweij e182d2e73a Merge pull request #22 from WMAPernice/fix-missing-frames-issue_to-current-master 5 years ago
  unknown 54fd7253b6 More informative warning if gap frames encountered and filled with np.nan 5 years ago
  unknown 5d8fd312bc Handle gap frames with np.nan-filled array and issue warning about it. 5 years ago
  unknown 6649d577a8 removed NoImageError call, class def, and import references since now redundant 5 years ago
  Ruben Verweij 94fa888c09 Add acquisition times to parser 5 years ago
  Ruben Verweij bd09f668a7 Fixes issue #11 5 years ago
  Ruben Verweij 16cb8fb67f Fixes issue #16 5 years ago
  Ruben Verweij b6aa323420 Fallback function for old get_frame_2D function 5 years ago
  unknown 52e52b3616 fix to allow reading of ND2-files with missing frames, due to e.g. skipping z-positions in one channel of a multichannel image; based on most recent commit of master 5 years ago
  Ruben Verweij 8f8a3aff1e Use _register_get_frame functions 6 years ago
  Ruben Verweij eb81db8460 Merge pull request #21 from glichtner/image-events 6 years ago
  Ruben Verweij 4b6a462aaf Merge pull request #20 from glichtner/valid-loops 6 years ago
  Gregor Lichtner 25d7485868 added event names 6 years ago
  Gregor Lichtner 926f61a8a4 fixed behavior when no events are available 6 years ago
  Gregor Lichtner bf30f9dee1 added image events to parser 6 years ago
  Gregor Lichtner 2881b0f3b1 include only valid loops in experiment metadata 6 years ago
  Ruben Verweij fb1ce2dc7b Merge pull request #15 from jbf81tb/master 6 years ago
  jbf81tb 1fe728a52c get_frame_2D fix 6 years ago
  Ruben Verweij 2ad607a0e5 Remove google analytics 6 years ago
  Ruben Verweij 836eebbf42 Rebuild docs 6 years ago
  Ruben Verweij d547987658 Update layout.html 6 years ago
  Ruben Verweij b1aef51c92 update docs 6 years ago
  Ruben Verweij 26f10dda6f Merge branch 'master' of github.com:rbnvrw/nd2reader 6 years ago
  Ruben Verweij 97846ac6b1 Fix fps calculation 6 years ago
  Ruben Verweij cce5104626 Docs v.3.1.0 6 years ago
  Ruben Verweij 29fcb53a9c Version 3.1.0 6 years ago
  Ruben Verweij 7b15150b64 Fix error 6 years ago
  Ruben Verweij b7cbd58d9b Fix formatting error 6 years ago
  Ruben Verweij af5acc9f76 Expand tests 6 years ago
  Ruben Verweij c8aef85cd5 Refactor 6 years ago
  Ruben Verweij c56da21d09 Refactor 6 years ago
  Ruben Verweij c57aff43d7 Refactor 6 years ago
  Ruben Verweij d656da42c4 Refactor 6 years ago
  Ruben Verweij 8e417fe2b2 Workaround issue #6, fix unit test timesteps 6 years ago
  Ruben Verweij 1311e2df58 Fix to take into account real acquisition times when calculating framerate instead of the set interval (which is sometimes inaccurate) 6 years ago
  Ruben Verweij 1607a24ba1 3.0.9 7 years ago
  Ruben Verweij bf1e04f6d2 Fix test and update gitignore 7 years ago
  Ruben Verweij c02dd5f350 Fix version number 7 years ago
  Ruben Verweij 25e4bafd99 New release 7 years ago
  Ruben Verweij e9ffd4ba98 Version 3.0.7 7 years ago
  Ruben Verweij 53163826a3 Fix #4 7 years ago
  Ruben Verweij b134842880 Update contributing 7 years ago
  Ruben Verweij a42412db31 Add universal bdist wheel 7 years ago
  Ruben Verweij fbdb5a8a55 Version 3.0.6: Add FOV support 7 years ago
  Ruben Verweij 31868d2765 Add additional date formats for parsing raw metadata 7 years ago
  Ruben Verweij 847f1f3517 Version 3.0.5 7 years ago
  Ruben Verweij 482d719c3e Refactor CQ 8 years ago
  Ruben Verweij a6760b173e Test color channels, fix a bug in color channel encoding 8 years ago
  Ruben Verweij 6511f7d82a Test legacy Nd2, parser, reader fixes 8 years ago
  Ruben Verweij 0187996815 Extend unit tests 8 years ago
  Ruben Verweij 6bd8ac2bd2 Extend unit tests, remove badge 8 years ago
  Ruben Verweij 25275d2fd6 Remove unused lines from artificial 8 years ago
  Ruben Verweij e840e11c36 Fix bytes compatibility for Python 2 8 years ago
  Ruben Verweij 34191adb61 Unit testing: test the correct parsing of image_attributes 8 years ago
  Ruben Verweij 589345ebac Add a class priority so this reader will be loaded before the Bioformats reader 8 years ago
  Ruben Verweij 1a3a78f34c Add try/except blocks to make code more robust + fix unit tests 8 years ago
  Ruben Verweij 0accf20f68 Merge remote-tracking branch 'origin/master' 8 years ago
  Ruben Verweij 9f3e4c1b35 Extend artificial to also write the label_map + data to file for more extensive unit testing 8 years ago
  Ruben Verweij 60972d90a9 New documentation theme 8 years ago
  Ruben Verweij 6128ba4838 Fix imports of pims and release 3.0.4 8 years ago
  Ruben Verweij 09bd870734 Fix small bug when uiCount cannot be found 8 years ago
  Ruben Verweij e7555f87de Version 3.0.3 8 years ago
  Ruben Verweij f5301c039d Add a third method to determine the loop sampling interval 8 years ago
  Ruben Verweij 0442981422 Travis: use less python versions for testing 8 years ago
  Ruben Verweij 531f53a36a Update travis script because of upstream bug 8 years ago
  Ruben Verweij cbdec0fdc9 Cache timesteps and add frame rate property 8 years ago
  Ruben Verweij 9f96d23988 Always set 't' axis 8 years ago
  Ruben Verweij 5f43890053 Also get sampling interval when dAvgPeriodDiff is not set 8 years ago
  Ruben Verweij 4bd89eac89 New documentation 8 years ago
  Ruben Verweij 9c957a1d0a Only setup certain axes if size > 1 8 years ago
  Ruben Verweij 30457e5931 Update version 3.0.2 8 years ago
  Ruben Verweij df7913e6b3 Issue #2: fix calculation of image group number when there is no z-axis 8 years ago
  Ruben Verweij bbc95851e6 Merge remote-tracking branch 'origin/master' 8 years ago
  Ruben Verweij d4e505779d Issue #2: clean up axes handling, guess a better default axis 8 years ago
  Ruben Verweij bb7d3a3df4 Update README.md 8 years ago
  Ruben Verweij b4170d6b85 Merge remote-tracking branch 'origin/master' 8 years ago
  Ruben Verweij 80a6e51a77 Fix issue #2 by following PIMS conventions more closely 8 years ago
  Ruben Verweij a9683c528d Installation via Conda Forge 8 years ago
  Ruben Verweij 01acb84546 Add conda installation instructions 8 years ago
  Ruben Verweij 45674ff241 Do not add 'z' to sizes of the reader if there is only one plane, as seems to be the PIMS convention 8 years ago
  Ruben Verweij 0bd5bdc0fd version 3.0.1: Include LICENSE via MANIFEST.in for conda forge 8 years ago
  Ruben Verweij 107c05293d Fix the number of frames 8 years ago
  Ruben Verweij 5b2d2a3832 update docs for new version 8 years ago
  Ruben Verweij 07efade878 Update readme for release on PyPi 8 years ago
  Ruben Verweij 416f4f69fc Update documentation, quick start tutorial 8 years ago
  Ruben Verweij 325ad5f8a6 Add test for raw metadata storage 8 years ago
  Ruben Verweij da38f7cd66 Add more unit test for common + fix possibly endless loop 8 years ago
  Ruben Verweij 3590a7ec6b Fix creating test file for unit tests 8 years ago
  Ruben Verweij b640490de1 Bug fixes by testing different .nd2 files 8 years ago
  Ruben Verweij 480e231d17 Bump version number to reflect api changes 8 years ago
  Ruben Verweij 46eff09834 Update README.md 8 years ago
  Ruben Verweij 0adfd83af9 Add badge 8 years ago
  Ruben Verweij 5a39a82052 Add badge 8 years ago
  Ruben Verweij 05684cf8fe Add badge 8 years ago
  Ruben Verweij 1b38242d81 Extend unit tests 8 years ago
  Ruben Verweij a5417099b0 Extend unit tests 8 years ago
  Ruben Verweij 5251464df5 Extend unit tests 8 years ago
  Ruben Verweij 82f285de5b Fix link 8 years ago
  Ruben Verweij cfd6b93cca New documentation 8 years ago
  Ruben Verweij b0f22eb83b Very basic Reader test 8 years ago
  Ruben Verweij d2255ee123 Last test of LabelMap 8 years ago
  Ruben Verweij 4490d7b830 Code fixes 8 years ago
  Ruben Verweij 19c5db83af Test the LabelMap 8 years ago
  Ruben Verweij cb7cb05f74 Add test for Parser 8 years ago
  Ruben Verweij eb12a4cfd4 Add ArtificialND2 class to help in testing nd2 file reading and parsing 8 years ago
  Ruben Verweij 3ece8689bf Remove functional test for now, cannot store large nd2 files on github 8 years ago
  Ruben Verweij e07e29d688 Remove test data (until I find a solution to store them on github) 8 years ago
  Ruben Verweij e224589934 data 8 years ago
  Ruben Verweij 29c68884ea Test files to git large file storage (git LFS) 8 years ago
  Ruben Verweij e4550bce79 Update tests 8 years ago
  Ruben Verweij 6d1fd6d243 Provide backwards compatibility with old 2.1.3 version 8 years ago
  Ruben Verweij b0a8bcdbf3 update docs 8 years ago
  Ruben Verweij c80e528d0b Update tutorial 8 years ago
  Ruben Verweij 3b0c9462ad Update README.md 8 years ago
  Ruben Verweij 048e101e2d Cleanup documentation 8 years ago
  Ruben Verweij a947149cff Merge remote-tracking branch 'origin/master' 8 years ago
  Ruben Verweij f3d5c62042 Improve documentation 8 years ago
  Ruben Verweij 4df98c13a6 Update README.md 8 years ago
  Ruben Verweij 2ac0210feb Update README.md 8 years ago
  Ruben Verweij dab0b4aca8 docstrings 8 years ago
  Ruben Verweij 46116b8f71 Merge branch 'master' of https://github.com/rbnvrw/nd2reader 8 years ago
  Ruben Verweij 5f863a3ca5 Cleanup docstrings to use google python docs 8 years ago
  Ruben Verweij fc5b32ab3b Update README.md 8 years ago
  Ruben Verweij f3e0e0efdd generated docs in other branch 8 years ago
  Ruben Verweij 5a10934993 API docs using Sphinx 8 years ago
  Ruben Verweij 1d894b8f0a Update README.md 8 years ago
  Ruben Verweij 0760dedfd1 Set theme jekyll-theme-cayman 8 years ago
  Ruben Verweij 4d0fb8165d Create index.md 8 years ago
  Ruben Verweij 00cc869497 Correctly parse experiment data for NDAcquisition and add get_timesteps function to reader 8 years ago
  Ruben Verweij c2e48e36db Add ability to bundle the color axis 8 years ago
  Ruben Verweij 3b07b58d01 Add ability to bundle the color axis 8 years ago
  Ruben Verweij cf8f241dfa Speedup numpy installation on travis 8 years ago
  Ruben Verweij 4877ee0591 Fix old leftover in setup 8 years ago
  Ruben Verweij 334817cead Fix bug 8 years ago
  Ruben Verweij 0a944682e0 Fix path to test files 8 years ago
  Ruben Verweij bd10811aee Add unit tests for opening files and axis sizes 8 years ago
39 changed files with 3558 additions and 377 deletions
Unified View
  1. +2
    -2
      .gitignore
  2. +4
    -0
      .gitmodules
  3. +23
    -13
      .travis.yml
  4. +76
    -0
      CODE_OF_CONDUCT.md
  5. +2
    -3
      CONTRIBUTING.md
  6. +5
    -2
      COPYING
  7. +621
    -0
      LICENSE
  8. +6
    -0
      MANIFEST.in
  9. +43
    -4
      README.md
  10. +1
    -0
      docs
  11. +3
    -1
      nd2reader/__init__.py
  12. +280
    -0
      nd2reader/artificial.py
  13. +182
    -39
      nd2reader/common.py
  14. +111
    -0
      nd2reader/common_raw_metadata.py
  15. +13
    -7
      nd2reader/exceptions.py
  16. +151
    -6
      nd2reader/label_map.py
  17. +167
    -0
      nd2reader/legacy.py
  18. +230
    -82
      nd2reader/parser.py
  19. +461
    -151
      nd2reader/raw_metadata.py
  20. +208
    -41
      nd2reader/reader.py
  21. +54
    -0
      nd2reader/stitched.py
  22. +17
    -0
      release.txt
  23. +4
    -1
      setup.cfg
  24. +9
    -10
      setup.py
  25. +20
    -0
      sphinx/Makefile
  26. +1
    -0
      sphinx/_templates/layout.html
  27. +164
    -0
      sphinx/conf.py
  28. +20
    -0
      sphinx/index.rst
  29. +55
    -0
      sphinx/make.bat
  30. +72
    -0
      sphinx/nd2reader.rst
  31. +119
    -0
      sphinx/tutorial.md
  32. +22
    -0
      tests/test_artificial.py
  33. +107
    -1
      tests/test_common.py
  34. +79
    -0
      tests/test_label_map.py
  35. +38
    -0
      tests/test_legacy.py
  36. +36
    -0
      tests/test_parser.py
  37. +71
    -8
      tests/test_raw_metadata.py
  38. +79
    -0
      tests/test_reader.py
  39. +2
    -6
      tests/test_version.py

+ 2
- 2
.gitignore View File

@ -1,3 +1,4 @@
*.nd2
run.py run.py
# Byte-compiled / optimized / DLL files # Byte-compiled / optimized / DLL files
__pycache__/ __pycache__/
@ -52,6 +53,5 @@ coverage.xml
*.log *.log
*.pot *.pot
# Sphinx documentation
docs/_build/
tests/test_data/

+ 4
- 0
.gitmodules View File

@ -0,0 +1,4 @@
[submodule "docs"]
path = docs
url = https://github.com/rbnvrw/nd2reader.git
branch = gh-pages

+ 23
- 13
.travis.yml View File

@ -1,23 +1,33 @@
env:
global:
- CC_TEST_REPORTER_ID=8582900c285e4da0f253555b1bac1ba501bd6ff07850b0f227166b3cdac59ecc
language: python language: python
git: git:
depth: 3 depth: 3
notifications: notifications:
email: false email: false
addons:
code_climate:
repo_token: 8582900c285e4da0f253555b1bac1ba501bd6ff07850b0f227166b3cdac59ecc
python: python:
- "2.7"
- "3.4"
- "3.5"
- "3.5-dev" # 3.5 development branch
- "3.6"
- "3.6-dev" # 3.6 development branch
- "3.7-dev" # 3.7 development branch
- "nightly" # currently points to 3.7-dev
- 3.5
- 3.6
- 3.7
- 3.8
install: install:
- pip install --upgrade pip setuptools wheel
- pip install --only-binary=numpy numpy
- pip install -r requirements.txt - pip install -r requirements.txt
- pip install codeclimate-test-reporter coverage
- pip install 'coverage>=4.0,<4.4' --force-reinstall # Upstream bug: https://github.com/nedbat/coveragepy/issues/578
before_script:
- curl -L https://codeclimate.com/downloads/test-reporter/test-reporter-latest-linux-amd64 > ./cc-test-reporter
- chmod +x ./cc-test-reporter
- ./cc-test-reporter before-build
script: python ./test.py
script: python ./test.py && CODECLIMATE_REPO_TOKEN=8582900c285e4da0f253555b1bac1ba501bd6ff07850b0f227166b3cdac59ecc codeclimate-test-reporter
after_script:
- ./cc-test-reporter after-build --exit-code $TRAVIS_TEST_RESULT

+ 76
- 0
CODE_OF_CONDUCT.md View File

@ -0,0 +1,76 @@
# Contributor Covenant Code of Conduct
## Our Pledge
In the interest of fostering an open and welcoming environment, we as
contributors and maintainers pledge to making participation in our project and
our community a harassment-free experience for everyone, regardless of age, body
size, disability, ethnicity, sex characteristics, gender identity and expression,
level of experience, education, socio-economic status, nationality, personal
appearance, race, religion, or sexual identity and orientation.
## Our Standards
Examples of behavior that contributes to creating a positive environment
include:
* Using welcoming and inclusive language
* Being respectful of differing viewpoints and experiences
* Gracefully accepting constructive criticism
* Focusing on what is best for the community
* Showing empathy towards other community members
Examples of unacceptable behavior by participants include:
* The use of sexualized language or imagery and unwelcome sexual attention or
advances
* Trolling, insulting/derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or electronic
address, without explicit permission
* Other conduct which could reasonably be considered inappropriate in a
professional setting
## Our Responsibilities
Project maintainers are responsible for clarifying the standards of acceptable
behavior and are expected to take appropriate and fair corrective action in
response to any instances of unacceptable behavior.
Project maintainers have the right and responsibility to remove, edit, or
reject comments, commits, code, wiki edits, issues, and other contributions
that are not aligned to this Code of Conduct, or to ban temporarily or
permanently any contributor for other behaviors that they deem inappropriate,
threatening, offensive, or harmful.
## Scope
This Code of Conduct applies both within project spaces and in public spaces
when an individual is representing the project or its community. Examples of
representing a project or community include using an official project e-mail
address, posting via an official social media account, or acting as an appointed
representative at an online or offline event. Representation of a project may be
further defined and clarified by project maintainers.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported by contacting the project team at development@lighthacking.nl. All
complaints will be reviewed and investigated and will result in a response that
is deemed necessary and appropriate to the circumstances. The project team is
obligated to maintain confidentiality with regard to the reporter of an incident.
Further details of specific enforcement policies may be posted separately.
Project maintainers who do not follow or enforce the Code of Conduct in good
faith may face temporary or permanent repercussions as determined by other
members of the project's leadership.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html
[homepage]: https://www.contributor-covenant.org
For answers to common questions about this code of conduct, see
https://www.contributor-covenant.org/faq

+ 2
- 3
CONTRIBUTING.md View File

@ -6,9 +6,8 @@ request.
## Running and Writing Tests ## Running and Writing Tests
Unit tests can be run with the commands `python3.4 test.py` and `python2.7 test.py`. The test finder will automatically locate any tests in the `tests` directory. Test classes
must inherit from `unittest.TestCase` and tests will only be run if the function name starts with `test`. If you've built the Docker image, you can also run unit tests with
`make test` - this will conveniently run tests for all supported versions of Python.
Unit tests can be run with the command `python test.py`. The test finder will automatically locate any tests in the `tests` directory. Test classes
must inherit from `unittest.TestCase` and tests will only be run if the function name starts with `test`.
There are also functional tests that work with real ND2s to make sure the code actually works with a wide variety of files. We hope to someday put these into a continuous integration There are also functional tests that work with real ND2s to make sure the code actually works with a wide variety of files. We hope to someday put these into a continuous integration
system so everyone can benefit, but for now, they will just be manually run by the maintainer of this library before merging in any contributions. system so everyone can benefit, but for now, they will just be manually run by the maintainer of this library before merging in any contributions.


LICENSE.txt → COPYING View File


+ 621
- 0
LICENSE View File

@ -0,0 +1,621 @@
GNU GENERAL PUBLIC LICENSE
Version 3, 29 June 2007
Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The GNU General Public License is a free, copyleft license for
software and other kinds of works.
The licenses for most software and other practical works are designed
to take away your freedom to share and change the works. By contrast,
the GNU General Public License is intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users. We, the Free Software Foundation, use the
GNU General Public License for most of our software; it applies also to
any other work released this way by its authors. You can apply it to
your programs, too.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.
To protect your rights, we need to prevent others from denying you
these rights or asking you to surrender the rights. Therefore, you have
certain responsibilities if you distribute copies of the software, or if
you modify it: responsibilities to respect the freedom of others.
For example, if you distribute copies of such a program, whether
gratis or for a fee, you must pass on to the recipients the same
freedoms that you received. You must make sure that they, too, receive
or can get the source code. And you must show them these terms so they
know their rights.
Developers that use the GNU GPL protect your rights with two steps:
(1) assert copyright on the software, and (2) offer you this License
giving you legal permission to copy, distribute and/or modify it.
For the developers' and authors' protection, the GPL clearly explains
that there is no warranty for this free software. For both users' and
authors' sake, the GPL requires that modified versions be marked as
changed, so that their problems will not be attributed erroneously to
authors of previous versions.
Some devices are designed to deny users access to install or run
modified versions of the software inside them, although the manufacturer
can do so. This is fundamentally incompatible with the aim of
protecting users' freedom to change the software. The systematic
pattern of such abuse occurs in the area of products for individuals to
use, which is precisely where it is most unacceptable. Therefore, we
have designed this version of the GPL to prohibit the practice for those
products. If such problems arise substantially in other domains, we
stand ready to extend this provision to those domains in future versions
of the GPL, as needed to protect the freedom of users.
Finally, every program is threatened constantly by software patents.
States should not allow patents to restrict development and use of
software on general-purpose computers, but in those that do, we wish to
avoid the special danger that patents applied to a free program could
make it effectively proprietary. To prevent this, the GPL assures that
patents cannot be used to render the program non-free.
The precise terms and conditions for copying, distribution and
modification follow.
TERMS AND CONDITIONS
0. Definitions.
"This License" refers to version 3 of the GNU General Public License.
"Copyright" also means copyright-like laws that apply to other kinds of
works, such as semiconductor masks.
"The Program" refers to any copyrightable work licensed under this
License. Each licensee is addressed as "you". "Licensees" and
"recipients" may be individuals or organizations.
To "modify" a work means to copy from or adapt all or part of the work
in a fashion requiring copyright permission, other than the making of an
exact copy. The resulting work is called a "modified version" of the
earlier work or a work "based on" the earlier work.
A "covered work" means either the unmodified Program or a work based
on the Program.
To "propagate" a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy. Propagation includes copying,
distribution (with or without modification), making available to the
public, and in some countries other activities as well.
To "convey" a work means any kind of propagation that enables other
parties to make or receive copies. Mere interaction with a user through
a computer network, with no transfer of a copy, is not conveying.
An interactive user interface displays "Appropriate Legal Notices"
to the extent that it includes a convenient and prominently visible
feature that (1) displays an appropriate copyright notice, and (2)
tells the user that there is no warranty for the work (except to the
extent that warranties are provided), that licensees may convey the
work under this License, and how to view a copy of this License. If
the interface presents a list of user commands or options, such as a
menu, a prominent item in the list meets this criterion.
1. Source Code.
The "source code" for a work means the preferred form of the work
for making modifications to it. "Object code" means any non-source
form of a work.
A "Standard Interface" means an interface that either is an official
standard defined by a recognized standards body, or, in the case of
interfaces specified for a particular programming language, one that
is widely used among developers working in that language.
The "System Libraries" of an executable work include anything, other
than the work as a whole, that (a) is included in the normal form of
packaging a Major Component, but which is not part of that Major
Component, and (b) serves only to enable use of the work with that
Major Component, or to implement a Standard Interface for which an
implementation is available to the public in source code form. A
"Major Component", in this context, means a major essential component
(kernel, window system, and so on) of the specific operating system
(if any) on which the executable work runs, or a compiler used to
produce the work, or an object code interpreter used to run it.
The "Corresponding Source" for a work in object code form means all
the source code needed to generate, install, and (for an executable
work) run the object code and to modify the work, including scripts to
control those activities. However, it does not include the work's
System Libraries, or general-purpose tools or generally available free
programs which are used unmodified in performing those activities but
which are not part of the work. For example, Corresponding Source
includes interface definition files associated with source files for
the work, and the source code for shared libraries and dynamically
linked subprograms that the work is specifically designed to require,
such as by intimate data communication or control flow between those
subprograms and other parts of the work.
The Corresponding Source need not include anything that users
can regenerate automatically from other parts of the Corresponding
Source.
The Corresponding Source for a work in source code form is that
same work.
2. Basic Permissions.
All rights granted under this License are granted for the term of
copyright on the Program, and are irrevocable provided the stated
conditions are met. This License explicitly affirms your unlimited
permission to run the unmodified Program. The output from running a
covered work is covered by this License only if the output, given its
content, constitutes a covered work. This License acknowledges your
rights of fair use or other equivalent, as provided by copyright law.
You may make, run and propagate covered works that you do not
convey, without conditions so long as your license otherwise remains
in force. You may convey covered works to others for the sole purpose
of having them make modifications exclusively for you, or provide you
with facilities for running those works, provided that you comply with
the terms of this License in conveying all material for which you do
not control copyright. Those thus making or running the covered works
for you must do so exclusively on your behalf, under your direction
and control, on terms that prohibit them from making any copies of
your copyrighted material outside their relationship with you.
Conveying under any other circumstances is permitted solely under
the conditions stated below. Sublicensing is not allowed; section 10
makes it unnecessary.
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
No covered work shall be deemed part of an effective technological
measure under any applicable law fulfilling obligations under article
11 of the WIPO copyright treaty adopted on 20 December 1996, or
similar laws prohibiting or restricting circumvention of such
measures.
When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention
is effected by exercising rights under this License with respect to
the covered work, and you disclaim any intention to limit operation or
modification of the work as a means of enforcing, against the work's
users, your or third parties' legal rights to forbid circumvention of
technological measures.
4. Conveying Verbatim Copies.
You may convey verbatim copies of the Program's source code as you
receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice;
keep intact all notices stating that this License and any
non-permissive terms added in accord with section 7 apply to the code;
keep intact all notices of the absence of any warranty; and give all
recipients a copy of this License along with the Program.
You may charge any price or no price for each copy that you convey,
and you may offer support or warranty protection for a fee.
5. Conveying Modified Source Versions.
You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the
terms of section 4, provided that you also meet all of these conditions:
a) The work must carry prominent notices stating that you modified
it, and giving a relevant date.
b) The work must carry prominent notices stating that it is
released under this License and any conditions added under section
7. This requirement modifies the requirement in section 4 to
"keep intact all notices".
c) You must license the entire work, as a whole, under this
License to anyone who comes into possession of a copy. This
License will therefore apply, along with any applicable section 7
additional terms, to the whole of the work, and all its parts,
regardless of how they are packaged. This License gives no
permission to license the work in any other way, but it does not
invalidate such permission if you have separately received it.
d) If the work has interactive user interfaces, each must display
Appropriate Legal Notices; however, if the Program has interactive
interfaces that do not display Appropriate Legal Notices, your
work need not make them do so.
A compilation of a covered work with other separate and independent
works, which are not by their nature extensions of the covered work,
and which are not combined with it such as to form a larger program,
in or on a volume of a storage or distribution medium, is called an
"aggregate" if the compilation and its resulting copyright are not
used to limit the access or legal rights of the compilation's users
beyond what the individual works permit. Inclusion of a covered work
in an aggregate does not cause this License to apply to the other
parts of the aggregate.
6. Conveying Non-Source Forms.
You may convey a covered work in object code form under the terms
of sections 4 and 5, provided that you also convey the
machine-readable Corresponding Source under the terms of this License,
in one of these ways:
a) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by the
Corresponding Source fixed on a durable physical medium
customarily used for software interchange.
b) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by a
written offer, valid for at least three years and valid for as
long as you offer spare parts or customer support for that product
model, to give anyone who possesses the object code either (1) a
copy of the Corresponding Source for all the software in the
product that is covered by this License, on a durable physical
medium customarily used for software interchange, for a price no
more than your reasonable cost of physically performing this
conveying of source, or (2) access to copy the
Corresponding Source from a network server at no charge.
c) Convey individual copies of the object code with a copy of the
written offer to provide the Corresponding Source. This
alternative is allowed only occasionally and noncommercially, and
only if you received the object code with such an offer, in accord
with subsection 6b.
d) Convey the object code by offering access from a designated
place (gratis or for a charge), and offer equivalent access to the
Corresponding Source in the same way through the same place at no
further charge. You need not require recipients to copy the
Corresponding Source along with the object code. If the place to
copy the object code is a network server, the Corresponding Source
may be on a different server (operated by you or a third party)
that supports equivalent copying facilities, provided you maintain
clear directions next to the object code saying where to find the
Corresponding Source. Regardless of what server hosts the
Corresponding Source, you remain obligated to ensure that it is
available for as long as needed to satisfy these requirements.
e) Convey the object code using peer-to-peer transmission, provided
you inform other peers where the object code and Corresponding
Source of the work are being offered to the general public at no
charge under subsection 6d.
A separable portion of the object code, whose source code is excluded
from the Corresponding Source as a System Library, need not be
included in conveying the object code work.
A "User Product" is either (1) a "consumer product", which means any
tangible personal property which is normally used for personal, family,
or household purposes, or (2) anything designed or sold for incorporation
into a dwelling. In determining whether a product is a consumer product,
doubtful cases shall be resolved in favor of coverage. For a particular
product received by a particular user, "normally used" refers to a
typical or common use of that class of product, regardless of the status
of the particular user or of the way in which the particular user
actually uses, or expects or is expected to use, the product. A product
is a consumer product regardless of whether the product has substantial
commercial, industrial or non-consumer uses, unless such uses represent
the only significant mode of use of the product.
"Installation Information" for a User Product means any methods,
procedures, authorization keys, or other information required to install
and execute modified versions of a covered work in that User Product from
a modified version of its Corresponding Source. The information must
suffice to ensure that the continued functioning of the modified object
code is in no case prevented or interfered with solely because
modification has been made.
If you convey an object code work under this section in, or with, or
specifically for use in, a User Product, and the conveying occurs as
part of a transaction in which the right of possession and use of the
User Product is transferred to the recipient in perpetuity or for a
fixed term (regardless of how the transaction is characterized), the
Corresponding Source conveyed under this section must be accompanied
by the Installation Information. But this requirement does not apply
if neither you nor any third party retains the ability to install
modified object code on the User Product (for example, the work has
been installed in ROM).
The requirement to provide Installation Information does not include a
requirement to continue to provide support service, warranty, or updates
for a work that has been modified or installed by the recipient, or for
the User Product in which it has been modified or installed. Access to a
network may be denied when the modification itself materially and
adversely affects the operation of the network or violates the rules and
protocols for communication across the network.
Corresponding Source conveyed, and Installation Information provided,
in accord with this section must be in a format that is publicly
documented (and with an implementation available to the public in
source code form), and must require no special password or key for
unpacking, reading or copying.
7. Additional Terms.
"Additional permissions" are terms that supplement the terms of this
License by making exceptions from one or more of its conditions.
Additional permissions that are applicable to the entire Program shall
be treated as though they were included in this License, to the extent
that they are valid under applicable law. If additional permissions
apply only to part of the Program, that part may be used separately
under those permissions, but the entire Program remains governed by
this License without regard to the additional permissions.
When you convey a copy of a covered work, you may at your option
remove any additional permissions from that copy, or from any part of
it. (Additional permissions may be written to require their own
removal in certain cases when you modify the work.) You may place
additional permissions on material, added by you to a covered work,
for which you have or can give appropriate copyright permission.
Notwithstanding any other provision of this License, for material you
add to a covered work, you may (if authorized by the copyright holders of
that material) supplement the terms of this License with terms:
a) Disclaiming warranty or limiting liability differently from the
terms of sections 15 and 16 of this License; or
b) Requiring preservation of specified reasonable legal notices or
author attributions in that material or in the Appropriate Legal
Notices displayed by works containing it; or
c) Prohibiting misrepresentation of the origin of that material, or
requiring that modified versions of such material be marked in
reasonable ways as different from the original version; or
d) Limiting the use for publicity purposes of names of licensors or
authors of the material; or
e) Declining to grant rights under trademark law for use of some
trade names, trademarks, or service marks; or
f) Requiring indemnification of licensors and authors of that
material by anyone who conveys the material (or modified versions of
it) with contractual assumptions of liability to the recipient, for
any liability that these contractual assumptions directly impose on
those licensors and authors.
All other non-permissive additional terms are considered "further
restrictions" within the meaning of section 10. If the Program as you
received it, or any part of it, contains a notice stating that it is
governed by this License along with a term that is a further
restriction, you may remove that term. If a license document contains
a further restriction but permits relicensing or conveying under this
License, you may add to a covered work material governed by the terms
of that license document, provided that the further restriction does
not survive such relicensing or conveying.
If you add terms to a covered work in accord with this section, you
must place, in the relevant source files, a statement of the
additional terms that apply to those files, or a notice indicating
where to find the applicable terms.
Additional terms, permissive or non-permissive, may be stated in the
form of a separately written license, or stated as exceptions;
the above requirements apply either way.
8. Termination.
You may not propagate or modify a covered work except as expressly
provided under this License. Any attempt otherwise to propagate or
modify it is void, and will automatically terminate your rights under
this License (including any patent licenses granted under the third
paragraph of section 11).
However, if you cease all violation of this License, then your
license from a particular copyright holder is reinstated (a)
provisionally, unless and until the copyright holder explicitly and
finally terminates your license, and (b) permanently, if the copyright
holder fails to notify you of the violation by some reasonable means
prior to 60 days after the cessation.
Moreover, your license from a particular copyright holder is
reinstated permanently if the copyright holder notifies you of the
violation by some reasonable means, this is the first time you have
received notice of violation of this License (for any work) from that
copyright holder, and you cure the violation prior to 30 days after
your receipt of the notice.
Termination of your rights under this section does not terminate the
licenses of parties who have received copies or rights from you under
this License. If your rights have been terminated and not permanently
reinstated, you do not qualify to receive new licenses for the same
material under section 10.
9. Acceptance Not Required for Having Copies.
You are not required to accept this License in order to receive or
run a copy of the Program. Ancillary propagation of a covered work
occurring solely as a consequence of using peer-to-peer transmission
to receive a copy likewise does not require acceptance. However,
nothing other than this License grants you permission to propagate or
modify any covered work. These actions infringe copyright if you do
not accept this License. Therefore, by modifying or propagating a
covered work, you indicate your acceptance of this License to do so.
10. Automatic Licensing of Downstream Recipients.
Each time you convey a covered work, the recipient automatically
receives a license from the original licensors, to run, modify and
propagate that work, subject to this License. You are not responsible
for enforcing compliance by third parties with this License.
An "entity transaction" is a transaction transferring control of an
organization, or substantially all assets of one, or subdividing an
organization, or merging organizations. If propagation of a covered
work results from an entity transaction, each party to that
transaction who receives a copy of the work also receives whatever
licenses to the work the party's predecessor in interest had or could
give under the previous paragraph, plus a right to possession of the
Corresponding Source of the work from the predecessor in interest, if
the predecessor has it or can get it with reasonable efforts.
You may not impose any further restrictions on the exercise of the
rights granted or affirmed under this License. For example, you may
not impose a license fee, royalty, or other charge for exercise of
rights granted under this License, and you may not initiate litigation
(including a cross-claim or counterclaim in a lawsuit) alleging that
any patent claim is infringed by making, using, selling, offering for
sale, or importing the Program or any portion of it.
11. Patents.
A "contributor" is a copyright holder who authorizes use under this
License of the Program or a work on which the Program is based. The
work thus licensed is called the contributor's "contributor version".
A contributor's "essential patent claims" are all patent claims
owned or controlled by the contributor, whether already acquired or
hereafter acquired, that would be infringed by some manner, permitted
by this License, of making, using, or selling its contributor version,
but do not include claims that would be infringed only as a
consequence of further modification of the contributor version. For
purposes of this definition, "control" includes the right to grant
patent sublicenses in a manner consistent with the requirements of
this License.
Each contributor grants you a non-exclusive, worldwide, royalty-free
patent license under the contributor's essential patent claims, to
make, use, sell, offer for sale, import and otherwise run, modify and
propagate the contents of its contributor version.
In the following three paragraphs, a "patent license" is any express
agreement or commitment, however denominated, not to enforce a patent
(such as an express permission to practice a patent or covenant not to
sue for patent infringement). To "grant" such a patent license to a
party means to make such an agreement or commitment not to enforce a
patent against the party.
If you convey a covered work, knowingly relying on a patent license,
and the Corresponding Source of the work is not available for anyone
to copy, free of charge and under the terms of this License, through a
publicly available network server or other readily accessible means,
then you must either (1) cause the Corresponding Source to be so
available, or (2) arrange to deprive yourself of the benefit of the
patent license for this particular work, or (3) arrange, in a manner
consistent with the requirements of this License, to extend the patent
license to downstream recipients. "Knowingly relying" means you have
actual knowledge that, but for the patent license, your conveying the
covered work in a country, or your recipient's use of the covered work
in a country, would infringe one or more identifiable patents in that
country that you have reason to believe are valid.
If, pursuant to or in connection with a single transaction or
arrangement, you convey, or propagate by procuring conveyance of, a
covered work, and grant a patent license to some of the parties
receiving the covered work authorizing them to use, propagate, modify
or convey a specific copy of the covered work, then the patent license
you grant is automatically extended to all recipients of the covered
work and works based on it.
A patent license is "discriminatory" if it does not include within
the scope of its coverage, prohibits the exercise of, or is
conditioned on the non-exercise of one or more of the rights that are
specifically granted under this License. You may not convey a covered
work if you are a party to an arrangement with a third party that is
in the business of distributing software, under which you make payment
to the third party based on the extent of your activity of conveying
the work, and under which the third party grants, to any of the
parties who would receive the covered work from you, a discriminatory
patent license (a) in connection with copies of the covered work
conveyed by you (or copies made from those copies), or (b) primarily
for and in connection with specific products or compilations that
contain the covered work, unless you entered into that arrangement,
or that patent license was granted, prior to 28 March 2007.
Nothing in this License shall be construed as excluding or limiting
any implied license or other defenses to infringement that may
otherwise be available to you under applicable patent law.
12. No Surrender of Others' Freedom.
If conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot convey a
covered work so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you may
not convey it at all. For example, if you agree to terms that obligate you
to collect a royalty for further conveying from those to whom you convey
the Program, the only way you could satisfy both those terms and this
License would be to refrain entirely from conveying the Program.
13. Use with the GNU Affero General Public License.
Notwithstanding any other provision of this License, you have
permission to link or combine any covered work with a work licensed
under version 3 of the GNU Affero General Public License into a single
combined work, and to convey the resulting work. The terms of this
License will continue to apply to the part which is the covered work,
but the special requirements of the GNU Affero General Public License,
section 13, concerning interaction through a network will apply to the
combination as such.
14. Revised Versions of this License.
The Free Software Foundation may publish revised and/or new versions of
the GNU General Public License from time to time. Such new versions will
be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the
Program specifies that a certain numbered version of the GNU General
Public License "or any later version" applies to it, you have the
option of following the terms and conditions either of that numbered
version or of any later version published by the Free Software
Foundation. If the Program does not specify a version number of the
GNU General Public License, you may choose any version ever published
by the Free Software Foundation.
If the Program specifies that a proxy can decide which future
versions of the GNU General Public License can be used, that proxy's
public statement of acceptance of a version permanently authorizes you
to choose that version for the Program.
Later license versions may give you additional or different
permissions. However, no additional obligations are imposed on any
author or copyright holder as a result of your choosing to follow a
later version.
15. Disclaimer of Warranty.
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
16. Limitation of Liability.
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMAGES.
17. Interpretation of Sections 15 and 16.
If the disclaimer of warranty and limitation of liability provided
above cannot be given local legal effect according to their terms,
reviewing courts shall apply local law that most closely approximates
an absolute waiver of all civil liability in connection with the
Program, unless a warranty or assumption of liability accompanies a
copy of the Program in return for a fee.
END OF TERMS AND CONDITIONS

+ 6
- 0
MANIFEST.in View File

@ -0,0 +1,6 @@
include MANIFEST.in
include VERSION
include setup.py
include README.md
include LICENSE
include COPYING

+ 43
- 4
README.md View File

@ -1,5 +1,7 @@
# nd2reader # nd2reader
[![Anaconda-Server Badge](https://anaconda.org/conda-forge/nd2reader/badges/version.svg)](https://anaconda.org/conda-forge/nd2reader)
[![Anaconda-Server Badge](https://anaconda.org/conda-forge/nd2reader/badges/downloads.svg)](https://anaconda.org/conda-forge/nd2reader)
[![Build Status](https://travis-ci.org/rbnvrw/nd2reader.svg?branch=master)](https://travis-ci.org/rbnvrw/nd2reader) [![Build Status](https://travis-ci.org/rbnvrw/nd2reader.svg?branch=master)](https://travis-ci.org/rbnvrw/nd2reader)
[![Test Coverage](https://codeclimate.com/github/rbnvrw/nd2reader/badges/coverage.svg)](https://codeclimate.com/github/rbnvrw/nd2reader/coverage) [![Test Coverage](https://codeclimate.com/github/rbnvrw/nd2reader/badges/coverage.svg)](https://codeclimate.com/github/rbnvrw/nd2reader/coverage)
[![Code Climate](https://codeclimate.com/github/rbnvrw/nd2reader/badges/gpa.svg)](https://codeclimate.com/github/rbnvrw/nd2reader) [![Code Climate](https://codeclimate.com/github/rbnvrw/nd2reader/badges/gpa.svg)](https://codeclimate.com/github/rbnvrw/nd2reader)
@ -9,21 +11,58 @@
`nd2reader` is a pure-Python package that reads images produced by NIS Elements 4.0+. It has only been definitively tested on NIS Elements 4.30.02 Build 1053. Support for older versions is being actively worked on. `nd2reader` is a pure-Python package that reads images produced by NIS Elements 4.0+. It has only been definitively tested on NIS Elements 4.30.02 Build 1053. Support for older versions is being actively worked on.
The reader is written in the [pims](https://github.com/soft-matter/pims) framework, enabling easy access to multidimensional files, lazy slicing, and nice display in IPython. The reader is written in the [pims](https://github.com/soft-matter/pims) framework, enabling easy access to multidimensional files, lazy slicing, and nice display in IPython.
### Documentation
The documentation is available [here](http://www.lighthacking.nl/nd2reader/).
### Installation ### Installation
The package is available on PyPi. Install it using:
```
pip install nd2reader
```
If you don't already have the packages `numpy`, `pims`, `six` and `xmltodict`, they will be installed automatically if you use the `setup.py` script. If you don't already have the packages `numpy`, `pims`, `six` and `xmltodict`, they will be installed automatically if you use the `setup.py` script.
`nd2reader` is an order of magnitude faster in Python 3. I recommend using it unless you have no other choice. Python 2.7 and Python >= 3.4 are supported.
Python >= 3.5 are supported.
#### Installation via Conda Forge
Installing `nd2reader` from the `conda-forge` channel can be achieved by adding `conda-forge` to your channels with:
```
conda config --add channels conda-forge
```
Once the `conda-forge` channel has been enabled, `nd2reader` can be installed with:
```
conda install nd2reader
```
It is possible to list all of the versions of `nd2reader` available on your platform with:
```
conda search nd2reader --channel conda-forge
```
### ND2s ### ND2s
`nd2reader` follows the [pims](https://github.com/soft-matter/pims) framework. To open a file:
`nd2reader` follows the [pims](https://github.com/soft-matter/pims) framework. To open a file and show the first frame:
```python ```python
from nd2reader import ND2Reader from nd2reader import ND2Reader
images = ND2Reader('my_directory/example.nd2')
import matplotlib.pyplot as plt
with ND2Reader('my_directory/example.nd2') as images:
plt.imshow(images[0])
``` ```
After opening the file, all `pims` features are supported. Please refer to the [documentation](http://soft-matter.github.io/pims/).
After opening the file, all `pims` features are supported. Please refer to the [pims documentation](http://soft-matter.github.io/pims/).
#### Backwards compatibility
Older versions of `nd2reader` do not use the `pims` framework. To provide backwards compatibility, a legacy [Nd2](http://www.lighthacking.nl/nd2reader/nd2reader.html#module-nd2reader.legacy) class is provided.
### Contributing ### Contributing


+ 1
- 0
docs

@ -0,0 +1 @@
Subproject commit f700c239f8f9d7d1f99a3c10d9f67e2b3b8ef307

+ 3
- 1
nd2reader/__init__.py View File

@ -1,3 +1,5 @@
from os import path
from nd2reader.reader import ND2Reader from nd2reader.reader import ND2Reader
from nd2reader.legacy import Nd2
__version__ = '2.1.3'
__version__ = '3.2.3'

+ 280
- 0
nd2reader/artificial.py View File

@ -0,0 +1,280 @@
"""Functions to create artificial nd2 data for testing purposes
"""
import six
import numpy as np
import struct
from nd2reader.common import check_or_make_dir
from os import path
# Friendly names for each metadata chunk written into the artificial file.
# Each entry corresponds positionally to the raw on-disk chunk name at the
# same index in global_file_labels below.
global_labels = ['image_attributes', 'image_text_info', 'image_metadata',
                 'image_metadata_sequence', 'image_calibration', 'x_data',
                 'y_data', 'z_data', 'roi_metadata', 'pfs_status', 'pfs_offset',
                 'guid', 'description', 'camera_exposure_time', 'camera_temp',
                 'acquisition_times', 'acquisition_times_2',
                 'acquisition_frames', 'lut_data', 'grabber_settings',
                 'custom_data', 'app_info', 'image_frame_0']
# Chunk names as they are stored in an ND2 label map; the trailing '!' is
# part of the stored name, and '|N' suffixes select a sequence index.
global_file_labels = ["ImageAttributesLV!", "ImageTextInfoLV!",
                      "ImageMetadataLV!", "ImageMetadataSeqLV|0!",
                      "ImageCalibrationLV|0!", "CustomData|X!", "CustomData|Y!",
                      "CustomData|Z!", "CustomData|RoiMetadata_v1!",
                      "CustomData|PFS_STATUS!", "CustomData|PFS_OFFSET!",
                      "CustomData|GUIDStore!", "CustomData|CustomDescriptionV1_0!",
                      "CustomData|Camera_ExposureTime1!", "CustomData|CameraTemp1!",
                      "CustomData|AcqTimesCache!", "CustomData|AcqTimes2Cache!",
                      "CustomData|AcqFramesCache!", "CustomDataVar|LUTDataV1_0!",
                      "CustomDataVar|GrabberCameraSettingsV1_0!",
                      "CustomDataVar|CustomDataV2_0!", "CustomDataVar|AppInfo_V1_0!",
                      "ImageDataSeq|0!"]
class ArtificialND2(object):
    """Artificial ND2 class (for testing purposes).

    Builds an in-memory byte representation of a minimal ND2 file
    (version header, label map, data chunks, label-map marker) and
    writes it to disk on construction.
    """

    # Value written at the start of every data chunk header
    # (see _pack_data_with_metadata).
    header = 0xabeceda
    # Relative-offset field written into each chunk header; always 0 here.
    relative_offset = 0
    # Numeric type tags used for values inside metadata items
    # (see _get_data_type).
    data_types = {'unsigned_char': 1,
                  'unsigned_int': 2,
                  'unsigned_int_2': 3,
                  'unsigned_long': 5,
                  'double': 6,
                  'string': 8,
                  'char_array': 9,
                  'metadata_item': 11,
                  }
def __init__(self, file, version=(3, 0), skip_blocks=None):
self.version = version
self.raw_text, self.locations, self.data = b'', None, None
check_or_make_dir(path.dirname(file))
self._fh = open(file, 'w+b', 0)
self.write_file(skip_blocks)
    def __enter__(self):
        """Enter the context manager; returns this instance."""
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        """Exit the context manager, closing the underlying file handle."""
        self.close()
    @property
    def file_handle(self):
        """The open binary file handle this object writes to.

        Returns:
            file: the file handle
        """
        return self._fh
def close(self):
"""Correctly close the file handle
"""
if self._fh is not None:
self._fh.close()
def write_file(self, skip_blocks=None):
if skip_blocks is None:
skip_blocks = []
if 'version' not in skip_blocks:
# write version header at start of the file
self.write_version()
if 'label_map' not in skip_blocks:
# write label map + data in the center
self.locations, self.data = self.write_label_map()
if 'label_map_marker' not in skip_blocks:
# write start position of label map at the end of the file
self.write_label_map_info()
# write all to file
self._fh.write(self.raw_text)
def write_version(self):
"""Write file header
"""
# write 16 empty bytes
self.raw_text += bytearray(16)
# write version info
self.raw_text += self._get_version_string()
def write_label_map_info(self):
"""Write the location of the start of the label map at the end of the file
"""
location = self._get_version_byte_length()
self.raw_text += struct.pack("Q", location)
def _get_version_string(self):
return six.b('ND2 FILE SIGNATURE CHUNK NAME01!Ver%s.%s' % self.version)
def _get_version_byte_length(self):
return 16 + len(self._get_version_string())
def write_label_map(self):
raw_text, locations, data = self.create_label_map_bytes()
self.raw_text += raw_text
return locations, data
def create_label_map_bytes(self):
"""Construct a binary label map
Returns:
tuple: (binary data, dictionary data)
"""
raw_text = six.b('')
labels = global_labels
file_labels = global_file_labels
file_data, file_data_dict = self._get_file_data(labels)
locations = {}
# generate random positions and lengths
version_length = self._get_version_byte_length()
# calculate data length
label_length = np.sum([len(six.b(l)) + 16 for l in file_labels])
# write label map
cur_pos = version_length + label_length
for label, file_label, data in zip(labels, file_labels, file_data):
raw_text += six.b(file_label)
data_length = len(data)
raw_text += struct.pack('QQ', cur_pos, data_length)
locations[label] = (cur_pos, data_length)
cur_pos += data_length
# write data
raw_text += six.b('').join(file_data)
return raw_text, locations, file_data_dict
def _pack_data_with_metadata(self, data):
packed_data = self._pack_raw_data_with_metadata(data)
raw_data = struct.pack("IIQ", self.header, self.relative_offset, len(packed_data))
raw_data += packed_data
return raw_data
def _pack_raw_data_with_metadata(self, data):
raw_data = b''
if isinstance(data, dict):
raw_data = self._pack_dict_with_metadata(data)
elif isinstance(data, int):
raw_data = struct.pack('I', data)
elif isinstance(data, float):
raw_data = struct.pack('d', data)
elif isinstance(data, str):
raw_data = self._str_to_padded_bytes(data)
return raw_data
def _get_data_type(self, data):
if isinstance(data, dict):
return self.data_types['metadata_item']
elif isinstance(data, int):
return self.data_types['unsigned_int']
elif isinstance(data, str):
return self.data_types['string']
else:
return self.data_types['double']
@staticmethod
def _str_to_padded_bytes(data):
    """Encode a string as zero-padded characters with a two-zero-byte terminator.

    Each character becomes one byte followed by a zero byte ('cx'), and the
    whole sequence ends in two zero bytes ('xx').
    """
    padded = six.b('')
    for character in data:
        padded += struct.pack('cx', six.b(character))
    return padded + struct.pack('xx')
def _pack_dict_with_metadata(self, data):
    """Serialize a dict into the ND2 metadata wire format.

    Each entry is written as a two-byte header (data type code, key length),
    the zero-padded key, then the packed value. Dict-valued entries get an
    extra (key count, length) header before the value and trailing zero
    padding after it.

    Args:
        data (dict): the dictionary to serialize

    Returns:
        bytes: the packed representation of ``data``
    """
    raw_data = b''
    for data_key in data.keys():
        # names have always one character extra and are padded in zero bytes???
        b_data_key = self._str_to_padded_bytes(data_key)
        # header consists of data type and length of key name, it is represented by 2 unsigned chars
        raw_data += struct.pack('BB', self._get_data_type(data[data_key]), len(data_key) + 1)
        raw_data += b_data_key
        sub_data = self._pack_raw_data_with_metadata(data[data_key])
        if isinstance(data[data_key], dict):
            # Pack: the number of keys and the length of raw data until now, sub data
            # and the 12 bytes that we add now
            raw_data += struct.pack("<IQ", len(data[data_key].keys()), len(sub_data) + len(raw_data) + 12)
        raw_data += sub_data
        if isinstance(data[data_key], dict):
            # apparently there is also a huge empty space:
            # 8 zero bytes per key — presumably the offset table the reader skips; TODO confirm
            raw_data += b''.join([struct.pack('x')] * len(data[data_key].keys()) * 8)
    return raw_data
@staticmethod
def _get_slx_img_attrib():
return {'uiWidth': 128,
'uiWidthBytes': 256,
'uiHeight': 128,
'uiComp': 1,
'uiBpcInMemory': 16,
'uiBpcSignificant': 12,
'uiSequenceCount': 70,
'uiTileWidth': 128,
'uiTileHeight': 128,
'eCompression': 2,
'dCompressionParam': -1.0,
'ePixelType': 1,
'uiVirtualComponents': 1
}
@staticmethod
def _get_slx_picture_metadata():
return {'sPicturePlanes':
{
'sPlaneNew': {
# channels are numbered a0, a1, ..., aN
'a0': {
'sDescription': 'TRITC'
}
}
}
}
def _get_file_data(self, labels):
file_data = [
{'SLxImageAttributes': self._get_slx_img_attrib()}, # ImageAttributesLV!",
7, # ImageTextInfoLV!",
7, # ImageMetadataLV!",
{'SLxPictureMetadata': self._get_slx_picture_metadata()}, # ImageMetadataSeqLV|0!",
7, # ImageCalibrationLV|0!",
7, # CustomData|X!",
7, # CustomData|Y!",
7, # CustomData|Z!",
7, # CustomData|RoiMetadata_v1!",
7, # CustomData|PFS_STATUS!",
7, # CustomData|PFS_OFFSET!",
7, # CustomData|GUIDStore!",
7, # CustomData|CustomDescriptionV1_0!",
7, # CustomData|Camera_ExposureTime1!",
7, # CustomData|CameraTemp1!",
[0], # CustomData|AcqTimesCache!",
[0], # CustomData|AcqTimes2Cache!",
[0], # CustomData|AcqFramesCache!",
7, # CustomDataVar|LUTDataV1_0!",
7, # CustomDataVar|GrabberCameraSettingsV1_0!",
7, # CustomDataVar|CustomDataV2_0!",
7, # CustomDataVar|AppInfo_V1_0!",
7, # ImageDataSeq|0!"
]
file_data_dict = {l: d for l, d in zip(labels, file_data)}
# convert to bytes
file_data = [self._pack_data_with_metadata(d) for d in file_data]
return file_data, file_data_dict

+ 182
- 39
nd2reader/common.py View File

@ -1,3 +1,4 @@
import os
import struct import struct
import array import array
from datetime import datetime from datetime import datetime
@ -7,11 +8,13 @@ from nd2reader.exceptions import InvalidVersionError
def get_version(fh): def get_version(fh):
"""
Determines what version the ND2 is.
"""Determines what version the ND2 is.
Args:
fh: File handle of the .nd2 file
:param fh: an open file handle to the ND2
:type fh: file
Returns:
tuple: Major and minor version
""" """
# the first 16 bytes seem to have no meaning, so we skip them # the first 16 bytes seem to have no meaning, so we skip them
@ -23,11 +26,13 @@ def get_version(fh):
def parse_version(data): def parse_version(data):
"""
Parses a string with the version data in it.
"""Parses a string with the version data in it.
:param data: the 19th through 54th byte of the ND2, representing the version
:type data: unicode
Args:
data (unicode): the 19th through 54th byte of the ND2, representing the version
Returns:
tuple: Major and minor version
""" """
match = re.search(r"""^ND2 FILE SIGNATURE CHUNK NAME01!Ver(?P<major>\d)\.(?P<minor>\d)$""", data) match = re.search(r"""^ND2 FILE SIGNATURE CHUNK NAME01!Ver(?P<major>\d)\.(?P<minor>\d)$""", data)
@ -40,17 +45,17 @@ def parse_version(data):
def read_chunk(fh, chunk_location): def read_chunk(fh, chunk_location):
"""
Reads a piece of data given the location of its pointer.
"""Reads a piece of data given the location of its pointer.
:param fh: an open file handle to the ND2
:param chunk_location: a pointer
:type chunk_location: int
Args:
fh: an open file handle to the ND2
chunk_location (int): location to read
:rtype: bytes
Returns:
bytes: the data at the chunk location
""" """
if chunk_location is None:
if chunk_location is None or fh is None:
return None return None
fh.seek(chunk_location) fh.seek(chunk_location)
# The chunk metadata is always 16 bytes long # The chunk metadata is always 16 bytes long
@ -65,6 +70,17 @@ def read_chunk(fh, chunk_location):
def read_array(fh, kind, chunk_location): def read_array(fh, kind, chunk_location):
"""
Args:
fh: File handle of the nd2 file
kind: data type, can be one of 'double', 'int' or 'float'
chunk_location: the location of the array chunk in the binary nd2 file
Returns:
array.array: an array of the data
"""
kinds = {'double': 'd', kinds = {'double': 'd',
'int': 'i', 'int': 'i',
'float': 'f'} 'float': 'f'}
@ -77,30 +93,93 @@ def read_array(fh, kind, chunk_location):
def _parse_unsigned_char(data): def _parse_unsigned_char(data):
"""
Args:
data: binary data
Returns:
char: the data converted to unsigned char
"""
return struct.unpack("B", data.read(1))[0] return struct.unpack("B", data.read(1))[0]
def _parse_unsigned_int(data): def _parse_unsigned_int(data):
"""
Args:
data: binary data
Returns:
int: the data converted to unsigned int
"""
return struct.unpack("I", data.read(4))[0] return struct.unpack("I", data.read(4))[0]
def _parse_unsigned_long(data): def _parse_unsigned_long(data):
"""
Args:
data: binary data
Returns:
long: the data converted to unsigned long
"""
return struct.unpack("Q", data.read(8))[0] return struct.unpack("Q", data.read(8))[0]
def _parse_double(data): def _parse_double(data):
"""
Args:
data: binary data
Returns:
double: the data converted to double
"""
return struct.unpack("d", data.read(8))[0] return struct.unpack("d", data.read(8))[0]
def _parse_string(data): def _parse_string(data):
"""
Args:
data: binary data
Returns:
string: the data converted to string
"""
value = data.read(2) value = data.read(2)
# the string ends at the first instance of \x00\x00
while not value.endswith(six.b("\x00\x00")): while not value.endswith(six.b("\x00\x00")):
# the string ends at the first instance of \x00\x00
value += data.read(2)
return value.decode("utf16")[:-1].encode("utf8")
next_data = data.read(2)
if len(next_data) == 0:
break
value += next_data
try:
decoded = value.decode("utf16")[:-1].encode("utf8")
except UnicodeDecodeError:
decoded = value.decode('utf8').encode("utf8")
return decoded
def _parse_char_array(data): def _parse_char_array(data):
"""
Args:
data: binary data
Returns:
array.array: the data converted to an array
"""
array_length = struct.unpack("Q", data.read(8))[0] array_length = struct.unpack("Q", data.read(8))[0]
return array.array("B", data.read(array_length)) return array.array("B", data.read(array_length))
@ -109,40 +188,60 @@ def parse_date(text_info):
""" """
The date and time when acquisition began. The date and time when acquisition began.
:rtype: datetime.datetime() or None
Args:
text_info: the text that contains the date and time information
Returns:
datetime: the date and time of the acquisition
""" """
for line in text_info.values(): for line in text_info.values():
line = line.decode("utf8") line = line.decode("utf8")
# ND2s seem to randomly switch between 12- and 24-hour representations. # ND2s seem to randomly switch between 12- and 24-hour representations.
try:
absolute_start = datetime.strptime(line, "%m/%d/%Y %H:%M:%S")
except (TypeError, ValueError):
possible_formats = ["%m/%d/%Y %H:%M:%S", "%m/%d/%Y %I:%M:%S %p", "%d/%m/%Y %H:%M:%S"]
for date_format in possible_formats:
try: try:
absolute_start = datetime.strptime(line, "%m/%d/%Y %I:%M:%S %p")
absolute_start = datetime.strptime(line, date_format)
except (TypeError, ValueError): except (TypeError, ValueError):
absolute_start = None
continue
return absolute_start
return absolute_start
return None
def _parse_metadata_item(data, cursor_position): def _parse_metadata_item(data, cursor_position):
"""
Reads hierarchical data, analogous to a Python dict.
"""Reads hierarchical data, analogous to a Python dict.
Args:
data: the binary data that needs to be parsed
cursor_position: the position in the binary nd2 file
Returns:
dict: a dictionary containing the metadata item
""" """
new_count, length = struct.unpack("<IQ", data.read(12)) new_count, length = struct.unpack("<IQ", data.read(12))
length -= data.tell() - cursor_position length -= data.tell() - cursor_position
next_data_length = data.read(length) next_data_length = data.read(length)
value = read_metadata(next_data_length, new_count) value = read_metadata(next_data_length, new_count)
# Skip some offsets # Skip some offsets
data.read(new_count * 8) data.read(new_count * 8)
return value return value
def _get_value(data, data_type, cursor_position): def _get_value(data, data_type, cursor_position):
"""
ND2s use various codes to indicate different data types, which we translate here.
"""ND2s use various codes to indicate different data types, which we translate here.
Args:
data: the binary data
data_type: the data type (unsigned char = 1, unsigned int = 2 or 3, unsigned long = 5, double = 6, string = 8,
char array = 9, metadata item = 11)
cursor_position: the cursor position in the binary nd2 file
Returns:
mixed: the parsed value
""" """
parser = {1: _parse_unsigned_char, parser = {1: _parse_unsigned_char,
@ -153,12 +252,24 @@ def _get_value(data, data_type, cursor_position):
8: _parse_string, 8: _parse_string,
9: _parse_char_array, 9: _parse_char_array,
11: _parse_metadata_item} 11: _parse_metadata_item}
return parser[data_type](data) if data_type < 11 else parser[data_type](data, cursor_position)
try:
value = parser[data_type](data) if data_type < 11 else parser[data_type](data, cursor_position)
except (KeyError, struct.error):
value = None
return value
def read_metadata(data, count): def read_metadata(data, count):
""" """
Iterates over each element some section of the metadata and parses it.
Iterates over each element of some section of the metadata and parses it.
Args:
data: the metadata in binary form
count: the number of metadata elements
Returns:
dict: a dictionary containing the parsed metadata
""" """
if data is None: if data is None:
@ -175,10 +286,7 @@ def read_metadata(data, count):
# We've reached the end of some hierarchy of data # We've reached the end of some hierarchy of data
break break
if six.PY3:
header = header.decode("utf8")
data_type, name_length = map(ord, header)
data_type, name_length = struct.unpack('BB', header)
name = data.read(name_length * 2).decode("utf16")[:-1].encode("utf8") name = data.read(name_length * 2).decode("utf16")[:-1].encode("utf8")
value = _get_value(data, data_type, cursor_position) value = _get_value(data, data_type, cursor_position)
@ -190,10 +298,15 @@ def read_metadata(data, count):
def _add_to_metadata(metadata, name, value): def _add_to_metadata(metadata, name, value):
""" """
Add the name value pair to the metadata dict Add the name value pair to the metadata dict
:param metadata:
:param name:
:param value:
:return:
Args:
metadata (dict): a dictionary containing the metadata
name (string): the dictionary key
value: the value to add
Returns:
dict: the new metadata dictionary
""" """
if name not in metadata.keys(): if name not in metadata.keys():
metadata[name] = value metadata[name] = value
@ -208,3 +321,33 @@ def _add_to_metadata(metadata, name, value):
metadata[name].append(value) metadata[name].append(value)
return metadata return metadata
def get_from_dict_if_exists(key, dictionary, convert_key_to_binary=True):
"""
Get the entry from the dictionary if it exists
Args:
key: key to lookup
dictionary: dictionary to look in
convert_key_to_binary: convert the key from string to binary if true
Returns:
the value of dictionary[key] or None
"""
if convert_key_to_binary:
key = six.b(key)
if key not in dictionary:
return None
return dictionary[key]
def check_or_make_dir(directory):
"""
Check if a directory exists, if not, create it
Args:
directory: the path to the directory
"""
if not os.path.exists(directory):
os.makedirs(directory)

+ 111
- 0
nd2reader/common_raw_metadata.py View File

@ -0,0 +1,111 @@
import six
import warnings
from nd2reader.common import get_from_dict_if_exists
def parse_if_not_none(to_check, callback):
if to_check is not None:
return callback()
return None
def parse_dimension_text_line(line):
if six.b("Dimensions:") in line:
entries = line.split(six.b("\r\n"))
for entry in entries:
if entry.startswith(six.b("Dimensions:")):
return entry
return None
def parse_roi_shape(shape):
if shape == 3:
return 'rectangle'
elif shape == 9:
return 'circle'
return None
def parse_roi_type(type_no):
if type_no == 4:
return 'stimulation'
elif type_no == 3:
return 'reference'
elif type_no == 2:
return 'background'
return None
def get_loops_from_data(loop_data):
# special ND experiment
if six.b('pPeriod') not in loop_data:
return []
if six.b('uiPeriodCount') in loop_data and loop_data[six.b('uiPeriodCount')] > 0:
loops = []
for i, period in enumerate(loop_data[six.b('pPeriod')]):
# exclude invalid periods
if six.b('pPeriodValid') in loop_data:
try:
if loop_data[six.b('pPeriodValid')][i] == 1:
loops.append(loop_data[six.b('pPeriod')][period])
except IndexError:
continue
else:
# we can't be sure, append all
loops.append(loop_data[six.b('pPeriod')][period])
return [loop_data]
def guess_sampling_from_loops(duration, loop):
""" In some cases, both keys are not saved. Then try to calculate it.
Args:
duration: the total duration of the loop
loop: the raw loop data
Returns:
float: the guessed sampling interval in milliseconds
"""
number_of_loops = get_from_dict_if_exists('uiCount', loop)
number_of_loops = number_of_loops if number_of_loops is not None and number_of_loops > 0 else 1
interval = duration / number_of_loops
return interval
def determine_sampling_interval(duration, loop):
"""Determines the loop sampling interval in milliseconds
Args:
duration: loop duration in milliseconds
loop: loop dictionary
Returns:
float: the sampling interval in milliseconds
"""
interval = get_from_dict_if_exists('dPeriod', loop)
avg_interval = get_from_dict_if_exists('dAvgPeriodDiff', loop)
if interval is None or interval <= 0:
interval = avg_interval
else:
avg_interval_set = avg_interval is not None and avg_interval > 0
if round(avg_interval) != round(interval) and avg_interval_set:
message = ("Reported average frame interval (%.1f ms) doesn't"
" match the set interval (%.1f ms). Using the average"
" now.")
warnings.warn(message % (avg_interval, interval), RuntimeWarning)
interval = avg_interval
if interval is None or interval <= 0:
# In some cases, both keys are not saved. Then try to calculate it.
interval = guess_sampling_from_loops(duration, loop)
return interval

+ 13
- 7
nd2reader/exceptions.py View File

@ -1,15 +1,21 @@
class InvalidVersionError(Exception):
"""
We don't know how to parse the version of ND2 that we were given.
class InvalidFileType(Exception):
"""Non .nd2 extension file.
File does not have an extension .nd2.
""" """
pass pass
class InvalidVersionError(Exception):
"""Unknown version.
We don't know how to parse the version of ND2 that we were given.
class NoImageError(Exception):
""" """
Some apparent images in ND2s are just completely blank placeholders. These are used when the number of images per
cycle are unequal (e.g. if you take fluorescent images every 2 minutes, and bright field images every minute).
pass
class EmptyFileError(Exception):
"""This .nd2 file seems to be empty.
Raised if no axes are found in the file.
""" """
pass

+ 151
- 6
nd2reader/label_map.py View File

@ -4,17 +4,14 @@ import re
class LabelMap(object): class LabelMap(object):
"""
Contains pointers to metadata. This might only be valid for V3 files.
"""Contains pointers to metadata. This might only be valid for V3 files.
""" """
def __init__(self, raw_binary_data): def __init__(self, raw_binary_data):
self._data = raw_binary_data self._data = raw_binary_data
self._image_data = {} self._image_data = {}
def image_attributes(self):
return self._get_location(six.b("ImageAttributesLV!"))
def _get_location(self, label): def _get_location(self, label):
try: try:
label_location = self._data.index(label) + len(label) label_location = self._data.index(label) + len(label)
@ -28,18 +25,52 @@ class LabelMap(object):
@property @property
def image_text_info(self): def image_text_info(self):
"""Get the location of the textual image information
Returns:
int: The location of the textual image information
"""
return self._get_location(six.b("ImageTextInfoLV!")) return self._get_location(six.b("ImageTextInfoLV!"))
@property @property
def image_metadata(self): def image_metadata(self):
"""Get the location of the image metadata
Returns:
int: The location of the image metadata
"""
return self._get_location(six.b("ImageMetadataLV!")) return self._get_location(six.b("ImageMetadataLV!"))
@property
def image_events(self):
"""Get the location of the image events
Returns:
int: The location of the image events
"""
return self._get_location(six.b("ImageEventsLV!"))
@property @property
def image_metadata_sequence(self): def image_metadata_sequence(self):
# there is always only one of these, even though it has a pipe followed by a zero, which is how they do indexes
"""Get the location of the image metadata sequence. There is always only one of these, even though it has a pipe
followed by a zero, which is how they do indexes.
Returns:
int: The location of the image metadata sequence
"""
return self._get_location(six.b("ImageMetadataSeqLV|0!")) return self._get_location(six.b("ImageMetadataSeqLV|0!"))
def get_image_data_location(self, index): def get_image_data_location(self, index):
"""Get the location of the image data
Returns:
int: The location of the image data
"""
if not self._image_data: if not self._image_data:
regex = re.compile(six.b("""ImageDataSeq\|(\d+)!""")) regex = re.compile(six.b("""ImageDataSeq\|(\d+)!"""))
for match in regex.finditer(self._data): for match in regex.finditer(self._data):
@ -50,76 +81,190 @@ class LabelMap(object):
@property @property
def image_calibration(self): def image_calibration(self):
"""Get the location of the image calibration
Returns:
int: The location of the image calibration
"""
return self._get_location(six.b("ImageCalibrationLV|0!")) return self._get_location(six.b("ImageCalibrationLV|0!"))
@property @property
def image_attributes(self): def image_attributes(self):
"""Get the location of the image attributes
Returns:
int: The location of the image attributes
"""
return self._get_location(six.b("ImageAttributesLV!")) return self._get_location(six.b("ImageAttributesLV!"))
@property @property
def x_data(self): def x_data(self):
"""Get the location of the custom x data
Returns:
int: The location of the custom x data
"""
return self._get_location(six.b("CustomData|X!")) return self._get_location(six.b("CustomData|X!"))
@property @property
def y_data(self): def y_data(self):
"""Get the location of the custom y data
Returns:
int: The location of the custom y data
"""
return self._get_location(six.b("CustomData|Y!")) return self._get_location(six.b("CustomData|Y!"))
@property @property
def z_data(self): def z_data(self):
"""Get the location of the custom z data
Returns:
int: The location of the custom z data
"""
return self._get_location(six.b("CustomData|Z!")) return self._get_location(six.b("CustomData|Z!"))
@property @property
def roi_metadata(self): def roi_metadata(self):
"""Information about any regions of interest (ROIs) defined in the nd2 file
Returns:
int: The location of the regions of interest (ROIs)
"""
return self._get_location(six.b("CustomData|RoiMetadata_v1!")) return self._get_location(six.b("CustomData|RoiMetadata_v1!"))
@property @property
def pfs_status(self): def pfs_status(self):
"""Get the location of the perfect focus system (PFS) status
Returns:
int: The location of the perfect focus system (PFS) status
"""
return self._get_location(six.b("CustomData|PFS_STATUS!")) return self._get_location(six.b("CustomData|PFS_STATUS!"))
@property @property
def pfs_offset(self): def pfs_offset(self):
"""Get the location of the perfect focus system (PFS) offset
Returns:
int: The location of the perfect focus system (PFS) offset
"""
return self._get_location(six.b("CustomData|PFS_OFFSET!")) return self._get_location(six.b("CustomData|PFS_OFFSET!"))
@property @property
def guid(self): def guid(self):
"""Get the location of the image guid
Returns:
int: The location of the image guid
"""
return self._get_location(six.b("CustomData|GUIDStore!")) return self._get_location(six.b("CustomData|GUIDStore!"))
@property @property
def description(self): def description(self):
"""Get the location of the image description
Returns:
int: The location of the image description
"""
return self._get_location(six.b("CustomData|CustomDescriptionV1_0!")) return self._get_location(six.b("CustomData|CustomDescriptionV1_0!"))
@property @property
def camera_exposure_time(self): def camera_exposure_time(self):
"""Get the location of the camera exposure time
Returns:
int: The location of the camera exposure time
"""
return self._get_location(six.b("CustomData|Camera_ExposureTime1!")) return self._get_location(six.b("CustomData|Camera_ExposureTime1!"))
@property @property
def camera_temp(self): def camera_temp(self):
"""Get the location of the camera temperature
Returns:
int: The location of the camera temperature
"""
return self._get_location(six.b("CustomData|CameraTemp1!")) return self._get_location(six.b("CustomData|CameraTemp1!"))
@property @property
def acquisition_times(self): def acquisition_times(self):
"""Get the location of the acquisition times, block 1
Returns:
int: The location of the acquisition times, block 1
"""
return self._get_location(six.b("CustomData|AcqTimesCache!")) return self._get_location(six.b("CustomData|AcqTimesCache!"))
@property @property
def acquisition_times_2(self): def acquisition_times_2(self):
"""Get the location of the acquisition times, block 2
Returns:
int: The location of the acquisition times, block 2
"""
return self._get_location(six.b("CustomData|AcqTimes2Cache!")) return self._get_location(six.b("CustomData|AcqTimes2Cache!"))
@property @property
def acquisition_frames(self): def acquisition_frames(self):
"""Get the location of the acquisition frames
Returns:
int: The location of the acquisition frames
"""
return self._get_location(six.b("CustomData|AcqFramesCache!")) return self._get_location(six.b("CustomData|AcqFramesCache!"))
@property @property
def lut_data(self): def lut_data(self):
"""Get the location of the LUT data
Returns:
int: The location of the LUT data
"""
return self._get_location(six.b("CustomDataVar|LUTDataV1_0!")) return self._get_location(six.b("CustomDataVar|LUTDataV1_0!"))
@property @property
def grabber_settings(self): def grabber_settings(self):
"""Get the location of the grabber settings
Returns:
int: The location of the grabber settings
"""
return self._get_location(six.b("CustomDataVar|GrabberCameraSettingsV1_0!")) return self._get_location(six.b("CustomDataVar|GrabberCameraSettingsV1_0!"))
@property @property
def custom_data(self): def custom_data(self):
"""Get the location of the custom user data
Returns:
int: The location of the custom user data
"""
return self._get_location(six.b("CustomDataVar|CustomDataV2_0!")) return self._get_location(six.b("CustomDataVar|CustomDataV2_0!"))
@property @property
def app_info(self): def app_info(self):
"""Get the location of the application info metadata
Returns:
int: The location of the application info metadata
"""
return self._get_location(six.b("CustomDataVar|AppInfo_V1_0!")) return self._get_location(six.b("CustomDataVar|AppInfo_V1_0!"))

+ 167
- 0
nd2reader/legacy.py View File

@ -0,0 +1,167 @@
"""
Legacy class for backwards compatibility
"""
import warnings
from nd2reader import ND2Reader
class Nd2(object):
""" Warning: this module is deprecated and only maintained for backwards compatibility with the non-PIMS version of
nd2reader.
"""
def __init__(self, filename):
warnings.warn(
"The 'Nd2' class is deprecated, please consider using the new ND2Reader interface which uses pims.",
DeprecationWarning)
self.reader = ND2Reader(filename)
def __repr__(self):
return "\n".join(["<Deprecated ND2 %s>" % self.reader.filename,
"Created: %s" % (self.date if self.date is not None else "Unknown"),
"Image size: %sx%s (HxW)" % (self.height, self.width),
"Frames: %s" % len(self.frames),
"Channels: %s" % ", ".join(["%s" % str(channel) for channel in self.channels]),
"Fields of View: %s" % len(self.fields_of_view),
"Z-Levels: %s" % len(self.z_levels)
])
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if self.reader is not None:
self.reader.close()
def __len__(self):
return len(self.reader)
def __getitem__(self, item):
return self.reader[item]
def select(self, fields_of_view=None, channels=None, z_levels=None, start=0, stop=None):
"""Select images based on criteria.
Args:
fields_of_view: the fields of view
channels: the color channels
z_levels: the z levels
start: the starting frame
stop: the last frame
Returns:
ND2Reader: Sliced ND2Reader which contains the frames
"""
if stop is None:
stop = len(self.frames)
return self.reader[start:stop]
def get_image(self, frame_number, field_of_view, channel_name, z_level):
"""Deprecated. Returns the specified image from the ND2Reader class.
Args:
frame_number: the frame number
field_of_view: the field of view number
channel_name: the name of the color channel
z_level: the z level number
Returns:
Frame: the specified image
"""
return self.reader.parser.get_image_by_attributes(frame_number, field_of_view, channel_name, z_level,
self.height, self.width)
def close(self):
"""Closes the ND2Reader
"""
if self.reader is not None:
self.reader.close()
@property
def height(self):
"""Deprecated. Fetches the height of the image.
Returns:
int: the pixel height of the image
"""
return self._get_width_or_height("height")
@property
def width(self):
"""Deprecated. Fetches the width of the image.
Returns:
int: the pixel width of the image
"""
return self._get_width_or_height("width")
def _get_width_or_height(self, key):
return self.reader.metadata[key] if self.reader.metadata[key] is not None else 0
@property
def z_levels(self):
"""Deprecated. Fetches the available z levels.
Returns:
list: z levels.
"""
return self.reader.metadata["z_levels"]
@property
def fields_of_view(self):
"""Deprecated. Fetches the fields of view.
Returns:
list: fields of view.
"""
return self.reader.metadata["fields_of_view"]
@property
def channels(self):
"""Deprecated. Fetches all color channels.
Returns:
list: the color channels.
"""
return self.reader.metadata["channels"]
@property
def frames(self):
"""Deprecated. Fetches all frames.
Returns:
list: list of frames
"""
return self.reader.metadata["frames"]
@property
def date(self):
"""Deprecated. Fetches the acquisition date.
Returns:
string: the date
"""
return self.reader.metadata["date"]
@property
def pixel_microns(self):
"""Deprecated. Fetches the amount of microns per pixel.
Returns:
float: microns per pixel
"""
return self.reader.metadata["pixel_microns"]

+ 230
- 82
nd2reader/parser.py View File

@ -3,17 +3,20 @@ import struct
import array import array
import six import six
from pims import Frame
import warnings
from pims.base_frames import Frame
import numpy as np import numpy as np
from nd2reader.common import get_version, read_chunk from nd2reader.common import get_version, read_chunk
from nd2reader.exceptions import InvalidVersionError, NoImageError
from nd2reader.label_map import LabelMap from nd2reader.label_map import LabelMap
from nd2reader.raw_metadata import RawMetadata from nd2reader.raw_metadata import RawMetadata
from nd2reader import stitched
class Parser(object): class Parser(object):
""" Parses ND2 files and creates a Metadata and driver object. """
"""Parses ND2 files and creates a Metadata and driver object.
"""
CHUNK_HEADER = 0xabeceda CHUNK_HEADER = 0xabeceda
CHUNK_MAP_START = six.b("ND2 FILEMAP SIGNATURE NAME 0001!") CHUNK_MAP_START = six.b("ND2 FILEMAP SIGNATURE NAME 0001!")
CHUNK_MAP_END = six.b("ND2 CHUNK MAP SIGNATURE 0000001!") CHUNK_MAP_END = six.b("ND2 CHUNK MAP SIGNATURE 0000001!")
@ -21,26 +24,26 @@ class Parser(object):
supported_file_versions = {(3, None): True} supported_file_versions = {(3, None): True}
def __init__(self, fh): def __init__(self, fh):
"""
:type fh: file
"""
self._fh = fh self._fh = fh
self._label_map = None self._label_map = None
self._raw_metadata = None self._raw_metadata = None
self.metadata = None self.metadata = None
# First check the file version # First check the file version
self._check_version_supported()
self.supported = self._check_version_supported()
# Parse the metadata # Parse the metadata
self._parse_metadata() self._parse_metadata()
def calculate_image_properties(self, index): def calculate_image_properties(self, index):
"""
Calculate FOV, channels and z_levels
:param index:
:return:
"""Calculate FOV, channels and z_levels
Args:
index(int): the index (which is simply the order in which the image was acquired)
Returns:
tuple: tuple of the field of view, the channel and the z level
""" """
field_of_view = self._calculate_field_of_view(index) field_of_view = self._calculate_field_of_view(index)
channel = self._calculate_channel(index) channel = self._calculate_channel(index)
@ -56,8 +59,11 @@ class Parser(object):
eliminate this possibility in future releases. For now, you'll need to check if your image is None if you're eliminate this possibility in future releases. For now, you'll need to check if your image is None if you're
doing anything out of the ordinary. doing anything out of the ordinary.
:type index: int
:rtype: Image or None
Args:
index(int): the index (which is simply the order in which the image was acquired)
Returns:
Frame: the image
""" """
field_of_view, channel, z_level = self.calculate_image_properties(index) field_of_view, channel, z_level = self.calculate_image_properties(index)
@ -67,66 +73,107 @@ class Parser(object):
try: try:
timestamp, image = self._get_raw_image_data(image_group_number, channel_offset, self.metadata["height"], timestamp, image = self._get_raw_image_data(image_group_number, channel_offset, self.metadata["height"],
self.metadata["width"]) self.metadata["width"])
except (TypeError, NoImageError):
except (TypeError):
return Frame([], frame_no=frame_number, metadata=self._get_frame_metadata()) return Frame([], frame_no=frame_number, metadata=self._get_frame_metadata())
else: else:
return image
return Frame(image, frame_no=frame_number, metadata=self._get_frame_metadata())
def get_slice_by_attributes(self, xywh, frame_number, field_of_view, channel, z_level, height, width):
"""Gets a rectangular slice of an image based on its attributes alone
Args:
xywh: tuples containing (x, y, w, h) values of the
rectangular region to load
frame_number: the frame number
field_of_view: the field of view
channel_name: the color channel name
z_level: the z level
height: the height of the image
width: the width of the image
Returns:
Frame: the requested image
def get_image_by_attributes(self, frame_number, field_of_view, channel_name, z_level, height, width):
""" """
Attempts to get Image based on attributes alone.
frame_number = 0 if frame_number is None else frame_number
field_of_view = 0 if field_of_view is None else field_of_view
channel = 0 if channel is None else channel
z_level = 0 if z_level is None else z_level
image_group_number = self._calculate_image_group_number(frame_number, field_of_view, z_level)
try:
timestamp, raw_image_data = self._get_raw_slice_data(
xywh, image_group_number, channel, height, width
)
except (TypeError):
return Frame([], frame_no=frame_number, metadata=self._get_frame_metadata())
else:
return Frame(raw_image_data, frame_no=frame_number, metadata=self._get_frame_metadata())
def get_image_by_attributes(self, frame_number, field_of_view, channel, z_level, height, width):
"""Gets an image based on its attributes alone
Args:
frame_number: the frame number
field_of_view: the field of view
channel_name: the color channel name
z_level: the z level
height: the height of the image
width: the width of the image
:type frame_number: int
:type field_of_view: int
:type channel_name: str
:type z_level: int
:type height: int
:type width: int
Returns:
Frame: the requested image
:rtype: Image or None
""" """
frame_number = 0 if frame_number is None else frame_number
field_of_view = 0 if field_of_view is None else field_of_view
channel = 0 if channel is None else channel
z_level = 0 if z_level is None else z_level
image_group_number = self._calculate_image_group_number(frame_number, field_of_view, z_level) image_group_number = self._calculate_image_group_number(frame_number, field_of_view, z_level)
try: try:
timestamp, raw_image_data = self._get_raw_image_data(image_group_number, self._channel_offset[channel_name],
timestamp, raw_image_data = self._get_raw_image_data(image_group_number, channel,
height, width) height, width)
except (TypeError, NoImageError):
except (TypeError):
return Frame([], frame_no=frame_number, metadata=self._get_frame_metadata()) return Frame([], frame_no=frame_number, metadata=self._get_frame_metadata())
else: else:
return raw_image_data
return Frame(raw_image_data, frame_no=frame_number, metadata=self._get_frame_metadata())
@staticmethod @staticmethod
def get_dtype_from_metadata(): def get_dtype_from_metadata():
"""
Determine the data type from the metadata.
"""Determine the data type from the metadata.
For now, always use float64 to prevent unexpected overflow errors when manipulating the data (calculating sums/ For now, always use float64 to prevent unexpected overflow errors when manipulating the data (calculating sums/
means/etc.) means/etc.)
:return:
""" """
return np.float64 return np.float64
def _check_version_supported(self): def _check_version_supported(self):
"""
Checks if the ND2 file version is supported by this reader.
:return:
"""Checks if the ND2 file version is supported by this reader.
Returns:
bool: True on supported
""" """
major_version, minor_version = get_version(self._fh) major_version, minor_version = get_version(self._fh)
supported = self.supported_file_versions.get((major_version, minor_version)) or \
self.supported_file_versions.get((major_version, None))
supported = self.supported_file_versions.get(
(major_version, minor_version)) or self.supported_file_versions.get((major_version, None))
if not supported: if not supported:
raise InvalidVersionError("No parser is available for that version.")
print("Warning: No parser is available for your current ND2 version (%d.%d). " % (
major_version, minor_version) + "This might lead to unexpected behaviour.")
return supported return supported
def _parse_metadata(self): def _parse_metadata(self):
"""
Reads all metadata and instantiates the Metadata object.
"""Reads all metadata and instantiates the Metadata object.
""" """
# Retrieve raw metadata from the label mapping # Retrieve raw metadata from the label mapping
self._label_map = self._build_label_map() self._label_map = self._build_label_map()
self._raw_metadata = RawMetadata(self._fh, self._label_map) self._raw_metadata = RawMetadata(self._fh, self._label_map)
self.metadata = self._raw_metadata.__dict__ self.metadata = self._raw_metadata.__dict__
self.acquisition_times = self._raw_metadata.acquisition_times
def _build_label_map(self): def _build_label_map(self):
""" """
@ -134,9 +181,11 @@ class Parser(object):
as some of the bytes contain the value 33, which is the ASCII code for "!". So we iteratively find each label, as some of the bytes contain the value 33, which is the ASCII code for "!". So we iteratively find each label,
grab the subsequent data (always 16 bytes long), advance to the next label and repeat. grab the subsequent data (always 16 bytes long), advance to the next label and repeat.
:rtype: LabelMap
Returns:
LabelMap: the computed label map
""" """
# go 8 bytes back from file end
self._fh.seek(-8, 2) self._fh.seek(-8, 2)
chunk_map_start_location = struct.unpack("Q", self._fh.read(8))[0] chunk_map_start_location = struct.unpack("Q", self._fh.read(8))[0]
self._fh.seek(chunk_map_start_location) self._fh.seek(chunk_map_start_location)
@ -144,33 +193,40 @@ class Parser(object):
return LabelMap(raw_text) return LabelMap(raw_text)
def _calculate_field_of_view(self, index): def _calculate_field_of_view(self, index):
"""
Determines what field of view was being imaged for a given image.
"""Determines what field of view was being imaged for a given image.
:type index: int
:rtype: int
Args:
index(int): the index (which is simply the order in which the image was acquired)
Returns:
int: the field of view
""" """
images_per_cycle = len(self.metadata["z_levels"]) * len(self.metadata["channels"]) images_per_cycle = len(self.metadata["z_levels"]) * len(self.metadata["channels"])
return int((index - (index % images_per_cycle)) / images_per_cycle) % len(self.metadata["fields_of_view"]) return int((index - (index % images_per_cycle)) / images_per_cycle) % len(self.metadata["fields_of_view"])
def _calculate_channel(self, index): def _calculate_channel(self, index):
"""
Determines what channel a particular image is.
"""Determines what channel a particular image is.
Args:
index(int): the index (which is simply the order in which the image was acquired)
:type index: int
:rtype: str
Returns:
string: the name of the color channel
""" """
return self.metadata["channels"][index % len(self.metadata["channels"])] return self.metadata["channels"][index % len(self.metadata["channels"])]
def _calculate_z_level(self, index): def _calculate_z_level(self, index):
"""
Determines the plane in the z-axis a given image was taken in. In the future, this will be replaced with the
actual offset in micrometers.
"""Determines the plane in the z-axis a given image was taken in.
In the future, this will be replaced with the actual offset in micrometers.
Args:
index(int): the index (which is simply the order in which the image was acquired)
Returns:
The z level
:type index: int
:rtype: int
""" """
return self.metadata["z_levels"][int( return self.metadata["z_levels"][int(
((index - (index % len(self.metadata["channels"]))) / len(self.metadata["channels"])) % len( ((index - (index % len(self.metadata["channels"]))) / len(self.metadata["channels"])) % len(
@ -180,29 +236,35 @@ class Parser(object):
""" """
Images are grouped together if they share the same time index, field of view, and z-level. Images are grouped together if they share the same time index, field of view, and z-level.
:type frame_number: int
:type fov: int
:type z_level: int
Args:
frame_number: the time index
fov: the field of view number
z_level: the z level number
:rtype: int
Returns:
int: the image group number
""" """
return frame_number * len(self.metadata["fields_of_view"]) * len(self.metadata["z_levels"]) + (
fov * len(self.metadata["z_levels"]) + z_level)
z_length = len(self.metadata['z_levels'])
z_length = z_length if z_length > 0 else 1
fields_of_view = len(self.metadata["fields_of_view"])
fields_of_view = fields_of_view if fields_of_view > 0 else 1
return frame_number * fields_of_view * z_length + (fov * z_length + z_level)
def _calculate_frame_number(self, image_group_number, field_of_view, z_level): def _calculate_frame_number(self, image_group_number, field_of_view, z_level):
""" """
Images are in the same frame if they share the same group number and field of view and are taken sequentially. Images are in the same frame if they share the same group number and field of view and are taken sequentially.
:type image_group_number: int
:type field_of_view: int
:type z_level: int
Args:
image_group_number: the image group number (see _calculate_image_group_number)
field_of_view: the field of view number
z_level: the z level number
:rtype: int
Returns:
""" """
return (image_group_number - (field_of_view * len(self.metadata["z_levels"]) + z_level)) / (
len(self.metadata["fields_of_view"]) * len(self.metadata["z_levels"]))
return (image_group_number - (field_of_view * len(self.metadata["z_levels"]) + z_level)) / (len(self.metadata["fields_of_view"]) * len(self.metadata["z_levels"]))
@property @property
def _channel_offset(self): def _channel_offset(self):
@ -210,52 +272,138 @@ class Parser(object):
Image data is interleaved for each image set. That is, if there are four images in a set, the first image Image data is interleaved for each image set. That is, if there are four images in a set, the first image
will consist of pixels 1, 5, 9, etc, the second will be pixels 2, 6, 10, and so forth. will consist of pixels 1, 5, 9, etc, the second will be pixels 2, 6, 10, and so forth.
:rtype: dict
Returns:
dict: the channel offset for each channel
""" """
return {channel: n for n, channel in enumerate(self.metadata["channels"])} return {channel: n for n, channel in enumerate(self.metadata["channels"])}
def _get_raw_image_data(self, image_group_number, channel_offset, height, width):
def _get_raw_slice_data(self, xywh, image_group_number, channel, height, width):
"""Reads the raw bytes and the timestamp of a rectangular slice
of an image.
Args:
xywh: tuples containing (x, y, w, h) values of the
rectangular region to load
image_group_number: the image group number (see _calculate_image_group_number)
channel: the position (int) of the channel to load
height: the height of the image
width: the width of the image
Returns:
""" """
Reads the raw bytes and the timestamp of an image.
size_c = len(self.metadata["channels"])
x0, y0, w, h = xywh
chunk_location = self._label_map.get_image_data_location(image_group_number)
fh = self._fh
if chunk_location is None or fh is None:
return None
fh.seek(chunk_location)
# The chunk metadata is always 16 bytes long
chunk_metadata = fh.read(16)
header, relative_offset, data_length = struct.unpack("IIQ", chunk_metadata)
if header != 0xabeceda:
raise ValueError("The ND2 file seems to be corrupted.")
# We start at the location of the chunk metadata, skip over the metadata, and then proceed to the
# start of the actual data field, which is at some arbitrary place after the metadata.
fh.seek(chunk_location + 16 + relative_offset)
# Read timestamp (8 bytes)
timestamp = struct.unpack("d", fh.read(8))[0]
# Stitched Images: evaluate number of bytes to strip
# (with stitched images sometimes after each row we have a regular number of extra bytes)
n_unwanted_bytes = (data_length-8) % (height*width)
assert 0 == n_unwanted_bytes % height
rowskip = n_unwanted_bytes // height
# Read ROI: row-by-row
image_start_pos = chunk_location + 16 + relative_offset + 8
line_bytemask = np.zeros(size_c, dtype=np.bool)
line_bytemask[channel] = True
line_bytemask = np.tile(line_bytemask.repeat(2),w)
def get_line(y):
fh.seek(image_start_pos + size_c*2*((width)*y+x0) + y*rowskip)
return np.frombuffer(fh.read(size_c*2*w), np.byte)[line_bytemask]
:param image_group_number: groups are made of images with the same time index, field of view and z-level
:type image_group_number: int
:param channel_offset: the offset in the array where the bytes for this image are found
:type channel_offset: int
data = [get_line(y) for y in range(y0, y0+h)]
data = bytes().join(data)
:rtype: (int, Image)
:raises: NoImageError
image_group_data = array.array("H", data)
true_channels_no = int(len(image_group_data) / (h * w))
image_data = np.reshape(image_group_data, (h, w, true_channels_no))
missing_channels = ~np.any(image_data, axis=(0, 1))
image_data[..., missing_channels] = np.full(
(h, w, missing_channels.sum()), np.nan)
if np.any(missing_channels):
warnings.warn(
"ND2 file contains gap frames which are represented by "
+ "np.nan-filled arrays; to convert to zeros use e.g. "
+ "np.nan_to_num(array)")
return timestamp, image_data[...,0]
def _get_raw_image_data(self, image_group_number, channel_offset, height, width):
"""Reads the raw bytes and the timestamp of an image.
Args:
image_group_number: the image group number (see _calculate_image_group_number)
channel_offset: the number of the color channel
height: the height of the image
width: the width of the image
Returns:
""" """
chunk = self._label_map.get_image_data_location(image_group_number) chunk = self._label_map.get_image_data_location(image_group_number)
data = read_chunk(self._fh, chunk) data = read_chunk(self._fh, chunk)
# print("data", data, "that was data")
# All images in the same image group share the same timestamp! So if you have complicated image data, # All images in the same image group share the same timestamp! So if you have complicated image data,
# your timestamps may not be entirely accurate. Practically speaking though, they'll only be off by a few # your timestamps may not be entirely accurate. Practically speaking though, they'll only be off by a few
# seconds unless you're doing something super weird. # seconds unless you're doing something super weird.
timestamp = struct.unpack("d", data[:8])[0] timestamp = struct.unpack("d", data[:8])[0]
image_group_data = array.array("H", data) image_group_data = array.array("H", data)
image_data_start = 4 + channel_offset image_data_start = 4 + channel_offset
image_group_data = stitched.remove_parsed_unwanted_bytes(image_group_data, image_data_start, height, width)
# The images for the various channels are interleaved within the same array. For example, the second image # The images for the various channels are interleaved within the same array. For example, the second image
# of a four image group will be composed of bytes 2, 6, 10, etc. If you understand why someone would design # of a four image group will be composed of bytes 2, 6, 10, etc. If you understand why someone would design
# a data structure that way, please send the author of this library a message. # a data structure that way, please send the author of this library a message.
number_of_true_channels = int((len(image_group_data) - 4) / (height * width))
image_data = np.reshape(image_group_data[image_data_start::number_of_true_channels], (height, width))
number_of_true_channels = int(len(image_group_data[4:]) / (height * width))
try:
image_data = np.reshape(image_group_data[image_data_start::number_of_true_channels], (height, width))
except ValueError:
new_width = len(image_group_data[image_data_start::number_of_true_channels]) // height
image_data = np.reshape(image_group_data[image_data_start::number_of_true_channels], (height, new_width))
# Skip images that are all zeros! This is important, since NIS Elements creates blank "gap" images if you # Skip images that are all zeros! This is important, since NIS Elements creates blank "gap" images if you
# don't have the same number of images each cycle. We discovered this because we only took GFP images every # don't have the same number of images each cycle. We discovered this because we only took GFP images every
# other cycle to reduce phototoxicity, but NIS Elements still allocated memory as if we were going to take # other cycle to reduce phototoxicity, but NIS Elements still allocated memory as if we were going to take
# them every cycle. # them every cycle.
if np.any(image_data): if np.any(image_data):
return timestamp, Frame(image_data, metadata=self._get_frame_metadata())
return timestamp, image_data
raise NoImageError
# If a blank "gap" image is encountered, generate an array of corresponding height and width to avoid
# errors with ND2-files with missing frames. Array is filled with nan to reflect that data is missing.
else:
empty_frame = np.full((height, width), np.nan)
warnings.warn(
"ND2 file contains gap frames which are represented by np.nan-filled arrays; to convert to zeros use e.g. np.nan_to_num(array)")
return timestamp, image_data
def _get_frame_metadata(self): def _get_frame_metadata(self):
"""
Get the metadata for one frame
:return:
"""Get the metadata for one frame
Returns:
dict: a dictionary containing the parsed metadata
""" """
return self.metadata return self.metadata

+ 461
- 151
nd2reader/raw_metadata.py View File

@ -1,22 +1,17 @@
import re import re
from nd2reader.common import read_chunk, read_array, read_metadata, parse_date
import xmltodict import xmltodict
import six import six
import numpy as np import numpy as np
import warnings
def ignore_missing(func):
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except:
return None
return wrapper
from nd2reader.common import read_chunk, read_array, read_metadata, parse_date, get_from_dict_if_exists
from nd2reader.common_raw_metadata import parse_dimension_text_line, parse_if_not_none, parse_roi_shape, parse_roi_type, get_loops_from_data, determine_sampling_interval
class RawMetadata(object): class RawMetadata(object):
"""RawMetadata class parses and stores the raw metadata that is read from the binary file in dict format.
"""
def __init__(self, fh, label_map): def __init__(self, fh, label_map):
self._fh = fh self._fh = fh
self._label_map = label_map self._label_map = label_map
@ -24,152 +19,306 @@ class RawMetadata(object):
@property @property
def __dict__(self): def __dict__(self):
"""
Returns the parsed metadata in dictionary form
:return:
"""Returns the parsed metadata in dictionary form.
Returns:
dict: the parsed metadata
""" """
return self.get_parsed_metadata() return self.get_parsed_metadata()
def get_parsed_metadata(self): def get_parsed_metadata(self):
"""
Returns the parsed metadata in dictionary form
:return:
"""Returns the parsed metadata in dictionary form.
Returns:
dict: the parsed metadata
""" """
if self._metadata_parsed is not None: if self._metadata_parsed is not None:
return self._metadata_parsed return self._metadata_parsed
frames_per_channel = self._parse_total_images_per_channel()
self._metadata_parsed = { self._metadata_parsed = {
"height": self.image_attributes[six.b('SLxImageAttributes')][six.b('uiHeight')],
"width": self.image_attributes[six.b('SLxImageAttributes')][six.b('uiWidth')],
"date": parse_date(self.image_text_info[six.b('SLxImageTextInfo')]),
"height": parse_if_not_none(self.image_attributes, self._parse_height),
"width": parse_if_not_none(self.image_attributes, self._parse_width),
"date": parse_if_not_none(self.image_text_info, self._parse_date),
"fields_of_view": self._parse_fields_of_view(), "fields_of_view": self._parse_fields_of_view(),
"frames": self._parse_frames(), "frames": self._parse_frames(),
"z_levels": self._parse_z_levels(), "z_levels": self._parse_z_levels(),
"total_images_per_channel": self._parse_total_images_per_channel(),
"z_coordinates": parse_if_not_none(self.z_data, self._parse_z_coordinates),
"x_coordinates": parse_if_not_none(self.x_data, self._parse_x_coordinates),
"y_coordinates": parse_if_not_none(self.y_data, self._parse_y_coordinates),
"total_images_per_channel": frames_per_channel,
"channels": self._parse_channels(), "channels": self._parse_channels(),
"pixel_microns": self.image_calibration.get(six.b('SLxCalibration'), {}).get(six.b('dCalibration')),
"pixel_microns": parse_if_not_none(self.image_calibration, self._parse_calibration),
"camera_stage_angle": parse_if_not_none(self.image_metadata_sequence, self._parse_camera_angle),
"camera_stage_matrix": parse_if_not_none(self.image_metadata_sequence, self._parse_camera_matrix)
} }
self._set_default_if_not_empty('fields_of_view')
self._set_default_if_not_empty('frames')
self._metadata_parsed['num_frames'] = len(self._metadata_parsed['frames']) self._metadata_parsed['num_frames'] = len(self._metadata_parsed['frames'])
self._parse_roi_metadata() self._parse_roi_metadata()
self._parse_experiment_metadata() self._parse_experiment_metadata()
self._parse_events()
return self._metadata_parsed return self._metadata_parsed
def _parse_channels(self):
def _set_default_if_not_empty(self, entry):
total_images = self._metadata_parsed['total_images_per_channel'] \
if self._metadata_parsed['total_images_per_channel'] is not None else 0
if len(self._metadata_parsed[entry]) == 0 and total_images > 0:
# if the file is not empty, we always have one of this entry
self._metadata_parsed[entry] = [0]
def _parse_width_or_height(self, key):
try:
length = self.image_attributes[six.b('SLxImageAttributes')][six.b(key)]
except KeyError:
length = None
return length
def _parse_height(self):
return self._parse_width_or_height('uiHeight')
def _parse_width(self):
return self._parse_width_or_height('uiWidth')
def _parse_date(self):
try:
return parse_date(self.image_text_info[six.b('SLxImageTextInfo')])
except KeyError:
return None
def _parse_calibration(self):
try:
return self.image_calibration.get(six.b('SLxCalibration'), {}).get(six.b('dCalibration'))
except KeyError:
return None
def _parse_frames(self):
"""The number of cycles.
Returns:
list: list of all the frame numbers
""" """
These are labels created by the NIS Elements user. Typically they may a short description of the filter cube
used (e.g. "bright field", "GFP", etc.)
return self._parse_dimension(r""".*?T'?\((\d+)\).*?""")
:rtype: list
def _parse_channels(self):
"""These are labels created by the NIS Elements user. Typically they may a short description of the filter cube
used (e.g. 'bright field', 'GFP', etc.)
Returns:
list: the color channels
""" """
channels = []
metadata = self.image_metadata_sequence[six.b('SLxPictureMetadata')][six.b('sPicturePlanes')]
if self.image_metadata_sequence is None:
return []
try: try:
validity = self.image_metadata[six.b('SLxExperiment')][six.b('ppNextLevelEx')][six.b('')][0][
six.b('ppNextLevelEx')][six.b('')][0][six.b('pItemValid')]
except (KeyError, TypeError):
# If none of the channels have been deleted, there is no validity list, so we just make one
validity = [True for _ in metadata]
metadata = self.image_metadata_sequence[six.b('SLxPictureMetadata')][six.b('sPicturePlanes')]
except KeyError:
return []
channels = self._process_channels_metadata(metadata)
return channels
def _process_channels_metadata(self, metadata):
validity = self._get_channel_validity_list(metadata)
# Channel information is contained in dictionaries with the keys a0, a1...an where the number # Channel information is contained in dictionaries with the keys a0, a1...an where the number
# indicates the order in which the channel is stored. So by sorting the dicts alphabetically # indicates the order in which the channel is stored. So by sorting the dicts alphabetically
# we get the correct order. # we get the correct order.
for (label, chan), valid in zip(sorted(metadata[six.b('sPlaneNew')].items()), validity):
channels = []
for valid, (label, chan) in zip(validity, sorted(metadata[six.b('sPlaneNew')].items())):
if not valid: if not valid:
continue continue
channels.append(chan[six.b('sDescription')].decode("utf8"))
if chan[six.b('sDescription')] is not None:
channels.append(chan[six.b('sDescription')].decode("utf8"))
else:
channels.append('Unknown')
return channels return channels
def _get_channel_validity_list(self, metadata):
try:
validity = self.image_metadata[six.b('SLxExperiment')][six.b('ppNextLevelEx')][six.b('')][0][
six.b('ppNextLevelEx')][six.b('')][0][six.b('pItemValid')]
except (KeyError, TypeError):
# If none of the channels have been deleted, there is no validity list, so we just make one
validity = [True for _ in metadata]
return validity
def _parse_fields_of_view(self): def _parse_fields_of_view(self):
"""
The metadata contains information about fields of view, but it contains it even if some fields
"""The metadata contains information about fields of view, but it contains it even if some fields
of view were cropped. We can't find anything that states which fields of view are actually of view were cropped. We can't find anything that states which fields of view are actually
in the image data, so we have to calculate it. There probably is something somewhere, since in the image data, so we have to calculate it. There probably is something somewhere, since
NIS Elements can figure it out, but we haven't found it yet. NIS Elements can figure it out, but we haven't found it yet.
:rtype: list
""" """
return self._parse_dimension(r""".*?XY\((\d+)\).*?""") return self._parse_dimension(r""".*?XY\((\d+)\).*?""")
def _parse_frames(self):
def _parse_z_levels(self):
"""The different levels in the Z-plane.
If they are not available from the _parse_dimension function AND there
is NO 'Dimensions: ' textinfo item in the file, we return a range with
the length of z_coordinates if available, otherwise an empty list.
Returns:
list: the z levels, just a sequence from 0 to n.
""" """
The number of cycles.
# get the dimension text to check if we should apply the fallback or not
dimension_text = self._parse_dimension_text()
# this returns range(len(z_levels))
z_levels = self._parse_dimension(r""".*?Z\((\d+)\).*?""", dimension_text)
if len(z_levels) > 0 or len(dimension_text) > 0:
# Either we have found the z_levels (first condition) so return, or
# don't fallback, because Z is apparently not in Dimensions, so
# there should be no z_levels
return z_levels
# Not available from dimension, get from z_coordinates
z_levels = parse_if_not_none(self.z_data, self._parse_z_coordinates)
:rtype: list
if z_levels is None:
# No z coordinates, return empty list
return []
warnings.warn("Z-levels details missing in metadata. Using Z-coordinates instead.")
return range(len(z_levels))
def _parse_z_coordinates(self):
"""The coordinate in micron for all z planes.
Returns:
list: the z coordinates in micron
""" """
return self._parse_dimension(r""".*?T'?\((\d+)\).*?""")
return self.z_data.tolist()
def _parse_z_levels(self):
def _parse_x_coordinates(self):
"""The coordinate in micron for all x frames.
Returns:
list: the x coordinates in micron
""" """
The different levels in the Z-plane. Just a sequence from 0 to n.
return self.x_data.tolist()
:rtype: list
def _parse_y_coordinates(self):
"""The coordinate in micron for all y frames.
Returns:
list: the y coordinates in micron
""" """
return self._parse_dimension(r""".*?Z\((\d+)\).*?""")
return self.y_data.tolist()
def _parse_camera_angle(self):
if self.image_metadata_sequence is None:
return []
try:
metadata = self.image_metadata_sequence[six.b('SLxPictureMetadata')]
except KeyError:
return []
try:
return metadata[b'dAngle']
except KeyError:
return None
def _parse_camera_matrix(self):
if self.image_metadata_sequence is None:
return []
try:
metadata = self.image_metadata_sequence[six.b('SLxPictureMetadata')][b'sPicturePlanes']
except KeyError:
return []
validity = self._get_channel_validity_list(metadata)
channels = []
for valid, (label, chan) in zip(validity, sorted(metadata[b'sSampleSetting'].items())):
if not valid:
continue
if chan[b'matCameraToStage'] is not None:
mat_data = chan[b'matCameraToStage'][b'Data']
mat_rows = chan[b'matCameraToStage'][b'Rows']
mat_columns = chan[b'matCameraToStage'][b'Columns']
mat = np.frombuffer(mat_data, dtype=np.float64).reshape([mat_rows, mat_columns])
channels.append(mat)
else:
channels.append(None)
return channels
def _parse_dimension_text(self): def _parse_dimension_text(self):
"""
While there are metadata values that represent a lot of what we want to capture, they seem to be unreliable.
"""While there are metadata values that represent a lot of what we want to capture, they seem to be unreliable.
Sometimes certain elements don't exist, or change their data type randomly. However, the human-readable text Sometimes certain elements don't exist, or change their data type randomly. However, the human-readable text
is always there and in the same exact format, so we just parse that instead. is always there and in the same exact format, so we just parse that instead.
:rtype: str
""" """
dimension_text = six.b("") dimension_text = six.b("")
textinfo = self.image_text_info[six.b('SLxImageTextInfo')].values()
if self.image_text_info is None:
return dimension_text
try:
textinfo = self.image_text_info[six.b('SLxImageTextInfo')].values()
except KeyError:
return dimension_text
for line in textinfo: for line in textinfo:
if six.b("Dimensions:") in line:
entries = line.split(six.b("\r\n"))
for entry in entries:
if entry.startswith(six.b("Dimensions:")):
return entry
entry = parse_dimension_text_line(line)
if entry is not None:
return entry
return dimension_text return dimension_text
def _parse_dimension(self, pattern):
"""
:param pattern: a valid regex pattern
:type pattern: str
:rtype: list of int
def _parse_dimension(self, pattern, dimension_text=None):
dimension_text = self._parse_dimension_text() if dimension_text is None else dimension_text
if dimension_text is None:
return []
"""
dimension_text = self._parse_dimension_text()
if six.PY3: if six.PY3:
dimension_text = dimension_text.decode("utf8") dimension_text = dimension_text.decode("utf8")
match = re.match(pattern, dimension_text) match = re.match(pattern, dimension_text)
if not match: if not match:
return [0]
return []
count = int(match.group(1)) count = int(match.group(1))
return list(range(count))
return range(count)
def _parse_total_images_per_channel(self): def _parse_total_images_per_channel(self):
"""
The total number of images per channel. Warning: this may be inaccurate as it includes "gap" images.
"""The total number of images per channel.
:rtype: int
Warning: this may be inaccurate as it includes 'gap' images.
""" """
return self.image_attributes[six.b('SLxImageAttributes')][six.b('uiSequenceCount')]
if self.image_attributes is None:
return 0
try:
total_images = self.image_attributes[six.b('SLxImageAttributes')][six.b('uiSequenceCount')]
except KeyError:
total_images = None
return total_images
def _parse_roi_metadata(self): def _parse_roi_metadata(self):
"""
Parse the raw ROI metadata.
:return:
"""Parse the raw ROI metadata.
""" """
if self.roi_metadata is None or not six.b('RoiMetadata_v1') in self.roi_metadata: if self.roi_metadata is None or not six.b('RoiMetadata_v1') in self.roi_metadata:
return return
raw_roi_data = self.roi_metadata[six.b('RoiMetadata_v1')] raw_roi_data = self.roi_metadata[six.b('RoiMetadata_v1')]
if not six.b('m_vectGlobal_Size') in raw_roi_data:
return
number_of_rois = raw_roi_data[six.b('m_vectGlobal_Size')] number_of_rois = raw_roi_data[six.b('m_vectGlobal_Size')]
roi_objects = [] roi_objects = []
@ -180,11 +329,16 @@ class RawMetadata(object):
self._metadata_parsed['rois'] = roi_objects self._metadata_parsed['rois'] = roi_objects
def _parse_roi(self, raw_roi_dict): def _parse_roi(self, raw_roi_dict):
"""
Extract the vector animation parameters from the ROI.
"""Extract the vector animation parameters from the ROI.
This includes the position and size at the given timepoints. This includes the position and size at the given timepoints.
:param raw_roi_dict:
:return:
Args:
raw_roi_dict: dictionary of raw roi metadata
Returns:
dict: the parsed ROI metadata
""" """
number_of_timepoints = raw_roi_dict[six.b('m_vectAnimParams_Size')] number_of_timepoints = raw_roi_dict[six.b('m_vectAnimParams_Size')]
@ -192,11 +346,11 @@ class RawMetadata(object):
"timepoints": [], "timepoints": [],
"positions": [], "positions": [],
"sizes": [], "sizes": [],
"shape": self._parse_roi_shape(raw_roi_dict[six.b('m_sInfo')][six.b('m_uiShapeType')]),
"type": self._parse_roi_type(raw_roi_dict[six.b('m_sInfo')][six.b('m_uiInterpType')])
"shape": parse_roi_shape(raw_roi_dict[six.b('m_sInfo')][six.b('m_uiShapeType')]),
"type": parse_roi_type(raw_roi_dict[six.b('m_sInfo')][six.b('m_uiInterpType')])
} }
for i in range(number_of_timepoints): for i in range(number_of_timepoints):
roi_dict = self._parse_vect_anim(roi_dict, raw_roi_dict[six.b('m_vectAnimParams_%d') % i])
roi_dict = self._parse_vect_anim(roi_dict, raw_roi_dict[six.b('m_vectAnimParams_%d' % i)])
# convert to NumPy arrays # convert to NumPy arrays
roi_dict["timepoints"] = np.array(roi_dict["timepoints"], dtype=np.float) roi_dict["timepoints"] = np.array(roi_dict["timepoints"], dtype=np.float)
@ -205,31 +359,17 @@ class RawMetadata(object):
return roi_dict return roi_dict
@staticmethod
def _parse_roi_shape(shape):
if shape == 3:
return 'rectangle'
elif shape == 9:
return 'circle'
return None
@staticmethod
def _parse_roi_type(type_no):
if type_no == 4:
return 'stimulation'
elif type_no == 3:
return 'reference'
elif type_no == 2:
return 'background'
return None
def _parse_vect_anim(self, roi_dict, animation_dict): def _parse_vect_anim(self, roi_dict, animation_dict):
""" """
Parses a ROI vector animation object and adds it to the global list of timepoints and positions. Parses a ROI vector animation object and adds it to the global list of timepoints and positions.
:param animation_dict:
:return:
Args:
roi_dict: the raw roi dictionary
animation_dict: the raw animation dictionary
Returns:
dict: the parsed metadata
""" """
roi_dict["timepoints"].append(animation_dict[six.b('m_dTimeMs')]) roi_dict["timepoints"].append(animation_dict[six.b('m_dTimeMs')])
@ -251,42 +391,36 @@ class RawMetadata(object):
return roi_dict return roi_dict
def _parse_experiment_metadata(self): def _parse_experiment_metadata(self):
"""
Parse the metadata of the ND experiment
:return:
"""
if not six.b('SLxExperiment') in self.image_metadata:
return
"""Parse the metadata of the ND experiment
raw_data = self.image_metadata[six.b('SLxExperiment')]
experimental_data = {
"""
self._metadata_parsed['experiment'] = {
'description': 'unknown', 'description': 'unknown',
'loops': [] 'loops': []
} }
if self.image_metadata is None or six.b('SLxExperiment') not in self.image_metadata:
return
raw_data = self.image_metadata[six.b('SLxExperiment')]
if six.b('wsApplicationDesc') in raw_data: if six.b('wsApplicationDesc') in raw_data:
experimental_data['description'] = raw_data[six.b('wsApplicationDesc')].decode('utf8')
self._metadata_parsed['experiment']['description'] = raw_data[six.b('wsApplicationDesc')].decode('utf8')
if six.b('uLoopPars') in raw_data: if six.b('uLoopPars') in raw_data:
experimental_data['loops'] = self._parse_loop_data(raw_data[six.b('uLoopPars')])
self._metadata_parsed['experiment'] = experimental_data
self._metadata_parsed['experiment']['loops'] = self._parse_loop_data(raw_data[six.b('uLoopPars')])
def _parse_loop_data(self, loop_data): def _parse_loop_data(self, loop_data):
"""
Parse the experimental loop data
:param loop_data:
:return:
"""
if six.b('uiPeriodCount') not in loop_data or loop_data[six.b('uiPeriodCount')] == 0:
return []
"""Parse the experimental loop data
if six.b('pPeriod') not in loop_data:
return []
Args:
loop_data: dictionary of experiment loops
# take the first dictionary element, it contains all loop data
loops = loop_data[six.b('pPeriod')][list(loop_data[six.b('pPeriod')].keys())[0]]
Returns:
list: list of the parsed loops
"""
loops = get_loops_from_data(loop_data)
# take into account the absolute time in ms # take into account the absolute time in ms
time_offset = 0 time_offset = 0
@ -295,13 +429,14 @@ class RawMetadata(object):
for loop in loops: for loop in loops:
# duration of this loop # duration of this loop
duration = loop[six.b('dDuration')]
duration = get_from_dict_if_exists('dDuration', loop) or 0
interval = determine_sampling_interval(duration, loop)
# uiLoopType == 6 is a stimulation loop
is_stimulation = loop[six.b('uiLoopType')] == 6
# if duration is not saved, infer it
duration = self.get_duration_from_interval_and_loops(duration, interval, loop)
# sampling interval in ms
interval = loop[six.b('dAvgPeriodDiff')]
# uiLoopType == 6 is a stimulation loop
is_stimulation = get_from_dict_if_exists('uiLoopType', loop) == 6
parsed_loop = { parsed_loop = {
'start': time_offset, 'start': time_offset,
@ -317,99 +452,274 @@ class RawMetadata(object):
return parsed_loops return parsed_loops
def get_duration_from_interval_and_loops(self, duration, interval, loop):
"""Infers the duration of the loop from the number of measurements and the interval
Args:
duration: loop duration in milliseconds
duration: measurement interval in milliseconds
loop: loop dictionary
Returns:
float: the loop duration in milliseconds
"""
if duration == 0 and interval > 0:
number_of_loops = get_from_dict_if_exists('uiCount', loop)
number_of_loops = number_of_loops if number_of_loops is not None and number_of_loops > 0 else 1
duration = interval * number_of_loops
return duration
def _parse_events(self):
"""Extract events
"""
# list of event names manually extracted from an ND2 file that contains all manually
# insertable events from NIS-Elements software (4.60.00 (Build 1171) Patch 02)
event_names = {
1: 'Autofocus',
7: 'Command Executed',
9: 'Experiment Paused',
10: 'Experiment Resumed',
11: 'Experiment Stopped by User',
13: 'Next Phase Moved by User',
14: 'Experiment Paused for Refocusing',
16: 'External Stimulation',
33: 'User 1',
34: 'User 2',
35: 'User 3',
36: 'User 4',
37: 'User 5',
38: 'User 6',
39: 'User 7',
40: 'User 8',
44: 'No Acquisition Phase Start',
45: 'No Acquisition Phase End',
46: 'Hardware Error',
47: 'N-STORM',
48: 'Incubation Info',
49: 'Incubation Error'
}
self._metadata_parsed['events'] = []
events = read_metadata(read_chunk(self._fh, self._label_map.image_events), 1)
if events is None or six.b('RLxExperimentRecord') not in events:
return
events = events[six.b('RLxExperimentRecord')][six.b('pEvents')]
if len(events) == 0:
return
for event in events[six.b('')]:
event_info = {
'index': event[six.b('I')],
'time': event[six.b('T')],
'type': event[six.b('M')],
}
if event_info['type'] in event_names.keys():
event_info['name'] = event_names[event_info['type']]
self._metadata_parsed['events'].append(event_info)
@property @property
@ignore_missing
def image_text_info(self): def image_text_info(self):
"""Textual image information
Returns:
dict: containing the textual image info
"""
return read_metadata(read_chunk(self._fh, self._label_map.image_text_info), 1) return read_metadata(read_chunk(self._fh, self._label_map.image_text_info), 1)
@property @property
@ignore_missing
def image_metadata_sequence(self): def image_metadata_sequence(self):
"""Image metadata of the sequence
Returns:
dict: containing the metadata
"""
return read_metadata(read_chunk(self._fh, self._label_map.image_metadata_sequence), 1) return read_metadata(read_chunk(self._fh, self._label_map.image_metadata_sequence), 1)
@property @property
@ignore_missing
def image_calibration(self): def image_calibration(self):
"""The amount of pixels per micron.
Returns:
dict: pixels per micron
"""
return read_metadata(read_chunk(self._fh, self._label_map.image_calibration), 1) return read_metadata(read_chunk(self._fh, self._label_map.image_calibration), 1)
@property @property
@ignore_missing
def image_attributes(self): def image_attributes(self):
"""Image attributes
Returns:
dict: containing the image attributes
"""
return read_metadata(read_chunk(self._fh, self._label_map.image_attributes), 1) return read_metadata(read_chunk(self._fh, self._label_map.image_attributes), 1)
@property @property
@ignore_missing
def x_data(self): def x_data(self):
"""X data
Returns:
dict: x_data
"""
return read_array(self._fh, 'double', self._label_map.x_data) return read_array(self._fh, 'double', self._label_map.x_data)
@property @property
@ignore_missing
def y_data(self): def y_data(self):
"""Y data
Returns:
dict: y_data
"""
return read_array(self._fh, 'double', self._label_map.y_data) return read_array(self._fh, 'double', self._label_map.y_data)
@property @property
@ignore_missing
def z_data(self): def z_data(self):
return read_array(self._fh, 'double', self._label_map.z_data)
"""Z data
Returns:
dict: z_data
"""
try:
return read_array(self._fh, 'double', self._label_map.z_data)
except ValueError:
# Depending on the file format/exact settings, this value is
# sometimes saved as float instead of double
return read_array(self._fh, 'float', self._label_map.z_data)
@property @property
@ignore_missing
def roi_metadata(self): def roi_metadata(self):
"""Contains information about the defined ROIs: shape, position and type (reference/background/stimulation).
Returns:
dict: ROI metadata dictionary
"""
return read_metadata(read_chunk(self._fh, self._label_map.roi_metadata), 1) return read_metadata(read_chunk(self._fh, self._label_map.roi_metadata), 1)
@property @property
@ignore_missing
def pfs_status(self): def pfs_status(self):
"""Perfect focus system (PFS) status
Returns:
dict: Perfect focus system (PFS) status
"""
return read_array(self._fh, 'int', self._label_map.pfs_status) return read_array(self._fh, 'int', self._label_map.pfs_status)
@property @property
@ignore_missing
def pfs_offset(self): def pfs_offset(self):
"""Perfect focus system (PFS) offset
Returns:
dict: Perfect focus system (PFS) offset
"""
return read_array(self._fh, 'int', self._label_map.pfs_offset) return read_array(self._fh, 'int', self._label_map.pfs_offset)
@property @property
@ignore_missing
def camera_exposure_time(self): def camera_exposure_time(self):
"""Exposure time information
Returns:
dict: Camera exposure time
"""
return read_array(self._fh, 'double', self._label_map.camera_exposure_time) return read_array(self._fh, 'double', self._label_map.camera_exposure_time)
@property @property
@ignore_missing
def lut_data(self): def lut_data(self):
"""LUT information
Returns:
dict: LUT information
"""
return xmltodict.parse(read_chunk(self._fh, self._label_map.lut_data)) return xmltodict.parse(read_chunk(self._fh, self._label_map.lut_data))
@property @property
@ignore_missing
def grabber_settings(self): def grabber_settings(self):
"""Grabber settings
Returns:
dict: Acquisition settings
"""
return xmltodict.parse(read_chunk(self._fh, self._label_map.grabber_settings)) return xmltodict.parse(read_chunk(self._fh, self._label_map.grabber_settings))
@property @property
@ignore_missing
def custom_data(self): def custom_data(self):
"""Custom user data
Returns:
dict: custom user data
"""
return xmltodict.parse(read_chunk(self._fh, self._label_map.custom_data)) return xmltodict.parse(read_chunk(self._fh, self._label_map.custom_data))
@property @property
@ignore_missing
def app_info(self): def app_info(self):
"""NIS elements application info
Returns:
dict: (Version) information of the NIS Elements application
"""
return xmltodict.parse(read_chunk(self._fh, self._label_map.app_info)) return xmltodict.parse(read_chunk(self._fh, self._label_map.app_info))
@property @property
@ignore_missing
def camera_temp(self): def camera_temp(self):
"""Camera temperature
Yields:
float: the temperature
"""
camera_temp = read_array(self._fh, 'double', self._label_map.camera_temp) camera_temp = read_array(self._fh, 'double', self._label_map.camera_temp)
if camera_temp: if camera_temp:
for temp in map(lambda x: round(x * 100.0, 2), camera_temp): for temp in map(lambda x: round(x * 100.0, 2), camera_temp):
yield temp yield temp
@property @property
@ignore_missing
def acquisition_times(self): def acquisition_times(self):
"""Acquisition times
Yields:
float: the acquisition time
"""
acquisition_times = read_array(self._fh, 'double', self._label_map.acquisition_times) acquisition_times = read_array(self._fh, 'double', self._label_map.acquisition_times)
if acquisition_times: if acquisition_times:
for acquisition_time in map(lambda x: x / 1000.0, acquisition_times): for acquisition_time in map(lambda x: x / 1000.0, acquisition_times):
yield acquisition_time yield acquisition_time
@property @property
@ignore_missing
def image_metadata(self): def image_metadata(self):
"""Image metadata
Returns:
dict: Extra image metadata
"""
if self._label_map.image_metadata: if self._label_map.image_metadata:
return read_metadata(read_chunk(self._fh, self._label_map.image_metadata), 1) return read_metadata(read_chunk(self._fh, self._label_map.image_metadata), 1)
@property
def image_events(self):
"""Image events
Returns:
dict: Image events
"""
if self._label_map.image_metadata:
for event in self._metadata_parsed["events"]:
yield event

+ 208
- 41
nd2reader/reader.py View File

@ -1,17 +1,40 @@
from pims import FramesSequenceND
from pims import Frame
from pims.base_frames import FramesSequenceND
from nd2reader.exceptions import EmptyFileError, InvalidFileType
from nd2reader.parser import Parser from nd2reader.parser import Parser
import numpy as np
class ND2Reader(FramesSequenceND): class ND2Reader(FramesSequenceND):
"""PIMS wrapper for the ND2 parser.
This is the main class: use this to process your .nd2 files.
""" """
PIMS wrapper for the ND2 parser
"""
def __init__(self, filename):
self.filename = filename
_fh = None
class_priority = 12
def __init__(self, fh):
"""
Arguments:
fh {str} -- absolute path to .nd2 file
fh {IO} -- input buffer handler (opened with "rb" mode)
"""
super(ND2Reader, self).__init__()
self.filename = ""
if isinstance(fh, str):
if not fh.endswith(".nd2"):
raise InvalidFileType(
("The file %s you want to read with nd2reader" % fh)
+ " does not have extension .nd2."
)
self.filename = fh
fh = open(fh, "rb")
self._fh = fh
# first use the parser to parse the file
self._fh = open(filename, "rb")
self._parser = Parser(self._fh) self._parser = Parser(self._fh)
# Setup metadata # Setup metadata
@ -23,60 +46,204 @@ class ND2Reader(FramesSequenceND):
# Setup the axes # Setup the axes
self._setup_axes() self._setup_axes()
# Other properties
self._timesteps = None
@classmethod @classmethod
def class_exts(cls): def class_exts(cls):
"""Let PIMS open function use this reader for opening .nd2 files
""" """
So PIMS open function can use this reader for opening .nd2 files
:return:
"""
return {'nd2'} | super(ND2Reader, cls).class_exts()
return {"nd2"} | super(ND2Reader, cls).class_exts()
def close(self): def close(self):
"""
Correctly close the file handle
:return:
"""Correctly close the file handle
""" """
if self._fh is not None: if self._fh is not None:
self._fh.close() self._fh.close()
def get_frame(self, i):
"""
Return one frame
:param i:
:return:
def _get_default(self, coord):
try:
return self.default_coords[coord]
except KeyError:
return 0
def get_roi(self, roi, c=0, t=0, z=0, x=0, y=0, v=0):
height = self.metadata['height']
width = self.metadata['width']
ylim = roi[0].indices(height)
xlim = roi[1].indices(width)
y = ylim[0]
x = xlim[0]
w = xlim[1]-xlim[0]
h = ylim[1]-ylim[0]
return self._parser.get_slice_by_attributes(
(x, y, w, h), t, v, c, z, height, width
)
def get_frame_2D(self, c=0, t=0, z=0, x=0, y=0, v=0):
"""Gets a given frame using the parser
Args:
x: The x-index (pims expects this)
y: The y-index (pims expects this)
c: The color channel number
t: The frame number
z: The z stack number
v: The field of view index
Returns:
pims.Frame: The requested frame
""" """
return self._parser.get_image(i)
# This needs to be set to width/height to return an image
x = self.metadata["width"]
y = self.metadata["height"]
def get_frame_2D(self, c, t, z):
return self._parser.get_image_by_attributes(t, v, c, z, y, x)
@property
def parser(self):
""" """
Gets a given frame using the parser
:param c:
:param t:
:param z:
:return:
Returns the parser object.
Returns:
Parser: the parser object
""" """
c_name = self.metadata["channels"][c]
return self._parser.get_image_by_attributes(t, 0, c_name, z, self.metadata["width"], self.metadata["height"])
return self._parser
@property @property
def pixel_type(self): def pixel_type(self):
"""
Return the pixel data type
:return:
"""Return the pixel data type
Returns:
dtype: the pixel data type
""" """
return self._dtype return self._dtype
def _setup_axes(self):
@property
def timesteps(self):
"""Get the timesteps of the experiment
Returns:
np.ndarray: an array of times in milliseconds.
""" """
Setup the xyctz axes, iterate over t axis by default
:return:
if self._timesteps is None:
return self.get_timesteps()
return self._timesteps
@property
def events(self):
"""Get the events of the experiment
Returns:
iterator of events as dict
""" """
self._init_axis('x', self.metadata["width"])
self._init_axis('y', self.metadata["height"])
self._init_axis('c', len(self.metadata["channels"]))
self._init_axis('t', len(self.metadata["frames"]))
self._init_axis('z', len(self.metadata["z_levels"]))
return self._get_metadata_property("events")
@property
def frame_rate(self):
"""The (average) frame rate
Returns:
float: the (average) frame rate in frames per second
"""
total_duration = 0.0
for loop in self.metadata["experiment"]["loops"]:
total_duration += loop["duration"]
if total_duration == 0:
total_duration = self.timesteps[-1]
if total_duration == 0:
raise ValueError(
"Total measurement duration could not be determined from loops"
)
return self.metadata["num_frames"] / (total_duration / 1000.0)
def _get_metadata_property(self, key, default=None):
if self.metadata is None:
return default
if key not in self.metadata:
return default
if self.metadata[key] is None:
return default
return self.metadata[key]
def _setup_axes(self):
"""Setup the xyctz axes, iterate over t axis by default
"""
self._init_axis_if_exists("x", self._get_metadata_property("width", default=0))
self._init_axis_if_exists("y", self._get_metadata_property("height", default=0))
self._init_axis_if_exists(
"c", len(self._get_metadata_property("channels", default=[])), min_size=2
)
self._init_axis_if_exists(
"t", len(self._get_metadata_property("frames", default=[]))
)
self._init_axis_if_exists(
"z", len(self._get_metadata_property("z_levels", default=[])), min_size=2
)
self._init_axis_if_exists(
"v",
len(self._get_metadata_property("fields_of_view", default=[])),
min_size=2,
)
if len(self.sizes) == 0:
raise EmptyFileError("No axes were found for this .nd2 file.")
# provide the default # provide the default
self.iter_axes = 't'
self.iter_axes = self._guess_default_iter_axis()
self._register_get_frame(self.get_frame_2D, "yx")
def _init_axis_if_exists(self, axis, size, min_size=1):
if size >= min_size:
self._init_axis(axis, size)
def _guess_default_iter_axis(self):
"""
Guesses the default axis to iterate over based on axis sizes.
Returns:
the axis to iterate over
"""
priority = ["t", "z", "c", "v"]
found_axes = []
for axis in priority:
try:
current_size = self.sizes[axis]
except KeyError:
continue
if current_size > 1:
return axis
found_axes.append(axis)
return found_axes[0]
def get_timesteps(self):
"""Get the timesteps of the experiment
Returns:
np.ndarray: an array of times in milliseconds.
"""
if self._timesteps is not None and len(self._timesteps) > 0:
return self._timesteps
self._timesteps = (
np.array(list(self._parser._raw_metadata.acquisition_times), dtype=np.float)
* 1000.0
)
return self._timesteps

+ 54
- 0
nd2reader/stitched.py View File

@ -0,0 +1,54 @@
# -*- coding: utf-8 -*-
import numpy as np # type: ignore
import warnings
def get_unwanted_bytes_ids(image_group_data, image_data_start, height, width):
# Check if the byte array size conforms to the image axes size. If not, check
# that the number of unexpected (unwanted) bytes is a multiple of the number of
# rows (height), as the same unmber of unwanted bytes is expected to be
# appended at the end of each row. Then, returns the indexes of the unwanted
# bytes.
number_of_true_channels = int(len(image_group_data[4:]) / (height * width))
n_unwanted_bytes = (len(image_group_data[image_data_start:])) % (height * width)
if not n_unwanted_bytes:
return np.arange(0)
assert 0 == n_unwanted_bytes % height, (
"An unexpected number of extra bytes was encountered based on the expected"
+ " frame size, therefore the file could not be parsed."
)
return np.arange(
image_data_start + height * number_of_true_channels,
len(image_group_data) - n_unwanted_bytes + 1,
height * number_of_true_channels,
)
def remove_bytes_by_id(byte_ids, image_group_data, height):
# Remove bytes by ID.
bytes_per_row = len(byte_ids) // height
warnings.warn(
f"{len(byte_ids)} ({bytes_per_row}*{height}) unexpected zero "
+ "bytes were found in the ND2 file and removed to allow further parsing."
)
for i in range(len(byte_ids)):
del image_group_data[byte_ids[i] : (byte_ids[i] + bytes_per_row)]
def remove_parsed_unwanted_bytes(image_group_data, image_data_start, height, width):
# Stitched ND2 files have been reported to contain unexpected (according to
# image shape) zero bytes at the end of each image data row. This hinders
# proper reshaping of the data. Hence, here the unwanted zero bytes are
# identified and removed.
unwanted_byte_ids = get_unwanted_bytes_ids(
image_group_data, image_data_start, height, width
)
if 0 != len(unwanted_byte_ids):
assert np.all(
image_group_data[unwanted_byte_ids + np.arange(len(unwanted_byte_ids))] == 0
), (
f"{len(unwanted_byte_ids)} unexpected non-zero bytes were found"
+ " in the ND2 file, the file could not be parsed."
)
remove_bytes_by_id(unwanted_byte_ids, image_group_data, height)
return image_group_data

+ 17
- 0
release.txt View File

@ -0,0 +1,17 @@
Update version in 'nd2reader/__init__.py' file
Rebuild sphinx documentation
Commit & push both master and docs
Check if travis unittests are passing
Publish new release on GitHub
Run `python setup.py sdist bdist_wheel`
Run `twine upload dist/*`
Update the version in nd2reader-feedstock to update the conda version: in recipe/meta.yaml, update version and checksum using `sha256sum`
Create & merge PR in nd2reader-feedstock

+ 4
- 1
setup.cfg View File

@ -1,2 +1,5 @@
[metadata] [metadata]
description-file = README.md
description-file = README.md
[bdist_wheel]
universal=1

+ 9
- 10
setup.py View File

@ -1,23 +1,22 @@
from setuptools import setup from setuptools import setup
VERSION = '2.1.3'
#from nd2reader import __version__ as VERSION
if __name__ == '__main__': if __name__ == '__main__':
setup( setup(
name='nd2reader', name='nd2reader',
packages=['nd2reader', 'nd2reader.model', 'nd2reader.driver', 'nd2reader.parser', 'nd2reader.common'],
packages=['nd2reader'],
install_requires=[ install_requires=[
'numpy>=1.6.2, <2.0',
'six>=1.4, <2.0',
'xmltodict>=0.9.2, <1.0',
'pims>=0.3.0'
'numpy>=1.6.2',
'six>=1.4',
'xmltodict>=0.9.2',
'PIMS>=0.5.0'
], ],
version=VERSION,
version="3.2.3-zolfa-dev0",
description='A tool for reading ND2 files produced by NIS Elements', description='A tool for reading ND2 files produced by NIS Elements',
author='Ruben Verweij', author='Ruben Verweij',
author_email='verweij@physics.leidenuniv.nl',
author_email='ruben@lighthacking.nl',
url='https://github.com/rbnvrw/nd2reader', url='https://github.com/rbnvrw/nd2reader',
download_url='https://github.com/rbnvrw/nd2reader/tarball/%s' % VERSION,
download_url='https://github.com/rbnvrw/nd2reader/tarball/%s' % "3.2.3-zolfa-dev0",
keywords=['nd2', 'nikon', 'microscopy', 'NIS Elements'], keywords=['nd2', 'nikon', 'microscopy', 'NIS Elements'],
classifiers=['Development Status :: 5 - Production/Stable', classifiers=['Development Status :: 5 - Production/Stable',
'Intended Audience :: Science/Research', 'Intended Audience :: Science/Research',


+ 20
- 0
sphinx/Makefile View File

@ -0,0 +1,20 @@
# Makefile for Sphinx documentation
#
# You can set these variables from the command line.
SPHINXBUILD = python3 -msphinx
BUILDDIR = ../docs
# Internal variables.
ALLSPHINXOPTS = .
.PHONY: help
help:
@echo "Please use \`make <target>' where <target> is one of"
@echo " html to make standalone HTML files"
.PHONY: html
html:
$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)
@echo
@echo "Build finished. The HTML pages are in $(BUILDDIR)."

+ 1
- 0
sphinx/_templates/layout.html View File

@ -0,0 +1 @@
{% extends "!layout.html" %}

+ 164
- 0
sphinx/conf.py View File

@ -0,0 +1,164 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sphinx_bootstrap_theme
from recommonmark.parser import CommonMarkParser
from nd2reader import __version__ as VERSION
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.viewcode',
'sphinx.ext.napoleon']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_parsers = {
'.md': CommonMarkParser,
}
source_suffix = ['.rst', '.md']
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'nd2reader'
copyright = '2017 - 2019, Ruben Verweij'
author = 'Ruben Verweij'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = VERSION
# The full version, including alpha/beta/rc tags.
release = VERSION
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'en'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'bootstrap'
html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
'navbar_links': [
("Lighthacking", "http://lighthacking.nl", True),
],
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'nd2readerdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'nd2reader.tex', 'nd2reader Documentation',
'Ruben Verweij', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'nd2reader', 'nd2reader Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'nd2reader', 'nd2reader Documentation',
author, 'nd2reader', 'One line description of project.',
'Miscellaneous'),
]
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']

+ 20
- 0
sphinx/index.rst View File

@ -0,0 +1,20 @@
``nd2reader``: a pure-Python package for reading Nikon .nd2 files
=================================================================
`nd2reader` is a pure-Python package that reads images produced by NIS Elements 4.0+. It has only been definitively tested on NIS Elements 4.30.02 Build 1053. Support for older versions is being actively worked on.
The reader is written in the `pims <https://github.com/soft-matter/pims>`_ framework, enabling easy access to multidimensional files, lazy slicing, and nice display in IPython. To get started, see the quick start tutorial.
.. toctree::
:maxdepth: 4
:caption: Contents:
nd2reader quick start tutorial <tutorial>
nd2reader API reference <nd2reader>
Indices and tables
==================
* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`

+ 55
- 0
sphinx/make.bat View File

@ -0,0 +1,55 @@
@ECHO OFF
REM Command file for Sphinx documentation
pushd %~dp0
if "%SPHINXBUILD%" == "" (
set SPHINXBUILD=python3 -msphinx
)
set BUILDDIR=../docs
set ALLSPHINXOPTS= .
if "%1" == "" goto help
if "%1" == "help" (
:help
echo.Please use `make ^<target^>` where ^<target^> is one of
echo. html to make standalone HTML files
goto end
)
REM Check if sphinx-build is available and fallback to Python version if any
%SPHINXBUILD% 1>NUL 2>NUL
if errorlevel 9009 goto sphinx_python
goto sphinx_ok
:sphinx_python
set SPHINXBUILD=python3 -m sphinx.__init__
%SPHINXBUILD% 2> nul
if errorlevel 9009 (
echo.
echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
echo.installed, then set the SPHINXBUILD environment variable to point
echo.to the full path of the 'sphinx-build' executable. Alternatively you
echo.may add the Sphinx directory to PATH.
echo.
echo.If you don't have Sphinx installed, grab it from
echo.http://sphinx-doc.org/
exit /b 1
)
:sphinx_ok
if "%1" == "html" (
%SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%
if errorlevel 1 exit /b 1
echo.
echo.Build finished. The HTML pages are in %BUILDDIR%.
goto end
)
:end
popd

+ 72
- 0
sphinx/nd2reader.rst View File

@ -0,0 +1,72 @@
nd2reader package
=================
In general, you should only have to use the ``nd2reader.reader`` module. The rest of the submodules are for internal
use only.
Submodules
----------
nd2reader.reader module
-----------------------
.. automodule:: nd2reader.reader
:members:
:undoc-members:
:show-inheritance:
nd2reader.parser module
-----------------------
.. automodule:: nd2reader.parser
:members:
:undoc-members:
:show-inheritance:
nd2reader.raw_metadata module
-----------------------------
.. automodule:: nd2reader.raw_metadata
:members:
:undoc-members:
:show-inheritance:
nd2reader.label_map module
--------------------------
.. automodule:: nd2reader.label_map
:members:
:undoc-members:
:show-inheritance:
nd2reader.common module
-----------------------
.. automodule:: nd2reader.common
:members:
:undoc-members:
:show-inheritance:
nd2reader.exceptions module
---------------------------
.. automodule:: nd2reader.exceptions
:members:
:undoc-members:
:show-inheritance:
nd2reader.artificial module
-------------------------------
.. automodule:: nd2reader.artificial
:members:
:undoc-members:
:show-inheritance:
nd2reader.legacy module
-----------------------
.. automodule:: nd2reader.legacy
:members:
:undoc-members:
:show-inheritance:

+ 119
- 0
sphinx/tutorial.md View File

@ -0,0 +1,119 @@
# Tutorial
### Installation
The package is available on PyPi. Install it using:
```
pip install nd2reader
```
If you don't already have the packages `numpy`, `pims`, `six` and `xmltodict`, they will be installed automatically if you use the `setup.py` script.
`nd2reader` is an order of magnitude faster in Python 3. I recommend using Python 3 unless you have no other choice. Python 2.7 and Python >= 3.4 are supported.
#### Installation via Conda Forge
Installing `nd2reader` from the `conda-forge` channel can be achieved by adding `conda-forge` to your channels with:
```
conda config --add channels conda-forge
```
Once the `conda-forge` channel has been enabled, `nd2reader` can be installed with:
```
conda install nd2reader
```
It is possible to list all of the versions of `nd2reader` available on your platform with:
```
conda search nd2reader --channel conda-forge
```
### Opening ND2s
`nd2reader` follows the [pims](https://github.com/soft-matter/pims) framework. To open a file and show the first frame:
```python
from nd2reader import ND2Reader
import matplotlib.pyplot as plt
with ND2Reader('my_directory/example.nd2') as images:
plt.imshow(images[0])
```
After opening the file, all `pims` features are supported. Please refer to the [pims documentation](http://soft-matter.github.io/pims/).
### ND2 metadata
The ND2 file contains various metadata, such as acquisition information,
regions of interest and custom user comments. Most of this metadata is parsed
and available in dictionary form. For example:
```python
from nd2reader import ND2Reader
with ND2Reader('my_directory/example.nd2') as images:
# width and height of the image
print('%d x %d px' % (images.metadata['width'], images.metadata['height']))
```
All metadata properties are:
* `width`: the width of the image in pixels
* `height`: the height of the image in pixels
* `date`: the date the image was taken
* `fields_of_view`: the fields of view in the image
* `frames`: a list of all frame numbers
* `z_levels`: the z levels in the image
* `total_images_per_channel`: the number of images per color channel
* `channels`: the color channels
* `pixel_microns`: the amount of microns per pixel
* `rois`: the regions of interest (ROIs) defined by the user
* `experiment`: information about the nature and timings of the ND experiment
### Iterating over fields of view
Using `NDExperiments` in the Nikon software, it is possible to acquire images on different `(x, y)` positions.
This is referred to as different fields of view. Using this reader, the fields of view are on the `v` axis.
For example:
```python
from nd2reader import ND2Reader
with ND2Reader('my_directory/example.nd2') as images:
# width and height of the image
print(images.metadata)
```
will output
```python
{'channels': ['BF100xoil-1x-R', 'BF+RITC'],
'date': datetime.datetime(2017, 10, 30, 14, 35, 18),
'experiment': {'description': 'ND Acquisition',
'loops': [{'duration': 0,
'sampling_interval': 0.0,
'start': 0,
'stimulation': False}]},
'fields_of_view': [0, 1],
'frames': [0],
'height': 1895,
'num_frames': 1,
'pixel_microns': 0.09214285714285715,
'total_images_per_channel': 6,
'width': 2368,
'z_levels': [0, 1, 2]}
```
for our example file. As you can see from the metadata, it has two fields of view. We can also look at the sizes of the axes:
```python
print(images.sizes)
```
```python
{'c': 2, 't': 1, 'v': 2, 'x': 2368, 'y': 1895, 'z': 3}
```
As you can see, the fields of view are listed on the `v` axis. It is therefore possible to loop over them like this:
```python
images.iter_axes = 'v'
for fov in images:
print(fov) # Frame containing one field of view
```
For more information on axis bundling and iteration, refer to the [pims documentation](http://soft-matter.github.io/pims/v0.4/multidimensional.html#axes-bundling).

+ 22
- 0
tests/test_artificial.py View File

@ -0,0 +1,22 @@
import unittest
from os import path
import six
import struct
from nd2reader.artificial import ArtificialND2
from nd2reader.common import get_version, parse_version, parse_date, _add_to_metadata, _parse_unsigned_char, \
_parse_unsigned_int, _parse_unsigned_long, _parse_double, check_or_make_dir
from nd2reader.exceptions import InvalidVersionError
class TestArtificial(unittest.TestCase):
    """Tests for the ArtificialND2 helper that synthesises .nd2 files on disk."""

    def setUp(self):
        """Ensure the test-data directory exists and generate a fresh artificial ND2 file."""
        here = path.dirname(path.realpath(__file__))
        self.test_file = path.join(here, 'test_data/test.nd2')
        check_or_make_dir(path.join(here, 'test_data/'))
        self.create_test_nd2()

    def create_test_nd2(self):
        """Write an artificial ND2 file and verify that a file handle was opened for it."""
        with ArtificialND2(self.test_file) as artificial:
            self.assertIsNotNone(artificial.file_handle)
            artificial.close()

+ 107
- 1
tests/test_common.py View File

@ -1,10 +1,27 @@
import unittest import unittest
from os import path
import array
import six import six
from nd2reader.common import parse_version, parse_date, _add_to_metadata
import struct
from nd2reader.artificial import ArtificialND2
from nd2reader.common import get_version, parse_version, parse_date, _add_to_metadata, _parse_unsigned_char, \
_parse_unsigned_int, _parse_unsigned_long, _parse_double, check_or_make_dir, _parse_string, _parse_char_array, \
get_from_dict_if_exists, read_chunk
from nd2reader.exceptions import InvalidVersionError
class TestCommon(unittest.TestCase): class TestCommon(unittest.TestCase):
def setUp(self):
dir_path = path.dirname(path.realpath(__file__))
check_or_make_dir(path.join(dir_path, 'test_data/'))
self.test_file = path.join(dir_path, 'test_data/test.nd2')
def create_test_nd2(self):
with ArtificialND2(self.test_file) as artificial:
artificial.close()
def test_parse_version_2(self): def test_parse_version_2(self):
data = 'ND2 FILE SIGNATURE CHUNK NAME01!Ver2.2' data = 'ND2 FILE SIGNATURE CHUNK NAME01!Ver2.2'
actual = parse_version(data) actual = parse_version(data)
@ -17,6 +34,17 @@ class TestCommon(unittest.TestCase):
expected = (3, 0) expected = (3, 0)
self.assertTupleEqual(actual, expected) self.assertTupleEqual(actual, expected)
def test_parse_version_invalid(self):
data = 'ND2 FILE SIGNATURE CHUNK NAME!Version2.2.3'
self.assertRaises(InvalidVersionError, parse_version, data)
def test_get_version_from_file(self):
self.create_test_nd2()
with open(self.test_file, 'rb') as fh:
version_tuple = get_version(fh)
self.assertTupleEqual(version_tuple, (3, 0))
def test_parse_date_24(self): def test_parse_date_24(self):
date_format = "%m/%d/%Y %H:%M:%S" date_format = "%m/%d/%Y %H:%M:%S"
date = '02/13/2016 23:43:37' date = '02/13/2016 23:43:37'
@ -31,6 +59,12 @@ class TestCommon(unittest.TestCase):
result = parse_date(textinfo) result = parse_date(textinfo)
self.assertEqual(result.strftime(date_format), date) self.assertEqual(result.strftime(date_format), date)
def test_parse_date_exception(self):
date = 'i am no date'
textinfo = {six.b('TextInfoItem9'): six.b(date)}
result = parse_date(textinfo)
self.assertIsNone(result)
def test_add_to_meta_simple(self): def test_add_to_meta_simple(self):
metadata = {} metadata = {}
_add_to_metadata(metadata, 'test', 'value') _add_to_metadata(metadata, 'test', 'value')
@ -46,4 +80,76 @@ class TestCommon(unittest.TestCase):
_add_to_metadata(metadata, 'test', 'value3') _add_to_metadata(metadata, 'test', 'value3')
self.assertDictEqual(metadata, {'test': ['value1', 'value2', 'value3']}) self.assertDictEqual(metadata, {'test': ['value1', 'value2', 'value3']})
@staticmethod
def _prepare_bin_stream(binary_format, *value):
file = six.BytesIO()
data = struct.pack(binary_format, *value)
file.write(data)
file.seek(0)
return file
def test_parse_functions(self):
file = self._prepare_bin_stream("B", 9)
self.assertEqual(_parse_unsigned_char(file), 9)
file = self._prepare_bin_stream("I", 333)
self.assertEqual(_parse_unsigned_int(file), 333)
file = self._prepare_bin_stream("Q", 7564332)
self.assertEqual(_parse_unsigned_long(file), 7564332)
file = self._prepare_bin_stream("d", 47.9)
self.assertEqual(_parse_double(file), 47.9)
test_string = 'colloid'
file = self._prepare_bin_stream("%ds" % len(test_string), six.b(test_string))
parsed = _parse_string(file)
self.assertEqual(parsed, six.b(test_string))
test_data = [1, 2, 3, 4, 5]
file = self._prepare_bin_stream("Q" + ''.join(['B'] * len(test_data)), len(test_data), *test_data)
parsed = _parse_char_array(file)
self.assertEqual(parsed, array.array('B', test_data))
def test_get_from_dict_if_exists(self):
test_dict = {
six.b('existing'): 'test',
'string': 'test2'
}
self.assertIsNone(get_from_dict_if_exists('nowhere', test_dict))
self.assertEqual(get_from_dict_if_exists('existing', test_dict), 'test')
self.assertEqual(get_from_dict_if_exists('string', test_dict, convert_key_to_binary=False), 'test2')
def test_read_chunk(self):
with ArtificialND2(self.test_file) as artificial:
fh = artificial.file_handle
chunk_location = artificial.locations['image_attributes'][0]
chunk_read = read_chunk(fh, chunk_location)
real_data = six.BytesIO(artificial.raw_text)
real_data.seek(chunk_location)
# The chunk metadata is always 16 bytes long
chunk_metadata = real_data.read(16)
header, relative_offset, data_length = struct.unpack("IIQ", chunk_metadata)
self.assertEquals(header, 0xabeceda)
# We start at the location of the chunk metadata, skip over the metadata, and then proceed to the
# start of the actual data field, which is at some arbitrary place after the metadata.
real_data.seek(chunk_location + 16 + relative_offset)
real_chunk = real_data.read(data_length)
self.assertEqual(real_chunk, chunk_read)
def test_read_chunk_fail_bad_header(self):
with ArtificialND2(self.test_file) as artificial:
fh = artificial.file_handle
chunk_location = artificial.locations['image_attributes'][0]
with self.assertRaises(ValueError) as context:
read_chunk(fh, chunk_location + 1)
self.assertEquals(str(context.exception), "The ND2 file seems to be corrupted.")

+ 79
- 0
tests/test_label_map.py View File

@ -0,0 +1,79 @@
import unittest
from nd2reader.label_map import LabelMap
from nd2reader.artificial import ArtificialND2
class TestLabelMap(unittest.TestCase):
    """Checks that LabelMap resolves each known chunk label to the byte offset
    recorded when the artificial ND2 file was generated."""

    def setUp(self):
        self.nd2 = ArtificialND2('test_data/test_nd2_label_map001.nd2')
        self.raw_text, self.locations = self.nd2.raw_text, self.nd2.locations
        self.label_map = LabelMap(self.raw_text)

    def tearDown(self):
        # setUp opens a file handle via ArtificialND2; close it after every
        # test so repeated runs do not leak handles (sibling tests call
        # close() explicitly for the same reason).
        self.nd2.close()

    def test_image_data_location(self):
        self.assertEqual(self.locations['image_frame_0'][0], self.label_map.get_image_data_location(0))

    def test_image_text_info(self):
        self.assertEqual(self.locations['image_text_info'][0], self.label_map.image_text_info)

    def test_image_metadata(self):
        self.assertEqual(self.locations['image_metadata'][0], self.label_map.image_metadata)

    def test_image_attributes(self):
        self.assertEqual(self.locations['image_attributes'][0], self.label_map.image_attributes)

    def test_image_metadata_sequence(self):
        self.assertEqual(self.locations['image_metadata_sequence'][0], self.label_map.image_metadata_sequence)

    def test_image_calibration(self):
        self.assertEqual(self.locations['image_calibration'][0], self.label_map.image_calibration)

    def test_x_data(self):
        self.assertEqual(self.locations['x_data'][0], self.label_map.x_data)

    def test_y_data(self):
        self.assertEqual(self.locations['y_data'][0], self.label_map.y_data)

    def test_z_data(self):
        self.assertEqual(self.locations['z_data'][0], self.label_map.z_data)

    def test_roi_metadata(self):
        self.assertEqual(self.locations['roi_metadata'][0], self.label_map.roi_metadata)

    def test_pfs_status(self):
        self.assertEqual(self.locations['pfs_status'][0], self.label_map.pfs_status)

    def test_pfs_offset(self):
        self.assertEqual(self.locations['pfs_offset'][0], self.label_map.pfs_offset)

    def test_guid(self):
        self.assertEqual(self.locations['guid'][0], self.label_map.guid)

    def test_description(self):
        self.assertEqual(self.locations['description'][0], self.label_map.description)

    def test_camera_exposure_time(self):
        self.assertEqual(self.locations['camera_exposure_time'][0], self.label_map.camera_exposure_time)

    def test_camera_temp(self):
        self.assertEqual(self.locations['camera_temp'][0], self.label_map.camera_temp)

    def test_acquisition_times(self):
        self.assertEqual(self.locations['acquisition_times'][0], self.label_map.acquisition_times)

    def test_acquisition_times_2(self):
        self.assertEqual(self.locations['acquisition_times_2'][0], self.label_map.acquisition_times_2)

    def test_acquisition_frames(self):
        self.assertEqual(self.locations['acquisition_frames'][0], self.label_map.acquisition_frames)

    def test_lut_data(self):
        self.assertEqual(self.locations['lut_data'][0], self.label_map.lut_data)

    def test_grabber_settings(self):
        self.assertEqual(self.locations['grabber_settings'][0], self.label_map.grabber_settings)

    def test_custom_data(self):
        self.assertEqual(self.locations['custom_data'][0], self.label_map.custom_data)

    def test_app_info(self):
        self.assertEqual(self.locations['app_info'][0], self.label_map.app_info)

+ 38
- 0
tests/test_legacy.py View File

@ -0,0 +1,38 @@
import unittest
import warnings
from nd2reader.legacy import Nd2
from nd2reader.reader import ND2Reader
from nd2reader.artificial import ArtificialND2
class TestLegacy(unittest.TestCase):
    """Tests for the deprecated legacy Nd2 wrapper around ND2Reader."""

    def test_init(self):
        """Constructing Nd2 must emit a DeprecationWarning and wrap an ND2Reader."""
        with ArtificialND2('test_data/legacy.nd2'):
            with warnings.catch_warnings(record=True) as w:
                # Cause all warnings to always be triggered.
                warnings.simplefilter("always")
                with Nd2('test_data/legacy.nd2') as reader:
                    self.assertIsInstance(reader.reader, ND2Reader)
                self.assertTrue(issubclass(w[0].category, DeprecationWarning))
                # assertEquals is a deprecated alias (removed in Python 3.12);
                # use assertEqual instead.
                self.assertEqual(str(w[0].message), "The 'Nd2' class is deprecated, please consider using the new" +
                                 " ND2Reader interface which uses pims.")

    def test_misc(self):
        """The legacy wrapper's repr, length and metadata passthroughs behave as documented."""
        with ArtificialND2('test_data/legacy.nd2'):
            with Nd2('test_data/legacy.nd2') as reader:
                representation = "\n".join(["<Deprecated ND2 %s>" % reader.reader.filename,
                                            "Created: Unknown",
                                            "Image size: %sx%s (HxW)" % (reader.height, reader.width),
                                            "Frames: %s" % len(reader.frames),
                                            "Channels: %s" % ", ".join(["%s" % str(channel) for channel
                                                                        in reader.channels]),
                                            "Fields of View: %s" % len(reader.fields_of_view),
                                            "Z-Levels: %s" % len(reader.z_levels)
                                            ])
                self.assertEqual(representation, str(reader))

                # not implemented yet
                self.assertIsNone(reader.pixel_microns)
                self.assertEqual(len(reader), 1)

+ 36
- 0
tests/test_parser.py View File

@ -0,0 +1,36 @@
import unittest
from os import path
from nd2reader.artificial import ArtificialND2
from nd2reader.common import check_or_make_dir
from nd2reader.parser import Parser
import urllib.request
class TestParser(unittest.TestCase):
    """Tests for the low-level ND2 Parser."""

    def create_test_nd2(self):
        """Generate an artificial ND2 file for the parser to open."""
        with ArtificialND2(self.test_file) as artificial:
            artificial.close()

    def setUp(self):
        self.dir_path = path.dirname(path.realpath(__file__))
        check_or_make_dir(path.join(self.dir_path, "test_data/"))
        self.test_file = path.join(self.dir_path, "test_data/test.nd2")
        self.create_test_nd2()

    def test_can_open_test_file(self):
        """Parser should recognise the artificial file as a supported ND2."""
        # setUp already created the test file; no need to regenerate it here.
        with open(self.test_file, "rb") as fh:
            parser = Parser(fh)
            self.assertTrue(parser.supported)

    def test_get_image(self):
        """Download (once) a real stitched sample file and parse its first image."""
        # Anchor the download next to the other test data instead of relying on
        # the current working directory, which made this test cwd-dependent.
        stitched_path = path.join(self.dir_path, "test_data/test_stitched.nd2")
        if not path.isfile(stitched_path):
            urllib.request.urlretrieve(
                "https://downloads.openmicroscopy.org/images/ND2/karl/sample_image.nd2",
                stitched_path,
            )
        with open(stitched_path, "rb") as fh:
            parser = Parser(fh)
            parser.get_image(0)

+ 71
- 8
tests/test_raw_metadata.py View File

@ -1,18 +1,81 @@
import unittest import unittest
import six
from nd2reader.artificial import ArtificialND2
from nd2reader.label_map import LabelMap
from nd2reader.raw_metadata import RawMetadata from nd2reader.raw_metadata import RawMetadata
from nd2reader.common_raw_metadata import parse_roi_shape, parse_roi_type, parse_dimension_text_line
class TestRawMetadata(unittest.TestCase): class TestRawMetadata(unittest.TestCase):
def setUp(self): def setUp(self):
self.metadata = RawMetadata(None, None)
self.nd2 = ArtificialND2('test_data/test_nd2_raw_metadata001.nd2')
self.raw_text, self.locations, self.file_data = self.nd2.raw_text, self.nd2.locations, self.nd2.data
self.label_map = LabelMap(self.raw_text)
self.metadata = RawMetadata(self.nd2.file_handle, self.label_map)
def test_parse_roi_shape(self): def test_parse_roi_shape(self):
self.assertEqual(self.metadata._parse_roi_shape(3), 'rectangle')
self.assertEqual(self.metadata._parse_roi_shape(9), 'circle')
self.assertIsNone(self.metadata._parse_roi_shape(-1))
self.assertEqual(parse_roi_shape(3), 'rectangle')
self.assertEqual(parse_roi_shape(9), 'circle')
self.assertIsNone(parse_roi_shape(-1))
def test_parse_roi_type(self): def test_parse_roi_type(self):
self.assertEqual(self.metadata._parse_roi_type(3), 'reference')
self.assertEqual(self.metadata._parse_roi_type(2), 'background')
self.assertEqual(self.metadata._parse_roi_type(4), 'stimulation')
self.assertIsNone(self.metadata._parse_roi_type(-1))
self.assertEqual(parse_roi_type(3), 'reference')
self.assertEqual(parse_roi_type(2), 'background')
self.assertEqual(parse_roi_type(4), 'stimulation')
self.assertIsNone(parse_roi_type(-1))
def test_parse_dimension_text(self):
line = six.b('Metadata:\r\nDimensions: T(443) x \xce\xbb(1)\r\nCamera Name: Andor Zyla VSC-01537')
self.assertEqual(parse_dimension_text_line(line), six.b('Dimensions: T(443) x \xce\xbb(1)'))
self.assertIsNone(parse_dimension_text_line(six.b('Dim: nothing')))
def test_parse_z_levels(self):
# smokescreen test to check if the fallback to z_coordinates is working
# for details, see RawMetadata._parse_z_levels()
dimension_text = self.metadata._parse_dimension_text()
z_levels = self.metadata._parse_dimension(r""".*?Z\((\d+)\).*?""", dimension_text)
z_coords = self.metadata._parse_z_coordinates()
self.assertEqual(len(dimension_text), 0)
self.assertEqual(len(z_levels), 0)
self.assertEqual(len(self.metadata._parse_z_levels()), len(z_coords))
def test_dict(self):
self.assertTrue(type(self.metadata.__dict__) is dict)
def test_parsed_metadata_has_all_keys(self):
metadata = self.metadata.get_parsed_metadata()
self.assertTrue(type(metadata) is dict)
required_keys = ["height", "width", "date", "fields_of_view", "frames", "z_levels", "total_images_per_channel",
"channels", "pixel_microns"]
for required in required_keys:
self.assertTrue(required in metadata)
def test_cached_metadata(self):
metadata_one = self.metadata.get_parsed_metadata()
metadata_two = self.metadata.get_parsed_metadata()
self.assertEqual(metadata_one, metadata_two)
def test_pfs_status(self):
self.assertEqual(self.file_data['pfs_status'], self.metadata.pfs_status[0])
def _assert_dicts_equal(self, parsed_dict, original_dict):
for attribute in original_dict.keys():
parsed_key = six.b(attribute)
self.assertIn(parsed_key, parsed_dict.keys())
if isinstance(parsed_dict[parsed_key], dict):
self._assert_dicts_equal(parsed_dict[parsed_key], original_dict[attribute])
else:
self.assertEqual(parsed_dict[parsed_key], original_dict[attribute])
def test_image_attributes(self):
parsed_dict = self.metadata.image_attributes
self._assert_dicts_equal(parsed_dict, self.file_data['image_attributes'])
def test_color_channels(self):
parsed_channels = self.metadata.get_parsed_metadata()['channels']
self.assertEquals(parsed_channels, ['TRITC'])

+ 79
- 0
tests/test_reader.py View File

@ -0,0 +1,79 @@
import unittest
import numpy as np
import struct
from pims import Frame
from nd2reader.artificial import ArtificialND2
from nd2reader.exceptions import EmptyFileError, InvalidFileType
from nd2reader.reader import ND2Reader
from nd2reader.parser import Parser
class TestReader(unittest.TestCase):
    """Tests for the pims-based ND2Reader interface."""

    def test_invalid_file_extension(self):
        self.assertRaises(InvalidFileType, lambda: ND2Reader('test_data/invalid_extension_file.inv'))

    def test_extension(self):
        self.assertTrue('nd2' in ND2Reader.class_exts())

    def cmp_two_readers(self, r1, r2):
        """Assert that reader ``r2`` reports the attributes stored in the artificial file ``r1``."""
        attributes = r1.data['image_attributes']['SLxImageAttributes']
        self.assertEqual(r2.metadata['width'], attributes['uiWidth'])
        self.assertEqual(r2.metadata['height'], attributes['uiHeight'])
        self.assertEqual(r2.metadata['width'], r2.sizes['x'])
        self.assertEqual(r2.metadata['height'], r2.sizes['y'])
        self.assertEqual(r2.pixel_type, np.float64)
        self.assertEqual(r2.iter_axes, ['t'])

    def test_init_and_init_axes(self):
        with ArtificialND2('test_data/test_nd2_reader.nd2') as artificial:
            with ND2Reader('test_data/test_nd2_reader.nd2') as reader:
                self.cmp_two_readers(artificial, reader)

    def test_init_from_handler(self):
        with ArtificialND2('test_data/test_nd2_reader.nd2') as artificial:
            with open('test_data/test_nd2_reader.nd2', "rb") as FH:
                with ND2Reader(FH) as reader:
                    self.cmp_two_readers(artificial, reader)

    def test_init_empty_file(self):
        with ArtificialND2('test_data/empty.nd2', skip_blocks=['label_map_marker']):
            with self.assertRaises(EmptyFileError) as exception:
                with ND2Reader('test_data/empty.nd2'):
                    pass
            self.assertEqual(str(exception.exception), "No axes were found for this .nd2 file.")

    def test_get_parser(self):
        with ArtificialND2('test_data/test_nd2_reader.nd2') as _:
            with ND2Reader('test_data/test_nd2_reader.nd2') as reader:
                self.assertIsInstance(reader.parser, Parser)

    def test_get_timesteps(self):
        with ArtificialND2('test_data/test_nd2_reader.nd2') as _:
            with ND2Reader('test_data/test_nd2_reader.nd2') as reader:
                timesteps = reader.timesteps
                # assertEquals is a deprecated alias (removed in Python 3.12).
                self.assertEqual(len(timesteps), 0)

    def test_get_frame_zero(self):
        # Best test we can do for now:
        # test everything up to the actual unpacking of the frame data
        with ArtificialND2('test_data/test_nd2_reader.nd2') as _:
            with ND2Reader('test_data/test_nd2_reader.nd2') as reader:
                with self.assertRaises(struct.error) as exception:
                    reader[0]
                self.assertIn('unpack', str(exception.exception))

    def test_get_frame_2D(self):
        # Best test we can do for now:
        # test everything up to the actual unpacking of the frame data
        with ArtificialND2('test_data/test_nd2_reader.nd2') as _:
            with ND2Reader('test_data/test_nd2_reader.nd2') as reader:
                with self.assertRaises(struct.error) as exception:
                    reader.get_frame_2D(c=0, t=0, z=0, x=0, y=0, v=0)
                self.assertIn('unpack', str(exception.exception))

+ 2
- 6
tests/test_version.py View File

@ -1,12 +1,8 @@
import nd2reader
import unittest import unittest
from setup import VERSION
from nd2reader import __version__ as VERSION
class TestVersion(unittest.TestCase): class TestVersion(unittest.TestCase):
def test_module_version_type(self): def test_module_version_type(self):
# just make sure the version number exists and is the type we expect # just make sure the version number exists and is the type we expect
self.assertEqual(type(nd2reader.__version__), str)
def test_versions_in_sync(self):
self.assertEqual(nd2reader.__version__, VERSION)
self.assertEqual(type(VERSION), str)

Loading…
Cancel
Save