From 9dceec10f344ff754b32066263be49276e4bbbf1 Mon Sep 17 00:00:00 2001 From: jennmald Date: Tue, 10 Mar 2026 09:21:34 -0400 Subject: [PATCH 1/7] drafting pixi setup for chxtools requirements --- pixi.toml | 44 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 44 insertions(+) create mode 100644 pixi.toml diff --git a/pixi.toml b/pixi.toml new file mode 100644 index 0000000..4f0e78e --- /dev/null +++ b/pixi.toml @@ -0,0 +1,44 @@ +[workspace] +channels = ["conda-forge"] +name = "chxtools" +platforms = ["linux-64", "osx-64", "osx-arm64"] +version = "2026.3.0" + +[feature.base.dependencies] +numpy = "*" +pyepics = "*" +python = ">=3.12,<3.13" +databroker = "==2.0.0b68" +eiger-io = "*" +matplotlib = "*" +pymongo = "*" +scikit-beam = "*" +scikit-image = "*" +scipy = "*" + +[feature.terminal.dependencies] +ipython = ">=9.5.0" +pyside6 = "*" +numpy = ">2" + +[feature.dev.dependencies] +pre-commit = "*" +ruff = "*" +black = "*" +isort = "*" +prettier = "*" +nbstripout = "*" + +[feature.dev.tasks] +pre-commit-install = "pre-commit install" +pre-commit-run = "pre-commit run --all-files" +pre-commit-update = "pre-commit autoupdate" +lint = "ruff check ." +format = "ruff format ." +lint-fix = "ruff check --fix ." +black-format = "black ." +isort-fix = "isort ." 
+ +[environments] +terminal = {features=["base", "terminal"], solve-group="profile"} +dev = {features=["dev"], no-default-feature = true} \ No newline at end of file From ae17e02d4935fe36fa5ce60e74539cde9ee0aab6 Mon Sep 17 00:00:00 2001 From: jennmald Date: Thu, 19 Mar 2026 13:26:26 -0400 Subject: [PATCH 2/7] add pre-commit rules from scientific python cookie cutter --- .pre-commit-config.yaml | 96 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 96 insertions(+) create mode 100644 .pre-commit-config.yaml diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 0000000..26e53c4 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,96 @@ +ci: + autoupdate_commit_msg: "chore(deps): update pre-commit hooks" + autofix_commit_msg: "style: pre-commit fixes" + autoupdate_schedule: "quarterly" + +exclude: "^({{cookiecutter\\.project_name}}|hooks/pre_gen_project.py$)" + +repos: + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: "v6.0.0" + hooks: + - id: check-added-large-files + - id: check-case-conflict + - id: check-merge-conflict + - id: check-symlinks + - id: check-yaml + - id: debug-statements + - id: end-of-file-fixer + - id: mixed-line-ending + - id: name-tests-test + args: ["--pytest-test-first"] + - id: requirements-txt-fixer + - id: trailing-whitespace + + - repo: https://github.com/adamchainz/blacken-docs + rev: "1.20.0" + hooks: + - id: blacken-docs + additional_dependencies: [black==24.*] + + - repo: https://github.com/astral-sh/ruff-pre-commit + rev: "v0.15.6" + hooks: + - id: ruff-check + args: ["--fix", "--show-fixes"] + - id: ruff-format + + - repo: https://github.com/pre-commit/pygrep-hooks + rev: "v1.10.0" + hooks: + - id: rst-backticks + - id: rst-directive-colons + - id: rst-inline-touching-normal + + - repo: https://github.com/pre-commit/mirrors-mypy + rev: "v1.19.1" + hooks: + - id: mypy + files: "(src|tests|noxfile.py)" + args: [] + additional_dependencies: + - click + - markdown-it-py + - 
pytest + - nox + - orjson + - repo-review>=0.10.6 + - rich + - tomli>=2.0.2 + - types-PyYAML + + - repo: https://github.com/rbubley/mirrors-prettier + rev: "v3.8.1" + hooks: + - id: prettier + types_or: [yaml, markdown, html, css, scss, javascript, json] + args: [--prose-wrap=always] + + - repo: https://github.com/crate-ci/typos + rev: "v1.44.0" + hooks: + - id: typos + exclude: ^Gemfile\.lock$ + + - repo: local + hooks: + - id: disallow-caps + name: Disallow improper capitalization + language: pygrep + entry: PyBind|Numpy|Cmake|CCache|Github|PyTest|RST|PyLint + exclude: (.pre-commit-config.yaml|docs/pages/guides/style\.md)$ + - id: disallow-words + name: Disallow certain words + language: pygrep + entry: "[Ff]alsey" + exclude: .pre-commit-config.yaml$ + - id: disallow-bad-permalinks + name: Disallow _ in permalinks + language: pygrep + entry: "^permalink:.*_.*" + - id: cog + name: Cog the pages + language: python + entry: cog -P -r -I ./helpers + files: "^docs/pages/guides/(packaging_compiled|docs|tasks|gha_basic).md|^copier.yml|^docs/_includes/pyproject.md" + additional_dependencies: [cogapp, cookiecutter, tomlkit] \ No newline at end of file From eebcb9db484a57ab11e73c7c8ca602ad0ed4f1b9 Mon Sep 17 00:00:00 2001 From: jennmald Date: Thu, 19 Mar 2026 13:27:21 -0400 Subject: [PATCH 3/7] changes from pre-commit run --- .gitignore | 2 +- LICENSE | 2 +- README.md | 8 +- chxtools/X-ray_database/PhElAbsCross_Si.dat | 70 +- .../X-ray_database/id_CHX_IVU20_01162017.dat | 4 +- .../X-ray_database/id_CHX_IVU20_03182016.dat | 4 +- .../X-ray_database/id_CHX_IVU20_03222016.dat | 4 +- .../X-ray_database/id_CHX_IVU20_05272017.dat | 4 +- .../X-ray_database/id_CHX_IVU20_06062016.dat | 4 +- .../X-ray_database/id_CHX_IVU20_10042016.dat | 4 +- chxtools/X-ray_database/mu_Be.dat | 2 +- chxtools/X-ray_database/mu_Si.dat | 2 +- chxtools/X-ray_database/mu_Si3N4.dat | 2 +- chxtools/X-ray_database/n_Ag.dat | 2 +- chxtools/X-ray_database/n_Al.dat | 2 +- chxtools/X-ray_database/n_Au.dat | 2 +- 
chxtools/X-ray_database/n_Be.dat | 2 +- chxtools/X-ray_database/n_C.dat | 2 +- chxtools/X-ray_database/n_C_test.dat | 2 +- chxtools/X-ray_database/n_Cu.dat | 2 +- chxtools/X-ray_database/n_H2O.dat | 2 +- chxtools/X-ray_database/n_Hg.dat | 2 +- chxtools/X-ray_database/n_Pd.dat | 2 +- chxtools/X-ray_database/n_Pt.dat | 2 +- chxtools/X-ray_database/n_Si.dat | 2 +- chxtools/X-ray_database/n_Si3N4.dat | 2 +- chxtools/X-ray_database/n_SiO2.dat | 2 +- chxtools/X-ray_database/n_W.dat | 2 +- chxtools/X-ray_database/n_diamond.dat | 2 +- chxtools/X-ray_database/n_dna.dat | 2 +- chxtools/X-ray_database/n_ice.dat | 2 +- chxtools/X-ray_database/n_kapton.dat | 2 +- chxtools/X-ray_database/n_lipid.dat | 2 +- chxtools/X-ray_database/n_mica.dat | 2 +- chxtools/X-ray_database/n_nucleosome.dat | 2 +- chxtools/X-ray_database/n_protein.dat | 2 +- chxtools/X-ray_database/rc_Ge111.dat | 2 +- chxtools/X-ray_database/rc_Ge220.dat | 2 +- chxtools/X-ray_database/rc_Si111.dat | 2 +- chxtools/X-ray_database/rc_Si220.dat | 2 +- chxtools/__init__.py | 5 +- chxtools/_version.py | 123 +- chxtools/attfuncs.py | 283 +- chxtools/attfuncs2.py | 292 +- chxtools/bpm_stability.py | 408 +-- chxtools/chx_utilities.py | 2438 ++++++++++------- chxtools/chx_wrapper.py | 278 +- chxtools/handlers.py | 28 +- chxtools/plot_sid.py | 39 +- chxtools/pv_explorer/__init__.py | 2 +- chxtools/pv_explorer/model.py | 34 +- chxtools/pv_explorer/run.py | 6 +- chxtools/pv_explorer/view.enaml | 1 - chxtools/transfuncs.py | 336 ++- chxtools/xfuncs.py | 964 ++++--- pixi.toml | 2 +- requirements_backup.txt | 2 +- setup.py | 18 +- versioneer.py | 216 +- 59 files changed, 3363 insertions(+), 2278 deletions(-) diff --git a/.gitignore b/.gitignore index 534ef93..b78c68a 100644 --- a/.gitignore +++ b/.gitignore @@ -81,7 +81,7 @@ docs/_build/ *.tiff *.tif -#generated documntation files +#generated documentation files doc/resource/api/generated/ # Enaml diff --git a/LICENSE b/LICENSE index 192f156..f16cd3c 100644 --- a/LICENSE +++ 
b/LICENSE @@ -24,4 +24,4 @@ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/README.md b/README.md index 6d92295..3544c89 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,4 @@ -chxtools -======== - -Repository for data collection and analysis scripts that are useful at the -CHX beamline at NSLS-II (11-id). +# chxtools +Repository for data collection and analysis scripts that are useful at the CHX +beamline at NSLS-II (11-id). diff --git a/chxtools/X-ray_database/PhElAbsCross_Si.dat b/chxtools/X-ray_database/PhElAbsCross_Si.dat index 0658f60..2f828c5 100644 --- a/chxtools/X-ray_database/PhElAbsCross_Si.dat +++ b/chxtools/X-ray_database/PhElAbsCross_Si.dat @@ -1,35 +1,35 @@ -%Photon Photoel. -%Energy Absorb. -1.000E-03 1.567E+03 -1.500E-03 5.333E+02 -1.839E-03 3.071E+02 -1.839E-03 3.191E+03 -2.000E-03 2.775E+03 -3.000E-03 9.767E+02 -4.000E-03 4.514E+02 -5.000E-03 2.438E+02 -6.000E-03 1.458E+02 -7.000E-03 9.388E+01 -8.000E-03 6.379E+01 -9.000E-03 4.521E+01 -1.000E-02 3.315E+01 -1.100E-02 2.499E+01 -1.200E-02 1.928E+01 -1.300E-02 1.517E+01 -1.400E-02 1.213E+01 -1.500E-02 9.848E+00 -1.600E-02 8.096E+00 -1.700E-02 6.730E+00 -1.800E-02 5.651E+00 -1.900E-02 4.787E+00 -2.000E-02 4.089E+00 -2.100E-02 3.518E+00 -2.200E-02 3.048E+00 -2.300E-02 2.656E+00 -2.400E-02 2.328E+00 -2.500E-02 2.051E+00 -2.600E-02 1.815E+00 -2.700E-02 1.614E+00 -2.800E-02 1.440E+00 -2.900E-02 1.291E+00 -3.000E-02 1.161E+00 \ No newline at end of file +%Photon Photoel. +%Energy Absorb. 
+1.000E-03 1.567E+03 +1.500E-03 5.333E+02 +1.839E-03 3.071E+02 +1.839E-03 3.191E+03 +2.000E-03 2.775E+03 +3.000E-03 9.767E+02 +4.000E-03 4.514E+02 +5.000E-03 2.438E+02 +6.000E-03 1.458E+02 +7.000E-03 9.388E+01 +8.000E-03 6.379E+01 +9.000E-03 4.521E+01 +1.000E-02 3.315E+01 +1.100E-02 2.499E+01 +1.200E-02 1.928E+01 +1.300E-02 1.517E+01 +1.400E-02 1.213E+01 +1.500E-02 9.848E+00 +1.600E-02 8.096E+00 +1.700E-02 6.730E+00 +1.800E-02 5.651E+00 +1.900E-02 4.787E+00 +2.000E-02 4.089E+00 +2.100E-02 3.518E+00 +2.200E-02 3.048E+00 +2.300E-02 2.656E+00 +2.400E-02 2.328E+00 +2.500E-02 2.051E+00 +2.600E-02 1.815E+00 +2.700E-02 1.614E+00 +2.800E-02 1.440E+00 +2.900E-02 1.291E+00 +3.000E-02 1.161E+00 diff --git a/chxtools/X-ray_database/id_CHX_IVU20_01162017.dat b/chxtools/X-ray_database/id_CHX_IVU20_01162017.dat index 15fc539..dd2dcc8 100644 --- a/chxtools/X-ray_database/id_CHX_IVU20_01162017.dat +++ b/chxtools/X-ray_database/id_CHX_IVU20_01162017.dat @@ -1,7 +1,7 @@ % data from measurements 01/16/17 % manually taken for center of undulator cone -% K colkumn is a placeholder! -% ID gap [mm] K E_1 [keV] +% K colkumn is a placeholder! +% ID gap [mm] K E_1 [keV] 5.20 1.0 1.3782 5.40 1.0 1.4504 5.60 1.0 1.525 diff --git a/chxtools/X-ray_database/id_CHX_IVU20_03182016.dat b/chxtools/X-ray_database/id_CHX_IVU20_03182016.dat index 3a603d6..d64c48a 100644 --- a/chxtools/X-ray_database/id_CHX_IVU20_03182016.dat +++ b/chxtools/X-ray_database/id_CHX_IVU20_03182016.dat @@ -1,3 +1,3 @@ % data from measurements 03/18/16 -% K colkumn is a placeholder! -% ID gap [mm] K E_1 [keV] +% K colkumn is a placeholder! +% ID gap [mm] K E_1 [keV] diff --git a/chxtools/X-ray_database/id_CHX_IVU20_03222016.dat b/chxtools/X-ray_database/id_CHX_IVU20_03222016.dat index ed80b56..381166a 100644 --- a/chxtools/X-ray_database/id_CHX_IVU20_03222016.dat +++ b/chxtools/X-ray_database/id_CHX_IVU20_03222016.dat @@ -1,3 +1,3 @@ % data from measurements 03/22/16 -% K colkumn is a placeholder! 
-% ID gap [mm] K E_1 [keV] +% K colkumn is a placeholder! +% ID gap [mm] K E_1 [keV] diff --git a/chxtools/X-ray_database/id_CHX_IVU20_05272017.dat b/chxtools/X-ray_database/id_CHX_IVU20_05272017.dat index a9ede44..e2797a0 100644 --- a/chxtools/X-ray_database/id_CHX_IVU20_05272017.dat +++ b/chxtools/X-ray_database/id_CHX_IVU20_05272017.dat @@ -1,6 +1,6 @@ % data from measurements 05/27/17 -% K colkumn is a placeholder! -% ID gap [mm] K E_1 [keV] +% K colkumn is a placeholder! +% ID gap [mm] K E_1 [keV] 5.199965 1.0 1.378589094103352 5.400622 1.0 1.4505010580342685 5.600828 1.0 1.5248993303989322 diff --git a/chxtools/X-ray_database/id_CHX_IVU20_06062016.dat b/chxtools/X-ray_database/id_CHX_IVU20_06062016.dat index cb5532f..5a5101b 100644 --- a/chxtools/X-ray_database/id_CHX_IVU20_06062016.dat +++ b/chxtools/X-ray_database/id_CHX_IVU20_06062016.dat @@ -1,6 +1,6 @@ % data from measurements 06/06/16 -% K colkumn is a placeholder! -% ID gap [mm] K E_1 [keV] +% K colkumn is a placeholder! +% ID gap [mm] K E_1 [keV] 5.199418 1.0 1.3673620139594027 5.400744 1.0 1.4390290269467232 5.600851 1.0 1.5129974379749154 diff --git a/chxtools/X-ray_database/id_CHX_IVU20_10042016.dat b/chxtools/X-ray_database/id_CHX_IVU20_10042016.dat index f759e63..df04971 100644 --- a/chxtools/X-ray_database/id_CHX_IVU20_10042016.dat +++ b/chxtools/X-ray_database/id_CHX_IVU20_10042016.dat @@ -1,6 +1,6 @@ % data from measurements 10/04/16 -% K colkumn is a placeholder! -% ID gap [mm] K E_1 [keV] +% K colkumn is a placeholder! 
+% ID gap [mm] K E_1 [keV] 5.200081 1.0 1.3736578661340244 5.400614 1.0 1.4451752600116263 5.600811 1.0 1.5191900302629033 diff --git a/chxtools/X-ray_database/mu_Be.dat b/chxtools/X-ray_database/mu_Be.dat index ed04320..4fb1ad7 100644 --- a/chxtools/X-ray_database/mu_Be.dat +++ b/chxtools/X-ray_database/mu_Be.dat @@ -500,4 +500,4 @@ 29516.5 33249.5 29676.8 33285.3 29838.0 33320.4 - 30000.0 33354.8 \ No newline at end of file + 30000.0 33354.8 diff --git a/chxtools/X-ray_database/mu_Si.dat b/chxtools/X-ray_database/mu_Si.dat index fe77ab2..e1b7c71 100644 --- a/chxtools/X-ray_database/mu_Si.dat +++ b/chxtools/X-ray_database/mu_Si.dat @@ -500,4 +500,4 @@ 29516.5 3179.22 29676.8 3226.88 29838.0 3275.16 - 30000.0 3324.06 \ No newline at end of file + 30000.0 3324.06 diff --git a/chxtools/X-ray_database/mu_Si3N4.dat b/chxtools/X-ray_database/mu_Si3N4.dat index 0a4b6c0..a15c1f6 100644 --- a/chxtools/X-ray_database/mu_Si3N4.dat +++ b/chxtools/X-ray_database/mu_Si3N4.dat @@ -500,4 +500,4 @@ 29516.5 3175.11 29676.8 3219.54 29838.0 3264.44 - 30000.0 3309.83 \ No newline at end of file + 30000.0 3309.83 diff --git a/chxtools/X-ray_database/n_Ag.dat b/chxtools/X-ray_database/n_Ag.dat index 52d1e04..914f3f7 100644 --- a/chxtools/X-ray_database/n_Ag.dat +++ b/chxtools/X-ray_database/n_Ag.dat @@ -498,4 +498,4 @@ 29515.5312 2.13935346E-06 1.31475204E-07 29676.1465 2.1174576E-06 1.28917975E-07 29837.6348 2.09574932E-06 1.264102E-07 - 30000.002 2.07426297E-06 1.23951438E-07 \ No newline at end of file + 30000.002 2.07426297E-06 1.23951438E-07 diff --git a/chxtools/X-ray_database/n_Al.dat b/chxtools/X-ray_database/n_Al.dat index fb5cc7b..9d7195b 100644 --- a/chxtools/X-ray_database/n_Al.dat +++ b/chxtools/X-ray_database/n_Al.dat @@ -499,4 +499,4 @@ 29515.5312 6.20719788E-07 9.45491796E-10 29676.1465 6.14001522E-07 9.26816401E-10 29837.6348 6.07355958E-07 9.08543518E-10 - 30000.002 6.00782244E-07 8.90667595E-10 \ No newline at end of file + 30000.002 6.00782244E-07 8.90667595E-10 diff 
--git a/chxtools/X-ray_database/n_Au.dat b/chxtools/X-ray_database/n_Au.dat index edfc416..abba081 100644 --- a/chxtools/X-ray_database/n_Au.dat +++ b/chxtools/X-ray_database/n_Au.dat @@ -498,4 +498,4 @@ 29515.5312 3.67560688E-06 1.74805862E-07 29676.1465 3.63515437E-06 1.71332616E-07 29837.6348 3.59511068E-06 1.67928178E-07 - 30000.002 3.5555081E-06 1.6459181E-07 \ No newline at end of file + 30000.002 3.5555081E-06 1.6459181E-07 diff --git a/chxtools/X-ray_database/n_Be.dat b/chxtools/X-ray_database/n_Be.dat index 0dd8db8..0f5f5e2 100644 --- a/chxtools/X-ray_database/n_Be.dat +++ b/chxtools/X-ray_database/n_Be.dat @@ -499,4 +499,4 @@ 29515.5312 3.90868138E-07 1.0053737E-10 29676.1465 3.86648253E-07 9.98853569E-11 29837.6348 3.82474013E-07 9.92398316E-11 - 30000.002 3.78344794E-07 9.86006971E-11 \ No newline at end of file + 30000.002 3.78344794E-07 9.86006971E-11 diff --git a/chxtools/X-ray_database/n_C.dat b/chxtools/X-ray_database/n_C.dat index 9708bc9..5f2d251 100644 --- a/chxtools/X-ray_database/n_C.dat +++ b/chxtools/X-ray_database/n_C.dat @@ -499,4 +499,4 @@ 28779.6348 5.50912659E-07 1.691699E-10 29180.8066 5.35863933E-07 1.64976102E-10 29587.5723 5.21226639E-07 1.60958052E-10 - 30000.0059 5.06988613E-07 1.5710637E-10 \ No newline at end of file + 30000.0059 5.06988613E-07 1.5710637E-10 diff --git a/chxtools/X-ray_database/n_C_test.dat b/chxtools/X-ray_database/n_C_test.dat index 18c9b57..e87a5d1 100644 --- a/chxtools/X-ray_database/n_C_test.dat +++ b/chxtools/X-ray_database/n_C_test.dat @@ -497,4 +497,4 @@ 28779.6348 5.50912659E-07 1.691699E-10 29180.8066 5.35863933E-07 1.64976102E-10 29587.5723 5.21226639E-07 1.60958052E-10 - 30000.0059 5.06988613E-07 1.5710637E-10 \ No newline at end of file + 30000.0059 5.06988613E-07 1.5710637E-10 diff --git a/chxtools/X-ray_database/n_Cu.dat b/chxtools/X-ray_database/n_Cu.dat index e75cbaa..e7c2907 100644 --- a/chxtools/X-ray_database/n_Cu.dat +++ b/chxtools/X-ray_database/n_Cu.dat @@ -498,4 +498,4 @@ 29515.5312 
1.96632163E-06 3.31323164E-08 29676.1465 1.94497534E-06 3.24445217E-08 29837.6348 1.92386119E-06 3.17710409E-08 - 30000.002 1.90297612E-06 3.11116963E-08 \ No newline at end of file + 30000.002 1.90297612E-06 3.11116963E-08 diff --git a/chxtools/X-ray_database/n_H2O.dat b/chxtools/X-ray_database/n_H2O.dat index e4e3a6b..da98524 100644 --- a/chxtools/X-ray_database/n_H2O.dat +++ b/chxtools/X-ray_database/n_H2O.dat @@ -497,4 +497,4 @@ 29515.5312 2.64595712E-07 1.1064933E-10 29676.1465 2.61737739E-07 1.09216497E-10 29837.6348 2.5891066E-07 1.07810774E-10 - 30000.002 2.56114106E-07 1.06431745E-10 \ No newline at end of file + 30000.002 2.56114106E-07 1.06431745E-10 diff --git a/chxtools/X-ray_database/n_Hg.dat b/chxtools/X-ray_database/n_Hg.dat index 650d3cb..c4276fb 100644 --- a/chxtools/X-ray_database/n_Hg.dat +++ b/chxtools/X-ray_database/n_Hg.dat @@ -496,4 +496,4 @@ 29515.5312 2.56283852E-06 1.2784939E-07 29676.1465 2.53465032E-06 1.25311402E-07 29837.6348 2.5067452E-06 1.22823621E-07 - 30000.002 2.47914681E-06 1.20385522E-07 \ No newline at end of file + 30000.002 2.47914681E-06 1.20385522E-07 diff --git a/chxtools/X-ray_database/n_Pd.dat b/chxtools/X-ray_database/n_Pd.dat index 809860a..2c2b9b8 100644 --- a/chxtools/X-ray_database/n_Pd.dat +++ b/chxtools/X-ray_database/n_Pd.dat @@ -496,4 +496,4 @@ 29515.5312 2.44286343E-06 1.43845782E-07 29676.1465 2.41790895E-06 1.4104296E-07 29837.6348 2.39320229E-06 1.38294439E-07 - 30000.002 2.36874689E-06 1.35599763E-07 \ No newline at end of file + 30000.002 2.36874689E-06 1.35599763E-07 diff --git a/chxtools/X-ray_database/n_Pt.dat b/chxtools/X-ray_database/n_Pt.dat index eb8b55d..c14e8e8 100644 --- a/chxtools/X-ray_database/n_Pt.dat +++ b/chxtools/X-ray_database/n_Pt.dat @@ -498,4 +498,4 @@ 29515.5312 4.06940399E-06 1.90456674E-07 29676.1465 4.02460273E-06 1.86660998E-07 29837.6348 3.98025441E-06 1.82940738E-07 - 30000.002 3.93639448E-06 1.792951E-07 \ No newline at end of file + 30000.002 3.93639448E-06 1.792951E-07 diff 
--git a/chxtools/X-ray_database/n_Si.dat b/chxtools/X-ray_database/n_Si.dat index 37335f1..f287f6f 100644 --- a/chxtools/X-ray_database/n_Si.dat +++ b/chxtools/X-ray_database/n_Si.dat @@ -497,4 +497,4 @@ 29515.5312 5.54628173E-07 1.0515453E-09 29676.1465 5.48626929E-07 1.03037501E-09 29837.6348 5.42691055E-07 1.00966036E-09 - 30000.002 5.36819357E-07 9.89395676E-10 \ No newline at end of file + 30000.002 5.36819357E-07 9.89395676E-10 diff --git a/chxtools/X-ray_database/n_Si3N4.dat b/chxtools/X-ray_database/n_Si3N4.dat index 4cf874d..8d7b8da 100644 --- a/chxtools/X-ray_database/n_Si3N4.dat +++ b/chxtools/X-ray_database/n_Si3N4.dat @@ -499,4 +499,4 @@ 29515.5312 8.19071943E-07 1.0528991E-09 29676.1465 8.10216079E-07 1.03272146E-09 29837.6348 8.01456281E-07 1.01297337E-09 - 30000.002 7.9279107E-07 9.93649496E-10 \ No newline at end of file + 30000.002 7.9279107E-07 9.93649496E-10 diff --git a/chxtools/X-ray_database/n_SiO2.dat b/chxtools/X-ray_database/n_SiO2.dat index 159f637..3997fe7 100644 --- a/chxtools/X-ray_database/n_SiO2.dat +++ b/chxtools/X-ray_database/n_SiO2.dat @@ -497,4 +497,4 @@ 29515.5312 5.24062159E-07 5.92594085E-10 29676.1465 5.18396519E-07 5.81460435E-10 29837.6348 5.12792383E-07 5.70563152E-10 - 30000.002 5.07248785E-07 5.59899072E-10 \ No newline at end of file + 30000.002 5.07248785E-07 5.59899072E-10 diff --git a/chxtools/X-ray_database/n_W.dat b/chxtools/X-ray_database/n_W.dat index 9e2950f..b0074c3 100644 --- a/chxtools/X-ray_database/n_W.dat +++ b/chxtools/X-ray_database/n_W.dat @@ -498,4 +498,4 @@ 29515.5312 3.68020096E-06 1.40336724E-07 29676.1465 3.64016569E-06 1.3754503E-07 29837.6348 3.60056629E-06 1.34808843E-07 - 30000.002 3.56139412E-06 1.32127752E-07 \ No newline at end of file + 30000.002 3.56139412E-06 1.32127752E-07 diff --git a/chxtools/X-ray_database/n_diamond.dat b/chxtools/X-ray_database/n_diamond.dat index 2c72ce4..e09abf7 100644 --- a/chxtools/X-ray_database/n_diamond.dat +++ b/chxtools/X-ray_database/n_diamond.dat @@ 
-499,4 +499,4 @@ 29109.7539 8.60350383E-07 2.64745809E-10 29403.5273 8.4323932E-07 2.60027E-10 29700.2656 8.2646801E-07 2.55451521E-10 - 30000. 8.1002986E-07 2.51013349E-10 \ No newline at end of file + 30000. 8.1002986E-07 2.51013349E-10 diff --git a/chxtools/X-ray_database/n_dna.dat b/chxtools/X-ray_database/n_dna.dat index c6ea395..915f17d 100644 --- a/chxtools/X-ray_database/n_dna.dat +++ b/chxtools/X-ray_database/n_dna.dat @@ -499,4 +499,4 @@ 29515.5312 4.18539742E-07 2.31885511E-10 29676.1465 4.14018047E-07 2.28375624E-10 29837.6348 4.09545294E-07 2.24935751E-10 - 30000.002 4.05120772E-07 2.21564864E-10 \ No newline at end of file + 30000.002 4.05120772E-07 2.21564864E-10 diff --git a/chxtools/X-ray_database/n_ice.dat b/chxtools/X-ray_database/n_ice.dat index 6d2f9a7..d1b7e14 100644 --- a/chxtools/X-ray_database/n_ice.dat +++ b/chxtools/X-ray_database/n_ice.dat @@ -499,4 +499,4 @@ 29515.5312 2.48719971E-07 1.04010363E-10 29676.1465 2.46033466E-07 1.02663503E-10 29837.6348 2.43376007E-07 1.01342129E-10 - 30000.002 2.40747255E-07 1.00045833E-10 \ No newline at end of file + 30000.002 2.40747255E-07 1.00045833E-10 diff --git a/chxtools/X-ray_database/n_kapton.dat b/chxtools/X-ray_database/n_kapton.dat index 943086e..00aa859 100644 --- a/chxtools/X-ray_database/n_kapton.dat +++ b/chxtools/X-ray_database/n_kapton.dat @@ -499,4 +499,4 @@ 29515.5312 3.49399869E-07 1.19084395E-10 29676.1465 3.45626404E-07 1.17802504E-10 29837.6348 3.41893781E-07 1.16542928E-10 - 30000.002 3.38201431E-07 1.15305286E-10 \ No newline at end of file + 30000.002 3.38201431E-07 1.15305286E-10 diff --git a/chxtools/X-ray_database/n_lipid.dat b/chxtools/X-ray_database/n_lipid.dat index 601a13b..3b0538c 100644 --- a/chxtools/X-ray_database/n_lipid.dat +++ b/chxtools/X-ray_database/n_lipid.dat @@ -499,4 +499,4 @@ 29515.5312 2.65408033E-07 8.54330703E-11 29676.1465 2.62541903E-07 8.45814321E-11 29837.6348 2.59706752E-07 8.3743755E-11 - 30000.002 2.56902212E-07 8.29198307E-11 \ No newline at end 
of file + 30000.002 2.56902212E-07 8.29198307E-11 diff --git a/chxtools/X-ray_database/n_mica.dat b/chxtools/X-ray_database/n_mica.dat index 4cfe157..e72c611 100644 --- a/chxtools/X-ray_database/n_mica.dat +++ b/chxtools/X-ray_database/n_mica.dat @@ -499,4 +499,4 @@ 29515.5312 7.5594528E-07 1.0187694E-09 29676.1465 7.47770741E-07 9.99192618E-10 29837.6348 7.39684822E-07 9.80032056E-10 - 30000.002 7.31686157E-07 9.6128161E-10 \ No newline at end of file + 30000.002 7.31686157E-07 9.6128161E-10 diff --git a/chxtools/X-ray_database/n_nucleosome.dat b/chxtools/X-ray_database/n_nucleosome.dat index 2a3d814..f3ef69d 100644 --- a/chxtools/X-ray_database/n_nucleosome.dat +++ b/chxtools/X-ray_database/n_nucleosome.dat @@ -499,4 +499,4 @@ 29515.5312 3.74230325E-07 1.83536644E-10 29676.1465 3.70187848E-07 1.80928966E-10 29837.6348 3.66189056E-07 1.78372123E-10 - 30000.002 3.62233436E-07 1.75865406E-10 \ No newline at end of file + 30000.002 3.62233436E-07 1.75865406E-10 diff --git a/chxtools/X-ray_database/n_protein.dat b/chxtools/X-ray_database/n_protein.dat index 27a4b29..bde82b2 100644 --- a/chxtools/X-ray_database/n_protein.dat +++ b/chxtools/X-ray_database/n_protein.dat @@ -499,4 +499,4 @@ 29515.5312 3.42775508E-07 1.37699144E-10 29676.1465 3.39073381E-07 1.3598804E-10 29837.6348 3.35411357E-07 1.34308564E-10 - 30000.002 3.3178884E-07 1.3266023E-10 \ No newline at end of file + 30000.002 3.3178884E-07 1.3266023E-10 diff --git a/chxtools/X-ray_database/rc_Ge111.dat b/chxtools/X-ray_database/rc_Ge111.dat index 0b5ba60..6ba4516 100644 --- a/chxtools/X-ray_database/rc_Ge111.dat +++ b/chxtools/X-ray_database/rc_Ge111.dat @@ -13,4 +13,4 @@ 20000 33.4 22000 30.5 24000 28.0 -27000 24.9 \ No newline at end of file +27000 24.9 diff --git a/chxtools/X-ray_database/rc_Ge220.dat b/chxtools/X-ray_database/rc_Ge220.dat index e9f4eba..2f0b6e0 100644 --- a/chxtools/X-ray_database/rc_Ge220.dat +++ b/chxtools/X-ray_database/rc_Ge220.dat @@ -15,4 +15,4 @@ 20000 25 22000 22.9 24000 21.1 
-27000 18.8 \ No newline at end of file +27000 18.8 diff --git a/chxtools/X-ray_database/rc_Si111.dat b/chxtools/X-ray_database/rc_Si111.dat index 15326de..f9ee8fa 100644 --- a/chxtools/X-ray_database/rc_Si111.dat +++ b/chxtools/X-ray_database/rc_Si111.dat @@ -14,4 +14,4 @@ 20000 14.2 22000 12.9 24000 11.8 -27000 10.5 \ No newline at end of file +27000 10.5 diff --git a/chxtools/X-ray_database/rc_Si220.dat b/chxtools/X-ray_database/rc_Si220.dat index 3b537cd..0866f77 100644 --- a/chxtools/X-ray_database/rc_Si220.dat +++ b/chxtools/X-ray_database/rc_Si220.dat @@ -15,4 +15,4 @@ 20000 10.5 22000 9.4 24000 8.6 -27000 7.6 \ No newline at end of file +27000 7.6 diff --git a/chxtools/__init__.py b/chxtools/__init__.py index 868b115..63d7cb7 100644 --- a/chxtools/__init__.py +++ b/chxtools/__init__.py @@ -1,5 +1,6 @@ -__author__ = 'edill' +__author__ = "edill" from ._version import get_versions -__version__ = get_versions()['version'] + +__version__ = get_versions()["version"] del get_versions diff --git a/chxtools/_version.py b/chxtools/_version.py index c94424a..519f9f1 100644 --- a/chxtools/_version.py +++ b/chxtools/_version.py @@ -1,4 +1,3 @@ - # This file helps to compute a version number in source trees obtained from # git-archive tarball (such as those provided by githubs download-from-tag # feature). 
Distribution tarballs (built by setup.py sdist) and build @@ -57,6 +56,7 @@ def decorate(f): HANDLERS[vcs] = {} HANDLERS[vcs][method] = f return f + return decorate @@ -67,9 +67,12 @@ def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False): try: dispcmd = str([c] + args) # remember shell=False, so use git.cmd on windows, not just git - p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE, - stderr=(subprocess.PIPE if hide_stderr - else None)) + p = subprocess.Popen( + [c] + args, + cwd=cwd, + stdout=subprocess.PIPE, + stderr=(subprocess.PIPE if hide_stderr else None), + ) break except EnvironmentError: e = sys.exc_info()[1] @@ -99,12 +102,17 @@ def versions_from_parentdir(parentdir_prefix, root, verbose): dirname = os.path.basename(root) if not dirname.startswith(parentdir_prefix): if verbose: - print("guessing rootdir is '%s', but '%s' doesn't start with " - "prefix '%s'" % (root, dirname, parentdir_prefix)) + print( + "guessing rootdir is '%s', but '%s' doesn't start with " + "prefix '%s'" % (root, dirname, parentdir_prefix) + ) raise NotThisMethod("rootdir doesn't start with parentdir_prefix") - return {"version": dirname[len(parentdir_prefix):], - "full-revisionid": None, - "dirty": False, "error": None} + return { + "version": dirname[len(parentdir_prefix) :], + "full-revisionid": None, + "dirty": False, + "error": None, + } @register_vcs_handler("git", "get_keywords") @@ -144,7 +152,7 @@ def git_versions_from_keywords(keywords, tag_prefix, verbose): # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of # just "foo-1.0". If we see a "tag: " prefix, prefer those. TAG = "tag: " - tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)]) + tags = set([r[len(TAG) :] for r in refs if r.startswith(TAG)]) if not tags: # Either we're using git < 1.8.3, or there really are no tags. We use # a heuristic: assume all version tags have a digit. 
The old git %d @@ -153,27 +161,32 @@ def git_versions_from_keywords(keywords, tag_prefix, verbose): # between branches and tags. By ignoring refnames without digits, we # filter out many common branch names like "release" and # "stabilization", as well as "HEAD" and "master". - tags = set([r for r in refs if re.search(r'\d', r)]) + tags = set([r for r in refs if re.search(r"\d", r)]) if verbose: - print("discarding '%s', no digits" % ",".join(refs-tags)) + print("discarding '%s', no digits" % ",".join(refs - tags)) if verbose: print("likely tags: %s" % ",".join(sorted(tags))) for ref in sorted(tags): # sorting will prefer e.g. "2.0" over "2.0rc1" if ref.startswith(tag_prefix): - r = ref[len(tag_prefix):] + r = ref[len(tag_prefix) :] if verbose: print("picking %s" % r) - return {"version": r, - "full-revisionid": keywords["full"].strip(), - "dirty": False, "error": None - } + return { + "version": r, + "full-revisionid": keywords["full"].strip(), + "dirty": False, + "error": None, + } # no suitable tags, so version is "0+unknown", but full hex is still there if verbose: print("no suitable tags, using unknown + full revision id") - return {"version": "0+unknown", - "full-revisionid": keywords["full"].strip(), - "dirty": False, "error": "no suitable tags"} + return { + "version": "0+unknown", + "full-revisionid": keywords["full"].strip(), + "dirty": False, + "error": "no suitable tags", + } @register_vcs_handler("git", "pieces_from_vcs") @@ -193,9 +206,9 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): GITS = ["git.cmd", "git.exe"] # if there is a tag, this yields TAG-NUM-gHEX[-dirty] # if there are no tags, this yields HEX[-dirty] (no NUM) - describe_out = run_command(GITS, ["describe", "--tags", "--dirty", - "--always", "--long"], - cwd=root) + describe_out = run_command( + GITS, ["describe", "--tags", "--dirty", "--always", "--long"], cwd=root + ) # --long was added in git-1.5.5 if describe_out is None: raise NotThisMethod("'git 
describe' failed") @@ -218,17 +231,16 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): dirty = git_describe.endswith("-dirty") pieces["dirty"] = dirty if dirty: - git_describe = git_describe[:git_describe.rindex("-dirty")] + git_describe = git_describe[: git_describe.rindex("-dirty")] # now we have TAG-NUM-gHEX or HEX if "-" in git_describe: # TAG-NUM-gHEX - mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe) + mo = re.search(r"^(.+)-(\d+)-g([0-9a-f]+)$", git_describe) if not mo: - # unparseable. Maybe git-describe is misbehaving? - pieces["error"] = ("unable to parse git-describe output: '%s'" - % describe_out) + # unparsable. Maybe git-describe is misbehaving? + pieces["error"] = "unable to parse git-describe output: '%s'" % describe_out return pieces # tag @@ -237,10 +249,12 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): if verbose: fmt = "tag '%s' doesn't start with prefix '%s'" print(fmt % (full_tag, tag_prefix)) - pieces["error"] = ("tag '%s' doesn't start with prefix '%s'" - % (full_tag, tag_prefix)) + pieces["error"] = "tag '%s' doesn't start with prefix '%s'" % ( + full_tag, + tag_prefix, + ) return pieces - pieces["closest-tag"] = full_tag[len(tag_prefix):] + pieces["closest-tag"] = full_tag[len(tag_prefix) :] # distance: number of commits since tag pieces["distance"] = int(mo.group(2)) @@ -251,8 +265,7 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): else: # HEX: no tags pieces["closest-tag"] = None - count_out = run_command(GITS, ["rev-list", "HEAD", "--count"], - cwd=root) + count_out = run_command(GITS, ["rev-list", "HEAD", "--count"], cwd=root) pieces["distance"] = int(count_out) # total number of commits return pieces @@ -281,8 +294,7 @@ def render_pep440(pieces): rendered += ".dirty" else: # exception #1 - rendered = "0+untagged.%d.g%s" % (pieces["distance"], - pieces["short"]) + rendered = "0+untagged.%d.g%s" % (pieces["distance"], 
pieces["short"]) if pieces["dirty"]: rendered += ".dirty" return rendered @@ -389,10 +401,12 @@ def render_git_describe_long(pieces): def render(pieces, style): if pieces["error"]: - return {"version": "unknown", - "full-revisionid": pieces.get("long"), - "dirty": None, - "error": pieces["error"]} + return { + "version": "unknown", + "full-revisionid": pieces.get("long"), + "dirty": None, + "error": pieces["error"], + } if not style or style == "default": style = "pep440" # the default @@ -412,8 +426,12 @@ def render(pieces, style): else: raise ValueError("unknown style '%s'" % style) - return {"version": rendered, "full-revisionid": pieces["long"], - "dirty": pieces["dirty"], "error": None} + return { + "version": rendered, + "full-revisionid": pieces["long"], + "dirty": pieces["dirty"], + "error": None, + } def get_versions(): @@ -426,8 +444,7 @@ def get_versions(): verbose = cfg.verbose try: - return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, - verbose) + return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, verbose) except NotThisMethod: pass @@ -436,12 +453,15 @@ def get_versions(): # versionfile_source is the relative path from the top of the source # tree (where the .git directory might live) to this file. Invert # this to find the root from __file__. 
- for i in cfg.versionfile_source.split('/'): + for i in cfg.versionfile_source.split("/"): root = os.path.dirname(root) except NameError: - return {"version": "0+unknown", "full-revisionid": None, - "dirty": None, - "error": "unable to find root of source tree"} + return { + "version": "0+unknown", + "full-revisionid": None, + "dirty": None, + "error": "unable to find root of source tree", + } try: pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose) @@ -455,6 +475,9 @@ def get_versions(): except NotThisMethod: pass - return {"version": "0+unknown", "full-revisionid": None, - "dirty": None, - "error": "unable to compute version"} + return { + "version": "0+unknown", + "full-revisionid": None, + "dirty": None, + "error": "unable to compute version", + } diff --git a/chxtools/attfuncs.py b/chxtools/attfuncs.py index 97ed466..1b31ad3 100644 --- a/chxtools/attfuncs.py +++ b/chxtools/attfuncs.py @@ -6,6 +6,7 @@ set optimal transmission -> set_T by LW 12/08/2015 """ + import numpy as np from . 
import xfuncs as xf @@ -16,18 +17,17 @@ # XF:11IDB-BI{Attn:03}Cmd:Out-Cmd # XF:11IDB-BI{Attn:03}Pos-Sts # caget ('XF:11IDB-BI{Attn:03}Pos-Sts') #to get the status -# caput ('XF:11IDB-BI{Attn:03}Cmd:Out-Cmd',1) #take out +# caput ('XF:11IDB-BI{Attn:03}Cmd:Out-Cmd',1) #take out # caput ('XF:11IDB-BI{Attn:03}Cmd:In-Cmd',1) #put in - #### -#Wayne help create two pv +# Wayne help create two pv # for set value XF:11IDB-BI{Attn}Val:Trans-SP # for display value XF:11IDB-BI{Attn}Val:Trans-I -#caput ('XF:11IDB-BI{Attn:03}Cmd:Out-Cmd',1) +# caput ('XF:11IDB-BI{Attn:03}Cmd:Out-Cmd',1) def att_setup(): @@ -41,16 +41,17 @@ def att_setup(): # slot9 slot8 slot7 slot6 slot5 slot4 slot3 slot2 slot1 <------ beam direction # Si Si Si Si Si Si Si Cu Cu absorber material # 25um 50um 100um 200um 400um 800um 1600um 120um 220um absorber thickness - Cu_th=[220,120];Si_th=[1600,800,400,200,100,50,25] - att_mat=['Cu','Cu','Si','Si','Si','Si','Si','Si','Si'] - return [att_mat,Cu_th,Si_th] + Cu_th = [220, 120] + Si_th = [1600, 800, 400, 200, 100, 50, 25] + att_mat = ["Cu", "Cu", "Si", "Si", "Si", "Si", "Si", "Si", "Si"] + return [att_mat, Cu_th, Si_th] -def calc_T(T,E='auto', foil_mode='Si'): +def calc_T(T, E="auto", foil_mode="Si"): """ - calc_T(T,E='auto',foil_mode='Si'): funtion to calculate CHX attenuator settings for a commanded transmission - required arument: - T: commanded tranmission (e.g. 1e-3) + calc_T(T,E='auto',foil_mode='Si'): function to calculate CHX attenuator settings for a commanded transmission + required argument: + T: commanded transmission (e.g. 
1e-3) optional arguments: E='auto': get beamline energy from PV (currently: DCM) | E=7894: manual overwrite of energy parameter foil_mode='Si'|'mix': 'Si': calculating optimal configuration using only Si wafers (for coherence applications) | 'mix': using Si wafers and metal foils @@ -61,104 +62,125 @@ def calc_T(T,E='auto', foil_mode='Si'): dependencies: imports numpy and xfuncs calls att_setup() to get physical configuration of attenuator system by LW 12/09/2015 - """ - T=np.array(T) - if E is 'auto': - #E=8000 # temporary: don't have channel access -> set E to 8000eV - E=caget('XF:11IDA-OP{Mono:DCM-Ax:Energy}Mtr.RBV') ### get energy automatically with channel access - print ('getting energy from global PV: E=',E,'eV' ) - if E> 30000 or E< 2000: - raise attfuncs_Exception("error: Input argument E has to be 2000 set E to 8000eV + E = caget( + "XF:11IDA-OP{Mono:DCM-Ax:Energy}Mtr.RBV" + ) ### get energy automatically with channel access + print("getting energy from global PV: E=", E, "eV") + if E > 30000 or E < 2000: + raise attfuncs_Exception( + "error: Input argument E has to be 2000=2000: - E=np.array(E) - print ('manual input: E= ',E,'eV') + if E <= 30000 and E >= 2000: + E = np.array(E) + print("manual input: E= ", E, "eV") else: - raise attfuncs_Exception("error: could not convert energy argument. Input argument E has to be 2000 only Si wafers will be used for attenuation') - elif foil_mode is 'mix': - print ('selected foil mode is "mix" -> both Si wafers and metal foils will be used for attenuation') + raise attfuncs_Exception( + "error: could not convert energy argument. 
Input argument E has to be 2000 only Si wafers will be used for attenuation' + ) + elif foil_mode == "mix": + print( + 'selected foil mode is "mix" -> both Si wafers and metal foils will be used for attenuation' + ) else: - raise attfuncs_Exception("error: foil_mode has to be either 'Si' or 'mix'") - + raise attfuncs_Exception("error: foil_mode has to be either 'Si' or 'mix'") + # attenuator setup - att_conf=att_setup() - Si_th=np.array(att_conf[2]) - Cu_th=np.array(att_conf[1]) - abs_mat=att_conf[0] - - if foil_mode is 'Si': - Cu_th=Cu_th*0 - + att_conf = att_setup() + Si_th = np.array(att_conf[2]) + Cu_th = np.array(att_conf[1]) + abs_mat = att_conf[0] + + if foil_mode == "Si": + Cu_th = Cu_th * 0 + # calculate all available absorption possibilities - abs_th=np.append(Cu_th,Si_th) - f=str('{:0'+str(len(abs_th))+'b}') - a=np.zeros(len(abs_th)) - T_tot=np.zeros(2**len(abs_th)) - sT=np.zeros(len(abs_th)) + abs_th = np.append(Cu_th, Si_th) + f = str("{:0" + str(len(abs_th)) + "b}") + a = np.zeros(len(abs_th)) + T_tot = np.zeros(2 ** len(abs_th)) + sT = np.zeros(len(abs_th)) for m in range(0, len(abs_th)): - sT[m]=xf.get_T(abs_mat[m],E/1000,abs_th[m]) - for l in range(0, 2**len(abs_th)): - k=f.format(l) - a=np.zeros(len(abs_th)) + sT[m] = xf.get_T(abs_mat[m], E / 1000, abs_th[m]) + for l in range(0, 2 ** len(abs_th)): + k = f.format(l) + a = np.zeros(len(abs_th)) for h in range(0, len(abs_th)): - a[h]=int(k[h]) - x=sT*a;x[x==0]=1 - T_tot[l]=np.product(x) - - # determine best attenuator configuration - diff=np.abs(T_tot-T); - best_T=T_tot[np.argmin(diff)] - k=f.format(np.argmin(diff)) + a[h] = int(k[h]) + x = sT * a + x[x == 0] = 1 + T_tot[l] = np.product(x) + + # determine best attenuator configuration + diff = np.abs(T_tot - T) + best_T = T_tot[np.argmin(diff)] + k = f.format(np.argmin(diff)) for m in range(0, len(abs_th)): - a[m]=int(k[m]) - Si_ind=a[len(Cu_th):len(a)] - Cu_ind=a[0:len(Cu_th)] - - #print ('*'*40) - #print (T, best_T) - #print ('*'*40) - - - caput 
('XF:11IDB-BI{Attn}Val:Trans-SP', T) #set point - caput ('XF:11IDB-BI{Attn}Val:Trans-I', best_T) #the best available - + a[m] = int(k[m]) + Si_ind = a[len(Cu_th) : len(a)] + Cu_ind = a[0 : len(Cu_th)] + + # print ('*'*40) + # print (T, best_T) + # print ('*'*40) + + caput("XF:11IDB-BI{Attn}Val:Trans-SP", T) # set point + caput("XF:11IDB-BI{Attn}Val:Trans-I", best_T) # the best available + # some output and return value - print ('requested transmission: ','{:.2e}'.format(float(T)),' closest match: ','{:.2e}'.format(best_T),' difference: ','{:.2e}'.format(best_T-T)) - print ('Si wafer configuration: ',Si_th*Si_ind,' Cu foil configuration: ',Cu_th*Cu_ind) - - return {'tot_index':a,'Si_index':Si_ind,'Cu_index':Cu_ind} + print( + "requested transmission: ", + "{:.2e}".format(float(T)), + " closest match: ", + "{:.2e}".format(best_T), + " difference: ", + "{:.2e}".format(best_T - T), + ) + print( + "Si wafer configuration: ", + Si_th * Si_ind, + " Cu foil configuration: ", + Cu_th * Cu_ind, + ) + + return {"tot_index": a, "Si_index": Si_ind, "Cu_index": Cu_ind} + -def set_T(T,E='auto',foil_mode='Si'): +def set_T(T, E="auto", foil_mode="Si"): """ call: set_T(T,E='auto',foil_mode='Si') function calls calc_T(T,E='auto',foil_mode='Si') - required arument: - T: commanded tranmission (e.g. 1e-3) + required argument: + T: commanded transmission (e.g. 
1e-3) optional arguments: E='auto': get beamline energy from PV (currently: DCM) | E=7894: manual overwrite of energy parameter foil_mode='Si'|'mix': 'Si': calculating optimal configuration using only Si wafers (for coherence applications) | 'mix': using Si wafers and metal foils function sets attenuator configuration via channel access according to calculation by calc_T() - function reads back configuration and cross-checks that attenuator configuration was successfully set + function reads back configuration and cross-checks that attenuator configuration was successfully set by LW 12/10/2015 """ - setting=calc_T(T,E,foil_mode) - + setting = calc_T(T, E, foil_mode) + # setting the attenuators according to calculation - for m in range(0,len(setting['tot_index'])): - if int(setting['tot_index'][m]): - caput ('XF:11IDB-BI{Attn:%02d}Cmd:In-Cmd'%(m+1),1) #put in - print ('the %s--attenuator is put in.'%(m+1)) + for m in range(0, len(setting["tot_index"])): + if int(setting["tot_index"][m]): + caput("XF:11IDB-BI{Attn:%02d}Cmd:In-Cmd" % (m + 1), 1) # put in + print("the %s--attenuator is put in." % (m + 1)) else: - caput ('XF:11IDB-BI{Attn:%02d}Cmd:Out-Cmd'%(m+1),1) #take out - print ('the %s--attenuator is taken out.'%(m+1)) - + caput("XF:11IDB-BI{Attn:%02d}Cmd:Out-Cmd" % (m + 1), 1) # take out + print("the %s--attenuator is taken out." % (m + 1)) - -#just printing, adjust PV name, remove 'print' and remove quotes for activating channel access command +# just printing, adjust PV name, remove 'print' and remove quotes for activating channel access command # # cross check that attenuators were set correctly #### Uncomment when PVs are available # check_ind=np.zeros(len(setting['tot_index'])) # for l in range(0,len(setting['tot_index'])): @@ -166,9 +188,9 @@ def set_T(T,E='auto',foil_mode='Si'): ## if np.array_equal(np.array(setting['tot_index']),np.array(check_ind))==True: # print 'check: attenuators successfully set!' 
# else: raise attfuncs_Exception("ERROR: something went wrong when setting the attenuators") -## NEED to set foil_mode, E_mode and E in CSS screen -> add when screen and PVs are available -# -def get_T(E='auto'): +## NEED to set foil_mode, E_mode and E in CSS screen -> add when screen and PVs are available +# +def get_T(E="auto"): """ get transmission from current CHX attenuator settings call: get_T(E='auto') @@ -180,46 +202,59 @@ def get_T(E='auto'): by LW 12/10/2015 """ # attenuator setup - att_conf=att_setup() - Si_th=np.array(att_conf[2]) - Cu_th=np.array(att_conf[1]) - abs_mat=att_conf[0] - if E is 'auto': - #E=8000 # temporary: don't have channel access -> set E to 8000eV - E=caget('XF:11IDA-OP{Mono:DCM-Ax:Energy}Mtr.RBV') ### get energy automatically with channel access - print ('getting energy from global PV: E=',E,'eV (currently not implemented in test version (no channel access) -> 8000eV default)') # future: add PV name for house keeping - if E> 30000 or E< 2000: - raise attfuncs_Exception("error: Input argument E has to be 2000 set E to 8000eV + E = caget( + "XF:11IDA-OP{Mono:DCM-Ax:Energy}Mtr.RBV" + ) ### get energy automatically with channel access + print( + "getting energy from global PV: E=", + E, + "eV (currently not implemented in test version (no channel access) -> 8000eV default)", + ) # future: add PV name for house keeping + if E > 30000 or E < 2000: + raise attfuncs_Exception( + "error: Input argument E has to be 2000=2000: - E=np.array(E) - print ('manual input: E= ',E,'eV') + if E <= 30000 and E >= 2000: + E = np.array(E) + print("manual input: E= ", E, "eV") else: - raise attfuncs_Exception("error: could not convert energy argument. 
Input argument E has to be 2000 30000 or E< 2000: - raise attfuncs_Exception("error: Input argument E has to be 2000 30000 or E < 2000: + raise attfuncs_Exception( + "error: Input argument E has to be 2000=2000: - E=np.array(E) + if E <= 30000 and E >= 2000: + E = np.array(E) else: - raise attfuncs_Exception("error: could not convert energy argument. Input argument E has to be 2000 0: - ava_T=xf.get_T('Si',E/1E3,Si_th) - ratio_T=ava_T/T + ava_T = xf.get_T("Si", E / 1e3, Si_th) + ratio_T = ava_T / T for i in range(len(ratio_T)): - if ratio_T[i] <1: - ratio_T[i]=1/ratio_T[i] - ind=np.argmin(abs(ratio_T-1)) - bestT=ava_T[ind] - th_Si=Si_th[ind] - pos_att=att_pos[ind] - if T==1: - th_Si=0;pos_att=0;bestT=1 + if ratio_T[i] < 1: + ratio_T[i] = 1 / ratio_T[i] + ind = np.argmin(abs(ratio_T - 1)) + bestT = ava_T[ind] + th_Si = Si_th[ind] + pos_att = att_pos[ind] + if T == 1: + th_Si = 0 + pos_att = 0 + bestT = 1 elif T == 0: - th_Si=max(Si_th);pos_att=max(np.array(att_pos));bestT=xf.get_T('Si',E/1E3,th_Si) - print('calculations for Si absorbers in monitor chamber:') - print('requested transmission: '+str(T)+' at '+str(E/1E3)+'keV' ) - print('best match: '+str(bestT)+' using '+str(th_Si)+'um of Si -> T_request/T_available= '+str(T/bestT)) - return [pos_att,bestT] + th_Si = max(Si_th) + pos_att = max(np.array(att_pos)) + bestT = xf.get_T("Si", E / 1e3, th_Si) + print("calculations for Si absorbers in monitor chamber:") + print("requested transmission: " + str(T) + " at " + str(E / 1e3) + "keV") + print( + "best match: " + + str(bestT) + + " using " + + str(th_Si) + + "um of Si -> T_request/T_available= " + + str(T / bestT) + ) + return [pos_att, bestT] elif T == 0: - ava_T=xf.get_T('W',E/1E3,W_th) - pos_att=-3.3 # assume edge at -21.8 - print('calculations for absorbers in monitor chamber:') - print('requested transmission: '+str(T)+' at '+str(E/1E3)+'keV' ) - print('best match: '+str(ava_T)+' using '+str(W_th)+'um of W -> Tungsten edge') - return [pos_att,ava_T] - + ava_T = 
xf.get_T("W", E / 1e3, W_th) + pos_att = -3.3 # assume edge at -21.8 + print("calculations for absorbers in monitor chamber:") + print("requested transmission: " + str(T) + " at " + str(E / 1e3) + "keV") + print( + "best match: " + + str(ava_T) + + " using " + + str(W_th) + + "um of W -> Tungsten edge" + ) + return [pos_att, ava_T] + -def get_T(E='auto'): +def get_T(E="auto"): """ call: get_T(E='auto') function calls att_setup() required arguments: none optional argument: E='auto': get beamline energy from PV (currently: DCM) | E=7894: manual overwrite of energy parameter function reads back configuration of Si absorbers in monitor chamber and returns transmission value - pre-requisit: center position of 'empty' slot is correct + pre-requisite: center position of 'empty' slot is correct by LW 09/25/2017 """ - if E is 'auto': - E=caget('XF:11IDA-OP{Mono:DCM-Ax:Energy}Mtr.RBV') ### get energy automatically with channel access - print('getting energy from global PV: E='+str(E)+'eV') - if E> 30000 or E< 2000: - raise attfuncs_Exception("error: Input argument E has to be 2000 30000 or E < 2000: + raise attfuncs_Exception( + "error: Input argument E has to be 2000=2000: - E=np.array(E) + if E <= 30000 and E >= 2000: + E = np.array(E) else: - raise attfuncs_Exception("error: could not convert energy argument. 
Input argument E has to be 2000=empty_pos+min(att_pos)-tol and current_pos <= empty_pos+max(att_pos)+tol: - ind=np.argmin(abs(empty_pos+att_pos-current_pos)) - if abs(empty_pos-current_pos+att_pos[ind]) <= tol: - currentT=xf.get_T('Si',E/1E3,Si_th[ind]) - print('current transmission at '+str(E/1E3)+'keV: '+str(currentT)+' ('+str(Si_th[ind])+'um Si)') + att_conf = att_setup() + W_th = np.array(att_conf[2]) + Si_th = np.array(att_conf[1]) + att_pos = np.array(att_conf[0]) + if ( + current_pos >= empty_pos + min(att_pos) - tol + and current_pos <= empty_pos + max(att_pos) + tol + ): + ind = np.argmin(abs(empty_pos + att_pos - current_pos)) + if abs(empty_pos - current_pos + att_pos[ind]) <= tol: + currentT = xf.get_T("Si", E / 1e3, Si_th[ind]) + print( + "current transmission at " + + str(E / 1e3) + + "keV: " + + str(currentT) + + " (" + + str(Si_th[ind]) + + "um Si)" + ) else: - print('Neither empty slot nor any Si absorber is aligned with the beam...transmission unknown') - currentT=float('nan') - elif abs(current_pos-empty_pos) <= tol2: - currentT=1 - print('empty slot is in the beam, transmission T=1') - elif abs(current_pos-W_pos) <= tol: - currentT=xf.get_T('W',E/1E3,W_th) - print('Tungsten edge is in the beam, transmission T= '+str(currentT)) + print( + "Neither empty slot nor any Si absorber is aligned with the beam...transmission unknown" + ) + currentT = float("nan") + elif abs(current_pos - empty_pos) <= tol2: + currentT = 1 + print("empty slot is in the beam, transmission T=1") + elif abs(current_pos - W_pos) <= tol: + currentT = xf.get_T("W", E / 1e3, W_th) + print("Tungsten edge is in the beam, transmission T= " + str(currentT)) else: - print('Neither empty slot nor any Si absorber is aligned with the beam...transmission unknown') - currentT=float('nan') + print( + "Neither empty slot nor any Si absorber is aligned with the beam...transmission unknown" + ) + currentT = float("nan") return currentT + def set_T(T): """ call: set_T(T) function calls 
calc_T(T) - required arument: - T: commanded tranmission (e.g. 1e-3) + required argument: + T: commanded transmission (e.g. 1e-3) optional arguments: E='auto': get beamline energy from PV (currently: DCM) | E=7894: manual overwrite of energy parameter function moves Si absorbers in monitor chamber according to calculation by calc_T() function reads back configuration and cross-checks that absorber positions where successfully reached by LW 09/25/2017 """ - E=caget('XF:11IDA-OP{Mono:DCM-Ax:Energy}Mtr.RBV') ### get energy automatically with channel access - tol=0.5 - empty_pos=-18.5 # assume that center of open slot is always set to -18.5 - [pos_att,bestT]=calc_T(T,E) - target_pos=empty_pos+pos_att - print('moving foil_x to target_pos') - caput('XF:11IDB-OP{Mon:Foil-Ax:X}Mtr.VAL',target_pos,wait=True) - if abs(caget('XF:11IDB-OP{Mon:Foil-Ax:X}Mtr.RBV')-target_pos) <= tol: - print('absorbers set successfully!') - else: raise attfuncs_Exception("something went wrong...position for requested absorber not reached...") + E = caget( + "XF:11IDA-OP{Mono:DCM-Ax:Energy}Mtr.RBV" + ) ### get energy automatically with channel access + tol = 0.5 + empty_pos = -18.5 # assume that center of open slot is always set to -18.5 + [pos_att, bestT] = calc_T(T, E) + target_pos = empty_pos + pos_att + print("moving foil_x to target_pos") + caput("XF:11IDB-OP{Mon:Foil-Ax:X}Mtr.VAL", target_pos, wait=True) + if abs(caget("XF:11IDB-OP{Mon:Foil-Ax:X}Mtr.RBV") - target_pos) <= tol: + print("absorbers set successfully!") + else: + raise attfuncs_Exception( + "something went wrong...position for requested absorber not reached..." 
+ ) + def set_abs(abs_num=3): """ @@ -167,32 +220,47 @@ def set_abs(abs_num=3): function returns current transmission value by LW 09/25/2017 """ - tol=0.5 - empty_pos=-18.5 + tol = 0.5 + empty_pos = -18.5 # attenuator setup - att_conf=att_setup() - Si_th=np.array(att_conf[1]) - att_pos=np.array(att_conf[0]) - if abs_num >=0 and abs_num <=9 and isinstance(abs_num,int): - print('moving absorber #'+str(abs_num)+' in monitor chamber into the beam') + att_conf = att_setup() + Si_th = np.array(att_conf[1]) + att_pos = np.array(att_conf[0]) + if abs_num >= 0 and abs_num <= 9 and isinstance(abs_num, int): + print("moving absorber #" + str(abs_num) + " in monitor chamber into the beam") if abs_num == 0: - target_pos=empty_pos - E=caget('XF:11IDA-OP{Mono:DCM-Ax:Energy}Mtr.RBV') - curr_T=1 - curr_Sith=0 + target_pos = empty_pos + E = caget("XF:11IDA-OP{Mono:DCM-Ax:Energy}Mtr.RBV") + curr_T = 1 + curr_Sith = 0 else: - if abs_num >0: - target_pos=empty_pos+att_pos[abs_num-1] - E=caget('XF:11IDA-OP{Mono:DCM-Ax:Energy}Mtr.RBV') ### get energy automatically with channel access - curr_T=xf.get_T('Si',E/1E3,Si_th[abs_num-1]) - curr_Sith=Si_th[abs_num-1] - caput('XF:11IDB-OP{Mon:Foil-Ax:X}Mtr.VAL',target_pos,wait=True) - if abs(caget('XF:11IDB-OP{Mon:Foil-Ax:X}Mtr.VAL')-target_pos) <= tol: - print('absorbers in monitor chamber set successfully!') - print('transmission is '+str(curr_T)+' with '+str(curr_Sith)+'um Si at '+str(E/1E3)+'keV') - else: raise attfuncs_Exception("something went wrong...position for requested absorber not reached...") - else: raise attfuncs_Exception("error: absorber number needs to be an integer between 0 [empty slot] and 9 [9x200um Si]") - + if abs_num > 0: + target_pos = empty_pos + att_pos[abs_num - 1] + E = caget( + "XF:11IDA-OP{Mono:DCM-Ax:Energy}Mtr.RBV" + ) ### get energy automatically with channel access + curr_T = xf.get_T("Si", E / 1e3, Si_th[abs_num - 1]) + curr_Sith = Si_th[abs_num - 1] + caput("XF:11IDB-OP{Mon:Foil-Ax:X}Mtr.VAL", target_pos, 
wait=True) + if abs(caget("XF:11IDB-OP{Mon:Foil-Ax:X}Mtr.VAL") - target_pos) <= tol: + print("absorbers in monitor chamber set successfully!") + print( + "transmission is " + + str(curr_T) + + " with " + + str(curr_Sith) + + "um Si at " + + str(E / 1e3) + + "keV" + ) + else: + raise attfuncs_Exception( + "something went wrong...position for requested absorber not reached..." + ) + else: + raise attfuncs_Exception( + "error: absorber number needs to be an integer between 0 [empty slot] and 9 [9x200um Si]" + ) class attfuncs_Exception(Exception): diff --git a/chxtools/bpm_stability.py b/chxtools/bpm_stability.py index 6fbca0a..c7a2d46 100644 --- a/chxtools/bpm_stability.py +++ b/chxtools/bpm_stability.py @@ -1,308 +1,318 @@ from epics import caput, caget import numpy as np -import matplotlib as mpl import matplotlib.pyplot as plt -from matplotlib.colors import LogNorm from datetime import datetime import time as ti -pv_ca = 'XF:11IDB-BI{XBPM:02}FA-A' -pv_cb = 'XF:11IDB-BI{XBPM:02}FA-B' -pv_cc = 'XF:11IDB-BI{XBPM:02}FA-C' -pv_cd = 'XF:11IDB-BI{XBPM:02}FA-D' -pv_px = 'XF:11IDB-BI{XBPM:02}FA-X' -pv_py = 'XF:11IDB-BI{XBPM:02}FA-Y' -pv_sumi='XF:11IDB-BI{XBPM:02}FA-S' +pv_ca = "XF:11IDB-BI{XBPM:02}FA-A" +pv_cb = "XF:11IDB-BI{XBPM:02}FA-B" +pv_cc = "XF:11IDB-BI{XBPM:02}FA-C" +pv_cd = "XF:11IDB-BI{XBPM:02}FA-D" +pv_px = "XF:11IDB-BI{XBPM:02}FA-X" +pv_py = "XF:11IDB-BI{XBPM:02}FA-Y" +pv_sumi = "XF:11IDB-BI{XBPM:02}FA-S" meta_pvdict = dict( - -pv_Px = 'XF:11IDB-BI{XBPM:02}Fdbk:AKp-SP', -pv_Ix = 'XF:11IDB-BI{XBPM:02}Fdbk:AKi-SP', -pv_Dx = 'XF:11IDB-BI{XBPM:02}Fdbk:AKd-SP', -pv_fdx= 'XF:11IDB-BI{XBPM:02}Fdbk:AEn-SP', -pv_sklx= 'XF:11IDB-BI{XBPM:02}Fdbk:ACtrlScaleFactor-SP', -pv_Kx = 'XF:11IDB-BI{XBPM:02}Pos:Kx-SP', - -pv_Py = 'XF:11IDB-BI{XBPM:02}Fdbk:BKp-SP', -pv_Iy = 'XF:11IDB-BI{XBPM:02}Fdbk:BKi-SP', -pv_Dy = 'XF:11IDB-BI{XBPM:02}Fdbk:BKd-SP', -pv_fdy= 'XF:11IDB-BI{XBPM:02}Fdbk:BEn-SP', -pv_skly= 'XF:11IDB-BI{XBPM:02}Fdbk:BCtrlScaleFactor-SP', -pv_Ky = 
'XF:11IDB-BI{XBPM:02}Pos:Ky-SP', - -pv_fdHz = 'XF:11IDB-BI{XBPM:02}Fdbk:delT-I', - ) + pv_Px="XF:11IDB-BI{XBPM:02}Fdbk:AKp-SP", + pv_Ix="XF:11IDB-BI{XBPM:02}Fdbk:AKi-SP", + pv_Dx="XF:11IDB-BI{XBPM:02}Fdbk:AKd-SP", + pv_fdx="XF:11IDB-BI{XBPM:02}Fdbk:AEn-SP", + pv_sklx="XF:11IDB-BI{XBPM:02}Fdbk:ACtrlScaleFactor-SP", + pv_Kx="XF:11IDB-BI{XBPM:02}Pos:Kx-SP", + pv_Py="XF:11IDB-BI{XBPM:02}Fdbk:BKp-SP", + pv_It="XF:11IDB-BI{XBPM:02}Fdbk:BKi-SP", + pv_Dy="XF:11IDB-BI{XBPM:02}Fdbk:BKd-SP", + pv_fdy="XF:11IDB-BI{XBPM:02}Fdbk:BEn-SP", + pv_skly="XF:11IDB-BI{XBPM:02}Fdbk:BCtrlScaleFactor-SP", + pv_Ky="XF:11IDB-BI{XBPM:02}Pos:Ky-SP", + pv_fdHz="XF:11IDB-BI{XBPM:02}Fdbk:delT-I", +) #'pv_Px', 'pv_Ix', 'pv_Dx', 'pv_fdx', 'pv_sklx', 'pv_Kx', -#'pv_Py', 'pv_Iy', 'pv_Dy', 'pv_fdy', 'pv_skly', 'pv_Ky', +#'pv_Py', 'pv_It', 'pv_Dy', 'pv_fdy', 'pv_skly', 'pv_Ky', #'pv_fdHz' +pv_num_sam = "XF:11IDB-BI{XBPM:02}Trig:NumSamples-SP" +pv_trig = "XF:11IDB-BI{XBPM:02}FaSoftTrig-SP" +data_path = "/XF11ID/analysis/Commissioning/BPM_Stability/Data/" +res_path = "/XF11ID/analysis/Commissioning/BPM_Stability/Result/" -pv_num_sam = 'XF:11IDB-BI{XBPM:02}Trig:NumSamples-SP' -pv_trig = 'XF:11IDB-BI{XBPM:02}FaSoftTrig-SP' - -data_path = '/XF11ID/analysis/Commissioning/BPM_Stability/Data/' -res_path = '/XF11ID/analysis/Commissioning/BPM_Stability/Result/' +# filename -#filename - - -def get_fft( t,y ): +def get_fft(t, y): import numpy as np - import scipy.fftpack + L = len(t) - tp = np.array(t, dtype = float) - yp=np.array( y, dtype = float) - ts = (t[L-1] - t[0])/float(L) - tm= 2 * L * ts - xs = 1/ tm - ps = np.abs( np.fft.fft( yp ) ) - time_step = ts + tp = np.array(t, dtype=float) + yp = np.array(y, dtype=float) + ts = (t[L - 1] - t[0]) / float(L) + tm = 2 * L * ts + xs = 1 / tm + ps = np.abs(np.fft.fft(yp)) + time_step = ts freqs = np.fft.fftfreq(yp.size, time_step) idx = np.argsort(freqs) - F,Y = freqs[idx][int(L/2+1):], ps[idx][int(L/2+1):] + F, Y = freqs[idx][int(L / 2 + 1) :], ps[idx][int(L / 2 + 
1) :] ## Find the peak in the coefficients - idxm = np.argmax( Y ) + idxm = np.argmax(Y) freq = F[idxm] freq_in_hertz = freq - print ('The maximum frequency is: %s'%(freq_in_hertz))## + print("The maximum frequency is: %s" % (freq_in_hertz)) ## - return F,Y + return F, Y -def plot_current(t, ca,cb,cc,cd,sumi, res_path, filename, metadata ): - fig = plt.figure(figsize=(8,12)) - plt.axis('off') - plt.title(filename + '_' + str( round(1/metadata['pv_fdHz'],1) ) + ' Hz') - #plt.title(filename) - colms = ['A', 'B','C','D', 'Sum'] - for n,i in enumerate( [ ca,cb,cc,cd,sumi ] ): +def plot_current(t, ca, cb, cc, cd, sumi, res_path, filename, metadata): + fig = plt.figure(figsize=(8, 12)) + plt.axis("off") + plt.title(filename + "_" + str(round(1 / metadata["pv_fdHz"], 1)) + " Hz") + # plt.title(filename) + colms = ["A", "B", "C", "D", "Sum"] + for n, i in enumerate([ca, cb, cc, cd, sumi]): y = i - ax = fig.add_subplot( 5,1,n+1 ) - ax.plot(t, y, '--o', label="Col-- "+ str( colms[n] )) + ax = fig.add_subplot(5, 1, n + 1) + ax.plot(t, y, "--o", label="Col-- " + str(colms[n])) ax.set_xlabel("Time, (s)") - ax.set_ylabel(str( colms[n] )) - #ax.set_ylim(0, 6E2) - #ax.set_xlim(0, 1) - ax.legend( loc='best', fontsize = 16) - #try filename = filename.rstrip('.txt') - plt.savefig( res_path + filename + '-current.png') + ax.set_ylabel(str(colms[n])) + # ax.set_ylim(0, 6E2) + # ax.set_xlim(0, 1) + ax.legend(loc="best", fontsize=16) + # try filename = filename.rstrip('.txt') + plt.savefig(res_path + filename + "-current.png") plt.show() +def plot_posxy_fft( + t, posx, posy, res_path, filename, metadata, frequency_range=[0, 500] +): -def plot_posxy_fft(t,posx,posy,res_path, filename, metadata, frequency_range=[0,500] ): - - fig = plt.figure(figsize=(12,10)) - plt.axis('off') - plt.title(filename + '_' + str( round(1/metadata['pv_fdHz'],1) ) + ' Hz') - ax = fig.add_subplot( 221 ) - #ax.set_title(filename ) + fig = plt.figure(figsize=(12, 10)) + plt.axis("off") + plt.title(filename + "_" + 
str(round(1 / metadata["pv_fdHz"], 1)) + " Hz") + ax = fig.add_subplot(221) + # ax.set_title(filename ) y = posx ym = y.mean() - std = (y-ym).std() - ax.plot( t, y - ym, '--o', label="X-" ) - #ax.plot(t, std+ np.zeros_like(t), '--o', label="%s +/-"%round(ym,2) + str( round(std,3) ) + ' um' ) - ax.plot(t, std+ np.zeros_like(t), '--o', label= 'rms-'+ str( round(std,3) ) + ' um' ) - ax.plot(t, -std+ np.zeros_like(t), '--o')#, label="rms-- "+ str( colms[i] )) + std = (y - ym).std() + ax.plot(t, y - ym, "--o", label="X-") + # ax.plot(t, std+ np.zeros_like(t), '--o', label="%s +/-"%round(ym,2) + str( round(std,3) ) + ' um' ) + ax.plot(t, std + np.zeros_like(t), "--o", label="rms-" + str(round(std, 3)) + " um") + ax.plot(t, -std + np.zeros_like(t), "--o") # , label="rms-- "+ str( colms[i] )) ax.set_xlabel("Time, (s)") ax.set_ylabel("X") - ax.legend( loc='best', fontsize = 16) + ax.legend(loc="best", fontsize=16) - ax = fig.add_subplot( 223 ) + ax = fig.add_subplot(223) y = posy - ym=y.mean() - - std = (y-ym).std() - ax.plot(t, y - ym, '--o', label="Y-") - # ax.plot(t, std + np.zeros_like(t), '--o', label="%s +/-"%round(ym,2) + str( round(std,3)) + ' um' ) - ax.plot(t, std + np.zeros_like(t), '--o', label="rms-" + str( round(std,3)) + ' um' ) - ax.plot(t, -std+ np.zeros_like(t), '--o')#, label="rms-- "+ str( colms[i] )) + ym = y.mean() + std = (y - ym).std() + ax.plot(t, y - ym, "--o", label="Y-") + # ax.plot(t, std + np.zeros_like(t), '--o', label="%s +/-"%round(ym,2) + str( round(std,3)) + ' um' ) + ax.plot(t, std + np.zeros_like(t), "--o", label="rms-" + str(round(std, 3)) + " um") + ax.plot(t, -std + np.zeros_like(t), "--o") # , label="rms-- "+ str( colms[i] )) ax.set_xlabel("Time, (s)") ax.set_ylabel("Y") - ax.legend( loc='best', fontsize = 16) - #try filename = filename.rstrip('.txt') + ax.legend(loc="best", fontsize=16) + # try filename = filename.rstrip('.txt') - ax = fig.add_subplot( 222 ) - #ax.set_title(filename ) + ax = fig.add_subplot(222) + # 
ax.set_title(filename ) yt = posx - freq,fft = get_fft( t,yt ) - ax.plot(freq,fft, '--o', label="FFT-posX" ) + freq, fft = get_fft(t, yt) + ax.plot(freq, fft, "--o", label="FFT-posX") ax.set_xlabel("freq, (Hz)") ax.set_ylabel("fft_x") - ax.set_xlim( frequency_range) - ax.legend( loc='best', fontsize = 16) - - pp = 'x' - txt1 = 'P I D: %s %s %s, Scaling: %s'%( metadata['pv_P%s'%pp], metadata['pv_I%s'%pp], metadata['pv_D%s'%pp],metadata['pv_skl%s'%pp] ) - ax.text(x = .4, y= 0.7, s= txt1, fontsize=14, transform=ax.transAxes) - txt2 = 'K%s: %s, Feedback: %s'%(pp, metadata['pv_K%s'%pp], metadata['pv_fd%s'%pp] ) - ax.text(x = .4, y= 0.6, s= txt2, fontsize=14, transform=ax.transAxes) + ax.set_xlim(frequency_range) + ax.legend(loc="best", fontsize=16) + + pp = "x" + txt1 = "P I D: %s %s %s, Scaling: %s" % ( + metadata["pv_P%s" % pp], + metadata["pv_I%s" % pp], + metadata["pv_D%s" % pp], + metadata["pv_skl%s" % pp], + ) + ax.text(x=0.4, y=0.7, s=txt1, fontsize=14, transform=ax.transAxes) + txt2 = "K%s: %s, Feedback: %s" % ( + pp, + metadata["pv_K%s" % pp], + metadata["pv_fd%s" % pp], + ) + ax.text(x=0.4, y=0.6, s=txt2, fontsize=14, transform=ax.transAxes) - ax = fig.add_subplot( 224 ) + ax = fig.add_subplot(224) yt = posy - freq,fft = get_fft( t,yt ) - ax.plot(freq,fft, '--o', label="FFT-PosY") + freq, fft = get_fft(t, yt) + ax.plot(freq, fft, "--o", label="FFT-PosY") ax.set_xlabel("freq, (Hz)") ax.set_ylabel("fft_y") - ax.set_xlim( frequency_range) - ax.legend( loc='best', fontsize = 16) - - pp = 'y' - txt1 = 'P I D: %s %s %s, Scaling: %s'%( metadata['pv_P%s'%pp], metadata['pv_I%s'%pp], metadata['pv_D%s'%pp],metadata['pv_skl%s'%pp] ) - ax.text(x = .4, y= 0.7, s= txt1, fontsize=14, transform=ax.transAxes) - txt2 = 'K%s: %s, Feedback: %s'%(pp, metadata['pv_K%s'%pp], metadata['pv_fd%s'%pp] ) - ax.text(x = .4, y= 0.6, s= txt2, fontsize=14, transform=ax.transAxes) + ax.set_xlim(frequency_range) + ax.legend(loc="best", fontsize=16) + + pp = "y" + txt1 = "P I D: %s %s %s, 
Scaling: %s" % ( + metadata["pv_P%s" % pp], + metadata["pv_I%s" % pp], + metadata["pv_D%s" % pp], + metadata["pv_skl%s" % pp], + ) + ax.text(x=0.4, y=0.7, s=txt1, fontsize=14, transform=ax.transAxes) + txt2 = "K%s: %s, Feedback: %s" % ( + pp, + metadata["pv_K%s" % pp], + metadata["pv_fd%s" % pp], + ) + ax.text(x=0.4, y=0.6, s=txt2, fontsize=14, transform=ax.transAxes) fig.tight_layout() - plt.savefig( res_path + filename + '-fft-time_posX-Y-rms.png') + plt.savefig(res_path + filename + "-fft-time_posX-Y-rms.png") plt.show() -def plot_posxy_rms(t,posx,posy,res_path, filename ): - fig = plt.figure(figsize=(8,6)) - plt.axis('off') - ax = fig.add_subplot( 211 ) - ax.set_title(filename ) +def plot_posxy_rms(t, posx, posy, res_path, filename): + fig = plt.figure(figsize=(8, 6)) + plt.axis("off") + ax = fig.add_subplot(211) + ax.set_title(filename) y = posx ym = y.mean() - std = (y-ym).std() - ax.plot( t, y - ym, '--o', label="X-" ) - #ax.plot(t, std+ np.zeros_like(t), '--o', label="%s +/-"%round(ym,2) + str( round(std,3) ) + ' um' ) - ax.plot(t, std+ np.zeros_like(t), '--o', label= 'rms-'+ str( round(std,3) ) + ' um' ) - ax.plot(t, -std+ np.zeros_like(t), '--o')#, label="rms-- "+ str( colms[i] )) + std = (y - ym).std() + ax.plot(t, y - ym, "--o", label="X-") + # ax.plot(t, std+ np.zeros_like(t), '--o', label="%s +/-"%round(ym,2) + str( round(std,3) ) + ' um' ) + ax.plot(t, std + np.zeros_like(t), "--o", label="rms-" + str(round(std, 3)) + " um") + ax.plot(t, -std + np.zeros_like(t), "--o") # , label="rms-- "+ str( colms[i] )) ax.set_xlabel("Time, (s)") ax.set_ylabel("X") - ax.legend( loc='best', fontsize = 16) + ax.legend(loc="best", fontsize=16) - ax = fig.add_subplot( 212 ) + ax = fig.add_subplot(212) y = posy - ym=y.mean() - - std = (y-ym).std() - ax.plot(t, y - ym, '--o', label="Y-") - # ax.plot(t, std + np.zeros_like(t), '--o', label="%s +/-"%round(ym,2) + str( round(std,3)) + ' um' ) - ax.plot(t, std + np.zeros_like(t), '--o', label="rms-" + str( round(std,3)) + ' 
um' ) - ax.plot(t, -std+ np.zeros_like(t), '--o')#, label="rms-- "+ str( colms[i] )) + ym = y.mean() + std = (y - ym).std() + ax.plot(t, y - ym, "--o", label="Y-") + # ax.plot(t, std + np.zeros_like(t), '--o', label="%s +/-"%round(ym,2) + str( round(std,3)) + ' um' ) + ax.plot(t, std + np.zeros_like(t), "--o", label="rms-" + str(round(std, 3)) + " um") + ax.plot(t, -std + np.zeros_like(t), "--o") # , label="rms-- "+ str( colms[i] )) ax.set_xlabel("Time, (s)") ax.set_ylabel("Y") - - ax.legend( loc='best', fontsize = 16) - #try filename = filename.rstrip('.txt') - plt.savefig( res_path + filename + '-time_posX-Y-rms.png') + ax.legend(loc="best", fontsize=16) + # try filename = filename.rstrip('.txt') + plt.savefig(res_path + filename + "-time_posX-Y-rms.png") plt.show() - -def plot_posxy(t,posx,posy,res_path, filename ): - fig = plt.figure(figsize=(8,6)) - plt.axis('off') - ax = fig.add_subplot( 211 ) - ax.set_title(filename ) +def plot_posxy(t, posx, posy, res_path, filename): + fig = plt.figure(figsize=(8, 6)) + plt.axis("off") + ax = fig.add_subplot(211) + ax.set_title(filename) y = posx - ax.plot(t, y, '--o', label="Pos-X") + ax.plot(t, y, "--o", label="Pos-X") ax.set_xlabel("Time, (s)") ax.set_ylabel("X") - ax.legend( loc='best', fontsize = 16) + ax.legend(loc="best", fontsize=16) - ax = fig.add_subplot( 212 ) + ax = fig.add_subplot(212) y = posy - ax.plot(t, y, '--o', label="Pos-Y") + ax.plot(t, y, "--o", label="Pos-Y") ax.set_xlabel("Time, (s)") ax.set_ylabel("Y") - - ax.legend( loc='best', fontsize = 16) - #try filename = filename.rstrip('.txt') - plt.savefig( res_path + filename + '-time_posX-Y.png') + ax.legend(loc="best", fontsize=16) + # try filename = filename.rstrip('.txt') + plt.savefig(res_path + filename + "-time_posX-Y.png") plt.show() - - - -def plot_fft_posxy(t,posx,posy,res_path, filename,frequency_range=[0,500] ): - fig = plt.figure(figsize=(8,6)) - plt.axis('off') - ax = fig.add_subplot( 211 ) - ax.set_title(filename ) +def plot_fft_posxy(t, 
posx, posy, res_path, filename, frequency_range=[0, 500]): + fig = plt.figure(figsize=(8, 6)) + plt.axis("off") + ax = fig.add_subplot(211) + ax.set_title(filename) yt = posx - freq,fft = get_fft( t,yt ) - ax.plot(freq,fft, '--o', label="FFT-posX" ) + freq, fft = get_fft(t, yt) + ax.plot(freq, fft, "--o", label="FFT-posX") ax.set_xlabel("freq, (Hz)") ax.set_ylabel("fft_x") - ax.set_xlim( frequency_range) - ax.legend( loc='best', fontsize = 16) - ax = fig.add_subplot( 212 ) + ax.set_xlim(frequency_range) + ax.legend(loc="best", fontsize=16) + ax = fig.add_subplot(212) yt = posy - freq,fft = get_fft( t,yt ) - ax.plot(freq,fft, '--o', label="FFT-PosY") + freq, fft = get_fft(t, yt) + ax.plot(freq, fft, "--o", label="FFT-PosY") ax.set_xlabel("freq, (Hz)") ax.set_ylabel("fft_y") - ax.set_xlim( frequency_range) - ax.legend( loc='best', fontsize = 16) - #try filename = filename.rstrip('.txt') - plt.savefig( res_path + filename.rstrip('.txt') + '-fft_posX-Y.png') + ax.set_xlim(frequency_range) + ax.legend(loc="best", fontsize=16) + # try filename = filename.rstrip('.txt') + plt.savefig(res_path + filename.rstrip(".txt") + "-fft_posX-Y.png") plt.show() -def bpm_read( num_sample, filename=None,rate=10, show_trace=False,frequency_range=[0,500],triger=True): - '''rate: the data acq rate in unit K''' - rate = rate*1000./1.018 #10 KHz - dt =datetime.now() - times = '%s%02d%02d-%02d%02d' % (dt.year, dt.month, dt.day,dt.hour,dt.minute) +def bpm_read( + num_sample, + filename=None, + rate=10, + show_trace=False, + frequency_range=[0, 500], + trigger=True, +): + """rate: the data acq rate in unit K""" + rate = rate * 1000.0 / 1.018 # 10 KHz + dt = datetime.now() + times = "%s%02d%02d-%02d%02d" % (dt.year, dt.month, dt.day, dt.hour, dt.minute) if filename is None: filename = times else: - filename = filename + '_' + times + filename = filename + "_" + times - colms = ['A', 'B','C','D', 'X','Y','Sum'] + colms = ["A", "B", "C", "D", "X", "Y", "Sum"] num_sample *= rate metadata = {} for 
k in list(meta_pvdict.keys()): - metadata[k] = caget( meta_pvdict[k] ) + metadata[k] = caget(meta_pvdict[k]) - if num_sample<13E4: - caput( pv_num_sam, num_sample) - if triger: - caput( pv_trig, 0 ) - caput( pv_trig, 1) + if num_sample < 13e4: + caput(pv_num_sam, num_sample) + if trigger: + caput(pv_trig, 0) + caput(pv_trig, 1) else: - caput( pv_trig, 0 ) - ti.sleep(num_sample/rate+3) - # need to wait for the acquisition to finish before pulling data from buffer! + caput(pv_trig, 0) + ti.sleep(num_sample / rate + 3) + # need to wait for the acquisition to finish before pulling data from buffer! for n, pv in enumerate([pv_ca, pv_cb, pv_cc, pv_cd, pv_px, pv_py, pv_sumi]): d = caget(pv) - if n==0: + if n == 0: try: - zero_point = np.where( d ==0 )[0][0] + zero_point = np.where(d == 0)[0][0] except: - zero_point = len( d ) + zero_point = len(d) data = d[:zero_point] else: - data = np.vstack( [ data,d[:zero_point] ] ) + data = np.vstack([data, d[:zero_point]]) - np.savetxt( data_path + filename +'.txt', data) - t = np.arange( 0, zero_point ) * 1/rate + np.savetxt(data_path + filename + ".txt", data) + t = np.arange(0, zero_point) * 1 / rate ca = data[0] cb = data[1] @@ -310,12 +320,20 @@ def bpm_read( num_sample, filename=None,rate=10, show_trace=False,frequency_rang cd = data[3] posx = data[4] posy = data[5] - sumi= data[6] - #plot_posxy(t,posx,posy, save_path, filename ) - #plot_posxy_rms(t,posx,posy,res_path, filename ) - #plot_fft_posxy(t,posx,posy,res_path, filename ) - plot_posxy_fft(t,posx,posy,res_path, filename, metadata = metadata,frequency_range=frequency_range ) + sumi = data[6] + # plot_posxy(t,posx,posy, save_path, filename ) + # plot_posxy_rms(t,posx,posy,res_path, filename ) + # plot_fft_posxy(t,posx,posy,res_path, filename ) + plot_posxy_fft( + t, + posx, + posy, + res_path, + filename, + metadata=metadata, + frequency_range=frequency_range, + ) if show_trace: - plot_current(t, ca,cb,cc,cd, sumi, res_path, filename, metadata = metadata ) + 
plot_current(t, ca, cb, cc, cd, sumi, res_path, filename, metadata=metadata) else: - print ('The sample number is too large,this number should < 13E4!') + print("The sample number is too large,this number should < 13E4!") diff --git a/chxtools/chx_utilities.py b/chxtools/chx_utilities.py index 2a579ee..b2ab95b 100644 --- a/chxtools/chx_utilities.py +++ b/chxtools/chx_utilities.py @@ -2,7 +2,7 @@ """ Created on Wed Mar 25 14:02:59 2015 by LW March 2015 -set of utility functions for beamline alingment and commissioning +set of utility functions for beamline alignment and commissioning v 0.0.1 (this version): might have created a typo in E-calibration!!! added dcm_roll for calculating DCM Roll correction """ @@ -14,13 +14,13 @@ ############ ################## #### -def E_calibration(file,Edge='Cu',xtal='Si111cryo',B_off=0): +def E_calibration(file, Edge="Cu", xtal="Si111cryo", B_off=0): """ by LW 3/25/2015 function to read energy scan file and determine offset correction calling sequence: E_calibration(file,Edge='Cu',xtal='Si111cryo',B_off=0) file: path/filename of experimental data; 'ia' opens interactive dialog - Edge: elment used for calibration + Edge: element used for calibration xtal: monochromator crystal under calibration B_off (optional): apply offset to Bragg angle data currently there is no check on input parameters! 
@@ -30,91 +30,137 @@ def E_calibration(file,Edge='Cu',xtal='Si111cryo',B_off=0): import numpy as np import matplotlib.pyplot as plt import xfuncs as xf - import Tkinter, tkFileDialog + import Tkinter + import tkFileDialog - if file=='ia': # open file dialog + if file == "ia": # open file dialog root = Tkinter.Tk() root.withdraw() file_path = tkFileDialog.askopenfilename() - else: file_path=file + else: + file_path = file plt.close("all") - Bragg=[] - Gap=[] - Intensity=[] - Edge_data={'Cu': 8.979, 'Ti': 4.966} - with open(file_path, 'rb') as csvfile: - filereader = csv.reader(csvfile, delimiter=' ') - filereader.next() # skip header lines + Bragg = [] + Gap = [] + Intensity = [] + Edge_data = {"Cu": 8.979, "Ti": 4.966} + with open(file_path, "rb") as csvfile: + filereader = csv.reader(csvfile, delimiter=" ") + filereader.next() # skip header lines filereader.next() filereader.next() - for row in filereader: # read data - try: Bragg.append(float(row[2])) - except: print('could not convert: ',row[2]) - try: Gap.append(float(row[5])) - except: print('could not convert: ',row[5]) - try: Intensity.append(float(row[7])) - except: print('could not convert: ',row[8]) - B=np.array(Bragg)*-1.0+B_off - G=np.array(Gap[0:len(B)]) # not currently used, but converted for future use - Int=np.array(Intensity[0:len(B)]) + for row in filereader: # read data + try: + Bragg.append(float(row[2])) + except: + print("could not convert: ", row[2]) + try: + Gap.append(float(row[5])) + except: + print("could not convert: ", row[5]) + try: + Intensity.append(float(row[7])) + except: + print("could not convert: ", row[8]) + B = np.array(Bragg) * -1.0 + B_off + G = np.array(Gap[0 : len(B)]) # not currently used, but converted for future use + Int = np.array(Intensity[0 : len(B)]) # normalize and remove background: - Int=Int-min(Int) - Int=Int/max(Int) + Int = Int - min(Int) + Int = Int / max(Int) plt.figure(1) - plt.plot(B,Int,'ko-',label='experimental data') - 
plt.plot([xf.get_Bragg(xtal,Edge_data[Edge])[0],xf.get_Bragg(xtal,Edge_data[Edge])[0]],[0,1],'r--',label='Edge for: '+Edge) - plt.legend(loc='best') - plt.xlabel(r'$\theta_B$ [deg.]') - plt.ylabel('intensity') - plt.title(['Energy Calibration using: ',file]) + plt.plot(B, Int, "ko-", label="experimental data") + plt.plot( + [ + xf.get_Bragg(xtal, Edge_data[Edge])[0], + xf.get_Bragg(xtal, Edge_data[Edge])[0], + ], + [0, 1], + "r--", + label="Edge for: " + Edge, + ) + plt.legend(loc="best") + plt.xlabel(r"$\theta_B$ [deg.]") + plt.ylabel("intensity") + plt.title(["Energy Calibration using: ", file]) plt.grid() plt.figure(2) - Eexp=xf.get_EBragg(xtal,B) - plt.plot(Eexp,Int,'ko-',label='experimental data') - plt.plot([Edge_data[Edge],Edge_data[Edge]],[0,1],'r--',label='Edge for: '+Edge) - plt.legend(loc='best') - plt.xlabel('E [keV.]') - plt.ylabel('intensity') - plt.title(['Energy Calibration using: ',file]) + Eexp = xf.get_EBragg(xtal, B) + plt.plot(Eexp, Int, "ko-", label="experimental data") + plt.plot( + [Edge_data[Edge], Edge_data[Edge]], [0, 1], "r--", label="Edge for: " + Edge + ) + plt.legend(loc="best") + plt.xlabel("E [keV.]") + plt.ylabel("intensity") + plt.title(["Energy Calibration using: ", file]) plt.grid() # calculate derivative and analyze: - Bragg_Edge=xf.get_Bragg(xtal,Edge_data[Edge])[0] + Bragg_Edge = xf.get_Bragg(xtal, Edge_data[Edge])[0] plt.figure(3) - diffdat=np.diff(Int) - plt.plot(B[0:len(diffdat)],diffdat,'ko-',label='diff experimental data') - plt.plot([Bragg_Edge,Bragg_Edge],[min(diffdat),max(diffdat)],'r--',label='Edge for: '+Edge) - plt.legend(loc='best') - plt.xlabel(r'$\theta_B$ [deg.]') - plt.ylabel('diff(int)') - plt.title(['Energy Calibration using: ',file]) + diffdat = np.diff(Int) + plt.plot(B[0 : len(diffdat)], diffdat, "ko-", label="diff experimental data") + plt.plot( + [Bragg_Edge, Bragg_Edge], + [min(diffdat), max(diffdat)], + "r--", + label="Edge for: " + Edge, + ) + plt.legend(loc="best") + plt.xlabel(r"$\theta_B$ [deg.]") 
+ plt.ylabel("diff(int)") + plt.title(["Energy Calibration using: ", file]) plt.grid() plt.figure(4) - plt.plot(xf.get_EBragg(xtal,B[0:len(diffdat)]),diffdat,'ko-',label='diff experimental data') - plt.plot([Edge_data[Edge],Edge_data[Edge]],[min(diffdat),max(diffdat)],'r--',label='Edge for: '+Edge) - plt.legend(loc='best') - plt.xlabel('E [keV.]') - plt.ylabel('diff(int)') - plt.title(['Energy Calibration using: ',file]) + plt.plot( + xf.get_EBragg(xtal, B[0 : len(diffdat)]), + diffdat, + "ko-", + label="diff experimental data", + ) + plt.plot( + [Edge_data[Edge], Edge_data[Edge]], + [min(diffdat), max(diffdat)], + "r--", + label="Edge for: " + Edge, + ) + plt.legend(loc="best") + plt.xlabel("E [keV.]") + plt.ylabel("diff(int)") + plt.title(["Energy Calibration using: ", file]) plt.grid() - edge_index=np.argmax(diffdat) - B_edge=xf.get_Bragg(xtal,Edge_data[Edge])[0] - - print('') - print('Energy calibration for: ',file_path) - print('Edge used for calibration: ',Edge) - print('Crystal used for calibration: ',xtal) - print('Bragg angle offset: ', B_edge-B[edge_index],'deg. (CHX coordinate system: ',-(B_edge-B[edge_index]),'deg.)') - print('=> move Bragg to ',-B[edge_index],'deg. and set value to ',-Bragg_Edge,'deg.') - print( 'Energy offset: ',Eexp[edge_index]-Edge_data[Edge],' keV') - -def dcm_roll(Bragg,offset,distance,offmode='mm',pixsize=5.0): + edge_index = np.argmax(diffdat) + B_edge = xf.get_Bragg(xtal, Edge_data[Edge])[0] + + print("") + print("Energy calibration for: ", file_path) + print("Edge used for calibration: ", Edge) + print("Crystal used for calibration: ", xtal) + print( + "Bragg angle offset: ", + B_edge - B[edge_index], + "deg. (CHX coordinate system: ", + -(B_edge - B[edge_index]), + "deg.)", + ) + print( + "=> move Bragg to ", + -B[edge_index], + "deg. 
and set value to ", + -Bragg_Edge, + "deg.", + ) + print("Energy offset: ", Eexp[edge_index] - Edge_data[Edge], " keV") + + +def dcm_roll(Bragg, offset, distance, offmode="mm", pixsize=5.0): """ by LW 03/27/2015 function to calculate Roll correction on the DCM @@ -130,146 +176,165 @@ def dcm_roll(Bragg,offset,distance,offmode='mm',pixsize=5.0): import numpy as np from scipy import optimize from matplotlib import pyplot as plt - Bragg=np.array(Bragg) - if offmode=='mm': - offset=np.array(offset) - elif offmode=='pixel': - offset=np.array(offset)*pixsize/1000.0 - else: raise CHX_utilities_Exception('Eror: offmode must be either "mm" or "pixel"') - if distance=='dcm_bpm': - d=3000.0 # distance dcm-bpm in mm - elif distance=='dcm_mbs': - d=2697.6 #distance dcm-mbs in mm - elif distance=='dcm_sample': - d=16200 #distance dcm-sample in mm + + Bragg = np.array(Bragg) + if offmode == "mm": + offset = np.array(offset) + elif offmode == "pixel": + offset = np.array(offset) * pixsize / 1000.0 + else: + raise CHX_utilities_Exception('Error: offmode must be either "mm" or "pixel"') + if distance == "dcm_bpm": + d = 3000.0 # distance dcm-bpm in mm + elif distance == "dcm_mbs": + d = 2697.6 # distance dcm-mbs in mm + elif distance == "dcm_sample": + d = 16200 # distance dcm-sample in mm else: try: - d=float(distance) + d = float(distance) except: - raise CHX_utilities_Exception('Eror: distance must be a recognized string or numerical value') + raise CHX_utilities_Exception( + "Error: distance must be a recognized string or numerical value" + ) # data fitting - fitfunc = lambda p, x: p[0]+2*d*p[1]*np.sin(x/180.*np.pi) # Target function - errfunc = lambda p, x, y: fitfunc(p, Bragg) - y # Distance to the target function - p0 = [np.mean(offset), -.5] # Initial guess for the parameters + fitfunc = lambda p, x: ( + p[0] + 2 * d * p[1] * np.sin(x / 180.0 * np.pi) + ) # Target function + errfunc = lambda p, x, y: fitfunc(p, Bragg) - y # Distance to the target function + p0 = 
[np.mean(offset), -0.5] # Initial guess for the parameters p1, success = optimize.leastsq(errfunc, p0[:], args=(Bragg, offset)) # plotting the result: plt.close(1) plt.figure(1) B = np.linspace(Bragg.min(), Bragg.max(), 100) - plt.plot(Bragg,offset,'ro',label='measured offset') - plt.plot(B,fitfunc(p1,B),'k-',label=r'$x_o$+2*D*$\Delta$$\Phi$*sin($\theta_B$)') - plt.legend(loc='best') - plt.ylabel('beam offset [mm]') - plt.xlabel('Bragg angle [deg.]') - print('x_0= ',p1[0],'mm') - print('\Delta \Phi= ',p1[1]*180.0/np.pi,'deg') + plt.plot(Bragg, offset, "ro", label="measured offset") + plt.plot(B, fitfunc(p1, B), "k-", label=r"$x_o$+2*D*$\Delta$$\Phi$*sin($\theta_B$)") + plt.legend(loc="best") + plt.ylabel("beam offset [mm]") + plt.xlabel("Bragg angle [deg.]") + print("x_0= ", p1[0], "mm") + print("\Delta \Phi= ", p1[1] * 180.0 / np.pi, "deg") + def sum_image(filename, firstim=0, lastim=9999): """ by LW 03/30/2015 function to sum up tiff files and display cuts calling sequence: sum_image(filename, firstim=0, lastim=9999) - filename can be 'ia' for chosing path and first image via GUI + filename can be 'ia' for choosing path and first image via GUI """ from PIL import Image - import Tkinter, tkFileDialog + import Tkinter + import tkFileDialog from matplotlib import pyplot as plt import numpy as np import time - if filename=='ia': # open file dialog + if filename == "ia": # open file dialog root = Tkinter.Tk() root.withdraw() file_path = tkFileDialog.askopenfilename() - else: file_path=filename + else: + file_path = filename print(file_path) # extract filename and first image number: - a=file_path.split('/') - fn=a[len(a)-1].split('.')[0] # file name - b=fn.split('.') - #fe=b[len(b)-1] # file ending, e.g .tiff - fe=a[len(a)-1].split('.')[1] - c=fn.split('_') - fnum=c[len(c)-1].split('.')[0] # file number - nlen=len(fnum) # length of number format - fp=file_path.split(fn)[0] - fn=fn.split('_'+fnum)[0] - print('file path: ',fp) - print('file name: ',fn) - print('file 
ending: ',fe) - print('file number: ',fnum) - - if filename=='ia': # get first image number from filebame in interactive mode - firstim=int(fnum) - #lastim=30 # maximum number of images to search in autmatic mode - - imcount=firstim - #print 'imcount: ',imcount - #print 'firstim: ',firstim - get_out=0 - image_data=0 + a = file_path.split("/") + fn = a[len(a) - 1].split(".")[0] # file name + b = fn.split(".") + # fe=b[len(b)-1] # file ending, e.g .tiff + fe = a[len(a) - 1].split(".")[1] + c = fn.split("_") + fnum = c[len(c) - 1].split(".")[0] # file number + nlen = len(fnum) # length of number format + fp = file_path.split(fn)[0] + fn = fn.split("_" + fnum)[0] + print("file path: ", fp) + print("file name: ", fn) + print("file ending: ", fe) + print("file number: ", fnum) + + if filename == "ia": # get first image number from filename in interactive mode + firstim = int(fnum) + # lastim=30 # maximum number of images to search in automatic mode + + imcount = firstim + # print 'imcount: ',imcount + # print 'firstim: ',firstim + get_out = 0 + image_data = 0 while imcount <= lastim: - ic=str(imcount) - while len(ic) < nlen: # create file number - ic='0'+ic - #ic=str(ic) - #print ic - #assemble the image name to be loaded: - file_to_open=fp+fn+'_'+ic+'.'+fe + ic = str(imcount) + while len(ic) < nlen: # create file number + ic = "0" + ic + # ic=str(ic) + # print ic + # assemble the image name to be loaded: + file_to_open = fp + fn + "_" + ic + "." + fe try: im = Image.open(file_to_open) - #im.show() - print('loaded file: ',file_to_open) + # im.show() + print("loaded file: ", file_to_open) except: - print('waiting for file: '+file_to_open) - #plt.close(1) # use the waiting time to plot some intermediate result -> does not work, as python is waiting until the end before printing.... 
- #plt.figure(1) - #imgplot= plt.imshow(image_data/(imcount-firstim+1),origin='lower') - #imgplot.set_cmap('gray') + print("waiting for file: " + file_to_open) + # plt.close(1) # use the waiting time to plot some intermediate result -> does not work, as python is waiting until the end before printing.... + # plt.figure(1) + # imgplot= plt.imshow(image_data/(imcount-firstim+1),origin='lower') + # imgplot.set_cmap('gray') time.sleep(5) try: im = Image.open(file_to_open) - #im.show() - except:get_out=1 - if get_out==1: + # im.show() + except: + get_out = 1 + if get_out == 1: return image_data - plt.close(1) # only plot at the end for now: too much overhead to plot every iteration + plt.close( + 1 + ) # only plot at the end for now: too much overhead to plot every iteration plt.figure(1) - raise CHX_utilities_Exception('no more images found, images summed up to: '+file_to_open) - if imcount==firstim: + raise CHX_utilities_Exception( + "no more images found, images summed up to: " + file_to_open + ) + if imcount == firstim: # print 'made it here!' 
- im=np.array(im)*1.0 - image_data=np.sum(im, axis=2) - imgplot= plt.imshow(image_data,origin='lower') - imgplot.set_cmap('gray') - else: image_data=image_data+np.array(np.sum(im, axis=2)) - #plt.close(1) - #plt.figure(1) - #plt.imshow(image_data/((imcount-firstim)+1)) - imcount=imcount+1 - plt.close(1) # only plot at the end for now: too much overhead to plot every iteration + im = np.array(im) * 1.0 + image_data = np.sum(im, axis=2) + imgplot = plt.imshow(image_data, origin="lower") + imgplot.set_cmap("gray") + else: + image_data = image_data + np.array(np.sum(im, axis=2)) + # plt.close(1) + # plt.figure(1) + # plt.imshow(image_data/((imcount-firstim)+1)) + imcount = imcount + 1 + plt.close( + 1 + ) # only plot at the end for now: too much overhead to plot every iteration plt.figure(1) - #finalimage=Image.fromarray(image_data/(imcount-firstim+1)) - imgplot= plt.imshow(image_data/(imcount-firstim+1),origin='lower') - imgplot.set_cmap('gray') - plt.title('summed :'+fp+fn+fnum+'-'+ic) - #finalimage.show() - return image_data/(imcount-firstim+1) - -def gauss(x,*p): + # finalimage=Image.fromarray(image_data/(imcount-firstim+1)) + imgplot = plt.imshow(image_data / (imcount - firstim + 1), origin="lower") + imgplot.set_cmap("gray") + plt.title("summed :" + fp + fn + fnum + "-" + ic) + # finalimage.show() + return image_data / (imcount - firstim + 1) + + +def gauss(x, *p): """ LW 04/06/2015 Gaussian help function of the form: yo+A*exp(-(x-xc).^2/(2*w^2)) """ import numpy as np - #print p - yo,A,xc,w=p - return yo+A*np.exp(-(x-xc)**2/(2.*w**2)) + + # print p + yo, A, xc, w = p + return yo + A * np.exp(-((x - xc) ** 2) / (2.0 * w**2)) ########################################### @@ -277,35 +342,35 @@ def gauss(x,*p): #####PVs PV_dict = dict( - #Acc_Vert='SR:C31-{AI}Aie11:Angle-y-Cal', - T_DCM_Bragg = 'XF:11IDA-OP{Mono:DCM-Ax:Bragg}T-I', - T_DCM_Pitch = 'XF:11IDA-OP{Mono:DCM-Ax:Pitch}T-I', - T_DCM_X='XF:11IDA-OP{Mono:DCM-Ax:X}T-I', - T_DCM_R = 
'XF:11IDA-OP{Mono:DCM-Ax:Roll}T-I', - T_DCM_WaterOut= 'XF:11IDA-OP{Mono:DCM}T:H2O_Out-I', - T_DCM_plate1= 'XF:11IDA-OP{Mono:DCM}T:Plate1-I', - T_Cryo = 'XF:11IDA-UT{Cryo:1}T:05-I', - P_Cryo = 'XF:11IDA-UT{Cryo:1}P:03-I', - #DCM_Pitch_Encoder='XF:11IDA-OP{Mono:DCM-Ax:P}Mtr.DRBV', - E_DCM_Pitch_RBV='XF:11IDA-OP{Mono:DCM-Ax:P}Mtr.RBV', - E_DCM_Pitch_OFF='XF:11IDA-OP{Mono:DCM-Ax:P}Mtr.OFF', - E_DCM_Pitch_ERES='XF:11IDA-OP{Mono:DCM-Ax:P}Mtr.ERES', - E_HDM_Pitch ='XF:11IDA-OP{Mir:HDM-Ax:P}Pos-I', - ) + # Acc_Vert='SR:C31-{AI}Aie11:Angle-y-Cal', + T_DCM_Bragg="XF:11IDA-OP{Mono:DCM-Ax:Bragg}T-I", + T_DCM_Pitch="XF:11IDA-OP{Mono:DCM-Ax:Pitch}T-I", + T_DCM_X="XF:11IDA-OP{Mono:DCM-Ax:X}T-I", + T_DCM_R="XF:11IDA-OP{Mono:DCM-Ax:Roll}T-I", + T_DCM_WaterOut="XF:11IDA-OP{Mono:DCM}T:H2O_Out-I", + T_DCM_plate1="XF:11IDA-OP{Mono:DCM}T:Plate1-I", + T_Cryo="XF:11IDA-UT{Cryo:1}T:05-I", + P_Cryo="XF:11IDA-UT{Cryo:1}P:03-I", + # DCM_Pitch_Encoder='XF:11IDA-OP{Mono:DCM-Ax:P}Mtr.DRBV', + E_DCM_Pitch_RBV="XF:11IDA-OP{Mono:DCM-Ax:P}Mtr.RBV", + E_DCM_Pitch_OFF="XF:11IDA-OP{Mono:DCM-Ax:P}Mtr.OFF", + E_DCM_Pitch_ERES="XF:11IDA-OP{Mono:DCM-Ax:P}Mtr.ERES", + E_HDM_Pitch="XF:11IDA-OP{Mir:HDM-Ax:P}Pos-I", +) PV_dict = dict( - E_HDM_Pitch ='XF:11IDA-OP{Mir:HDM-Ax:P}Pos-I', - ) + E_HDM_Pitch="XF:11IDA-OP{Mir:HDM-Ax:P}Pos-I", +) -label = PV_dict.keys() -PV = [ PV_dict[lab] for lab in label ] +label = PV_dict.keys() +PV = [PV_dict[lab] for lab in label] PV_Ind = dict( - Freq_Cryo = 'XF:11IDA-UT{Cryo:1-Pmp}Freq-I', - ) + Freq_Cryo="XF:11IDA-UT{Cryo:1-Pmp}Freq-I", +) -labeli = PV_Ind.keys() -PVi = [ PV_Ind[lab] for lab in labeli ] +labeli = PV_Ind.keys() +PVi = [PV_Ind[lab] for lab in labeli] ############################# @@ -315,1121 +380,1411 @@ def determine_aspect(shape, extent): return dx / dy -def get_cuts(img, cut=1000,direction='horz'): - if direction=='horz': - cdat=img[cut,:]; - elif direction=='vert': - cdat=img[:,cut]; +def get_cuts(img, cut=1000, direction="horz"): + if direction == "horz": + 
cdat = img[cut, :] + elif direction == "vert": + cdat = img[:, cut] return cdat -def show_cut(img, cut=1000, direction='horz',width=10,title=None): - #fig,ax=plt.subplots() - img_=img.copy() - if direction=='horz': - img_[cut-width/2:cut+width/2,:] = 0 - elif direction=='vert': - img_[:,cut-width/2:cut+width/2,] = 0 - if title==None:title = 'Img-&-Cut_%s'%cut - - show_img(img_,title=title) - - -def show_img(img, ax=None,save=False, vmin=None, vmax=None,cmap='spectral',fontsize=24, - axis_on=True,title_on=True,xlabel=None,ylabel=None,aspect=None, - title='img_', show=True, logs=False,outDir=None, sizex=9,sizey=9, - ylim=None, xlim=None, xticks=True, yticks=True,extent=None): - #rescale=None): #extent=None): +def show_cut(img, cut=1000, direction="horz", width=10, title=None): + # fig,ax=plt.subplots() + img_ = img.copy() + + if direction == "horz": + img_[cut - width / 2 : cut + width / 2, :] = 0 + elif direction == "vert": + img_[ + :, + cut - width / 2 : cut + width / 2, + ] = 0 + if title == None: + title = "Img-&-Cut_%s" % cut + + show_img(img_, title=title) + + +def show_img( + img, + ax=None, + save=False, + vmin=None, + vmax=None, + cmap="spectral", + fontsize=24, + axis_on=True, + title_on=True, + xlabel=None, + ylabel=None, + aspect=None, + title="img_", + show=True, + logs=False, + outDir=None, + sizex=9, + sizey=9, + ylim=None, + xlim=None, + xticks=True, + yticks=True, + extent=None, +): + # rescale=None): #extent=None): """show a two-D image""" import matplotlib.pyplot as plt - from matplotlib import cm import numpy as np - if ax is None:fig, ax = plt.subplots(nrows=1) - if vmin==None:vmin=img.min() - if vmax==None:vmax=img.max() + if ax is None: + fig, ax = plt.subplots(nrows=1) + if vmin == None: + vmin = img.min() + if vmax == None: + vmax = img.max() shape = img.shape - dy,dx= shape + dy, dx = shape - #ax.set_xlim( 0,dx) - #ax.set_ylim(0,dy) + # ax.set_xlim( 0,dx) + # ax.set_ylim(0,dy) - if xlim is not None:ax.set_xlim(xlim) + if xlim is not None: + 
ax.set_xlim(xlim) if extent is not None: - x1,x2,y1,y2=extent - if ylim is None:ylim=[y2,y1] - aspect_=determine_aspect(shape, extent) - #rsx,rsy = [ (x2-x1)/float(dx), (y2-y1)/float(dy)] #rescale x, and rescale y - else:aspect_=None - if ylim is None:ylim=[0,dy] - #print ylim + x1, x2, y1, y2 = extent + if ylim is None: + ylim = [y2, y1] + aspect_ = determine_aspect(shape, extent) + # rsx,rsy = [ (x2-x1)/float(dx), (y2-y1)/float(dy)] #rescale x, and rescale y + else: + aspect_ = None + if ylim is None: + ylim = [0, dy] + # print ylim ax.set_ylim([ylim[0], ylim[1]]) if not logs: - cax=ax.imshow( img,cmap=cmap, vmin=vmin,vmax=vmax, - aspect = aspect_,extent=extent)#,interpolation='none') + cax = ax.imshow( + img, cmap=cmap, vmin=vmin, vmax=vmax, aspect=aspect_, extent=extent + ) # ,interpolation='none') if logs: - img= np.log(img) - if vmin==None:vmin=img[nonzero(img)].min() #img.min() - cax=ax.imshow( img,cmap=cmap,vmin=vmin, vmax=vmax, - aspect = aspect_, extent=extent)#,interpolation='none') + img = np.log(img) + if vmin == None: + vmin = img[nonzero(img)].min() # img.min() + cax = ax.imshow( + img, cmap=cmap, vmin=vmin, vmax=vmax, aspect=aspect_, extent=extent + ) # ,interpolation='none') if aspect is not None: im = ax.get_images() - x1,x2,y1,y2 = im[0].get_extent() - if ylim is not None:y1,y2=ylim - if xlim is not None:x1.x2=xlim - ax.set_aspect(abs((x2-x1)/(y2-y1))/aspect) - if title_on:plt.title( title,fontsize=fontsize ) - if not axis_on:plt.axis('off') - if xlabel is not None:plt.xlabel(xlabel,fontsize=fontsize) - else:plt.xlabel('') - if ylabel is not None:plt.ylabel(ylabel,fontsize=fontsize) - else:plt.ylabel('') - - if xticks is None:plt.xticks( [] ) - elif xticks is True:plt.setp( ax.get_xticklabels(),fontsize=fontsize*.8, visible=True)# - else:plt.xticks( xticks,fontsize=fontsize *.8 ) - if yticks is None:plt.yticks( [] ) - elif yticks is True:plt.setp( ax.get_yticklabels(),fontsize=fontsize*.8, visible=True)# - else:plt.yticks( 
yticks,fontsize=fontsize *.8 ) - if ax is None:cbar = fig.colorbar(cax, ticks=[vmin,vmax]) - - #if ylim is not None:ax.set_ylim(ylim) - #if xlim is not None:ax.set_xlim(xlim) + x1, x2, y1, y2 = im[0].get_extent() + if ylim is not None: + y1, y2 = ylim + if xlim is not None: + x1, x2 = xlim + ax.set_aspect(abs((x2 - x1) / (y2 - y1)) / aspect) + if title_on: + plt.title(title, fontsize=fontsize) + if not axis_on: + plt.axis("off") + if xlabel is not None: + plt.xlabel(xlabel, fontsize=fontsize) + else: + plt.xlabel("") + if ylabel is not None: + plt.ylabel(ylabel, fontsize=fontsize) + else: + plt.ylabel("") + + if xticks is None: + plt.xticks([]) + elif xticks is True: + plt.setp(ax.get_xticklabels(), fontsize=fontsize * 0.8, visible=True) # + else: + plt.xticks(xticks, fontsize=fontsize * 0.8) + if yticks is None: + plt.yticks([]) + elif yticks is True: + plt.setp(ax.get_yticklabels(), fontsize=fontsize * 0.8, visible=True) # + else: + plt.yticks(yticks, fontsize=fontsize * 0.8) + if ax is None: + cbar = fig.colorbar(cax, ticks=[vmin, vmax]) + + # if ylim is not None:ax.set_ylim(ylim) + # if xlim is not None:ax.set_xlim(xlim) if save: - if outDir!=None:fp=outDir + title + '_.png' - else:fp= title + '_.png' - plt.savefig( fp ) - if show:plt.show() - -def trans_data_to_pd(data, label=None,dtype='list'): - #lists a [ list1, list2...] all the list have the same length - from numpy import arange,array - import pandas as pd,sys - if dtype == 'list': - data=array(data).T - elif dtype == 'array': - data=array(data) + if outDir != None: + fp = outDir + title + "_.png" + else: + fp = title + "_.png" + plt.savefig(fp) + if show: + plt.show() + + +def trans_data_to_pd(data, label=None, dtype="list"): + # lists a [ list1, list2...] all the list have the same length + from numpy import arange, array + import pandas as pd + + if dtype == "list": + data = array(data).T + elif dtype == "array": + data = array(data) else: - print("Wrong data type! 
Now only support 'list' and 'array' tpye") - N,M=data.shape - index = arange( N ) - if label is None:label=['data%s'%i for i in range(M)] - #print label - df = pd.DataFrame( data, index=index, columns= label ) + print("Wrong data type! Now only support 'list' and 'array' type") + N, M = data.shape + index = arange(N) + if label is None: + label = ["data%s" % i for i in range(M)] + # print label + df = pd.DataFrame(data, index=index, columns=label) return df + def trans_tf_to_td(tf): - #trans epoch time to datetime - import pandas as pd + # trans epoch time to datetime import numpy as np import datetime - td =np.array( [datetime.datetime.fromtimestamp( tf[i] ) - for i in range( 0, len(tf) )] ) + + td = np.array([datetime.datetime.fromtimestamp(tf[i]) for i in range(0, len(tf))]) return td + def trans_td_to_tf(td): - import time,numpy as np + import time + import numpy as np + t = np.array(td) - tf = np.array( [time.mktime( t[i].timetuple() ) - for i in range( len(t))] ) + tf = np.array([time.mktime(t[i].timetuple()) for i in range(len(t))]) return tf -def plot_pv_values( dict_tv, time, keys=None,title=None, xlabel='Time', ylim_tv=None, - data= None, ylim_data=None, img=None,pixsize=0.79, - aspect=5.0, ylim_img = None): + +def plot_pv_values( + dict_tv, + time, + keys=None, + title=None, + xlabel="Time", + ylim_tv=None, + data=None, + ylim_data=None, + img=None, + pixsize=0.79, + aspect=5.0, + ylim_img=None, +): import pandas.tools.plotting as ptg - import matplotlib.dates as md - #from numpy import arange,array + + # from numpy import arange,array import numpy as np - import datetime - import matplotlib.pyplot as plt if keys is None: - keys=dict_tv.keys() + keys = dict_tv.keys() M = len(keys) if img is not None: - N=M+1 - N0=1 - N_img=N0-1 + N = M + 1 + N0 = 1 + N_img = N0 - 1 else: - N=M - N0=0 - N_img=-1 + N = M + N0 = 0 + N_img = -1 if data is not None: - N +=1 - N0+=1 - N_data=N_img +1 - - sharex=True - #fig, axs = plt.subplots(N,sharex = True) - 
fig,axs=ptg._subplots( N, sharex=sharex,sharey=False, layout=[N,1]) - #tf=time - axs[0].set_xlim( np.min(time), np.max(time) ) - for n in range(N0,N): - i=n-N0 - d = dict_tv[ keys[i] ] - if ylim_tv is None: - ymean = d[ keys[i] ].mean() - ymax,ymin = d[ keys[i] ].max(), d[ keys[i] ].min() - width = min( [ ymax-ymean, ymean - ymin] ) + N += 1 + N0 += 1 + N_data = N_img + 1 + + sharex = True + # fig, axs = plt.subplots(N,sharex = True) + fig, axs = ptg._subplots(N, sharex=sharex, sharey=False, layout=[N, 1]) + # tf=time + axs[0].set_xlim(np.min(time), np.max(time)) + for n in range(N0, N): + i = n - N0 + d = dict_tv[keys[i]] + if ylim_tv is None: + ymean = d[keys[i]].mean() + ymax, ymin = d[keys[i]].max(), d[keys[i]].min() + width = min([ymax - ymean, ymean - ymin]) ymax_ = ymean + width ymin_ = ymean - width - #print ymean, ymax_, ymin_ - #ylim=[ ymin_ - ymean*.2, ymax_ + ymean*.2] - ylim = [ymean - width*5, ymean + width *5] - else:ylim=ylim_tv[i] - - d.plot( x='tf', y = [ keys[i] ],subplots=False, ylim=ylim, - marker='o',c='b', ls = '--', ax=axs[n]) + # print ymean, ymax_, ymin_ + # ylim=[ ymin_ - ymean*.2, ymax_ + ymean*.2] + ylim = [ymean - width * 5, ymean + width * 5] + else: + ylim = ylim_tv[i] + + d.plot( + x="tf", + y=[keys[i]], + subplots=False, + ylim=ylim, + marker="o", + c="b", + ls="--", + ax=axs[n], + ) yt = axs[n].get_yticks() - axs[n].set_yticklabels(yt,fontsize=24) - axs[n].set_ylabel( keys[i] ,fontsize=24) - ki=keys[i] - if n!=N-1: - m=axs[n].get_xticklabels() - axs[n].set_xlabel('') - for x in m:x.set_visible(False) - - #dd =np.array( [datetime.datetime.fromtimestamp( tf[i] ) - #for i in range( 0, len(tf) )] ) + axs[n].set_yticklabels(yt, fontsize=24) + axs[n].set_ylabel(keys[i], fontsize=24) + ki = keys[i] + if n != N - 1: + m = axs[n].get_xticklabels() + axs[n].set_xlabel("") + for x in m: + x.set_visible(False) + + # dd =np.array( [datetime.datetime.fromtimestamp( tf[i] ) + # for i in range( 0, len(tf) )] ) if img is not None: - dy,dx = 
img.shape - shape=img.shape - extent= [ np.min(time), np.max(time), dy* pixsize, 0 ] - show_img(img, aspect =aspect, ax=axs[N_img], - extent=extent, title='', ylim=ylim_img,xticks=True) - m=axs[N_img].get_xticklabels() - for x in m:x.set_visible(False) - #axs[N_img].set_xlim( np.min(time), np.max(time) ) - axs[N_img].set_ylabel('size, um',fontsize=24) + dy, dx = img.shape + shape = img.shape + extent = [np.min(time), np.max(time), dy * pixsize, 0] + show_img( + img, + aspect=aspect, + ax=axs[N_img], + extent=extent, + title="", + ylim=ylim_img, + xticks=True, + ) + m = axs[N_img].get_xticklabels() + for x in m: + x.set_visible(False) + # axs[N_img].set_xlim( np.min(time), np.max(time) ) + axs[N_img].set_ylabel("size, um", fontsize=24) if data is not None: - data.plot( x=time, subplots=False, ylim=ylim_data, - marker='s',c='g', ls = '--', ax=axs[N_data]) #x= tf, - m=axs[N_data].get_xticklabels() - for x in m:x.set_visible(False) - axs[N_data].set_xlabel('') - axs[N_data].set_ylabel( data.keys()[0] ,fontsize=24) + data.plot( + x=time, + subplots=False, + ylim=ylim_data, + marker="s", + c="g", + ls="--", + ax=axs[N_data], + ) # x= tf, + m = axs[N_data].get_xticklabels() + for x in m: + x.set_visible(False) + axs[N_data].set_xlabel("") + axs[N_data].set_ylabel(data.keys()[0], fontsize=24) yt = axs[N_data].get_yticks() - axs[N_data].set_yticklabels(yt,fontsize=24) + axs[N_data].set_yticklabels(yt, fontsize=24) - if title is not None:axs[0].set_title(title, fontsize=28) + if title is not None: + axs[0].set_title(title, fontsize=28) - axs[n].set_xlim( np.min(time), np.max(time) ) + axs[n].set_xlim(np.min(time), np.max(time)) - #print trans_tf_to_td( [np.min(time), np.max(time) ]) + # print trans_tf_to_td( [np.min(time), np.max(time) ]) xt = axs[n].get_xticks() - xticks = trans_tf_to_td(xt) - axs[n].set_xlabel(xlabel,fontsize=24) - axs[n].set_xticklabels([x.strftime('%m-%d %H:%M') for x in xticks],fontsize=24) + xticks = trans_tf_to_td(xt) + axs[n].set_xlabel(xlabel, 
fontsize=24) + axs[n].set_xticklabels([x.strftime("%m-%d %H:%M") for x in xticks], fontsize=24) - axs[n].set_ylabel( ki ,fontsize=24) + axs[n].set_ylabel(ki, fontsize=24) - #axs[0].set_xlim( np.min(time), np.max(time) ) + # axs[0].set_xlim( np.min(time), np.max(time) ) -def make_wave_data2( x,y): +def make_wave_data2(x, y): import numpy as np - x=np.array(x) - y=np.array(y) - X = np.zeros( len(x)*2 -1 ) - Y = np.zeros( len(x)*2 -1 ) + + x = np.array(x) + y = np.array(y) + X = np.zeros(len(x) * 2 - 1) + Y = np.zeros(len(x) * 2 - 1) X[::2] = x X[1::2] = x[1:] Y[::2] = y Y[1::2] = y[:-1] - return X,Y + return X, Y -def make_wave_data( x,dtype='x'): + +def make_wave_data(x, dtype="x"): import numpy as np - x=np.array(x) - X = np.zeros( len(x)*2 -1, dtype = object ) + + x = np.array(x) + X = np.zeros(len(x) * 2 - 1, dtype=object) X[::2] = x - if dtype=='x': + if dtype == "x": X[1::2] = x[1:] else: X[1::2] = x[:-1] return X -def get_pvlist_values(PVs,labels): - '''get current PV-list, PV-Label-list, - Return a dict - { label: value}''' - dict_tv ={} - N= len(PVs) +def get_pvlist_values(PVs, labels): + """get current PV-list, PV-Label-list, + Return a dict + { label: value}""" + dict_tv = {} + N = len(PVs) for i in range(N): - dict_tv[ labels[i]] = get_pv_value( PVs[i] ) + dict_tv[labels[i]] = get_pv_value(PVs[i]) return dict_tv -def get_pv_value( PV ): - '''get current PV-list, PV-Label-list, - return PV-value''' +def get_pv_value(PV): + """get current PV-list, PV-Label-list, + return PV-value""" import epics as ep - return ep.caget( PV ) + return ep.caget(PV) -def get_archive_pvlist_values(PVs, labels, start_time, end_time, - limit=100000,interpolation='raw'): - '''get PV-list, PV-Label-list from archiver from start_time to end_time, - Return: - a dict { label: - pandas.DataFrame with keys as time-str, time-float, label-values - } - ''' + +def get_archive_pvlist_values( + PVs, labels, start_time, end_time, limit=100000, interpolation="raw" +): + """get PV-list, 
PV-Label-list from archiver from start_time to end_time, + Return: + a dict { label: + pandas.DataFrame with keys as time-str, time-float, label-values + } + """ from channelarchiver import Archiver - archiver = Archiver('http://xf11id-ca.cs.nsls2.local/cgi-bin/ArchiveDataServer.cgi') - archiver_acc = Archiver('http://ca.cs.nsls2.local:8888/cgi-bin/ArchiveDataServer.cgi') - #archiver.scan_archives() + archiver = Archiver("http://xf11id-ca.cs.nsls2.local/cgi-bin/ArchiveDataServer.cgi") + archiver_acc = Archiver( + "http://ca.cs.nsls2.local:8888/cgi-bin/ArchiveDataServer.cgi" + ) - dict_tv ={} - N= len(PVs) - for i in range(N): - PV=PVs[i] - label=labels[i] - if i==0:scan_archives=True - else:scan_archives=False + # archiver.scan_archives() - scan_archives=True - dict_tv[label]=get_archive_pv_value(PV, label, start_time, end_time, - scan_archives=scan_archives, limit=limit, - interpolation=interpolation,archiver=[archiver,archiver_acc] ) + dict_tv = {} + N = len(PVs) + for i in range(N): + PV = PVs[i] + label = labels[i] + if i == 0: + scan_archives = True + else: + scan_archives = False + + scan_archives = True + dict_tv[label] = get_archive_pv_value( + PV, + label, + start_time, + end_time, + scan_archives=scan_archives, + limit=limit, + interpolation=interpolation, + archiver=[archiver, archiver_acc], + ) return dict_tv -def get_archive_pv_value(PV, label, start_time, end_time,scan_archives=True, - limit=100000,interpolation='raw',archiver=None,): - - '''get PV, PV-Label from archiver from start_time to end_time +def get_archive_pv_value( + PV, + label, + start_time, + end_time, + scan_archives=True, + limit=100000, + interpolation="raw", + archiver=None, +): + """get PV, PV-Label from archiver from start_time to end_time Return: pandas.DataFrame with keys as time-str, time-float, label-values - ''' + """ import pandas as pd import numpy as np - import time - if archiver is None: + if archiver is None: from channelarchiver import Archiver - archiver = 
[Archiver('http://xf11id-ca.cs.nsls2.local/cgi-bin/ArchiveDataServer.cgi')] - #if scan_archives: - #archiver.scan_archives() - if label[:3] == 'Acc':arch= archiver[1] - else:arch= archiver[0] - data = arch.get(PV, start_time, end_time,scan_archives=scan_archives, - limit=limit,interpolation=interpolation ) + + archiver = [ + Archiver("http://xf11id-ca.cs.nsls2.local/cgi-bin/ArchiveDataServer.cgi") + ] + # if scan_archives: + # archiver.scan_archives() + if label[:3] == "Acc": + arch = archiver[1] + else: + arch = archiver[0] + data = arch.get( + PV, + start_time, + end_time, + scan_archives=scan_archives, + limit=limit, + interpolation=interpolation, + ) td = np.array(data.times) - v =np.array(data.values, dtype = float) - td = make_wave_data( td,dtype='x') - v = make_wave_data( v,dtype='y') + v = np.array(data.values, dtype=float) + td = make_wave_data(td, dtype="x") + v = make_wave_data(v, dtype="y") tf = trans_td_to_tf(td) - tv = np.array([ td, tf, v] ).T - index = np.arange(len(tv) ) + tv = np.array([td, tf, v]).T + index = np.arange(len(tv)) data = tv - df = pd.DataFrame( data, index=index, columns= ['td','tf',label] ) + df = pd.DataFrame(data, index=index, columns=["td", "tf", label]) - print('seraching %s from: %s---to: %s'%(label, start_time, end_time)) - print('Found results: from: %s to :%s with %s points.' %( - df.td[0],df.td[ len(df.td)-1], len(df.td))) + print("searching %s from: %s---to: %s" % (label, start_time, end_time)) + print( + "Found results: from: %s to :%s with %s points." 
+ % (df.td[0], df.td[len(df.td) - 1], len(df.td)) + ) return df -def read_scan( sid,fill=True ): - ''' read data from sid = scan_id''' +def read_scan(sid, fill=True): + """read data from sid = scan_id""" - from dataportal import (DataBroker as db, - StepScan as ss, - StepScan, DataBroker, - DataMuxer as dm) + from dataportal import DataMuxer as dm import datetime hdr = db[sid] - ev=db.fetch_events(hdr,fill=fill) #, fill=True) - muxer = dm.from_events( ev ) + ev = db.fetch_events(hdr, fill=fill) # , fill=True) + muxer = dm.from_events(ev) data = muxer.to_sparse_dataframe() dt = data.time - #print dt - #print dt[0] - #print dt[len(dt)-1] - #data = list( db.fetch_events(hdr)) + # print dt + # print dt[0] + # print dt[len(dt)-1] + # data = list( db.fetch_events(hdr)) - t1 = datetime.datetime.fromtimestamp(dt[0]).strftime('%Y-%m-%d %H:%M:%S') - t2 = datetime.datetime.fromtimestamp(dt[len(dt)-1]).strftime('%Y-%m-%d %H:%M:%S') + t1 = datetime.datetime.fromtimestamp(dt[0]).strftime("%Y-%m-%d %H:%M:%S") + t2 = datetime.datetime.fromtimestamp(dt[len(dt) - 1]).strftime("%Y-%m-%d %H:%M:%S") - #t1 = dt[0].strftime('%Y-%m-%d %H:%M:%S') - #t2 = dt[len(dt)-1].strftime('%Y-%m-%d %H:%M:%S') + # t1 = dt[0].strftime('%Y-%m-%d %H:%M:%S') + # t2 = dt[len(dt)-1].strftime('%Y-%m-%d %H:%M:%S') - print('the first scan time is: %s'%t1) - print('the last scan time is: %s'%t2) - start_time=t1 - end_time =t2 + print("the first scan time is: %s" % t1) + print("the last scan time is: %s" % t2) + start_time = t1 + end_time = t2 return data, start_time, end_time -#w,t = get_waterfall( data,direction='vert', cuts=[1000],firstim=0, lastim=1200,detector="xray_cam_img_image_lightfield",) +# w,t = get_waterfall( data,direction='vert', cuts=[1000],firstim=0, lastim=1200,detector="xray_cam_img_image_lightfield",) + +# show_img(w[1000].T,aspect=1.2, ylim=[700,1400]) -#show_img(w[1000].T,aspect=1.2, ylim=[700,1400]) -def get_waterfall( data,direction='horz', cuts=[1,2,3],firstim=0, - 
lastim=1,detector="xray_cam_img_image_lightfield",): +def get_waterfall( + data, + direction="horz", + cuts=[1, 2, 3], + firstim=0, + lastim=1, + detector="xray_cam_img_image_lightfield", +): import numpy as np - imcount=firstim - #im_time=[] - waterfall={} - notime = lastim +1-firstim + imcount = firstim + # im_time=[] + waterfall = {} + notime = lastim + 1 - firstim in_time = data.time imgs = data[detector] - for n in range(0,notime): - t= firstim + n - #im_time.append( data[t].time ) - dat=imgs[n] - #print t, data[t].time - #dat = data[t].data[detector] + for n in range(0, notime): + t = firstim + n + # im_time.append( data[t].time ) + dat = imgs[n] + # print t, data[t].time + # dat = data[t].data[detector] for i in cuts: - if t==0:waterfall[i] = [] - if direction=='horz': - cdat=dat[i,:]; - elif direction=='vert': - cdat=dat[:,i]; - waterfall[i].append( cdat ) - for i in cuts:waterfall[i]=np.array(waterfall[i]) + if t == 0: + waterfall[i] = [] + if direction == "horz": + cdat = dat[i, :] + elif direction == "vert": + cdat = dat[:, i] + waterfall[i].append(cdat) + for i in cuts: + waterfall[i] = np.array(waterfall[i]) return waterfall, in_time -def get_img_waterfall( imgs,direction='horz', cuts=[1,2,3],firstim=0, lastim=1, - ): +def get_img_waterfall( + imgs, + direction="horz", + cuts=[1, 2, 3], + firstim=0, + lastim=1, +): import numpy as np - imcount=firstim - #im_time=[] - waterfall={} - notime = lastim +1-firstim - #in_time = data.time - #imgs = data[detector] - for n in range(0,notime): - t= firstim + n - #im_time.append( data[t].time ) - dat=imgs[n] - #print t, data[t].time - #dat = data[t].data[detector] + imcount = firstim + # im_time=[] + waterfall = {} + notime = lastim + 1 - firstim + # in_time = data.time + # imgs = data[detector] + for n in range(0, notime): + t = firstim + n + # im_time.append( data[t].time ) + dat = imgs[n] + # print t, data[t].time + # dat = data[t].data[detector] for i in cuts: - if t==0:waterfall[i] = [] - if 
direction=='horz': - cdat=dat[i,:]; - elif direction=='vert': - cdat=dat[:,i]; - waterfall[i].append( cdat ) - for i in cuts:waterfall[i]=np.array(waterfall[i]) + if t == 0: + waterfall[i] = [] + if direction == "horz": + cdat = dat[i, :] + elif direction == "vert": + cdat = dat[:, i] + waterfall[i].append(cdat) + for i in cuts: + waterfall[i] = np.array(waterfall[i]) return waterfall -def line_focus(filename,imgs=None,times=None, direction='horz', - cuts=[1,2,3], firstim=0, lastim=1, - ): +def line_focus( + filename, + imgs=None, + times=None, + direction="horz", + cuts=[1, 2, 3], + firstim=0, + lastim=1, +): """ LW 04/06/2015 function to analyse line focus data for e.g. stability """ from PIL import Image - import Tkinter, tkFileDialog + import Tkinter + import tkFileDialog from matplotlib import pyplot as plt import numpy as np from scipy.optimize import curve_fit - #import time - import os.path, time - import pandas as pd - if filename=='ia': - if filename=='ia': # open file dialog + + # import time + import os.path + import time + + if filename == "ia": + if filename == "ia": # open file dialog root = Tkinter.Tk() root.withdraw() file_path = tkFileDialog.askopenfilename() - else: file_path=filename + else: + file_path = filename print(file_path) # extract filename and first image number: - a=file_path.split('/') - fn=a[len(a)-1].split('.')[0] # file name - b=fn.split('.') - #fe=b[len(b)-1] # file ending, e.g .tiff - fe=a[len(a)-1].split('.')[1] - c=fn.split('_') - fnum=c[len(c)-1].split('.')[0] # file number - nlen=len(fnum) # length of number format - fp=file_path.split(fn)[0] - fn=fn.split('_'+fnum)[0] - print('file path: ',fp) - print('file name: ',fn) - print('file ending: ',fe) - print('file number: ',fnum) - - if filename=='ia': # get first image number from filebame in interactive mode - firstim=int(fnum) - #lastim=30 # maximum number of images to search in autmatic mode - - imcount=firstim - #print 'imcount: ',imcount - #print 'firstim: ',firstim - 
get_out=0 - image_data=0 - baseline=[] - amp=[] - center=[] - width=[] - im_time=[] + a = file_path.split("/") + fn = a[len(a) - 1].split(".")[0] # file name + b = fn.split(".") + # fe=b[len(b)-1] # file ending, e.g .tiff + fe = a[len(a) - 1].split(".")[1] + c = fn.split("_") + fnum = c[len(c) - 1].split(".")[0] # file number + nlen = len(fnum) # length of number format + fp = file_path.split(fn)[0] + fn = fn.split("_" + fnum)[0] + print("file path: ", fp) + print("file name: ", fn) + print("file ending: ", fe) + print("file number: ", fnum) + + if filename == "ia": # get first image number from filename in interactive mode + firstim = int(fnum) + # lastim=30 # maximum number of images to search in automatic mode + + imcount = firstim + # print 'imcount: ',imcount + # print 'firstim: ',firstim + get_out = 0 + image_data = 0 + baseline = [] + amp = [] + center = [] + width = [] + im_time = [] while imcount <= lastim: - ic=str(imcount) - while len(ic) < nlen: # create file number - ic='0'+ic - #ic=str(ic) - #print ic - #assemble the image name to be loaded: - file_to_open=fp+fn+'_'+ic+'.'+fe + ic = str(imcount) + while len(ic) < nlen: # create file number + ic = "0" + ic + # ic=str(ic) + # print ic + # assemble the image name to be loaded: + file_to_open = fp + fn + "_" + ic + "." + fe try: - im = Image.open(file_to_open); - (mode, ino, dev, nlink, uid, gid, size, atime, mtime, ctime) = os.stat(file_to_open) + im = Image.open(file_to_open) + (mode, ino, dev, nlink, uid, gid, size, atime, mtime, ctime) = os.stat( + file_to_open + ) im_time.append(time.ctime(mtime)) - #im.show() - #im.show() - print('loaded file: ',file_to_open) + # im.show() + # im.show() + print("loaded file: ", file_to_open) except: - print('waiting for file: '+file_to_open) - #plt.close(1) # use the waiting time to plot some intermediate result -> does not work, as python is waiting until the end before printing.... 
- #plt.figure(1) - #imgplot= plt.imshow(image_data/(imcount-firstim+1),origin='lower') - #imgplot.set_cmap('gray') + print("waiting for file: " + file_to_open) + # plt.close(1) # use the waiting time to plot some intermediate result -> does not work, as python is waiting until the end before printing.... + # plt.figure(1) + # imgplot= plt.imshow(image_data/(imcount-firstim+1),origin='lower') + # imgplot.set_cmap('gray') time.sleep(5) try: - im = Image.open(file_to_open); - (mode, ino, dev, nlink, uid, gid, size, atime, mtime, ctime) = os.stat(file_to_open) + im = Image.open(file_to_open) + (mode, ino, dev, nlink, uid, gid, size, atime, mtime, ctime) = ( + os.stat(file_to_open) + ) im_time.append(time.ctime(mtime)) - #im.show() - except:get_out=1 - if get_out==1: - return image_data ### THIS NEEDS TO BE CHANGED!!! - plt.close(1) # only plot at the end for now: too much overhead to plot every iteration + # im.show() + except: + get_out = 1 + if get_out == 1: + return image_data ### THIS NEEDS TO BE CHANGED!!! 
+ plt.close( + 1 + ) # only plot at the end for now: too much overhead to plot every iteration plt.figure(1) - raise CHX_utilities_Exception('no more images found, images summed up to: '+file_to_open) + raise CHX_utilities_Exception( + "no more images found, images summed up to: " + file_to_open + ) # do the analysis - im=np.array(im)*1.0; - #print 'shape of image: ',im.shape - #plt.figure(1) - #plt.imshow(im) + im = np.array(im) * 1.0 + # print 'shape of image: ',im.shape + # plt.figure(1) + # plt.imshow(im) try: - dat=np.sum(im,axis=2); # sum RGB channels for a color image + dat = np.sum(im, axis=2) # sum RGB channels for a color image except: - dat=im; + dat = im for i in cuts: - if direction=='horz': - cdat=dat[i,:]; - elif direction=='vert': - cdat=dat[:,i]; - #print cdat; - else: raise CHX_utilities_Exception('error: cut direction must be either "horz" or "vert".') + if direction == "horz": + cdat = dat[i, :] + elif direction == "vert": + cdat = dat[:, i] + # print cdat; + else: + raise CHX_utilities_Exception( + 'error: cut direction must be either "horz" or "vert".' 
+ ) # create initial guess: - yo=np.mean(cdat) - A=np.max(cdat) - xc=np.argmax(cdat)*1.0 - w=8 - p0=[yo,A,xc,w] + yo = np.mean(cdat) + A = np.max(cdat) + xc = np.argmax(cdat) * 1.0 + w = 8 + p0 = [yo, A, xc, w] try: - coeff,var_matrix = curve_fit(gauss,np.array(xrange(len(cdat))),cdat,p0=p0); - except: coeff=[0,0,0,0] + coeff, var_matrix = curve_fit( + gauss, np.array(xrange(len(cdat))), cdat, p0=p0 + ) + except: + coeff = [0, 0, 0, 0] baseline.append(coeff[0]) amp.append(coeff[1]) center.append(coeff[2]) width.append(coeff[3]) - imcount=imcount+1 + imcount = imcount + 1 - if filename=='im': - imcount=firstim + if filename == "im": + imcount = firstim im_time = times - baseline=[] - amp=[] - center=[] - width=[] - notime = lastim +1-firstim + baseline = [] + amp = [] + center = [] + width = [] + notime = lastim + 1 - firstim - for n in range(0,notime): - t= firstim + n - #im_time.append( data[t].time ) - im=imgs[t] + for n in range(0, notime): + t = firstim + n + # im_time.append( data[t].time ) + im = imgs[t] try: - dat=np.sum(im,axis=2); # sum RGB channels for a color image + dat = np.sum(im, axis=2) # sum RGB channels for a color image except: - dat=im; + dat = im for i in cuts: - if direction=='horz': - cdat=dat[i,:]; - elif direction=='vert': - cdat=dat[:,i]; - #print cdat; - else: raise CHX_utilities_Exception('error: cut direction must be either "horz" or "vert".') + if direction == "horz": + cdat = dat[i, :] + elif direction == "vert": + cdat = dat[:, i] + # print cdat; + else: + raise CHX_utilities_Exception( + 'error: cut direction must be either "horz" or "vert".' 
+ ) # create initial guess: - yo=np.mean(cdat) - A=np.max(cdat) - xc=np.argmax(cdat)*1.0 - w=8 - p0=[yo,A,xc,w] + yo = np.mean(cdat) + A = np.max(cdat) + xc = np.argmax(cdat) * 1.0 + w = 8 + p0 = [yo, A, xc, w] try: - coeff,var_matrix = curve_fit(gauss, - np.array(xrange(len(cdat))),cdat,p0=p0); - except: coeff=[0,0,0,0] + coeff, var_matrix = curve_fit( + gauss, np.array(xrange(len(cdat))), cdat, p0=p0 + ) + except: + coeff = [0, 0, 0, 0] baseline.append(coeff[0]) amp.append(coeff[1]) center.append(coeff[2]) width.append(coeff[3]) - if filename=='wt': - imcount=firstim + if filename == "wt": + imcount = firstim im_time = times - baseline=[] - amp=[] - center=[] - width=[] - notime = lastim +1-firstim - - for n in range(0,notime): - t= firstim + n - #im_time.append( data[t].time ) - im=imgs[ :,t] + baseline = [] + amp = [] + center = [] + width = [] + notime = lastim + 1 - firstim + + for n in range(0, notime): + t = firstim + n + # im_time.append( data[t].time ) + im = imgs[:, t] try: - dat=np.sum(im,axis=2); # sum RGB channels for a color image + dat = np.sum(im, axis=2) # sum RGB channels for a color image except: - dat=im; + dat = im for i in cuts: - cdat=im + cdat = im - #else: raise CHX_utilities_Exception('error: cut direction must be either "horz" or "vert".') + # else: raise CHX_utilities_Exception('error: cut direction must be either "horz" or "vert".') # create initial guess: - yo=np.mean(cdat) - A=np.max(cdat) - xc=np.argmax(cdat)*1.0 - w=8 - p0=[yo,A,xc,w] + yo = np.mean(cdat) + A = np.max(cdat) + xc = np.argmax(cdat) * 1.0 + w = 8 + p0 = [yo, A, xc, w] try: - coeff,var_matrix = curve_fit(gauss, - np.array( xrange( len(cdat) )),cdat,p0=p0); - except: coeff=[0,0,0,0] + coeff, var_matrix = curve_fit( + gauss, np.array(xrange(len(cdat))), cdat, p0=p0 + ) + except: + coeff = [0, 0, 0, 0] baseline.append(coeff[0]) amp.append(coeff[1]) center.append(coeff[2]) width.append(coeff[3]) - #df_cen = trans_data_to_pd([center],label=['center'],dtype='list') + # 
df_cen = trans_data_to_pd([center],label=['center'],dtype='list') - return np.array(baseline),np.array(amp),np.array(center),np.array(width),np.array(im_time); + return ( + np.array(baseline), + np.array(amp), + np.array(center), + np.array(width), + np.array(im_time), + ) -def show_fit( data, para, func = gauss,title=None,ax=None): +def show_fit(data, para, func=gauss, title=None, ax=None): import matplotlib.pyplot as plt import numpy as np - x,y = data + + x, y = data x = np.array(x) - y=np.array(y) - #print 'here' - x0 = np.linspace( x.min(),x.max(),2000) - fit = func(x0,*para) - if ax is None:fig,ax=plt.subplots( ) - if title is not None:ax.set_title( title ) - ax.plot( x,y,'bo') - ax.plot(x0,fit,'r',ls='-') - #plt.show() - - -def show_focus_line_fit( waterfall, para, lists=None,title=None): + y = np.array(y) + # print 'here' + x0 = np.linspace(x.min(), x.max(), 2000) + fit = func(x0, *para) + if ax is None: + fig, ax = plt.subplots() + if title is not None: + ax.set_title(title) + ax.plot(x, y, "bo") + ax.plot(x0, fit, "r", ls="-") + # plt.show() + + +def show_focus_line_fit(waterfall, para, lists=None, title=None): import matplotlib.pyplot as plt + L, t = waterfall.shape if lists is None: - lists=[] - #lists.append( [t/4,t/2,3*t/4]) - lists.append( [t/5,2*t/5,3*t/5,4*t/5]) - #print lists - fig,ax=plt.subplots( len(lists[0]) ) - #if title is not None:plt.title( title ) - #print title - for i,l in enumerate(lists[0]): - im=waterfall[:,l] - p = para[i:i+1].values[0] - #print i,l - show_fit( [range(L), im], p,ax=ax[i], title =title+'_time@_%s'%l) + lists = [] + # lists.append( [t/4,t/2,3*t/4]) + lists.append([t / 5, 2 * t / 5, 3 * t / 5, 4 * t / 5]) + # print lists + fig, ax = plt.subplots(len(lists[0])) + # if title is not None:plt.title( title ) + # print title + for i, l in enumerate(lists[0]): + im = waterfall[:, l] + p = para[i : i + 1].values[0] + # print i,l + show_fit([range(L), im], p, ax=ax[i], title=title + "_time@_%s" % l) def test_fft(): import 
matplotlib.pyplot as plt import numpy as np - fig, ax = plt.subplots(2) - t=np.linspace(0,20,1000) - y= .5*np.sin(2*np.pi*5*t) + 2*np.sin(2*np.pi*t) + 1*np.sin(2*np.pi*14*t) - ax[0].plot(t,y,'ro',ls='--') - fx,fy=get_fft( t,y) - ax[1].plot(fx,fy,'go',ls='--') + fig, ax = plt.subplots(2) + t = np.linspace(0, 20, 1000) + y = ( + 0.5 * np.sin(2 * np.pi * 5 * t) + + 2 * np.sin(2 * np.pi * t) + + 1 * np.sin(2 * np.pi * 14 * t) + ) + ax[0].plot(t, y, "ro", ls="--") + + fx, fy = get_fft(t, y) + ax[1].plot(fx, fy, "go", ls="--") plt.show() -def get_fft( t,y ): +def get_fft(t, y): import scipy import scipy.fftpack import numpy as np L = len(t) tp = np.array(t) - yp=np.array( y) - dx = (t[L-1] - t[0])/float(L) - tm= 2 * L * dx - xs = 1/ tm - #print ts,xs, tm - #fx = xs * np.arange( L ) + yp = np.array(y) + dx = (t[L - 1] - t[0]) / float(L) + tm = 2 * L * dx + xs = 1 / tm + # print ts,xs, tm + # fx = xs * np.arange( L ) - y= np.concatenate( ( y, np.zeros( L ))) + y = np.concatenate((y, np.zeros(L))) FFT = abs(scipy.fft(y))[:L] - freqs = scipy.fftpack.fftfreq(y.size, dx)[:L]# - #FFT = np.fft.fft(y) - #freqs = np.fft.fftfreq(len(y), dx )## + freqs = scipy.fftpack.fftfreq(y.size, dx)[:L] # + # FFT = np.fft.fft(y) + # freqs = np.fft.fftfreq(len(y), dx )## # Find the peak in the coefficients idx = np.argmax(np.abs(FFT)) freq = freqs[idx] - freq_in_hertz = abs(freq ) #* dx) - print('The maximum frequency is: %s'%(freq_in_hertz ))## - #freqs = trans_data_to_pd([freqs], label=['freqs'],dtype='list')## - #20*scipy.log10(FFT) - #FFT = trans_data_to_pd([ 20*scipy.log10(FFT) ], label=label,dtype='list') - #FFT = trans_data_to_pd([ (FFT) ], label=label,dtype='list')## - return np.array(freqs), np.array( FFT ) - - -def plot_line_focus2(df_res,cuts,pix=0.79,title=None, - times=None, ROI=None, FFT=True): - from matplotlib import pyplot as plt + freq_in_hertz = abs(freq) # * dx) + print("The maximum frequency is: %s" % (freq_in_hertz)) ## + # freqs = trans_data_to_pd([freqs], 
label=['freqs'],dtype='list')## + # 20*scipy.log10(FFT) + # FFT = trans_data_to_pd([ 20*scipy.log10(FFT) ], label=label,dtype='list') + # FFT = trans_data_to_pd([ (FFT) ], label=label,dtype='list')## + return np.array(freqs), np.array(FFT) + + +def plot_line_focus2( + df_res, cuts, pix=0.79, title=None, times=None, ROI=None, FFT=True +): import numpy as np - import time,datetime import pandas.tools.plotting as ptg import matplotlib.dates as md - import pandas as pd + if ROI is not None: - x1,x2=ROI + x1, x2 = ROI else: - x1,x2=0,len(times) + x1, x2 = 0, len(times) times_ = times[x1:x2] - df_res_ = df_res.loc[x1:x2-1] - df_res_.index = range( 0, x2-x1 ) + df_res_ = df_res.loc[x1 : x2 - 1] + df_res_.index = range(0, x2 - x1) - N=len(times_) + N = len(times_) keys = df_res.keys() M = len(keys) - td=times_ - tf = trans_td_to_tf( td ) + td = times_ + tf = trans_td_to_tf(td) - if title==None:title='LF_' - fig,axs=ptg._subplots( M, sharex=True,sharey=False, - layout=[M,1]) + if title == None: + title = "LF_" + fig, axs = ptg._subplots(M, sharex=True, sharey=False, layout=[M, 1]) axs[0].set_title(title) for n in range(M): - df_res_.plot( x=td, y = [ keys[n] ],subplots=False, - marker='o',c='b', ls = '--', ax=axs[n]) - - #do analysis _center - - fig2,axs2=ptg._subplots( 2, sharex=True,sharey=True, - layout=[2,1]) - - yc=df_res_.center - #_df_res_ = df_res - df_res_['cen_pos'] = pix*(yc - yc.mean()) - df_res_.plot( x=td, y = [ 'cen_pos' ],subplots=False, - title=title+'center position', - marker='o',c='k', ls = '--', ax=axs2[0],label=str(cuts[0])) - - x=xrange(N) - y= yc - pol=np.polyfit(x,y,20) #20) - polfit=np.polyval(pol,x) - yfitted=y-polfit - - df_res_['cen_fitted'] = (yfitted-np.mean(yfitted))*pix - #print dd.shape,df_res.cen_fitted.shape - i=0 - mean = ( polfit- np.mean(polfit) )*pix - std = mean.std() - #label=str(str(cuts[i]) +' PV: '+str(round(max(polfit)-min(polfit),2))+ 'um rms: +/-'+str( round(std0,2))+'um' ) - - label=str(str(cuts[i]) - +' Center: '+str( 
round(max(polfit)-min(polfit),2) )+ 'um rms: +/-' - +str( round(std,2))+'um' ) - - axs2[0].plot( td, np.zeros( len(td) ) + std, c='red',ls='--',lw=4) - axs2[0].plot( td, np.zeros( len(td) )- std, c='red',ls='--',lw=4) - - axs2[0].text( .3,.9,label,transform=axs2[0].transAxes) - df_res_['cen_polyfit'] = (polfit-np.mean(polfit))*pix - - df_res_.plot( x=td, y = [ 'cen_polyfit' ],subplots=False, - title=title+'Drift of center position', - marker='',c='b', ls = '-', lw=3, ax=axs2[0],label=label) - - #label=str( str(cuts[i]) +' PV: '+ str(round(max(yfitted)-min(yfitted),2)) + 'um rms: +/-'+str(round(( (yfitted-np.mean(yfitted))*pix ).std(),2))+'um' ) - - mean = ( yfitted- np.mean(yfitted) )*pix - std = mean.std() - label=str(str(cuts[i]) - +'PV: '+str( round(max(polfit)-min(polfit),2) )+ 'um rms: +/-' - +str( round(std,2))+'um' ) - - axs2[1].text( .3,.9,label,transform=axs2[1].transAxes) - axs2[1].plot( td, np.zeros( len(td) ) + std, c='red',ls='--',lw=4) - axs2[1].plot( td, np.zeros( len(td) ) - std, c='red',ls='--',lw=4) - - df_res_.plot( x=td, y = [ 'cen_fitted' ],subplots=False, - title=title+'drift corrected vibrations', - marker='+',c='b', ls = '-', ax=axs2[1],label=label) - - axs2[1].set_xlabel('time') - xfmt = md.DateFormatter('%m-%d %H:%M') + df_res_.plot( + x=td, y=[keys[n]], subplots=False, marker="o", c="b", ls="--", ax=axs[n] + ) + + # do analysis _center + + fig2, axs2 = ptg._subplots(2, sharex=True, sharey=True, layout=[2, 1]) + + yc = df_res_.center + # _df_res_ = df_res + df_res_["cen_pos"] = pix * (yc - yc.mean()) + df_res_.plot( + x=td, + y=["cen_pos"], + subplots=False, + title=title + "center position", + marker="o", + c="k", + ls="--", + ax=axs2[0], + label=str(cuts[0]), + ) + + x = xrange(N) + y = yc + pol = np.polyfit(x, y, 20) # 20) + polfit = np.polyval(pol, x) + yfitted = y - polfit + + df_res_["cen_fitted"] = (yfitted - np.mean(yfitted)) * pix + # print dd.shape,df_res.cen_fitted.shape + i = 0 + mean = (polfit - np.mean(polfit)) * pix + std = 
mean.std() + # label=str(str(cuts[i]) +' PV: '+str(round(max(polfit)-min(polfit),2))+ 'um rms: +/-'+str( round(std0,2))+'um' ) + + label = str( + str(cuts[i]) + + " Center: " + + str(round(max(polfit) - min(polfit), 2)) + + "um rms: +/-" + + str(round(std, 2)) + + "um" + ) + + axs2[0].plot(td, np.zeros(len(td)) + std, c="red", ls="--", lw=4) + axs2[0].plot(td, np.zeros(len(td)) - std, c="red", ls="--", lw=4) + + axs2[0].text(0.3, 0.9, label, transform=axs2[0].transAxes) + df_res_["cen_polyfit"] = (polfit - np.mean(polfit)) * pix + + df_res_.plot( + x=td, + y=["cen_polyfit"], + subplots=False, + title=title + "Drift of center position", + marker="", + c="b", + ls="-", + lw=3, + ax=axs2[0], + label=label, + ) + + # label=str( str(cuts[i]) +' PV: '+ str(round(max(yfitted)-min(yfitted),2)) + 'um rms: +/-'+str(round(( (yfitted-np.mean(yfitted))*pix ).std(),2))+'um' ) + + mean = (yfitted - np.mean(yfitted)) * pix + std = mean.std() + label = str( + str(cuts[i]) + + "PV: " + + str(round(max(polfit) - min(polfit), 2)) + + "um rms: +/-" + + str(round(std, 2)) + + "um" + ) + + axs2[1].text(0.3, 0.9, label, transform=axs2[1].transAxes) + axs2[1].plot(td, np.zeros(len(td)) + std, c="red", ls="--", lw=4) + axs2[1].plot(td, np.zeros(len(td)) - std, c="red", ls="--", lw=4) + + df_res_.plot( + x=td, + y=["cen_fitted"], + subplots=False, + title=title + "drift corrected vibrations", + marker="+", + c="b", + ls="-", + ax=axs2[1], + label=label, + ) + + axs2[1].set_xlabel("time") + xfmt = md.DateFormatter("%m-%d %H:%M") axs2[1].xaxis.set_major_formatter(xfmt) - axs[n].set_xlabel('time') - xfmt = md.DateFormatter('%m-%d %H:%M') + axs[n].set_xlabel("time") + xfmt = md.DateFormatter("%m-%d %H:%M") axs[n].xaxis.set_major_formatter(xfmt) if FFT: - x,y0 = get_fft( tf, df_res_['cen_pos']) - x,y1 = get_fft( tf, df_res_['cen_fitted']) - x,y2 = get_fft( tf, df_res_['cen_polyfit']) + x, y0 = get_fft(tf, df_res_["cen_pos"]) + x, y1 = get_fft(tf, df_res_["cen_fitted"]) + x, y2 = get_fft(tf, 
df_res_["cen_polyfit"]) df_y = trans_data_to_pd( - [x,y0,y1,y2],label=['freqs, Hz','fft_cen_pos', - 'fft_cen_fitted','fft_cen_polyfit' - ],dtype='list') + [x, y0, y1, y2], + label=["freqs, Hz", "fft_cen_pos", "fft_cen_fitted", "fft_cen_polyfit"], + dtype="list", + ) - df_y.plot(subplots=True,x='freqs, Hz',logx=True, y=['fft_cen_pos', - 'fft_cen_fitted','fft_cen_polyfit'], title=title + 'FFT') + df_y.plot( + subplots=True, + x="freqs, Hz", + logx=True, + y=["fft_cen_pos", "fft_cen_fitted", "fft_cen_polyfit"], + title=title + "FFT", + ) - #return df_y + # return df_y -def plot_line_focus(res,cuts,pix=1,epochtime='false', times=None): +def plot_line_focus(res, cuts, pix=1, epochtime="false", times=None): """ LW 04/06/2015 function to plot results of line focus analysis """ from matplotlib import pyplot as plt import numpy as np - import time from dateutil import parser - base=np.array(res[0]) - amp=np.array(res[1]) - center=np.array(res[2]) - width=np.array(res[3]) - epoch=[] + + base = np.array(res[0]) + amp = np.array(res[1]) + center = np.array(res[2]) + width = np.array(res[3]) + epoch = [] plt.close() if times is None: for i in xrange(len(res[4][:])): - dt =parser.parse(res[4][i]) - epoch.append(dt.strftime('%s')) - else:epoch=times + dt = parser.parse(res[4][i]) + epoch.append(dt.strftime("%s")) + else: + epoch = times - epoch=np.array(epoch) - plt.figure(1) #### plot and anlyze center position + epoch = np.array(epoch) + plt.figure(1) #### plot and analyze center position for i in xrange(len(cuts)): - y=center[i::len(cuts)] - #print y - if epochtime=='false': - x=xrange(len(y)) - x_str='image #' + y = center[i :: len(cuts)] + # print y + if epochtime == "false": + x = xrange(len(y)) + x_str = "image #" else: - x=epoch - x_str='epoch [s]' - plt.plot(x,(y-np.mean(y))*pix,'+',label=str(cuts[i])) - plt.legend(loc='best') - plt.title('center position') - plt.ylabel('x_0 - [um]') + x = epoch + x_str = "epoch [s]" + plt.plot(x, (y - np.mean(y)) * pix, "+", 
label=str(cuts[i])) + plt.legend(loc="best") + plt.title("center position") + plt.ylabel("x_0 - [um]") plt.xlabel(x_str) plt.figure(5) - plt.figure(6) ##### plot and analyze center short time vibrations + plt.figure(6) ##### plot and analyze center short time vibrations for i in xrange(len(cuts)): - y=np.array(center[i::len(cuts)]) - #print y - if epochtime=='false': - x=xrange(len(y)) - x_str='image #' + y = np.array(center[i :: len(cuts)]) + # print y + if epochtime == "false": + x = xrange(len(y)) + x_str = "image #" else: - #x=epoch - x=xrange(len(y)) - x_str='epoch [s]' + # x=epoch + x = xrange(len(y)) + x_str = "epoch [s]" - pol=np.polyfit(x,y,20) - polfit=np.polyval(pol,x) - yfitted=y-polfit + pol = np.polyfit(x, y, 20) + polfit = np.polyval(pol, x) + yfitted = y - polfit plt.figure(5) - plt.plot(epoch,(yfitted-np.mean(yfitted))*pix,'+-', - label=str(str(cuts[i])+' PV: '+str(round(max(yfitted)-min(yfitted),2))+ - 'um rms: +/-'+str(round(((yfitted-np.mean(yfitted))*pix).std(),2))+'um')) + plt.plot( + epoch, + (yfitted - np.mean(yfitted)) * pix, + "+-", + label=str( + str(cuts[i]) + + " PV: " + + str(round(max(yfitted) - min(yfitted), 2)) + + "um rms: +/-" + + str(round(((yfitted - np.mean(yfitted)) * pix).std(), 2)) + + "um" + ), + ) plt.figure(6) - plt.plot(epoch,(polfit-np.mean(polfit))*pix, - label=str(str(cuts[i])+' PV: '+str(round(max(polfit)-min(polfit),2))+ - 'um rms: +/-'+str(round(((polfit-np.mean(polfit))*pix).std(),2))+'um')) + plt.plot( + epoch, + (polfit - np.mean(polfit)) * pix, + label=str( + str(cuts[i]) + + " PV: " + + str(round(max(polfit) - min(polfit), 2)) + + "um rms: +/-" + + str(round(((polfit - np.mean(polfit)) * pix).std(), 2)) + + "um" + ), + ) plt.figure(5) - plt.legend(loc='best') - plt.title('drift corrected vibrations') - plt.ylabel('x_0 - [um]') + plt.legend(loc="best") + plt.title("drift corrected vibrations") + plt.ylabel("x_0 - [um]") plt.xlabel(x_str) plt.figure(6) - plt.legend(loc='best') - plt.title('Drift of center 
position') - plt.ylabel('x_0 - [um]') + plt.legend(loc="best") + plt.title("Drift of center position") + plt.ylabel("x_0 - [um]") plt.xlabel(x_str) - plt.figure(6) #### plot and analyze long time drifts + plt.figure(6) #### plot and analyze long time drifts - plt.figure(2) ############ plot and analyze fwhm + plt.figure(2) ############ plot and analyze fwhm for i in xrange(len(cuts)): - y=width[i::len(cuts)] - #print y - y=2.35*y*pix - plt.plot(x,y,'+',label=str(cuts[i])) - print('minimum focus: ',y.min(),'um lowest 10%: ',y[x<(y.min()+.1*y.min())].mean(),'+/-',y[x<(y.min()+.1*y.min())].std(),'um @pixel cut: ',cuts[i]) - plt.legend(loc='best') - plt.title('FWHM') - plt.ylabel('Focus FWHM [um]') + y = width[i :: len(cuts)] + # print y + y = 2.35 * y * pix + plt.plot(x, y, "+", label=str(cuts[i])) + print( + "minimum focus: ", + y.min(), + "um lowest 10%: ", + y[x < (y.min() + 0.1 * y.min())].mean(), + "+/-", + y[x < (y.min() + 0.1 * y.min())].std(), + "um @pixel cut: ", + cuts[i], + ) + plt.legend(loc="best") + plt.title("FWHM") + plt.ylabel("Focus FWHM [um]") plt.xlabel(x_str) plt.figure(3) for i in xrange(len(cuts)): - y=amp[i::len(cuts)] - #print y - #x=xrange(len(y)) - plt.plot(x,y,'+',label=str(cuts[i])) - plt.legend(loc='best') - plt.title('Amplitude') - plt.ylabel('Amplitude [arb.u.]') + y = amp[i :: len(cuts)] + # print y + # x=xrange(len(y)) + plt.plot(x, y, "+", label=str(cuts[i])) + plt.legend(loc="best") + plt.title("Amplitude") + plt.ylabel("Amplitude [arb.u.]") plt.xlabel(x_str) plt.figure(4) for i in xrange(len(cuts)): - y=base[i::len(cuts)] - #print y - #x=xrange(len(y)) - plt.plot(x,y,'+',label=str(cuts[i])) - plt.legend(loc='best') - plt.title('Baseline') - plt.ylabel('y_0 [arb.u.]') + y = base[i :: len(cuts)] + # print y + # x=xrange(len(y)) + plt.plot(x, y, "+", label=str(cuts[i])) + plt.legend(loc="best") + plt.title("Baseline") + plt.ylabel("y_0 [arb.u.]") plt.xlabel(x_str) -def read_camonitor(filename,epoch='true'): - import Tkinter, 
tkFileDialog - import time +def read_camonitor(filename, epoch="true"): + import Tkinter + import tkFileDialog import numpy as np import datetime - if filename=='ia': # open file dialog + + if filename == "ia": # open file dialog root = Tkinter.Tk() root.withdraw() file_path = tkFileDialog.askopenfilename() - else: file_path=filename - print('loading camonitor data from: ', file_path) - with open(file_path,"r") as cafile: - cadata=cafile.readlines() - #datime=[] - #datdate=[] - value_camon=[] - epoch_camon=[] + else: + file_path = filename + print("loading camonitor data from: ", file_path) + with open(file_path, "r") as cafile: + cadata = cafile.readlines() + # datime=[] + # datdate=[] + value_camon = [] + epoch_camon = [] for i in xrange(len(cadata)): - line=cadata[i].split() - value_camon.append(float(line[len(line)-1])) - datdate=line[len(line)-3] - datime=line[len(line)-2] - #print datdate.split()[0] - #print datime - #print int(datdate.split('-')[0]),int(datdate.split('-')[1]),int(datdate.split('-')[2]),int(datime.split(':')[0]),int(datime.split(':')[1]),int(datime.split(':')[2].split('.')[0]),int(datime.split(':')[2].split('.')[1]) - epoch_camon.append(float(datetime.datetime(int(datdate.split('-')[0]),int(datdate.split('-')[1]),int(datdate.split('-')[2]),int(datime.split(':')[0]),int(datime.split(':')[1]),int(datime.split(':')[2].split('.')[0]),int(datime.split(':')[2].split('.')[1])).strftime('%s'))) - return np.array(epoch_camon),np.array(value_camon) - - -def knife_edge(filename, direction='horz', cuts=[1,2,3], firstim=0, lastim=1,pix=1): + line = cadata[i].split() + value_camon.append(float(line[len(line) - 1])) + datdate = line[len(line) - 3] + datime = line[len(line) - 2] + # print datdate.split()[0] + # print datime + # print int(datdate.split('-')[0]),int(datdate.split('-')[1]),int(datdate.split('-')[2]),int(datime.split(':')[0]),int(datime.split(':')[1]),int(datime.split(':')[2].split('.')[0]),int(datime.split(':')[2].split('.')[1]) + 
epoch_camon.append( + float( + datetime.datetime( + int(datdate.split("-")[0]), + int(datdate.split("-")[1]), + int(datdate.split("-")[2]), + int(datime.split(":")[0]), + int(datime.split(":")[1]), + int(datime.split(":")[2].split(".")[0]), + int(datime.split(":")[2].split(".")[1]), + ).strftime("%s") + ) + ) + return np.array(epoch_camon), np.array(value_camon) + + +def knife_edge(filename, direction="horz", cuts=[1, 2, 3], firstim=0, lastim=1, pix=1): """ LW 04/06/2015 function to analyse knife edge image data for e.g. stability """ from PIL import Image - import Tkinter, tkFileDialog + import Tkinter + import tkFileDialog from matplotlib import pyplot as plt import numpy as np - from scipy.optimize import curve_fit import time import scipy as sp - if filename=='ia': # open file dialog + if filename == "ia": # open file dialog root = Tkinter.Tk() root.withdraw() file_path = tkFileDialog.askopenfilename() - else: file_path=filename + else: + file_path = filename print(file_path) # extract filename and first image number: - a=file_path.split('/') - fn=a[len(a)-1].split('.')[0] # file name - b=fn.split('.') - #fe=b[len(b)-1] # file ending, e.g .tiff - fe=a[len(a)-1].split('.')[1] - c=fn.split('_') - fnum=c[len(c)-1].split('.')[0] # file number - nlen=len(fnum) # length of number format - fp=file_path.split(fn)[0] - fn=fn.split('_'+fnum)[0] - print('file path: ',fp) - print('file name: ',fn) - print('file ending: ',fe) - print('file number: ',fnum) - - if filename=='ia': # get first image number from filebame in interactive mode - firstim=int(fnum) - #lastim=30 # maximum number of images to search in autmatic mode - - imcount=firstim - #print 'imcount: ',imcount - #print 'firstim: ',firstim - get_out=0 - image_data=0 - center=[] + a = file_path.split("/") + fn = a[len(a) - 1].split(".")[0] # file name + b = fn.split(".") + # fe=b[len(b)-1] # file ending, e.g .tiff + fe = a[len(a) - 1].split(".")[1] + c = fn.split("_") + fnum = c[len(c) - 1].split(".")[0] # file 
number + nlen = len(fnum) # length of number format + fp = file_path.split(fn)[0] + fn = fn.split("_" + fnum)[0] + print("file path: ", fp) + print("file name: ", fn) + print("file ending: ", fe) + print("file number: ", fnum) + + if filename == "ia": # get first image number from filename in interactive mode + firstim = int(fnum) + # lastim=30 # maximum number of images to search in automatic mode + + imcount = firstim + # print 'imcount: ',imcount + # print 'firstim: ',firstim + get_out = 0 + image_data = 0 + center = [] while imcount <= lastim: - ic=str(imcount) - while len(ic) < nlen: # create file number - ic='0'+ic - #ic=str(ic) - #print ic - #assemble the image name to be loaded: - file_to_open=fp+fn+'_'+ic+'.'+fe + ic = str(imcount) + while len(ic) < nlen: # create file number + ic = "0" + ic + # ic=str(ic) + # print ic + # assemble the image name to be loaded: + file_to_open = fp + fn + "_" + ic + "." + fe try: - im = Image.open(file_to_open); - print('loaded file: ',file_to_open) + im = Image.open(file_to_open) + print("loaded file: ", file_to_open) except: - print('waiting for file: '+file_to_open) - #plt.close(1) # use the waiting time to plot some intermediate result -> does not work, as python is waiting until the end before printing.... - #plt.figure(1) - #imgplot= plt.imshow(image_data/(imcount-firstim+1),origin='lower') - #imgplot.set_cmap('gray') + print("waiting for file: " + file_to_open) + # plt.close(1) # use the waiting time to plot some intermediate result -> does not work, as python is waiting until the end before printing.... + # plt.figure(1) + # imgplot= plt.imshow(image_data/(imcount-firstim+1),origin='lower') + # imgplot.set_cmap('gray') time.sleep(5) try: - im = Image.open(file_to_open); - #im.show() - except:get_out=1 - if get_out==1: - return image_data ### THIS NEEDS TO BE CHANGED!!! 
- plt.close(1) # only plot at the end for now: too much overhead to plot every iteration + im = Image.open(file_to_open) + # im.show() + except: + get_out = 1 + if get_out == 1: + return image_data ### THIS NEEDS TO BE CHANGED!!! + plt.close( + 1 + ) # only plot at the end for now: too much overhead to plot every iteration plt.figure(1) - raise CHX_utilities_Exception('no more images found, images summed up to: '+file_to_open) - im=np.array(im)*1.0; + raise CHX_utilities_Exception( + "no more images found, images summed up to: " + file_to_open + ) + im = np.array(im) * 1.0 try: - dat=np.sum(im,axis=2); # sum RGB channels for a color image + dat = np.sum(im, axis=2) # sum RGB channels for a color image except: - dat=im; - if imcount==firstim: - plt.close(4) - plt.figure(4) - plt.imshow(dat,origin='lower') - for i in cuts: - plt.plot([i,i],[0,2048],'r--') #2048 is a fixed value for now, need to get dimension from data in the future - plt.tight_layout() - plt.xlabel('pixel') - plt.ylabel('pixel') - plt.title(file_to_open) + dat = im + if imcount == firstim: + plt.close(4) + plt.figure(4) + plt.imshow(dat, origin="lower") + for i in cuts: + plt.plot( + [i, i], [0, 2048], "r--" + ) # 2048 is a fixed value for now, need to get dimension from data in the future + plt.tight_layout() + plt.xlabel("pixel") + plt.ylabel("pixel") + plt.title(file_to_open) for i in cuts: - if direction=='vert': - cdat=dat[i,:]; - elif direction=='horz': - cdat=dat[:,i]; - #print cdat; - else: raise CHX_utilities_Exception('error: cut direction must be either "horz" or "vert".') - ######### try some fourier filtering before diff: - fft=sp.fft(cdat) - bp=fft[:]; - for i in range(len(bp)): - if i>=100:bp[i]=0 - cdat=sp.ifft(bp); + if direction == "vert": + cdat = dat[i, :] + elif direction == "horz": + cdat = dat[:, i] + # print cdat; + else: + raise CHX_utilities_Exception( + 'error: cut direction must be either "horz" or "vert".' 
+ ) + ######### try some fourier filtering before diff: + fft = sp.fft(cdat) + bp = fft[:] + for i in range(len(bp)): + if i >= 100: + bp[i] = 0 + cdat = sp.ifft(bp) ###### try with normalization - #devdat=np.diff(cdat/max(cdat)) + # devdat=np.diff(cdat/max(cdat)) ####### - devdat=np.diff(cdat) - ########## - if imcount==firstim: - plt.close(9) - plt.figure(9) - plt.plot(xrange(len(devdat)),devdat/max(devdat)*max(cdat)) - plt.plot(xrange(len(cdat)),cdat) - - #print devdat - #np.argmax(devdat) - #center.append(np.argmax(devdat)) - #### try helping with using the correct maximum (allows to use more Fourier components) + devdat = np.diff(cdat) + ########## + if imcount == firstim: + plt.close(9) + plt.figure(9) + plt.plot(xrange(len(devdat)), devdat / max(devdat) * max(cdat)) + plt.plot(xrange(len(cdat)), cdat) + + # print devdat + # np.argmax(devdat) + # center.append(np.argmax(devdat)) + #### try helping with using the correct maximum (allows to use more Fourier components) center.append(np.argmax(devdat[900:1500])) - #### - imcount=imcount+1 - #plotting section: - #plt.figure(3) - #plt.plot(xrange(len(devdat)),devdat) - #plt.figure(2) - #plt.plot(xrange(len(cdat)),cdat,'o-') + #### + imcount = imcount + 1 + # plotting section: + # plt.figure(3) + # plt.plot(xrange(len(devdat)),devdat) + # plt.figure(2) + # plt.plot(xrange(len(cdat)),cdat,'o-') plt.close(51) plt.figure(51) for i in xrange(len(cuts)): - y=center[i::len(cuts)] - #print y - x=xrange(len(y)) - dy=(y-np.mean(y))*pix; - plt.plot(x,dy,'+',label=str(cuts[i])) - plt.legend(loc='best') - plt.title('knife edge position') - plt.ylabel('x_0 - [um]') - plt.xlabel('image #') - print('knife edge position: ',dy.mean(),'um +/- ',dy.std(),'um') + y = center[i :: len(cuts)] + # print y + x = xrange(len(y)) + dy = (y - np.mean(y)) * pix + plt.plot(x, dy, "+", label=str(cuts[i])) + plt.legend(loc="best") + plt.title("knife edge position") + plt.ylabel("x_0 - [um]") + plt.xlabel("image #") + print("knife edge position: ", 
dy.mean(), "um +/- ", dy.std(), "um") plt.close(52) plt.figure(52) for i in xrange(len(cuts)): - y=center[i::len(cuts)] - #print y - x=xrange(len(y)) - dy=y; - plt.plot(x,dy,'+',label=str(cuts[i])) - plt.legend(loc='best') - plt.title('knife edge position') - plt.ylabel('knife edge position [pixel]') - plt.xlabel('image #') + y = center[i :: len(cuts)] + # print y + x = xrange(len(y)) + dy = y + plt.plot(x, dy, "+", label=str(cuts[i])) + plt.legend(loc="best") + plt.title("knife edge position") + plt.ylabel("knife edge position [pixel]") + plt.xlabel("image #") return center -#copied from 99-bluesky....didn't figure out how to import... + +# copied from 99-bluesky....didn't figure out how to import... def detselect(detector_object, suffix="_stats1_total"): """Switch the active detector and set some internal state""" - gs.DETS =[detector_object] + gs.DETS = [detector_object] gs.PLOT_Y = detector_object.name + suffix gs.TABLE_COLS = [gs.PLOT_Y] -def get_ID_calibration(gapstart, gapstop, xray_eye1=None, gapstep=.2, gapoff=0, sl=300): + +def get_ID_calibration( + gapstart, gapstop, xray_eye1=None, gapstep=0.2, gapoff=0, sl=300 +): """ by LW 04/20/2015 function to automatically take a ID calibration curve_fit @@ -1444,99 +1799,137 @@ def get_ID_calibration(gapstart, gapstop, xray_eye1=None, gapstep=.2, gapoff=0, """ import numpy as np import xfuncs as xf - #from dataportal import DataBroker as db, StepScan as ss, DataMuxer as dm + + # from dataportal import DataBroker as db, StepScan as ss, DataMuxer as dm import time from epics import caput, caget from matplotlib import pyplot as plt from scipy.optimize import curve_fit ### just testing passing bluesky objects (motors, detectors) into function - gs.DETS =[xray_eye1] - #detselect(xray_eye1) - dscan(diff.xh,gapstart,gapstop,3) + gs.DETS = [xray_eye1] + # detselect(xray_eye1) + dscan(diff.xh, gapstart, gapstop, 3) ### end of testing section ################# - gaps=np.arange(gapstart,gapstop,gapstep)+gapoff - print('ID 
calibration will contain the following gaps [mm]: ',gaps) - if caget('XF:11IDA-OP{Mono:DCM-Ax:X}Pos-Sts') == 1: - xtal='Si111cryo' - elif caget('XF:11IDA-OP{Mono:DCM-Ax:X}Pos-Sts') == 2: - xtal='Si220cryo' - else: raise CHX_utilities_Exception('error: trying to do ID gap calibration with no crystal in the beam') - print('using ',xtal,' for ID gap calibration') + gaps = np.arange(gapstart, gapstop, gapstep) + gapoff + print("ID calibration will contain the following gaps [mm]: ", gaps) + if caget("XF:11IDA-OP{Mono:DCM-Ax:X}Pos-Sts") == 1: + xtal = "Si111cryo" + elif caget("XF:11IDA-OP{Mono:DCM-Ax:X}Pos-Sts") == 2: + xtal = "Si220cryo" + else: + raise CHX_utilities_Exception( + "error: trying to do ID gap calibration with no crystal in the beam" + ) + print("using ", xtal, " for ID gap calibration") # create file for writing calibration data: - fn='id_CHX_IVU20_'+str(time.strftime("%m"))+str(time.strftime("%d"))+str(time.strftime("%Y"))+'.dat' - dat_file = Path(rs_fn('chxtools', 'X-ray_database')) / Path(fn) + fn = ( + "id_CHX_IVU20_" + + str(time.strftime("%m")) + + str(time.strftime("%d")) + + str(time.strftime("%Y")) + + ".dat" + ) + dat_file = Path(rs_fn("chxtools", "X-ray_database")) / Path(fn) try: - with dat_file.open('w') as outFile: - outFile.write('% data from measurements {}\n'.format(time.strftime("%D"))) - outFile.write('% K column is a placeholder! \n') - outFile.write('% ID gap [mm] K E_1 [keV] \n') - print('successfully created output file: {}'.format(dat_file)) + with dat_file.open("w") as outFile: + outFile.write("% data from measurements {}\n".format(time.strftime("%D"))) + outFile.write("% K column is a placeholder! \n") + outFile.write("% ID gap [mm] K E_1 [keV] \n") + print("successfully created output file: {}".format(dat_file)) except: - raise CHX_utilities_Exception('error: could not create output file') + raise CHX_utilities_Exception("error: could not create output file") ### do the scanning and data fitting, file writing,.... 
- center=[] - E1=[] - realgap=[] - #detselect(xray_eye1) + center = [] + E1 = [] + realgap = [] + # detselect(xray_eye1) for i in gaps: - if i>= 5.2: - B_guess=-1.0*xf.get_Bragg(xtal,xf.get_Es(i-gapoff,5)[1])[0] + if i >= 5.2: + B_guess = -1.0 * xf.get_Bragg(xtal, xf.get_Es(i - gapoff, 5)[1])[0] else: - i=5.2 - B_guess=-1.0*xf.get_Bragg(xtal,xf.get_Es(i,5)[1])[0] - print('initial guess: Bragg= ',B_guess,' deg. ID gap = ',i,' mm') - if xf.get_Es(i,5)[1] < 9.5 and round(caget('XF:11IDA-OP{Mir:HDM-Ax:Y}Mtr.VAL'),1) != -7.5: - caput('XF:11IDA-OP{Mir:HDM-Ax:Y}Mtr.VAL',-7.5) # use HDM Si stripe + i = 5.2 + B_guess = -1.0 * xf.get_Bragg(xtal, xf.get_Es(i, 5)[1])[0] + print("initial guess: Bragg= ", B_guess, " deg. ID gap = ", i, " mm") + if ( + xf.get_Es(i, 5)[1] < 9.5 + and round(caget("XF:11IDA-OP{Mir:HDM-Ax:Y}Mtr.VAL"), 1) != -7.5 + ): + caput("XF:11IDA-OP{Mir:HDM-Ax:Y}Mtr.VAL", -7.5) # use HDM Si stripe time.sleep(20) - elif xf.get_Es(i,5)[1] >= 9.5 and round(caget('XF:11IDA-OP{Mir:HDM-Ax:Y}Mtr.VAL'),1) != 7.5: - caput('XF:11IDA-OP{Mir:HDM-Ax:Y}Mtr.VAL',7.5) # use HDM Rh stripe + elif ( + xf.get_Es(i, 5)[1] >= 9.5 + and round(caget("XF:11IDA-OP{Mir:HDM-Ax:Y}Mtr.VAL"), 1) != 7.5 + ): + caput("XF:11IDA-OP{Mir:HDM-Ax:Y}Mtr.VAL", 7.5) # use HDM Rh stripe time.sleep(20) - if round(caget('XF:11IDA-BI{Foil:Bpm-Ax:Y}Mtr'),1) != 0.0: - caput('XF:11IDA-BI{Foil:Bpm-Ax:Y}Mtr',0.0) + if round(caget("XF:11IDA-BI{Foil:Bpm-Ax:Y}Mtr"), 1) != 0.0: + caput("XF:11IDA-BI{Foil:Bpm-Ax:Y}Mtr", 0.0) time.sleep(30) - else: pass - print('moving DCM Bragg angle to: ',B_guess,' deg and ID gap to ',i,' mm') - #dcm.b.timeout=1200 #make sure dcm motions don't timeout... 
- #dcm.en.timeout=1200 - #mov(dcm.b,B_guess) - #mov(ivu_gap,i) - print('hurray, made it up to here!') -# ascan(dcm.b,float(B_guess-.4),float(B_guess+.4),60) # do the Bragg scan - header = db[-1] # retrive the data (first data point is often "wrong", so don't use + else: + pass + print("moving DCM Bragg angle to: ", B_guess, " deg and ID gap to ", i, " mm") + # dcm.b.timeout=1200 #make sure dcm motions don't timeout... + # dcm.en.timeout=1200 + # mov(dcm.b,B_guess) + # mov(ivu_gap,i) + print("hurray, made it up to here!") + # ascan(dcm.b,float(B_guess-.4),float(B_guess+.4),60) # do the Bragg scan + header = db[ + -1 + ] # retrieve the data (first data point is often "wrong", so don't use data = get_table(header) B = data.dcm_b[2:] intdat = data.xray_eye1_stats1_total[2:] - B=np.array(B) - intdat=np.array(intdat) - B=np.array(ss[-1].dcm_b)[2:] # retrive the data (first data point is often "wrong", so don't use - intdat=np.array(ss[-1].bpm_cam_stats_total1)[2:] - A=np.max(intdat) # initial parameter guess and fitting - xc=B[np.argmax(intdat)] - w=.2 - yo=mean(intdat) - p0=[yo,A,xc,w] - print('initial guess for fitting: ',p0) + B = np.array(B) + intdat = np.array(intdat) + B = np.array(ss[-1].dcm_b)[ + 2: + ] # retrieve the data (first data point is often "wrong", so don't use + intdat = np.array(ss[-1].bpm_cam_stats_total1)[2:] + A = np.max(intdat) # initial parameter guess and fitting + xc = B[np.argmax(intdat)] + w = 0.2 + yo = mean(intdat) + p0 = [yo, A, xc, w] + print("initial guess for fitting: ", p0) try: - coeff,var_matrix = curve_fit(gauss,B,intdat,p0=p0) + coeff, var_matrix = curve_fit(gauss, B, intdat, p0=p0) center.append(coeff[2]) - E1.append(xf.get_EBragg(xtal,-coeff[2])/5.0) - realgap.append(caget('SR:C11-ID:G1{IVU20:1-LEnc}Gap')) -# # append data file by i, 1 & xf.get_EBragg(xtal,-coeff[2]/5.0): - with dat_file.open('a') as myfile: - myfile.write(str(caget('SR:C11-ID:G1{IVU20:1-LEnc}Gap'))+' 1.0 '+str(float(xf.get_EBragg(xtal,-coeff[2])/5.0))+'\n') - 
print('added data point: ',caget('SR:C11-ID:G1{IVU20:1-LEnc}Gap'),' ',1.0,' ',str(float(xf.get_EBragg(xtal,-coeff[2])/5.0))) - except: print('could not evaluate data point for ID gap = ',i,' mm...data point skipped!') + E1.append(xf.get_EBragg(xtal, -coeff[2]) / 5.0) + realgap.append(caget("SR:C11-ID:G1{IVU20:1-LEnc}Gap")) + # # append data file by i, 1 & xf.get_EBragg(xtal,-coeff[2]/5.0): + with dat_file.open("a") as myfile: + myfile.write( + str(caget("SR:C11-ID:G1{IVU20:1-LEnc}Gap")) + + " 1.0 " + + str(float(xf.get_EBragg(xtal, -coeff[2]) / 5.0)) + + "\n" + ) + print( + "added data point: ", + caget("SR:C11-ID:G1{IVU20:1-LEnc}Gap"), + " ", + 1.0, + " ", + str(float(xf.get_EBragg(xtal, -coeff[2]) / 5.0)), + ) + except: + print( + "could not evaluate data point for ID gap = ", + i, + " mm...data point skipped!", + ) time.sleep(sl) plt.close(234) plt.figure(234) - plt.plot(E1,gaps,'ro-') - plt.xlabel('E_1 [keV]') - plt.ylabel('ID gap [mm]') - plt.title('ID gap calibration in file: {}'.format(dat_file), size=12) + plt.plot(E1, gaps, "ro-") + plt.xlabel("E_1 [keV]") + plt.ylabel("ID gap [mm]") + plt.title("ID gap calibration in file: {}".format(dat_file), size=12) plt.grid() @@ -1546,4 +1939,3 @@ class CHX_utilities_Exception(Exception): by LW 03/19/2015 class to raise xfuncs specific exceptions """ - diff --git a/chxtools/chx_wrapper.py b/chxtools/chx_wrapper.py index a1eb624..274c5ff 100644 --- a/chxtools/chx_wrapper.py +++ b/chxtools/chx_wrapper.py @@ -3,6 +3,7 @@ by LW 03/20/2015 some wrapper functions for ophyd """ + from __future__ import print_function @@ -17,66 +18,67 @@ def det_select(det): calling sequence: det_select(det) """ try: - rm_det=ascan.user_detectors[0].name + rm_det = ascan.user_detectors[0].name ascan.user_detectors.remove(session_mgr[rm_det]) ascan.default_triggers = [] ascan.default_detectors = [] - print('removed previous default detector: {}'.format(rm_det)) + print("removed previous default detector: {}".format(rm_det)) except: - 
print('list of detectors appeared to be empty...') + print("list of detectors appeared to be empty...") - ascan.user_detectors.append(det) # add detector - new_def_det=ascan.user_detectors[0].name + ascan.user_detectors.append(det) # add detector + new_def_det = ascan.user_detectors[0].name - print('\nnew default detector: {}'.format(new_def_det)) + print("\nnew default detector: {}".format(new_def_det)) -def cw_ascan(mot,xmin,xmax,npoints,acqt='default',pos_ret=True): +def cw_ascan(mot, xmin, xmax, npoints, acqt="default", pos_ret=True): """ by LW 03/21/2015 wrapper function for ophyd's acan added functionality: - default option: returns scanned axis back to the position where it was prior to starting the scan - - option to change acquition times on detectors (default: use current setting from corresponding css screen) + - option to change acquisition times on detectors (default: use current setting from corresponding css screen) - stop camera and set image mode to single - after the scan: restarts camera and set image mode to where it was prior to starting the scan calling sequence: cw_ascan(det,xmin,xmax,npoints,acqt='default',pos_ret=True) WILL NOT WORK FOR A LIST OF DETECTORS! 
""" # gather beamline information prior to starting the scan: - ini_motpos = caget(mot.record+'.RBV') + ini_motpos = caget(mot.record + ".RBV") # current detector: acq_pv = session_mgr[ascan.user_detectors[0].name].pvname # set different exposure time for the scan, if requested: - if acqt != 'default': + if acqt != "default": try: - ini_expt = caget(acq_pv[2]) # initial exposure time + ini_expt = caget(acq_pv[2]) # initial exposure time session_mgr[ascan.user_detectors[0].name].acquire_time = acqt - print('successfully set exposure time to [s]: {}'.format(acqt)) + print("successfully set exposure time to [s]: {}".format(acqt)) except: - print('could not set exposure time to {}'.format(acqt)) + print("could not set exposure time to {}".format(acqt)) # execute the scan - ascan(mot,xmin,xmax,npoints) + ascan(mot, xmin, xmax, npoints) # put beamline back into initial state if pos_ret: - caput(mot.record+'.VAL',ini_motpos) - print('returned axes to: {}'.format(ini_motpos)) - if acqt != 'default': + caput(mot.record + ".VAL", ini_motpos) + print("returned axes to: {}".format(ini_motpos)) + if acqt != "default": try: session_mgr[ascan.user_detectors[0].name].acquire_time = ini_expt - print('successfully reset exposure time to [s]: {}'.format(ini_expt)) + print("successfully reset exposure time to [s]: {}".format(ini_expt)) except: - print('could not reset exposure time to {}'.format(ini_expt)) + print("could not reset exposure time to {}".format(ini_expt)) + -def cw_dscan(mot,mdx,pdx,npoints,acqt='default',pos_ret=True): +def cw_dscan(mot, mdx, pdx, npoints, acqt="default", pos_ret=True): """ by LW 03/21/2015 wrapper function for ophyd's ascan added functionality: - - option to change acquition times on detectors (default: use current setting from corresponding css screen) + - option to change acquisition times on detectors (default: use current setting from corresponding css screen) calling sequence: cw_dscan(det,mdx,pdx,npoints,acqt='default',pos_ret=True) WILL NOT WORK 
FOR A LIST OF DETECTORS! """ @@ -84,26 +86,35 @@ def cw_dscan(mot,mdx,pdx,npoints,acqt='default',pos_ret=True): acq_pv = session_mgr[ascan.user_detectors[0].name].pvname # set different exposure time for the scan, if requested: - if acqt != 'default': + if acqt != "default": try: - ini_expt=caget(acq_pv[2]) # initial exposure time + ini_expt = caget(acq_pv[2]) # initial exposure time session_mgr[ascan.user_detectors[0].name].acquire_time = acqt - print('successfully set exposure time to [s]: {}'.format(acqt)) + print("successfully set exposure time to [s]: {}".format(acqt)) except: - print('could not set exposure time to {}'.format(acqt)) + print("could not set exposure time to {}".format(acqt)) # execute the scan - dscan(mot,mdx,pdx,npoints) - #print('finished scan') + dscan(mot, mdx, pdx, npoints) + # print('finished scan') - if acqt != 'default': + if acqt != "default": try: session_mgr[ascan.user_detectors[0].name].acquire_time = ini_expt - print('successfully reset exposure time to [s]: {}'.format(ini_expt)) + print("successfully reset exposure time to [s]: {}".format(ini_expt)) except: - print('could not reset exposure time to {}'.format(ini_expt)) + print("could not reset exposure time to {}".format(ini_expt)) + -def cw_CCDseries(folder,filename,detector,imnum='default',startn=1,acqt='default',acqperiod='default'): +def cw_CCDseries( + folder, + filename, + detector, + imnum="default", + startn=1, + acqt="default", + acqperiod="default", +): """ by LW 04/06/2015 wrapper function to take a time series with a CCD detector @@ -118,139 +129,172 @@ def cw_CCDseries(folder,filename,detector,imnum='default',startn=1,acqt='default calling sequence: cw_CCDseries(folder,filename,detector,imnum='default',startn=1,acqt='default',acqperiod='default') """ import time - import Tkinter, tkFileDialog - import os, stat, sys + import Tkinter + import tkFileDialog + import os + import stat + # get the detector name:: - detector.pvname.split('}')[0]+'}' - #get folder 
interactively: + detector.pvname.split("}")[0] + "}" + # get folder interactively: - if folder == 'ia': - root=Tkinter.Tk() - root.withdraw() - directory=tkFileDialog.askdirectory() - folder=directory + if folder == "ia": + root = Tkinter.Tk() + root.withdraw() + directory = tkFileDialog.askdirectory() + folder = directory # check whether target directory exists and create it, if it doesn't if not os.path.exists(folder): - os.mkdir(folder) - os.chmod(folder,436) #make sure everybody can read an write - os.chmod(folder,stat.S_IRWXO) - print('successfully created new directory: {}'.format(folder)) + os.mkdir(folder) + os.chmod(folder, 436) # make sure everybody can read an write + os.chmod(folder, stat.S_IRWXO) + print("successfully created new directory: {}".format(folder)) # put folder: - r=caput(detector.pvname.split('}')[0]+'}TIFF1:FilePath', folder) # have some problem with syntax here... + r = caput( + detector.pvname.split("}")[0] + "}TIFF1:FilePath", folder + ) # have some problem with syntax here... if r == 1: - print('changed saving directory to: {}'.format(folder)) + print("changed saving directory to: {}".format(folder)) else: - print('error: could not change directory for saving data files.') + print("error: could not change directory for saving data files.") # put the filename: - r=caput(detector.pvname.split('}')[0]+'}TIFF1:FileName', filename) # have some problem with syntax here... + r = caput( + detector.pvname.split("}")[0] + "}TIFF1:FileName", filename + ) # have some problem with syntax here... 
if r == 1: - print('filename for saving: {}'.format(filename)) + print("filename for saving: {}".format(filename)) else: - print('error: could not change file name for saving.') + print("error: could not change file name for saving.") # put start number - caput(detector.pvname.split('}')[0]+'}TIFF1:FileNumber',startn) + caput(detector.pvname.split("}")[0] + "}TIFF1:FileNumber", startn) - #gather information about current camera settings + # gather information about current camera settings acq_pv = session_mgr[ascan.user_detectors[0].name].pvname - ini_acq = caget(acq_pv.split('}')[0]+'}cam1:Acquire') # initial state: started or stopped - ini_mode = caget(acq_pv.split('}')[0]+'}cam1:ImageMode') # initial image mode: single [0], multiple [1], continous [2] - ini_expt = caget(acq_pv.split('}')[0]+'}cam1:AcquireTime') # initial exposure time - ini_acqperiod = caget(acq_pv.split('}')[0]+'}cam1:AcquirePeriod') # initial acquiring period - ini_imnum = caget(acq_pv.split('}')[0]+'}cam1:NumImages') # initial image number + ini_acq = caget( + acq_pv.split("}")[0] + "}cam1:Acquire" + ) # initial state: started or stopped + ini_mode = caget( + acq_pv.split("}")[0] + "}cam1:ImageMode" + ) # initial image mode: single [0], multiple [1], continuous [2] + ini_expt = caget( + acq_pv.split("}")[0] + "}cam1:AcquireTime" + ) # initial exposure time + ini_acqperiod = caget( + acq_pv.split("}")[0] + "}cam1:AcquirePeriod" + ) # initial acquiring period + ini_imnum = caget(acq_pv.split("}")[0] + "}cam1:NumImages") # initial image number - if acqt != 'default': + if acqt != "default": try: - caput(acq_pv.split('}')[0]+'}cam1:AcquireTime',acqt) - print('successfully set exposure time to [s]: {}'.format(acqt)) + caput(acq_pv.split("}")[0] + "}cam1:AcquireTime", acqt) + print("successfully set exposure time to [s]: {}".format(acqt)) except: - print('could not set exposure time to {}'.format(acqt)) + print("could not set exposure time to {}".format(acqt)) # stop camara: try: - 
caput(acq_pv.split('}')[0]+'}cam1:Acquire',0) - print('successfully stopped camera') + caput(acq_pv.split("}")[0] + "}cam1:Acquire", 0) + print("successfully stopped camera") except: - print('could not stop camera') + print("could not stop camera") # try to set image mode to multiple try: - caput(acq_pv.split('}')[0]+'}cam1:ImageMode',1) + caput(acq_pv.split("}")[0] + "}cam1:ImageMode", 1) print('successfully set ImageMode to "multiple"') except: print('could not set ImageMode to "multiple"') - if acqperiod != 'default': + if acqperiod != "default": try: - caput(acq_pv.split('}')[0]+'}cam1:AcquirePeriod',acqperiod) - print('successfully set acquiering period to: {}'.format(acqperiod)) + caput(acq_pv.split("}")[0] + "}cam1:AcquirePeriod", acqperiod) + print("successfully set acquiering period to: {}".format(acqperiod)) except: - print('could not set aquireing period to {}'.format(acqperiod)) + print("could not set aquireing period to {}".format(acqperiod)) # set number of images to be taken: - if imnum != 'default': + if imnum != "default": try: - caput(acq_pv.split('}')[0]+'}cam1:NumImages', imnum) - print('successfully set number of images to: {}'.format(imnum)) + caput(acq_pv.split("}")[0] + "}cam1:NumImages", imnum) + print("successfully set number of images to: {}".format(imnum)) except: - print('could not set number of images to {}'.format(imnum)) - print('going to start the acquisition...') + print("could not set number of images to {}".format(imnum)) + print("going to start the acquisition...") time.sleep(1) # start the acquisition!!! 
- caput(acq_pv.split('}')[0]+'}cam1:ArrayCounter',0) # set Images acquired (image counter) to 0 + caput( + acq_pv.split("}")[0] + "}cam1:ArrayCounter", 0 + ) # set Images acquired (image counter) to 0 - caput(detector.pvname.split('}')[0]+'}TIFF1:AutoSave',1) # start auto save - caput(acq_pv.split('}')[0]+'}cam1:Acquire',1) # start series acquisition + caput(detector.pvname.split("}")[0] + "}TIFF1:AutoSave", 1) # start auto save + caput(acq_pv.split("}")[0] + "}cam1:Acquire", 1) # start series acquisition - counting=0 - current_file=caget(detector.pvname.split('}')[0]+'}TIFF1:FullFileName_RBV',as_string=True) - while counting==0: - time.sleep(.5) - if caget(acq_pv.split('}')[0]+'}cam1:ArrayCounter_RBV') == imnum: - counting=1 - if caget(detector.pvname.split('}')[0]+'}TIFF1:FullFileName_RBV',as_string=True) != current_file: - current_file=caget(detector.pvname.split('}')[0]+'}TIFF1:FullFileName_RBV',as_string=True) - print('file written: {}'.format(current_file)) - time.sleep(.1) + counting = 0 + current_file = caget( + detector.pvname.split("}")[0] + "}TIFF1:FullFileName_RBV", as_string=True + ) + while counting == 0: + time.sleep(0.5) + if caget(acq_pv.split("}")[0] + "}cam1:ArrayCounter_RBV") == imnum: + counting = 1 + if ( + caget( + detector.pvname.split("}")[0] + "}TIFF1:FullFileName_RBV", + as_string=True, + ) + != current_file + ): + current_file = caget( + detector.pvname.split("}")[0] + "}TIFF1:FullFileName_RBV", + as_string=True, + ) + print("file written: {}".format(current_file)) + time.sleep(0.1) - print('going to stop the acquisition...') - time.sleep(.5) - caput(acq_pv.split('}')[0]+'}cam1:Acquire',0) # stop series acquisition (redundent...should have stopped already - caput(detector.pvname.split('}')[0]+'}TIFF1:AutoSave',0) # stop auto save + print("going to stop the acquisition...") + time.sleep(0.5) + caput( + acq_pv.split("}")[0] + "}cam1:Acquire", 0 + ) # stop series acquisition (redundant...should have stopped already + 
caput(detector.pvname.split("}")[0] + "}TIFF1:AutoSave", 0) # stop auto save - print('going to reset camera...') - #time.sleep(5) - # put camera back into initial state: - if acqt != 'default': + print("going to reset camera...") + # time.sleep(5) + # put camera back into initial state: + if acqt != "default": try: - caput(acq_pv.split('}')[0]+'}cam1:AcquireTime',ini_expt) - print('successfully reset exposure time to [s]: {}'.format(ini_expt)) + caput(acq_pv.split("}")[0] + "}cam1:AcquireTime", ini_expt) + print("successfully reset exposure time to [s]: {}".format(ini_expt)) except: - print('could not reset exposure time to {}'.format(ini_expt)) + print("could not reset exposure time to {}".format(ini_expt)) try: - caput(acq_pv.split('}')[0]+'}cam1:ImageMode', ini_mode) - print('successfully reset ImageMode') + caput(acq_pv.split("}")[0] + "}cam1:ImageMode", ini_mode) + print("successfully reset ImageMode") except: - print('could not reset ImageMode') + print("could not reset ImageMode") try: - caput(acq_pv.split('}')[0]+'}cam1:Acquire', ini_acq) - print('successfully reset camera acqusition mode') + caput(acq_pv.split("}")[0] + "}cam1:Acquire", ini_acq) + print("successfully reset camera acquisition mode") except: - print('could not reset camera acquisition mode') - if acqperiod!='default': + print("could not reset camera acquisition mode") + if acqperiod != "default": try: - caput(acq_pv.split('}')[0]+'}cam1:AcquirePeriod', ini_acqperiod) - print('successfully reset acquisition period to [s]: {}'.format(ini_acqperiod)) + caput(acq_pv.split("}")[0] + "}cam1:AcquirePeriod", ini_acqperiod) + print( + "successfully reset acquisition period to [s]: {}".format(ini_acqperiod) + ) except: - print('could not reset acquisition period to {}'.format(ini_acqperiod)) - if imnum!='default': + print("could not reset acquisition period to {}".format(ini_acqperiod)) + if imnum != "default": try: - caput(acq_pv.split('}')[0]+'}cam1:NumImages', ini_imnum) - print('successfully reset 
image numbers to: {}'.format(ini_imnum)) + caput(acq_pv.split("}")[0] + "}cam1:NumImages", ini_imnum) + print("successfully reset image numbers to: {}".format(ini_imnum)) except: - print('could not reset image numbers to {}'.format(ini_imnum)) - time.sleep(.5) + print("could not reset image numbers to {}".format(ini_imnum)) + time.sleep(0.5) try: - caput(acq_pv.split('}')[0]+'}cam1:Acquire', ini_acq) # restart camera if it was running before taking the series - print('restarted camera') + caput( + acq_pv.split("}")[0] + "}cam1:Acquire", ini_acq + ) # restart camera if it was running before taking the series + print("restarted camera") except: - print('could not restart camera...') - + print("could not restart camera...") diff --git a/chxtools/handlers.py b/chxtools/handlers.py index bd5c550..d5dc969 100644 --- a/chxtools/handlers.py +++ b/chxtools/handlers.py @@ -1,25 +1,30 @@ # handler registration and database instantiation should be done # here and only here! from databroker import Broker -from databroker.assets.handlers_base import HandlerBase -#from chxtools.pims_readers.eiger import EigerImages + +# from chxtools.pims_readers.eiger import EigerImages from eiger_io.fs_handler_dask import EigerHandlerDask, EigerImagesDask -from eiger_io.fs_handler import EigerHandler as EigerHandlerPIMS, EigerImages as EigerImagesPIMS +from eiger_io.fs_handler import ( + EigerHandler as EigerHandlerPIMS, + EigerImages as EigerImagesPIMS, +) -db = Broker.named('chx') +db = Broker.named("chx") -''' -Tried to allow function to change namespace did not work. +""" +Tried to allow function to change namespace did not work. 
DO NOT USE -''' +""" + + # toggle use of dask or no dask # TODO : eventually choose one of the two def use_pims(): global EigerImages, EigerHandler, db EigerImages = EigerImagesPIMS EigerHandler = EigerHandlerPIMS - db.reg.register_handler('AD_EIGER2', EigerHandler, overwrite=True) - db.reg.register_handler('AD_EIGER', EigerHandler, overwrite=True) + db.reg.register_handler("AD_EIGER2", EigerHandler, overwrite=True) + db.reg.register_handler("AD_EIGER", EigerHandler, overwrite=True) def use_dask(): @@ -27,8 +32,9 @@ def use_dask(): EigerImages = EigerImagesDask EigerHandler = EigerHandlerDask - db.reg.register_handler('AD_EIGER2', EigerHandler, overwrite=True) - db.reg.register_handler('AD_EIGER', EigerHandler, overwrite=True) + db.reg.register_handler("AD_EIGER2", EigerHandler, overwrite=True) + db.reg.register_handler("AD_EIGER", EigerHandler, overwrite=True) + # call use_pims or use_dask # default is use_dask() diff --git a/chxtools/plot_sid.py b/chxtools/plot_sid.py index 9e7a13f..da97287 100644 --- a/chxtools/plot_sid.py +++ b/chxtools/plot_sid.py @@ -1,43 +1,38 @@ -from databroker import Broker import matplotlib.pyplot as plt from .handlers import db def plot_scan(sid=-1, x=None, y=None): - '''plot scan_ids - Options: - sid: the scan id, a number or a list - x: the x-axis, a string - y: the y-axis, a string + """plot scan_ids + Options: + sid: the scan id, a number or a list + x: the x-axis, a string + y: the y-axis, a string - Usage: - ====== - from chxtools.plot import plot_scan - plot_scan([-1,-2], x='ivu_gap',y='xray_cam_stats_total1') - ''' + Usage: + ====== + from chxtools.plot import plot_scan + plot_scan([-1,-2], x='ivu_gap',y='xray_cam_stats_total1') + """ if not isinstance(sid, list): sid = [sid] if x is None: - x = 'time' + x = "time" if y is None: - y = 'time' + y = "time" fig, ax = plt.subplots() for s in sid: dat = db[s].table() # print ('here') if x not in dat.keys(): - print('Wrong x input!') - print('The available X includes: 
{}'.format(dat.keys())) + print("Wrong x input!") + print("The available X includes: {}".format(dat.keys())) break if y not in dat.keys(): - print('Wrong y input!') - print('The available Y includes: {}'.format(dat.keys())) + print("Wrong y input!") + print("The available Y includes: {}".format(dat.keys())) break - # datx = dat[x] - # daty = dat[y] - # print(x, y) - - dat.plot(x=x, y=y, ax=ax, label='sid: {}'.format(s)) + dat.plot(x=x, y=y, ax=ax, label="sid: {}".format(s)) ax.set_ylabel(y) diff --git a/chxtools/pv_explorer/__init__.py b/chxtools/pv_explorer/__init__.py index c2a0c05..f35406c 100644 --- a/chxtools/pv_explorer/__init__.py +++ b/chxtools/pv_explorer/__init__.py @@ -1 +1 @@ -__author__ = 'edill' +__author__ = "edill" diff --git a/chxtools/pv_explorer/model.py b/chxtools/pv_explorer/model.py index 928a04e..2ad1066 100644 --- a/chxtools/pv_explorer/model.py +++ b/chxtools/pv_explorer/model.py @@ -1,6 +1,5 @@ from atom.api import * from matplotlib.figure import Figure -from matplotlib.axes import Axes import numpy as np import datetime import os @@ -35,20 +34,21 @@ def __init__(self): self.dt0 = datetime.datetime.utcnow() self._fig = Figure(figsize=(1, 1)) self._fig.set_tight_layout(True) - pvs = ['pv1', 'pv2', 'pv3', 'pv4'] + pvs = ["pv1", "pv2", "pv3", "pv4"] - for idx, (name, position) in enumerate(zip(pvs, - range(1, len(pvs)+1))): + for idx, (name, position) in enumerate(zip(pvs, range(1, len(pvs) + 1))): if idx == 0: sharex = None else: sharex = self._axes[pvs[0]] self._axes[name] = self._fig.add_subplot( - len(pvs)+1, 1, position, sharex=sharex) - self._axes['data'] = self._fig.add_subplot( - len(pvs)+1, 1, len(pvs)+1, sharex=self._axes[pvs[0]]) + len(pvs) + 1, 1, position, sharex=sharex + ) + self._axes["data"] = self._fig.add_subplot( + len(pvs) + 1, 1, len(pvs) + 1, sharex=self._axes[pvs[0]] + ) - @observe('data_file') + @observe("data_file") def datafiles_changed(self, changed): # load your data @@ -56,21 +56,21 @@ def datafiles_changed(self, 
changed): x = np.linspace(self.t0, self.t1, 1000) y = np.cos(x) * 10 y += np.random.randn(len(x)) - self._axes['data'].cla() - self._axes['data'].plot(x, y, label=self.data_file.split(os.sep)[-1]) - self._axes['data'].legend(loc=0) + self._axes["data"].cla() + self._axes["data"].plot(x, y, label=self.data_file.split(os.sep)[-1]) + self._axes["data"].legend(loc=0) self.reformat_view() - @observe('dt0') + @observe("dt0") def dt0_changed(self, changed): print(changed) - @observe('pv1', 'pv2', 'pv3', 'pv4') + @observe("pv1", "pv2", "pv3", "pv4") def get_pv1(self, changed): print(changed) # get the data from the channel archiver - pv_name = changed['value'] - axes = self._axes[changed['name']] + pv_name = changed["value"] + axes = self._axes[changed["name"]] axes.set_ylabel(pv_name) self._update_data(pv_name, axes) @@ -82,10 +82,10 @@ def _update_data(self, pv_name, axes): axes.legend(loc=0) self.reformat_view() - @observe('t0', 't1') + @observe("t0", "t1") def change_time(self, changed): for k, axes in self._axes.items(): - if k == 'data': + if k == "data": continue pv_name = getattr(self, k) self._update_data(pv_name, axes) diff --git a/chxtools/pv_explorer/run.py b/chxtools/pv_explorer/run.py index a4db21e..ebb8a4f 100644 --- a/chxtools/pv_explorer/run.py +++ b/chxtools/pv_explorer/run.py @@ -1,13 +1,14 @@ -from __future__ import (unicode_literals, print_function, absolute_import, \ - division) +from __future__ import unicode_literals, print_function, absolute_import, division from enaml.qt.qt_application import QtApplication import enaml from chxtools.pv_explorer.model import Model + with enaml.imports(): from chxtools.pv_explorer.view import MainView + def main(): app = QtApplication() model = Model() @@ -16,5 +17,6 @@ def main(): app.start() + if __name__ == "__main__": main() diff --git a/chxtools/pv_explorer/view.enaml b/chxtools/pv_explorer/view.enaml index 675fdb8..baaab2f 100644 --- a/chxtools/pv_explorer/view.enaml +++ b/chxtools/pv_explorer/view.enaml @@ 
-59,4 +59,3 @@ enamldef MainView(MainWindow): main_view: MPLCanvas: figure << model._fig toolbar_visible = True - diff --git a/chxtools/transfuncs.py b/chxtools/transfuncs.py index b874f6a..acb6883 100644 --- a/chxtools/transfuncs.py +++ b/chxtools/transfuncs.py @@ -6,21 +6,24 @@ calculate current image position -> get_ip by LW 03/13/2016 """ + import numpy as np from chxtools import xfuncs as xf from scipy.optimize import fminbound from epics import * -def CRL_focalpoint(energy,lens_configuration): + +def CRL_focalpoint(energy, lens_configuration): """ CRL_focalpoint(energy,lens_configuration): - lens_confguration is a dictionary of the form - [lens_radius1:numer lenses, lens_radius2: number_lenses, ...] + lens_configuration is a dictionary of the form + [lens_radius1:number lenses, lens_radius2: number_lenses, ...] returns the focal length """ - focal_distance=0.0 + focal_distance = 0.0 return focal_distance + def trans_setup(): """ defining physical configuration of CHX transfocator system @@ -33,17 +36,22 @@ def trans_setup(): # Be Be Be Be Be Be Be lens material # 1.5 1.5 0.5 0.5 0.5 0.5 0.5 lens radius [mm] # 1 1 5 8 4 2 1 number of lenses - lens_R=[0.5,0.5,0.5,0.5,0.5,1.5,1.5] - lens_mat=['Be','Be','Be','Be','Be','Be','Be'] - lens_N=[1,2,4,8,5,1,1] - trans_pos=[35.2,35.8] - return {'lens_material':lens_mat,'lens_number':lens_N,'lens_radius':lens_R,'trans_position':trans_pos} + lens_R = [0.5, 0.5, 0.5, 0.5, 0.5, 1.5, 1.5] + lens_mat = ["Be", "Be", "Be", "Be", "Be", "Be", "Be"] + lens_N = [1, 2, 4, 8, 5, 1, 1] + trans_pos = [35.2, 35.8] + return { + "lens_material": lens_mat, + "lens_number": lens_N, + "lens_radius": lens_R, + "trans_position": trans_pos, + } -def calc_transsetup(image_pos,E='auto',silent=False): +def calc_transsetup(image_pos, E="auto", silent=False): """ - calc_transsetup(image_pos,E='auto'): funtion to calculate CHX transfocator settings - required arument: + calc_transsetup(image_pos,E='auto'): function to calculate CHX transfocator 
settings + required argument: image_pos: image position [m from source] optional arguments: E='auto': get beamline energy from PV (currently: DCM) | E=7894: manual overwrite of energy parameter @@ -52,89 +60,152 @@ def calc_transsetup(image_pos,E='auto',silent=False): Note: search is implemented to go through the lens stacks with large numbers of lenses first -> returned solution will be e.g. using lens stack #5 with 5 lenses, instead of #1 (1) and #3 (4) function returns dictionary of the form: {'lens_config':bin_index,'z_position':zmin} - dependencies: imports numpy, fminbound (scipy.optimze) and xfuncs + dependencies: imports numpy, fminbound (scipy.optimize) and xfuncs calls trans_setup() to get physical configuration of transfocator system by LW 03/14/2016 """ - image_pos=np.array(image_pos) - if E is 'auto': - #E=8000 # temporary: don't have channel access -> set E to 8000eV - E=caget('XF:11IDA-OP{Mono:DCM-Ax:Energy}Mtr.RBV') ### get energy automatically with channel access - print ("getting energy from global PV: E=",E,'eV') - #(currently not implemented in test version (no channel access) -> 8000eV default)' # future: add PV name for house keeping - if E> 30000 or E< 2000: - raise transfuncs_Exception("error: Input argument E has to be 2000 set E to 8000eV + E = caget( + "XF:11IDA-OP{Mono:DCM-Ax:Energy}Mtr.RBV" + ) ### get energy automatically with channel access + print("getting energy from global PV: E=", E, "eV") + # (currently not implemented in test version (no channel access) -> 8000eV default)' # future: add PV name for house keeping + if E > 30000 or E < 2000: + raise transfuncs_Exception( + "error: Input argument E has to be 20000: loc = 'downstream' - #conf_lens_mat=lens_mat*bin_index - conf_lensR=lens_R*bin_index - conf_lensN=lens_N*bin_index + if impos_diff <= 0: + loc = "upstream" + if impos_diff > 0: + loc = "downstream" + # conf_lens_mat=lens_mat*bin_index + conf_lensR = lens_R * bin_index + conf_lensN = lens_N * bin_index if silent == False: 
- print (' ') - print ('optimized transfocator settings for E='+str(E)+' [eV]') - print ('requested image position: '+str(image_pos)+' [m from source] best match: '+str(image_pos+impos_diff)+' [m from source]') - print ('difference in image location: '+str(impos_diff)+' [m], image '+loc+' of requested position') - print ('requested demagnification: 1/'+str(zpos.mean()/(image_pos-zpos.mean()))+' actual demagnification: 1/'+str(zmin/(image_pos-zmin))) - print ('optimized transfocator position: '+str(zmin)+' [m from source], relative position: '+str(zmin-zpos.mean())+'[m]') - print (' # slot7 slot6 slot5 slot4 slot3 slot2 slot1 <------ beam direction (slot 8 is currently B-fiber only)') - print (' '+str(conf_lensR[6])+' '+str(conf_lensR[5])+' '+str(conf_lensR[4])+' '+str(conf_lensR[3])+' '+str(conf_lensR[2])+' '+str(conf_lensR[1])+' '+str(conf_lensR[0])+' lens radius [mm]') - print (' '+str(conf_lensN[6])+' '+str(conf_lensN[5])+' '+str(conf_lensN[4])+' '+str(conf_lensN[3])+' '+str(conf_lensN[2])+' '+str(conf_lensN[1])+' '+str(conf_lensN[0])+' number of lenses') - print (' ' ) - return {'lens_config':bin_index,'z_position':zmin} - -def get_ip(conf,rel_z_pos,E='auto'): + print(" ") + print("optimized transfocator settings for E=" + str(E) + " [eV]") + print( + "requested image position: " + + str(image_pos) + + " [m from source] best match: " + + str(image_pos + impos_diff) + + " [m from source]" + ) + print( + "difference in image location: " + + str(impos_diff) + + " [m], image " + + loc + + " of requested position" + ) + print( + "requested demagnification: 1/" + + str(zpos.mean() / (image_pos - zpos.mean())) + + " actual demagnification: 1/" + + str(zmin / (image_pos - zmin)) + ) + print( + "optimized transfocator position: " + + str(zmin) + + " [m from source], relative position: " + + str(zmin - zpos.mean()) + + "[m]" + ) + print( + " # slot7 slot6 slot5 slot4 slot3 slot2 slot1 <------ beam direction (slot 8 is currently B-fiber only)" + ) + print( + " " + + 
str(conf_lensR[6]) + + " " + + str(conf_lensR[5]) + + " " + + str(conf_lensR[4]) + + " " + + str(conf_lensR[3]) + + " " + + str(conf_lensR[2]) + + " " + + str(conf_lensR[1]) + + " " + + str(conf_lensR[0]) + + " lens radius [mm]" + ) + print( + " " + + str(conf_lensN[6]) + + " " + + str(conf_lensN[5]) + + " " + + str(conf_lensN[4]) + + " " + + str(conf_lensN[3]) + + " " + + str(conf_lensN[2]) + + " " + + str(conf_lensN[1]) + + " " + + str(conf_lensN[0]) + + " number of lenses" + ) + print(" ") + return {"lens_config": bin_index, "z_position": zmin} + + +def get_ip(conf, rel_z_pos, E="auto"): """ - get_ip(conf,real_z_pos,E='auto'): funtion to calculate the image position from current CHX transfocator configuration - required aruments: + get_ip(conf,real_z_pos,E='auto'): function to calculate the image position from current CHX transfocator configuration + required arguments: conf: 'binary' array, reflecting the status of the lens stacks !!! omit slot #8 (B-fiber) e.g. [0,0,1,0,0,0,0] -> lens stack #3 in, all other out @@ -148,45 +219,84 @@ def get_ip(conf,rel_z_pos,E='auto'): by LW 03/15/2016 """ # transfocator setup - trans_conf=trans_setup() - lens_R = np.array(trans_conf['lens_radius'])*1. - zpos = np.array(trans_conf['trans_position'])*1. 
- lens_N = np.array(trans_conf['lens_number']) - lens_mat = np.array(trans_conf['lens_material']) - - # checks: - conf=np.array(conf) - #print conf.min<0 - #print conf.max()>1 + trans_conf = trans_setup() + lens_R = np.array(trans_conf["lens_radius"]) * 1.0 + zpos = np.array(trans_conf["trans_position"]) * 1.0 + lens_N = np.array(trans_conf["lens_number"]) + lens_mat = np.array(trans_conf["lens_material"]) + + # checks: + conf = np.array(conf) + # print conf.min<0 + # print conf.max()>1 # print len(conf) != len(lens_R) if len(conf) != len(lens_R): - print ('thats interesting') - if E is 'auto': - #E=8000 # temporary: don't have channel access -> set E to 8000eV - E=caget('XF:11IDA-OP{Mono:DCM-Ax:Energy}Mtr.RBV') ### get energy automatically with channel access - print ('getting energy from global PV: E=',E,'eV')# (currently not implemented in test version (no channel access) -> 8000eV default)' # future: add PV name for house keeping - if E> 30000 or E< 2000: - raise transfuncs_Exception("error: Input argument E has to be 20001 or len(conf) != len(lens_R): - raise transfuncs_Exception("error: Input argument conf has to be an array or list of the form [1,0,1,...] and its lenght has to match the number of slots occupied by lenses") - if rel_z_pos>zpos.max()-zpos.mean() or rel_z_pos set E to 8000eV + E = caget( + "XF:11IDA-OP{Mono:DCM-Ax:Energy}Mtr.RBV" + ) ### get energy automatically with channel access + print( + "getting energy from global PV: E=", E, "eV" + ) # (currently not implemented in test version (no channel access) -> 8000eV default)' # future: add PV name for house keeping + if E > 30000 or E < 2000: + raise transfuncs_Exception( + "error: Input argument E has to be 2000 1 or len(conf) != len(lens_R): + raise transfuncs_Exception( + "error: Input argument conf has to be an array or list of the form [1,0,1,...] 
and its length has to match the number of slots occupied by lenses" + ) + if rel_z_pos > zpos.max() - zpos.mean() or rel_z_pos < zpos.min() - zpos.mean(): + raise transfuncs_Exception( + "error: relative transfocator position has to be [" + + str(zpos.min() - zpos.mean()) + + ":" + + str(zpos.max() - zpos.mean()) + + "] m" + ) # calculate focusing: - delta=np.zeros(len(lens_R)) + delta = np.zeros(len(lens_R)) for w in range(0, len(lens_R)): - delta[w]=np.real(xf.get_n(lens_mat[w],E/1000.)) - ln=lens_N*conf - F=1/np.sum(2*delta*ln/(lens_R*1E-3)) - q=1/(-1/(rel_z_pos+zpos.mean()) + 1/F) - imp=rel_z_pos+zpos.mean()+q - print ('requested calculation for E= '+str(E)+'[eV], relative position of transfocator: '+str(rel_z_pos)+'[m]') - print (' # slot7 slot6 slot5 slot4 slot3 slot2 slot1 <------ beam direction (slot 8 is currently B-fiber only)') - print (' '+str(conf[6])+' '+str(conf[5])+' '+str(conf[4])+' '+str(conf[3])+' '+str(conf[2])+' '+str(conf[1])+' '+str(conf[0])+' (1: lens in| 0: lens out) ') - if q<0: - print ('image is imaginary...'+'(@'+str(imp)+' [m from source])') - else: print ('image position: '+str(imp)+' [m from source]') + delta[w] = np.real(xf.get_n(lens_mat[w], E / 1000.0)) + ln = lens_N * conf + F = 1 / np.sum(2 * delta * ln / (lens_R * 1e-3)) + q = 1 / (-1 / (rel_z_pos + zpos.mean()) + 1 / F) + imp = rel_z_pos + zpos.mean() + q + print( + "requested calculation for E= " + + str(E) + + "[eV], relative position of transfocator: " + + str(rel_z_pos) + + "[m]" + ) + print( + " # slot7 slot6 slot5 slot4 slot3 slot2 slot1 <------ beam direction (slot 8 is currently B-fiber only)" + ) + print( + " " + + str(conf[6]) + + " " + + str(conf[5]) + + " " + + str(conf[4]) + + " " + + str(conf[3]) + + " " + + str(conf[2]) + + " " + + str(conf[1]) + + " " + + str(conf[0]) + + " (1: lens in| 0: lens out) " + ) + if q < 0: + print("image is imaginary..." 
+ "(@" + str(imp) + " [m from source])") + else: + print("image position: " + str(imp) + " [m from source]") return imp diff --git a/chxtools/xfuncs.py b/chxtools/xfuncs.py index 420df9c..2bcb1a0 100644 --- a/chxtools/xfuncs.py +++ b/chxtools/xfuncs.py @@ -8,9 +8,9 @@ version 0.1.2: added get_EBragg version 0.1.3: fixed error messages in get_gap and get_Es version 0.1.4: added lattice spacings for Si111 and Si220 @80K (from XOP) (02/26/2015) -version 0.2.0: fixed some problems wiht arrays as inputs, e.g. in get_gap +version 0.2.0: fixed some problems with arrays as inputs, e.g. in get_gap and changed the help files to use the standard python 'help' and docstring functions; - added xfuncs_exeption class for handling xfuncs specific exeptions within python (03/23/2015) + added xfuncs_exception class for handling xfuncs specific exceptions within python (03/23/2015) added get_pinflux: calculates photon flux from photo current from pin diode version 0.2.1: (this file) changed ID selection for gap function to 'default_id' """ @@ -24,40 +24,53 @@ # path to X-ray data files # This is a Path object: -datapath = Path(rs_fn('chxtools', 'X-ray_database')) -xdatafiles = [str(f.relative_to(datapath)) for f in datapath.glob('*') if (Path(datapath) / Path(f)).is_file()] -default_id = 'CHX_IVU20_05272017' +datapath = Path(rs_fn("chxtools", "X-ray_database")) +xdatafiles = [ + str(f.relative_to(datapath)) + for f in datapath.glob("*") + if (Path(datapath) / Path(f)).is_file() +] +default_id = "CHX_IVU20_05272017" -def get_Lambda(E,u='SI'): + +def get_Lambda(E, u="SI"): """ calculates X-ray wavelength as a function of Energy [keV] in optional units. 
Syntax: getLambda(E,u), where E=X-ray energy; optional: u= 'A','nm','um','cm','mm','m','SI' (='m'), default in the absence of u: 'SI' """ - hPlank=6.62606876e-34; - cvac=2.99792458e8; - Qelectron=1.602176463e-19; - scale=1 - #l=hPlank*cvac/(E*1000*Qelectron) - l=hPlank*cvac/(E*1000*Qelectron); - if u is 'A': - scale=1e10;return l*scale # Angstroem - elif u is 'nm': - scale=1e9; return l*scale # nm - elif u is 'um': - scale=1e6; return l*scale # um - elif u is 'mm': - scale=1e3; return l*scale # mm - elif u is 'cm': - scale=1e2; return l*scale # cm - elif u is 'm' or u is 'SI': - scale=1; return l*scale + hPlank = 6.62606876e-34 + cvac = 2.99792458e8 + Qelectron = 1.602176463e-19 + scale = 1 + # l=hPlank*cvac/(E*1000*Qelectron) + l = hPlank * cvac / (E * 1000 * Qelectron) + if u == "A": + scale = 1e10 + return l * scale # Angstroem + elif u == "nm": + scale = 1e9 + return l * scale # nm + elif u == "um": + scale = 1e6 + return l * scale # um + elif u == "mm": + scale = 1e3 + return l * scale # mm + elif u == "cm": + scale = 1e2 + return l * scale # cm + elif u == "m" or u == "SI": + scale = 1 + return l * scale else: - print ('invalid option, type "get_Lambda(\'?\')" for available options and syntax') + print( + "invalid option, type \"get_Lambda('?')\" for available options and syntax" + ) -def get_saxsQ(theta,E=8): +def get_saxsQ(theta, E=8): """ written by LW 13/03/2008. 
Calculates Q value for SAXS experiments [1/A], @@ -65,69 +78,104 @@ def get_saxsQ(theta,E=8): X-ray energy in keV (default: 8keV) function is vector compatible calling sequence: get_saxsQ(theta,E) """ - lam=get_Lambda(E,'A') - return 4*np.pi/lam*np.sin(np.rad2deg(theta/2)); + lam = get_Lambda(E, "A") + return 4 * np.pi / lam * np.sin(np.rad2deg(theta / 2)) + -def get_ac(material,E=8): +def get_ac(material, E=8): """ by LW 10/03/2010 function calculates the critical angle for total external reflection as a function of the material and the X-ray energy according to ac=sqrt(2*delta) index of refraction is a .dat file from http://henke.lbl.gov/optical_constants/getdb2.html (energy range: 2-30keV,delete the header % lines, name the file n_material.dat) % - calling sequence: ac=get_ac(material,E) where ac: critial angle in degrees, E [keV] (default: 8keV) + calling sequence: ac=get_ac(material,E) where ac: critical angle in degrees, E [keV] (default: 8keV) type get_ac(\'materilal?\') to show list of supported materials" """ - #get list_of supported materials from data file directory: - name=[] + # get list_of supported materials from data file directory: + name = [] for i in range(len(xdatafiles)): - m=re.search('(?<=n_)\w+', xdatafiles[i]) + m = re.search("(?<=n_)\w+", xdatafiles[i]) if m is not None: name.append(m.group(0)) - E=np.array(E) + E = np.array(E) if material in name: - loadn = Path(datapath) / Path('n_{}.dat'.format(material)) - n = pl.loadtxt(loadn, comments='%') - if np.min(E)>=np.min(n[:,0]/1000) and np.max(E)<=np.max(n[:,0]/1000): - d=np.interp(E*1000,n[:,0],n[:,1]) - return np.degrees(np.sqrt(2*d)) - else: print ('error: energy '+"%3.4f" %E +'[keV] out of range ('+"%3.4f" % np.min(n[:,0]/1000)+'== np.min(n[:, 0] / 1000) and np.max(E) <= np.max(n[:, 0] / 1000): + d = np.interp(E * 1000, n[:, 0], n[:, 1]) + return np.degrees(np.sqrt(2 * d)) + else: + print( + "error: energy " + + "%3.4f" % E + + "[keV] out of range (" + + "%3.4f" % np.min(n[:, 0] / 1000) 
+ + "==np.min(n[:,0]/1000) and np.max(E)<=np.max(n[:,0]/1000): - d=np.interp(E*1000,n[:,0],n[:,1]) - b=np.interp(E*1000,n[:,0],n[:,2]) - return d-1j*b - else: print ('error: energy '+"%3.4f" %E +'[keV] out of range ('+"%3.4f" % np.min(n[:,0]/1000)+'== np.min(n[:, 0] / 1000) and np.max(E) <= np.max(n[:, 0] / 1000): + d = np.interp(E * 1000, n[:, 0], n[:, 1]) + b = np.interp(E * 1000, n[:, 0], n[:, 2]) + return d - 1j * b + else: + print( + "error: energy " + + "%3.4f" % E + + "[keV] out of range (" + + "%3.4f" % np.min(n[:, 0] / 1000) + + "==np.min(m[:,0]/1000) and np.max(E)<=np.max(m[:,0]/1000): - mu=np.interp(E*1000,m[:,0],m[:,1]) + loadn = Path(datapath) / Path("mu_{}.dat".format(material)) + m = pl.loadtxt(loadn, comments="%") + if np.min(E) >= np.min(m[:, 0] / 1000) and np.max(E) <= np.max(m[:, 0] / 1000): + mu = np.interp(E * 1000, m[:, 0], m[:, 1]) return mu - else: print ('error: energy '+"%3.4f" %E +'[keV] out of range ('+"%3.4f" % np.min(m[:,0]/1000))+'==np.min(n[:,0]/1000) and np.max(E)<=np.max(n[:,0]/1000): - b=np.interp(E*1000,n[:,0],n[:,2]) - mu=4*np.pi/get_Lambda(E,'um')*b; - return np.exp(-mu*l); - else: print ('error: energy '+"%3.4f" %E +'[keV] out of range ('+"%3.4f" % np.min(n[:,0]/1000)+'= don't check - E=np.array(E) - lam=get_Lambda(E,'A') + loadn = Path(datapath) / Path("n_{}.dat".format(material)) + n = pl.loadtxt(loadn, comments="%") + if np.min(E) >= np.min(n[:, 0] / 1000) and np.max(E) <= np.max( + n[:, 0] / 1000 + ): + b = np.interp(E * 1000, n[:, 0], n[:, 2]) + mu = 4 * np.pi / get_Lambda(E, "um") * b + return np.exp(-mu * l) + else: + print( + "error: energy " + + "%3.4f" % E + + "[keV] out of range (" + + "%3.4f" % np.min(n[:, 0] / 1000) + + "= don't check + E = np.array(E) + lam = get_Lambda(E, "A") if reflection in reflstr: - ind=reflstr.index(reflection) - print (reflstr[ind] +': d_{hkl}=' + "%3.4f" %dspace[ind] +'A I/I_o='+ "%3.4f" %Irel[ind]) - theta=np.degrees(np.arcsin(lam/2/dspace[ind])) - ds=[];I=[] - for l in 
range(0,np.size(theta)): + ind = reflstr.index(reflection) + print( + reflstr[ind] + + ": d_{hkl}=" + + "%3.4f" % dspace[ind] + + "A I/I_o=" + + "%3.4f" % Irel[ind] + ) + theta = np.degrees(np.arcsin(lam / 2 / dspace[ind])) + ds = [] + I = [] + for l in range(0, np.size(theta)): ds.append(dspace[ind]) I.append(Irel[ind]) - res=np.array([np.array([theta]),np.array(ds),np.array(I)])[0] + res = np.array([np.array([theta]), np.array(ds), np.array(I)])[0] return res.T - elif reflection=='reflections?': - print ('List of available reflections (T=25C):') - print (reflstr ) - else: print ('error: reflection not found. Type get_Bragg("reflections?") for list of available reflections.') - else: print ('error: reflection has to be a string and E needs to be numeric. Type get_Bragg? for help') - -def get_EBragg(reflection,theta_Bragg=12.0, d_spacing=None): - """ - by LW 17/03/2010 - function returns the X-ray energy [keV] of a given crystal and Bragg reflection. - Calling sequence: EBragg=get_EBragg(reflection,theta_Bragg (default: 12deg)), thetaB(1)=Bragg angle[deg.] thetaB(2)=dhkl [A], thetaB(3)=I/Io [%]. - E: x-ray energy in keV (can be an array of energies), - reflection: string, e.g. 'Si111'. Reflections implemented from http://database.iem.ac.ru/mincryst, T=25C or calculated from XOP, e.g. for Si111 and Si220 @80K - type get_Bragg(\'reflections?\') for a list of currently availabel materials - by MR 06/07/2017: added optional argument to pass an arbitrary d-spacing [A]. 
- """ - reflstr=['Si111cryo','Si220cryo','Si111', 'Si220', 'Si113', 'Si224', 'Si331', 'Si400','Ge111', 'Ge220', 'Ge113', 'Ge224', 'Ge331', 'Ge620', 'Ge531', 'Ge400', 'Ge115', 'Ge335','Ge440', 'Ge444', 'Ge333', 'C111', 'C220'] - dspace=np.array([3.13379852,1.91905183,3.13542,1.92004,1.63742,1.10854,1.24589,1.35767,3.26627,2.00018,1.70576,1.15480,1.29789,0.89451,0.95627,1.41434,1.08876,0.86274,1.00009,0.81657,1.08876,2.05929,1.26105]) - Irel=np.array([100,67.80,40.50,23.80,16.60,10.90,100,73.80,44.10,23.10,17.00,15.90,15.70,11.50,9.80,8.50,8.20,7.30,3.30,100,39.00]) - # constants needed: - hPlank=6.62606876e-34 - cvac=2.99792458e8 - Qelectron=1.602176463e-19 - - if isinstance(reflection,str): # and all(isinstance(E, (int, long, float, complex)) for item in [E,E]): # <- bug in python: check for E is numeric works in standalone function, but not in this package => don't check - theta_Bragg=np.array(theta_Bragg) - if reflection in reflstr or d_spacing: - if not d_spacing: - ind=reflstr.index(reflection) - #print reflstr[ind] +': d_{hkl}=' + "%3.4f" %dspace[ind] +'A I/I_o='+ "%3.4f" %Irel[ind] - ds=[];I=[] - for l in range(0,np.size(theta_Bragg)): - ds.append(dspace[ind]) - I.append(Irel[ind]) - else: - #ds = [d_spacing] * len(theta_Bragg) #will raise error if theta_Bragg is a scalar - ds = [d_spacing] * np.size(theta_Bragg) - dspace=np.array(ds) - lam=2*dspace*np.sin(theta_Bragg/180*np.pi) - EthetaB=hPlank*cvac/(lam*Qelectron)*1e7 - return EthetaB.T - elif reflection=='reflections?': - print ('List of available reflections (T=25C):') - print (reflstr ) - else: print ('error: reflection not found. Type get_EBragg("reflections?") for list of available reflections.') - - else: print ('error: reflection has to be a string and E needs to be numeric. Type get_Bragg? for help') - - -def get_Qpixel(center=[0,0],cord=[100,100],dis=1000,ps=75,E=8): - """ - written by LW 13/03/2008. 
- calculates Q [1/A] value for SAXS experiments from detector position, - input parameters: position of the direct beam [x,y] in pixel numbers, pixel coordinate to calculte Q [x y], - sample detector distance in mm, pixel size [um], X-ray energy in keV. - function is vector compatible for parameters dis OR ps OR E. - calling sequence: get_Qpixel([xc,yc],[x,y],dis,ps,E) - """ - center=np.array(center);cord=np.array(cord);dis=np.array(dis);ps=np.array(ps);E=np.array(E) - lam=get_Lambda(E,'A') - xc=center[0] - yc=center[1] - x=cord[0] - y=cord[1] - ps=ps/1000 #convert pixel size to mm - #real space distance on the CCD [mm]: - u=np.sqrt(((x-xc)**2+(y-yc)**2))*ps - #wavevector transfer [1/A] - return 4*np.pi/lam*np.sin(np.arctan(u/dis/2)) + elif reflection == "reflections?": + print("List of available reflections (T=25C):") + print(reflstr) + else: + print( + 'error: reflection not found. Type get_Bragg("reflections?") for list of available reflections.' + ) + else: + print( + "error: reflection has to be a string and E needs to be numeric. Type get_Bragg? for help" + ) + + +def get_EBragg(reflection, theta_Bragg=12.0, d_spacing=None): + """ + by LW 17/03/2010 + function returns the X-ray energy [keV] of a given crystal and Bragg reflection. + Calling sequence: EBragg=get_EBragg(reflection,theta_Bragg (default: 12deg)), thetaB(1)=Bragg angle[deg.] thetaB(2)=dhkl [A], thetaB(3)=I/Io [%]. + E: x-ray energy in keV (can be an array of energies), + reflection: string, e.g. 'Si111'. Reflections implemented from http://database.iem.ac.ru/mincryst, T=25C or calculated from XOP, e.g. for Si111 and Si220 @80K + type get_Bragg(\'reflections?\') for a list of currently available materials + by MR 06/07/2017: added optional argument to pass an arbitrary d-spacing [A]. 
+ """ + reflstr = [ + "Si111cryo", + "Si220cryo", + "Si111", + "Si220", + "Si113", + "Si224", + "Si331", + "Si400", + "Ge111", + "Ge220", + "Ge113", + "Ge224", + "Ge331", + "Ge620", + "Ge531", + "Ge400", + "Ge115", + "Ge335", + "Ge440", + "Ge444", + "Ge333", + "C111", + "C220", + ] + dspace = np.array( + [ + 3.13379852, + 1.91905183, + 3.13542, + 1.92004, + 1.63742, + 1.10854, + 1.24589, + 1.35767, + 3.26627, + 2.00018, + 1.70576, + 1.15480, + 1.29789, + 0.89451, + 0.95627, + 1.41434, + 1.08876, + 0.86274, + 1.00009, + 0.81657, + 1.08876, + 2.05929, + 1.26105, + ] + ) + Irel = np.array( + [ + 100, + 67.80, + 40.50, + 23.80, + 16.60, + 10.90, + 100, + 73.80, + 44.10, + 23.10, + 17.00, + 15.90, + 15.70, + 11.50, + 9.80, + 8.50, + 8.20, + 7.30, + 3.30, + 100, + 39.00, + ] + ) + # constants needed: + hPlank = 6.62606876e-34 + cvac = 2.99792458e8 + Qelectron = 1.602176463e-19 + + if isinstance( + reflection, str + ): # and all(isinstance(E, (int, long, float, complex)) for item in [E,E]): # <- bug in python: check for E is numeric works in standalone function, but not in this package => don't check + theta_Bragg = np.array(theta_Bragg) + if reflection in reflstr or d_spacing: + if not d_spacing: + ind = reflstr.index(reflection) + # print reflstr[ind] +': d_{hkl}=' + "%3.4f" %dspace[ind] +'A I/I_o='+ "%3.4f" %Irel[ind] + ds = [] + I = [] + for l in range(0, np.size(theta_Bragg)): + ds.append(dspace[ind]) + I.append(Irel[ind]) + else: + # ds = [d_spacing] * len(theta_Bragg) #will raise error if theta_Bragg is a scalar + ds = [d_spacing] * np.size(theta_Bragg) + dspace = np.array(ds) + lam = 2 * dspace * np.sin(theta_Bragg / 180 * np.pi) + EthetaB = hPlank * cvac / (lam * Qelectron) * 1e7 + return EthetaB.T + elif reflection == "reflections?": + print("List of available reflections (T=25C):") + print(reflstr) + else: + print( + 'error: reflection not found. Type get_EBragg("reflections?") for list of available reflections.' 
+ ) + + else: + print( + "error: reflection has to be a string and E needs to be numeric. Type get_Bragg? for help" + ) + + +def get_Qpixel(center=[0, 0], cord=[100, 100], dis=1000, ps=75, E=8): + """ + written by LW 13/03/2008. + calculates Q [1/A] value for SAXS experiments from detector position, + input parameters: position of the direct beam [x,y] in pixel numbers, pixel coordinate to calculate Q [x y], + sample detector distance in mm, pixel size [um], X-ray energy in keV. + function is vector compatible for parameters dis OR ps OR E. + calling sequence: get_Qpixel([xc,yc],[x,y],dis,ps,E) + """ + center = np.array(center) + cord = np.array(cord) + dis = np.array(dis) + ps = np.array(ps) + E = np.array(E) + lam = get_Lambda(E, "A") + xc = center[0] + yc = center[1] + x = cord[0] + y = cord[1] + ps = ps / 1000 # convert pixel size to mm + # real space distance on the CCD [mm]: + u = np.sqrt(((x - xc) ** 2 + (y - yc) ** 2)) * ps + # wavevector transfer [1/A] + return 4 * np.pi / lam * np.sin(np.arctan(u / dis / 2)) + def get_2theta(Qxy, E=8): """ by LW 16/06/2005 - Returns the 2theta angle [deg] as a function of the input prameters Qxy [1/A] and Energy. + Returns the 2theta angle [deg] as a function of the input parameters Qxy [1/A] and Energy. Type get_2theta(Qxy [1/A], E [keV] (default: 8keV) ). This function is vector compatible. """ - Qxy=np.array(Qxy);E=np.array(E) - lam=get_Lambda(E,'A') - return np.degrees(2*np.arcsin(Qxy/(4*np.pi)*lam)) + Qxy = np.array(Qxy) + E = np.array(E) + lam = get_Lambda(E, "A") + return np.degrees(2 * np.arcsin(Qxy / (4 * np.pi) * lam)) -def get_Gam(Qz,alpha_i=.12,E=8): - hlpstr="by sof 01/12/2003 Returns the angle Gamma (out-of-plane angle in GIXD) defined in terms of Qz, the incident angle and the energy of the X-Ray beam. Type getGam(Qz [1/A], Incident Angle [deg] (default: 0.12deg) , E[keV] (default:8keV)), This function is vector compatible. 
Type get_Gam(\'?\') for help" - if Qz=='?': - print (hlpstr) + +def get_Game(Qz, alpha_i=0.12, E=8): + hlpstr = "by sof 01/12/2003 Returns the angle Gamma (out-of-plane angle in GIXD) defined in terms of Qz, the incident angle and the energy of the X-Ray beam. Type getGame(Qz [1/A], Incident Angle [deg] (default: 0.12deg) , E[keV] (default:8keV)), This function is vector compatible. Type get_Game('?') for help" + if Qz == "?": + print(hlpstr) else: - Qz=np.array(Qz);alpha_i=np.array(alpha_i);E=np.array(E) - lam=get_Lambda(E,'A') - return np.degrees(lam*Qz/(2*np.pi)-np.sin(alpha_i)) - -def get_Qll(Ty,alpha_i=.12,d=1000,E=8): - hlpstr="LW 26-01-2005 Function returns the parallel wavevector transfer in GI-XPCS geometry [cm^-1]. Type: get_Qll(Ty[mm],alpha_i[deg] (default: 0.12deg), d [mm] (default: 1000mm),E[keV] (default:8keV))=>qll[cm^-1]; type get_Qll(\'?\') for help" - if Ty=='?': - print ( hlpstr) + Qz = np.array(Qz) + alpha_i = np.array(alpha_i) + E = np.array(E) + lam = get_Lambda(E, "A") + return np.degrees(lam * Qz / (2 * np.pi) - np.sin(alpha_i)) + + +def get_Qll(Ty, alpha_i=0.12, d=1000, E=8): + hlpstr = "LW 26-01-2005 Function returns the parallel wavevector transfer in GI-XPCS geometry [cm^-1]. Type: get_Qll(Ty[mm],alpha_i[deg] (default: 0.12deg), d [mm] (default: 1000mm),E[keV] (default:8keV))=>qll[cm^-1]; type get_Qll('?') for help" + if Ty == "?": + print(hlpstr) else: - Ty=np.array(Ty);alpha_i=np.array(alpha_i);E=np.array(E) - lam=get_Lambda(E,'A') - af=np.arctan((d*np.tan(np.deg2rad(alpha_i))+Ty)/d) - return (2*np.pi/lam)*(np.cos(np.deg2rad(alpha_i))-np.cos(af))*1e8 - -def get_Qxy(theta,E=8): - hlpstr="sof 26/11/2003 To return for in plane X-ray scattering the parallel wavevector transfer in 1/A. Type get_Qxy(FULL scattering angle [deg], E [keV] (default:8keV), the function returns the Q vector [1/A] in the surface plane. Angle measured in surface from specular direction! This function is vector compatible. Type get_QXY(\'?\') for help." 
- if theta=='?': - print (hlpstr) + Ty = np.array(Ty) + alpha_i = np.array(alpha_i) + E = np.array(E) + lam = get_Lambda(E, "A") + af = np.arctan((d * np.tan(np.deg2rad(alpha_i)) + Ty) / d) + return (2 * np.pi / lam) * (np.cos(np.deg2rad(alpha_i)) - np.cos(af)) * 1e8 + + +def get_Qxy(theta, E=8): + hlpstr = "sof 26/11/2003 To return for in plane X-ray scattering the parallel wavevector transfer in 1/A. Type get_Qxy(FULL scattering angle [deg], E [keV] (default:8keV), the function returns the Q vector [1/A] in the surface plane. Angle measured in surface from specular direction! This function is vector compatible. Type get_QXY('?') for help." + if theta == "?": + print(hlpstr) else: - theta=np.array(theta);E=np.array(E) - lam=get_Lambda(E,'A') - return 4*np.pi*np.sin(np.deg2rad(theta)/2)/lam - -def get_Qz(Gam,alpha_i=.12,E=8): - hlpstr="function by sof 01/12/2003 Returns the Qz z-component of wavevector transfer defined in terms of the incident and measured angles and the energy of the X-Ray beam. Type get_Qz(Gam [deg], alpha_i [deg] (default: 0.12deg), E[keV] (default: 8keV)). This function is vector compatible. Type get_Qz(\'?\') for help." - if Gam=='?': - print (hlpstr) + theta = np.array(theta) + E = np.array(E) + lam = get_Lambda(E, "A") + return 4 * np.pi * np.sin(np.deg2rad(theta) / 2) / lam + + +def get_Qz(Game, alpha_i=0.12, E=8): + hlpstr = "function by sof 01/12/2003 Returns the Qz z-component of wavevector transfer defined in terms of the incident and measured angles and the energy of the X-Ray beam. Type get_Qz(Game [deg], alpha_i [deg] (default: 0.12deg), E[keV] (default: 8keV)). This function is vector compatible. Type get_Qz('?') for help." 
+ if Game == "?": + print(hlpstr) else: - E=np.array(E);Gam=np.deg2rad(np.array(Gam));alpha_i=np.deg2rad(np.array(alpha_i)) - lam=get_Lambda(E,'A') - return 2*np.pi*(np.sin(Gam)+np.sin(alpha_i))/lam + E = np.array(E) + Game = np.deg2rad(np.array(Game)) + alpha_i = np.deg2rad(np.array(alpha_i)) + lam = get_Lambda(E, "A") + return 2 * np.pi * (np.sin(Game) + np.sin(alpha_i)) / lam -def ch2deg(chn,channel_per_degree=100,chOff=0): + +def ch2deg(chn, channel_per_degree=100, chOff=0): """ LW 08-07-2005 function converts the PSD channel (chn) into angles [deg], @@ -342,100 +622,142 @@ def ch2deg(chn,channel_per_degree=100,chOff=0): Calling sequence: ch2deg(chan,channel_per_degree (default: 100),chOff (default=0) -> corresponding angle [deg]. Function is vector compatible. """ - chn=np.array(chn);channel_per_degree=np.array(channel_per_degree);chOff=np.array(chOff) - return (chn-chOff)/channel_per_degree + chn = np.array(chn) + channel_per_degree = np.array(channel_per_degree) + chOff = np.array(chOff) + return (chn - chOff) / channel_per_degree + -def get_Vineyard(Qz,E=8,ra0=1): +def get_Vineyard(Qz, E=8, ra0=1): """ by LW 20/07/2006 function calculates the Vineyard (transmission) Function for a given (range) of Qz(s) and given X-ray energy and scattering length density ra0 of the subphase media. Type: getVineyard(Qz [A], E [keV] (default: 8keV), ra0 [SI] (default: 1 ). 
""" - lam=get_Lambda(E,'A') - Qc=4*np.pi*np.sin(np.sqrt(np.pi*ra0)*lam/np.pi)/lam - rx=Qz/Qc - V=np.ones(np.size(Qz)) - for i in range(0,np.size(Qz)): - if rx[i]<=1: - V[i]=2*rx[i] - else: V[i]=2*rx[i]/(rx[i]+np.sqrt(rx[i]**2-1)) + lam = get_Lambda(E, "A") + Qc = 4 * np.pi * np.sin(np.sqrt(np.pi * ra0) * lam / np.pi) / lam + rx = Qz / Qc + V = np.ones(np.size(Qz)) + for i in range(0, np.size(Qz)): + if rx[i] <= 1: + V[i] = 2 * rx[i] + else: + V[i] = 2 * rx[i] / (rx[i] + np.sqrt(rx[i] ** 2 - 1)) return V -def get_gap(E,harmonic=3,ID=default_id): + +def get_gap(E, harmonic=3, ID=default_id): """ by LW 12/03/2014, function calculates the undulator gap for a requested energy and harmonic based on magnetic measurement data in the database type get_gap(E [kev], harmonic [integer] (default=3), id (default: defined by 'default_id')); E can be an array of energies. Type get_gap\"ID?\") for a list of available magnetic datasets. - + Change gap unit from mm to um by Yugang at 2019/April - + """ - #get list_of available magnetic measurements from data file directory: - name=[] + # get list_of available magnetic measurements from data file directory: + name = [] for i in range(len(xdatafiles)): - m=re.search('(?<=id_)\w+', xdatafiles[i]) + m = re.search("(?<=id_)\w+", xdatafiles[i]) if m is not None: name.append(m.group(0)) - if E=='ID?': - print ('list of available magnetic measurements (based on data files in directory {}:'.format(str(datapath))) - print (name) + if E == "ID?": + print( + "list of available magnetic measurements (based on data files in directory {}:".format( + str(datapath) + ) + ) + print(name) else: - E=np.array(E)*1.0 + E = np.array(E) * 1.0 harm_check(harmonic) if ID in name: - loadn = Path(datapath) / Path('id_{}.dat'.format(ID)) - magdat = pl.loadtxt(loadn, comments='%') - #harmonic=harmonic*1.0 - if np.min(E/harmonic)>=np.min(magdat[:,2]) and np.max(E/harmonic)<=np.max(magdat[:,2]): - gap=np.interp(E/harmonic,magdat[:,2],magdat[:,0]) + loadn = 
Path(datapath) / Path("id_{}.dat".format(ID)) + magdat = pl.loadtxt(loadn, comments="%") + # harmonic=harmonic*1.0 + if np.min(E / harmonic) >= np.min(magdat[:, 2]) and np.max( + E / harmonic + ) <= np.max(magdat[:, 2]): + gap = np.interp(E / harmonic, magdat[:, 2], magdat[:, 0]) return gap * 1000.0 # this else should be a warning only and should return NaN - else: raise xfuncs_Exception ('error: energy '+"%3.4f" %E +'[keV] out of range for requested harmonic number, gap limit: ('+"%3.4f" % np.min(magdat[:,0])+'==np.min(magdat[:,0]) and np.max(gap)<=np.max(magdat[:,0]): - Es=np.interp(gap,magdat[:,0],magdat[:,2]) - Eharmonics=np.array([harmonic,harmonic*Es]) - return Eharmonics.T - else: print ('error: gap '+"%3.4f" % gap +'[mm] out of range for gap ('+"%3.4f" % np.min(magdat[:,0])+'== np.min(magdat[:, 0]) and np.max(gap) <= np.max( + magdat[:, 0] + ): + Es = np.interp(gap, magdat[:, 0], magdat[:, 2]) + Eharmonics = np.array([harmonic, harmonic * Es]) + return Eharmonics.T + else: + print( + "error: gap " + + "%3.4f" % gap + + "[mm] out of range for gap (" + + "%3.4f" % np.min(magdat[:, 0]) + + "= xmax or np.min(Energy) xmax or np.min(Energy) < xmin: + raise xfuncs_Exception( + [ + "X-ray energy out of range for cross section data. 
", + xmin, + "<=Energy<=,", + xmax, + " [keV]", + ] + ) # calculate photo current PER 1E10 ph/s: - PhCur=1E10*crossdat[:,0]*1e3*1.6022e-16*(1-np.exp(-crossdat[:,1]*thickness*rho_Si))/epsilon - PhCurint=np.interp(Energy,crossdat[:,0]*1E3,PhCur) - photon_flux=current/PhCurint*1E10 - print ('photo current for E= ',Energy,'keV: ',PhCurint*1E3,'mA/10^10ph/s') - print ('flux for photo current ',current*1E3,'mA at E=',Energy,'keV: ',photon_flux,'ph/s') + PhCur = ( + 1e10 + * crossdat[:, 0] + * 1e3 + * 1.6022e-16 + * (1 - np.exp(-crossdat[:, 1] * thickness * rho_Si)) + / epsilon + ) + PhCurint = np.interp(Energy, crossdat[:, 0] * 1e3, PhCur) + photon_flux = current / PhCurint * 1e10 + print("photo current for E= ", Energy, "keV: ", PhCurint * 1e3, "mA/10^10ph/s") + print( + "flux for photo current ", + current * 1e3, + "mA at E=", + Energy, + "keV: ", + photon_flux, + "ph/s", + ) return photon_flux + ####### Help functions class xfuncs_Exception(Exception): pass @@ -478,6 +824,7 @@ class xfuncs_Exception(Exception): class to raise xfuncs specific exceptions """ + def harm_check(harm_n): """ by LW 03/19/2015 @@ -486,21 +833,22 @@ def harm_check(harm_n): calling sequence: harm_check(harm_n), where harm_n is a single value or list of values function raises an exception is harm_n does contain values other than integers >=1 """ - state=True + state = True - if np.all(np.array(harm_n)>=1)!=True: - raise xfuncs_Exception("Type Error: Harmonic numbers need to be >=1!") + if np.all(np.array(harm_n) >= 1) != True: + raise xfuncs_Exception("Type Error: Harmonic numbers need to be >=1!") try: - list(harm_n) # b is the list to be checked + list(harm_n) # b is the list to be checked for i in list(harm_n): - #print isinstance(i,int) - state=state*isinstance(i,int) - except: # check for b being a single integer + # print isinstance(i,int) + state = state * isinstance(i, int) + except: # check for b being a single integer try: - state=state*isinstance(harm_n, int) - #print 
isinstance(harm_n, int) - except: pass + state = state * isinstance(harm_n, int) + # print isinstance(harm_n, int) + except: + pass - if state !=True: + if state != True: raise xfuncs_Exception("Type Error: Harmonic numbers need to be integers >=1!") diff --git a/pixi.toml b/pixi.toml index 4f0e78e..9371987 100644 --- a/pixi.toml +++ b/pixi.toml @@ -41,4 +41,4 @@ isort-fix = "isort ." [environments] terminal = {features=["base", "terminal"], solve-group="profile"} -dev = {features=["dev"], no-default-feature = true} \ No newline at end of file +dev = {features=["dev"], no-default-feature = true} diff --git a/requirements_backup.txt b/requirements_backup.txt index b8841b3..8d5ba5f 100644 --- a/requirements_backup.txt +++ b/requirements_backup.txt @@ -1,6 +1,7 @@ #atom #cython databroker +eiger-io #enaml #fabio #h5py @@ -14,4 +15,3 @@ pymongo scikit-beam scikit-image scipy -eiger-io diff --git a/setup.py b/setup.py index d4e2e08..6c83807 100644 --- a/setup.py +++ b/setup.py @@ -1,28 +1,26 @@ #!/usr/bin/env python -from __future__ import (absolute_import, division, print_function) +from __future__ import absolute_import, division, print_function -import sys -import warnings import versioneer from setuptools import setup, find_packages no_git_reqs = [] -with open('requirements.txt') as f: +with open("requirements.txt") as f: required = f.read().splitlines() for r in required: - if not (r.startswith('git') or r.startswith('#') or r.strip() == ''): + if not (r.startswith("git") or r.startswith("#") or r.strip() == ""): no_git_reqs.append(r) setup( - name='chxtools', + name="chxtools", version=versioneer.get_version(), cmdclass=versioneer.get_cmdclass(), - author='Brookhaven National Laboratory', - long_description=open('README.md').read(), - long_description_content_type='text/markdown', + author="Brookhaven National Laboratory", + long_description=open("README.md").read(), + long_description_content_type="text/markdown", packages=find_packages(), - 
package_data={'chxtools': ['X-ray_database/*.dat']}, + package_data={"chxtools": ["X-ray_database/*.dat"]}, include_package_data=True, install_requires=no_git_reqs, ) diff --git a/versioneer.py b/versioneer.py index 5db821a..8403e08 100644 --- a/versioneer.py +++ b/versioneer.py @@ -1,4 +1,3 @@ - # Version: 0.15 """ @@ -340,6 +339,7 @@ """ from __future__ import print_function + try: import configparser except ImportError: @@ -368,11 +368,13 @@ def get_root(): setup_py = os.path.join(root, "setup.py") versioneer_py = os.path.join(root, "versioneer.py") if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)): - err = ("Versioneer was unable to run the project root directory. " - "Versioneer requires setup.py to be executed from " - "its immediate directory (like 'python setup.py COMMAND'), " - "or in a way that lets it use sys.argv[0] to find the root " - "(like 'python path/to/setup.py COMMAND').") + err = ( + "Versioneer was unable to run the project root directory. " + "Versioneer requires setup.py to be executed from " + "its immediate directory (like 'python setup.py COMMAND'), " + "or in a way that lets it use sys.argv[0] to find the root " + "(like 'python path/to/setup.py COMMAND')." + ) raise VersioneerBadRootError(err) try: # Certain runtime workflows (setup.py install/develop in a setuptools @@ -383,8 +385,10 @@ def get_root(): # versioneer.py was first imported, even in later projects. 
me = os.path.realpath(os.path.abspath(__file__)) if os.path.splitext(me)[0] != os.path.splitext(versioneer_py)[0]: - print("Warning: build in %s is using versioneer.py from %s" - % (os.path.dirname(me), versioneer_py)) + print( + "Warning: build in %s is using versioneer.py from %s" + % (os.path.dirname(me), versioneer_py) + ) except NameError: pass return root @@ -404,6 +408,7 @@ def get(parser, name): if parser.has_option("versioneer", name): return parser.get("versioneer", name) return None + cfg = VersioneerConfig() cfg.VCS = VCS cfg.style = get(parser, "style") or "" @@ -418,6 +423,7 @@ def get(parser, name): class NotThisMethod(Exception): pass + # these dictionaries contain VCS-specific tools LONG_VERSION_PY = {} HANDLERS = {} @@ -429,6 +435,7 @@ def decorate(f): HANDLERS[vcs] = {} HANDLERS[vcs][method] = f return f + return decorate @@ -439,9 +446,12 @@ def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False): try: dispcmd = str([c] + args) # remember shell=False, so use git.cmd on windows, not just git - p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE, - stderr=(subprocess.PIPE if hide_stderr - else None)) + p = subprocess.Popen( + [c] + args, + cwd=cwd, + stdout=subprocess.PIPE, + stderr=(subprocess.PIPE if hide_stderr else None), + ) break except EnvironmentError: e = sys.exc_info()[1] @@ -463,7 +473,9 @@ def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False): print("unable to run %s (error)" % dispcmd) return None return stdout -LONG_VERSION_PY['git'] = ''' + + +LONG_VERSION_PY["git"] = """ # This file helps to compute a version number in source trees obtained from # git-archive tarball (such as those provided by githubs download-from-tag # feature). 
Distribution tarballs (built by setup.py sdist) and build @@ -691,7 +703,7 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): # TAG-NUM-gHEX mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe) if not mo: - # unparseable. Maybe git-describe is misbehaving? + # unparsable. Maybe git-describe is misbehaving? pieces["error"] = ("unable to parse git-describe output: '%%s'" %% describe_out) return pieces @@ -923,7 +935,7 @@ def get_versions(): return {"version": "0+unknown", "full-revisionid": None, "dirty": None, "error": "unable to compute version"} -''' +""" @register_vcs_handler("git", "get_keywords") @@ -963,7 +975,7 @@ def git_versions_from_keywords(keywords, tag_prefix, verbose): # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of # just "foo-1.0". If we see a "tag: " prefix, prefer those. TAG = "tag: " - tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)]) + tags = set([r[len(TAG) :] for r in refs if r.startswith(TAG)]) if not tags: # Either we're using git < 1.8.3, or there really are no tags. We use # a heuristic: assume all version tags have a digit. The old git %d @@ -972,27 +984,32 @@ def git_versions_from_keywords(keywords, tag_prefix, verbose): # between branches and tags. By ignoring refnames without digits, we # filter out many common branch names like "release" and # "stabilization", as well as "HEAD" and "master". - tags = set([r for r in refs if re.search(r'\d', r)]) + tags = set([r for r in refs if re.search(r"\d", r)]) if verbose: - print("discarding '%s', no digits" % ",".join(refs-tags)) + print("discarding '%s', no digits" % ",".join(refs - tags)) if verbose: print("likely tags: %s" % ",".join(sorted(tags))) for ref in sorted(tags): # sorting will prefer e.g. 
"2.0" over "2.0rc1" if ref.startswith(tag_prefix): - r = ref[len(tag_prefix):] + r = ref[len(tag_prefix) :] if verbose: print("picking %s" % r) - return {"version": r, - "full-revisionid": keywords["full"].strip(), - "dirty": False, "error": None - } + return { + "version": r, + "full-revisionid": keywords["full"].strip(), + "dirty": False, + "error": None, + } # no suitable tags, so version is "0+unknown", but full hex is still there if verbose: print("no suitable tags, using unknown + full revision id") - return {"version": "0+unknown", - "full-revisionid": keywords["full"].strip(), - "dirty": False, "error": "no suitable tags"} + return { + "version": "0+unknown", + "full-revisionid": keywords["full"].strip(), + "dirty": False, + "error": "no suitable tags", + } @register_vcs_handler("git", "pieces_from_vcs") @@ -1012,9 +1029,9 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): GITS = ["git.cmd", "git.exe"] # if there is a tag, this yields TAG-NUM-gHEX[-dirty] # if there are no tags, this yields HEX[-dirty] (no NUM) - describe_out = run_command(GITS, ["describe", "--tags", "--dirty", - "--always", "--long"], - cwd=root) + describe_out = run_command( + GITS, ["describe", "--tags", "--dirty", "--always", "--long"], cwd=root + ) # --long was added in git-1.5.5 if describe_out is None: raise NotThisMethod("'git describe' failed") @@ -1037,17 +1054,16 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): dirty = git_describe.endswith("-dirty") pieces["dirty"] = dirty if dirty: - git_describe = git_describe[:git_describe.rindex("-dirty")] + git_describe = git_describe[: git_describe.rindex("-dirty")] # now we have TAG-NUM-gHEX or HEX if "-" in git_describe: # TAG-NUM-gHEX - mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe) + mo = re.search(r"^(.+)-(\d+)-g([0-9a-f]+)$", git_describe) if not mo: - # unparseable. Maybe git-describe is misbehaving? 
- pieces["error"] = ("unable to parse git-describe output: '%s'" - % describe_out) + # unparsable. Maybe git-describe is misbehaving? + pieces["error"] = "unable to parse git-describe output: '%s'" % describe_out return pieces # tag @@ -1056,10 +1072,12 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): if verbose: fmt = "tag '%s' doesn't start with prefix '%s'" print(fmt % (full_tag, tag_prefix)) - pieces["error"] = ("tag '%s' doesn't start with prefix '%s'" - % (full_tag, tag_prefix)) + pieces["error"] = "tag '%s' doesn't start with prefix '%s'" % ( + full_tag, + tag_prefix, + ) return pieces - pieces["closest-tag"] = full_tag[len(tag_prefix):] + pieces["closest-tag"] = full_tag[len(tag_prefix) :] # distance: number of commits since tag pieces["distance"] = int(mo.group(2)) @@ -1070,8 +1088,7 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): else: # HEX: no tags pieces["closest-tag"] = None - count_out = run_command(GITS, ["rev-list", "HEAD", "--count"], - cwd=root) + count_out = run_command(GITS, ["rev-list", "HEAD", "--count"], cwd=root) pieces["distance"] = int(count_out) # total number of commits return pieces @@ -1116,12 +1133,18 @@ def versions_from_parentdir(parentdir_prefix, root, verbose): dirname = os.path.basename(root) if not dirname.startswith(parentdir_prefix): if verbose: - print("guessing rootdir is '%s', but '%s' doesn't start with " - "prefix '%s'" % (root, dirname, parentdir_prefix)) + print( + "guessing rootdir is '%s', but '%s' doesn't start with " + "prefix '%s'" % (root, dirname, parentdir_prefix) + ) raise NotThisMethod("rootdir doesn't start with parentdir_prefix") - return {"version": dirname[len(parentdir_prefix):], - "full-revisionid": None, - "dirty": False, "error": None} + return { + "version": dirname[len(parentdir_prefix) :], + "full-revisionid": None, + "dirty": False, + "error": None, + } + SHORT_VERSION_PY = """ # This file was generated by 'versioneer.py' (0.15) from 
@@ -1148,8 +1171,9 @@ def versions_from_file(filename): contents = f.read() except EnvironmentError: raise NotThisMethod("unable to read _version.py") - mo = re.search(r"version_json = '''\n(.*)''' # END VERSION_JSON", - contents, re.M | re.S) + mo = re.search( + r"version_json = '''\n(.*)''' # END VERSION_JSON", contents, re.M | re.S + ) if not mo: raise NotThisMethod("no version_json in _version.py") return json.loads(mo.group(1)) @@ -1157,8 +1181,7 @@ def versions_from_file(filename): def write_to_version_file(filename, versions): os.unlink(filename) - contents = json.dumps(versions, sort_keys=True, - indent=1, separators=(",", ": ")) + contents = json.dumps(versions, sort_keys=True, indent=1, separators=(",", ": ")) with open(filename, "w") as f: f.write(SHORT_VERSION_PY % contents) @@ -1188,8 +1211,7 @@ def render_pep440(pieces): rendered += ".dirty" else: # exception #1 - rendered = "0+untagged.%d.g%s" % (pieces["distance"], - pieces["short"]) + rendered = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" return rendered @@ -1296,10 +1318,12 @@ def render_git_describe_long(pieces): def render(pieces, style): if pieces["error"]: - return {"version": "unknown", - "full-revisionid": pieces.get("long"), - "dirty": None, - "error": pieces["error"]} + return { + "version": "unknown", + "full-revisionid": pieces.get("long"), + "dirty": None, + "error": pieces["error"], + } if not style or style == "default": style = "pep440" # the default @@ -1319,8 +1343,12 @@ def render(pieces, style): else: raise ValueError("unknown style '%s'" % style) - return {"version": rendered, "full-revisionid": pieces["long"], - "dirty": pieces["dirty"], "error": None} + return { + "version": rendered, + "full-revisionid": pieces["long"], + "dirty": pieces["dirty"], + "error": None, + } class VersioneerBadRootError(Exception): @@ -1341,8 +1369,9 @@ def get_versions(verbose=False): handlers = HANDLERS.get(cfg.VCS) assert handlers, 
"unrecognized VCS '%s'" % cfg.VCS verbose = verbose or cfg.verbose - assert cfg.versionfile_source is not None, \ + assert cfg.versionfile_source is not None, ( "please set versioneer.versionfile_source" + ) assert cfg.tag_prefix is not None, "please set versioneer.tag_prefix" versionfile_abs = os.path.join(root, cfg.versionfile_source) @@ -1396,8 +1425,12 @@ def get_versions(verbose=False): if verbose: print("unable to compute version") - return {"version": "0+unknown", "full-revisionid": None, - "dirty": None, "error": "unable to compute version"} + return { + "version": "0+unknown", + "full-revisionid": None, + "dirty": None, + "error": "unable to compute version", + } def get_version(): @@ -1443,6 +1476,7 @@ def run(self): print(" dirty: %s" % vers.get("dirty")) if vers["error"]: print(" error: %s" % vers["error"]) + cmds["version"] = cmd_version # we override "build_py" in both distutils and setuptools @@ -1466,10 +1500,10 @@ def run(self): # now locate _version.py in the new build/ directory and replace # it with an updated value if cfg.versionfile_build: - target_versionfile = os.path.join(self.build_lib, - cfg.versionfile_build) + target_versionfile = os.path.join(self.build_lib, cfg.versionfile_build) print("UPDATING %s" % target_versionfile) write_to_version_file(target_versionfile, versions) + cmds["build_py"] = cmd_build_py if "cx_Freeze" in sys.modules: # cx_freeze enabled? 
@@ -1488,13 +1522,17 @@ def run(self): os.unlink(target_versionfile) with open(cfg.versionfile_source, "w") as f: LONG = LONG_VERSION_PY[cfg.VCS] - f.write(LONG % - {"DOLLAR": "$", - "STYLE": cfg.style, - "TAG_PREFIX": cfg.tag_prefix, - "PARENTDIR_PREFIX": cfg.parentdir_prefix, - "VERSIONFILE_SOURCE": cfg.versionfile_source, - }) + f.write( + LONG + % { + "DOLLAR": "$", + "STYLE": cfg.style, + "TAG_PREFIX": cfg.tag_prefix, + "PARENTDIR_PREFIX": cfg.parentdir_prefix, + "VERSIONFILE_SOURCE": cfg.versionfile_source, + } + ) + cmds["build_exe"] = cmd_build_exe del cmds["build_py"] @@ -1522,8 +1560,10 @@ def make_release_tree(self, base_dir, files): # updated value target_versionfile = os.path.join(base_dir, cfg.versionfile_source) print("UPDATING %s" % target_versionfile) - write_to_version_file(target_versionfile, - self._versioneer_generated_versions) + write_to_version_file( + target_versionfile, self._versioneer_generated_versions + ) + cmds["sdist"] = cmd_sdist return cmds @@ -1577,11 +1617,13 @@ def do_setup(): root = get_root() try: cfg = get_config_from_root(root) - except (EnvironmentError, configparser.NoSectionError, - configparser.NoOptionError) as e: + except ( + EnvironmentError, + configparser.NoSectionError, + configparser.NoOptionError, + ) as e: if isinstance(e, (EnvironmentError, configparser.NoSectionError)): - print("Adding sample versioneer config to setup.cfg", - file=sys.stderr) + print("Adding sample versioneer config to setup.cfg", file=sys.stderr) with open(os.path.join(root, "setup.cfg"), "a") as f: f.write(SAMPLE_CONFIG) print(CONFIG_ERROR, file=sys.stderr) @@ -1590,15 +1632,18 @@ def do_setup(): print(" creating %s" % cfg.versionfile_source) with open(cfg.versionfile_source, "w") as f: LONG = LONG_VERSION_PY[cfg.VCS] - f.write(LONG % {"DOLLAR": "$", - "STYLE": cfg.style, - "TAG_PREFIX": cfg.tag_prefix, - "PARENTDIR_PREFIX": cfg.parentdir_prefix, - "VERSIONFILE_SOURCE": cfg.versionfile_source, - }) - - ipy = 
os.path.join(os.path.dirname(cfg.versionfile_source), - "__init__.py") + f.write( + LONG + % { + "DOLLAR": "$", + "STYLE": cfg.style, + "TAG_PREFIX": cfg.tag_prefix, + "PARENTDIR_PREFIX": cfg.parentdir_prefix, + "VERSIONFILE_SOURCE": cfg.versionfile_source, + } + ) + + ipy = os.path.join(os.path.dirname(cfg.versionfile_source), "__init__.py") if os.path.exists(ipy): try: with open(ipy, "r") as f: @@ -1640,8 +1685,10 @@ def do_setup(): else: print(" 'versioneer.py' already in MANIFEST.in") if cfg.versionfile_source not in simple_includes: - print(" appending versionfile_source ('%s') to MANIFEST.in" % - cfg.versionfile_source) + print( + " appending versionfile_source ('%s') to MANIFEST.in" + % cfg.versionfile_source + ) with open(manifest_in, "a") as f: f.write("include %s\n" % cfg.versionfile_source) else: @@ -1689,6 +1736,7 @@ def scan_setup_py(): errors += 1 return errors + if __name__ == "__main__": cmd = sys.argv[1] if cmd == "setup": From 631b8e5ed69038e3ff3e8b06d499a56bd725d8e9 Mon Sep 17 00:00:00 2001 From: jennmald Date: Thu, 19 Mar 2026 15:15:48 -0400 Subject: [PATCH 4/7] fix ruff formatting for xfuncs --- .pre-commit-config.yaml | 2 +- chxtools/xfuncs.py | 64 +++++++++++++++++++---------------------- 2 files changed, 31 insertions(+), 35 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 26e53c4..c449a03 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -93,4 +93,4 @@ repos: language: python entry: cog -P -r -I ./helpers files: "^docs/pages/guides/(packaging_compiled|docs|tasks|gha_basic).md|^copier.yml|^docs/_includes/pyproject.md" - additional_dependencies: [cogapp, cookiecutter, tomlkit] \ No newline at end of file + additional_dependencies: [cogapp, cookiecutter, tomlkit] diff --git a/chxtools/xfuncs.py b/chxtools/xfuncs.py index 2bcb1a0..347b34f 100644 --- a/chxtools/xfuncs.py +++ b/chxtools/xfuncs.py @@ -45,25 +45,25 @@ def get_Lambda(E, u="SI"): Qelectron = 1.602176463e-19 scale = 1 # 
l=hPlank*cvac/(E*1000*Qelectron) - l = hPlank * cvac / (E * 1000 * Qelectron) + wavelength = hPlank * cvac / (E * 1000 * Qelectron) if u == "A": scale = 1e10 - return l * scale # Angstroem + return wavelength * scale # Angstroem elif u == "nm": scale = 1e9 - return l * scale # nm + return wavelength * scale # nm elif u == "um": scale = 1e6 - return l * scale # um + return wavelength * scale # um elif u == "mm": scale = 1e3 - return l * scale # mm + return wavelength * scale # mm elif u == "cm": scale = 1e2 - return l * scale # cm + return wavelength * scale # cm elif u == "m" or u == "SI": scale = 1 - return l * scale + return wavelength * scale else: print( "invalid option, type \"get_Lambda('?')\" for available options and syntax" @@ -222,7 +222,7 @@ def get_mu(material, E=8): ) -def get_T(material, E=8, l=1): +def get_T(material, E=8, thickness=1): """ by LW 10/03/2010, function calculates the transmission as a function of the material and the X-ray energy according to e^(-mul), @@ -241,9 +241,9 @@ def get_T(material, E=8, l=1): name.append(m.group(0)) E = np.array(E) - l = np.array(l) - # if len(E)==1 or len(l)==1: - if E.size == 1 or l.size == 1: + thickness = np.array(thickness) + # if len(E)==1 or len(thickness)==1: + if E.size == 1 or thickness.size == 1: if material in name: loadn = Path(datapath) / Path("n_{}.dat".format(material)) n = pl.loadtxt(loadn, comments="%") @@ -252,7 +252,7 @@ def get_T(material, E=8, l=1): ): b = np.interp(E * 1000, n[:, 0], n[:, 2]) mu = 4 * np.pi / get_Lambda(E, "um") * b - return np.exp(-mu * l) + return np.exp(-mu * thickness) else: print( "error: energy " @@ -382,11 +382,11 @@ def get_Bragg(reflection, E=8.0): ) theta = np.degrees(np.arcsin(lam / 2 / dspace[ind])) ds = [] - I = [] - for l in range(0, np.size(theta)): + I_list = [] + for _ in range(0, np.size(theta)): ds.append(dspace[ind]) - I.append(Irel[ind]) - res = np.array([np.array([theta]), np.array(ds), np.array(I)])[0] + I_list.append(Irel[ind]) + res = 
np.array([np.array([theta]), np.array(ds), np.array(I_list)])[0] return res.T elif reflection == "reflections?": print("List of available reflections (T=25C):") @@ -502,10 +502,10 @@ def get_EBragg(reflection, theta_Bragg=12.0, d_spacing=None): ind = reflstr.index(reflection) # print reflstr[ind] +': d_{hkl}=' + "%3.4f" %dspace[ind] +'A I/I_o='+ "%3.4f" %Irel[ind] ds = [] - I = [] - for l in range(0, np.size(theta_Bragg)): + I_list = [] + for _ in range(0, np.size(theta_Bragg)): ds.append(dspace[ind]) - I.append(Irel[ind]) + I_list.append(Irel[ind]) else: # ds = [d_spacing] * len(theta_Bragg) #will raise error if theta_Bragg is a scalar ds = [d_spacing] * np.size(theta_Bragg) @@ -727,9 +727,9 @@ def get_Es(gap, harmonic=[1, 2, 3, 4, 5], ID=default_id): ) print(name) else: - for l in range(0, np.size(harmonic)): + for idx in range(0, np.size(harmonic)): harm_check(harmonic) - if l == np.size(harmonic) - 1: + if idx == np.size(harmonic) - 1: gap = np.array(gap) if ID in name: loadn = Path(datapath) / Path("id_{}.dat".format(ID)) @@ -835,20 +835,16 @@ def harm_check(harm_n): """ state = True - if np.all(np.array(harm_n) >= 1) != True: + if not np.all(np.array(harm_n) >= 1): raise xfuncs_Exception("Type Error: Harmonic numbers need to be >=1!") try: - list(harm_n) # b is the list to be checked - for i in list(harm_n): - # print isinstance(i,int) - state = state * isinstance(i, int) - except: # check for b being a single integer - try: - state = state * isinstance(harm_n, int) - # print isinstance(harm_n, int) - except: - pass - - if state != True: + for i in harm_n: + # Check all values when an iterable is provided. + state = state and isinstance(i, int) + except TypeError: + # Scalar input is valid if it is a single integer. 
+ state = state and isinstance(harm_n, int) + + if not state: raise xfuncs_Exception("Type Error: Harmonic numbers need to be integers >=1!") From 1af6cb1b68ae2fc2fa93beb86a38596045e7498d Mon Sep 17 00:00:00 2001 From: jennmald Date: Thu, 2 Apr 2026 10:39:39 -0400 Subject: [PATCH 5/7] updates to chx_utilites, chx_wrapper, modelm and transfuncs --- chxtools/chx_utilities.py | 128 +++++----------------------------- chxtools/chx_wrapper.py | 63 +++++++++-------- chxtools/pv_explorer/model.py | 2 +- chxtools/transfuncs.py | 26 +++---- 4 files changed, 64 insertions(+), 155 deletions(-) diff --git a/chxtools/chx_utilities.py b/chxtools/chx_utilities.py index b2ab95b..ad63b23 100644 --- a/chxtools/chx_utilities.py +++ b/chxtools/chx_utilities.py @@ -9,6 +9,7 @@ from pathlib import Path from pkg_resources import resource_filename as rs_fn +from numpy import mean ############ @@ -193,7 +194,7 @@ def dcm_roll(Bragg, offset, distance, offmode="mm", pixsize=5.0): else: try: d = float(distance) - except: + except Exception: raise CHX_utilities_Exception( "Error: distance must be a recognized string or numerical value" ) @@ -262,34 +263,23 @@ def sum_image(filename, firstim=0, lastim=9999): # lastim=30 # maximum number of images to search in automatic mode imcount = firstim - # print 'imcount: ',imcount - # print 'firstim: ',firstim get_out = 0 image_data = 0 while imcount <= lastim: ic = str(imcount) while len(ic) < nlen: # create file number ic = "0" + ic - # ic=str(ic) - # print ic - # assemble the image name to be loaded: file_to_open = fp + fn + "_" + ic + "." + fe try: im = Image.open(file_to_open) - # im.show() print("loaded file: ", file_to_open) - except: + except Exception: print("waiting for file: " + file_to_open) - # plt.close(1) # use the waiting time to plot some intermediate result -> does not work, as python is waiting until the end before printing.... 
- # plt.figure(1) - # imgplot= plt.imshow(image_data/(imcount-firstim+1),origin='lower') - # imgplot.set_cmap('gray') time.sleep(5) try: im = Image.open(file_to_open) - # im.show() - except: + except Exception: get_out = 1 if get_out == 1: return image_data @@ -308,19 +298,14 @@ def sum_image(filename, firstim=0, lastim=9999): imgplot.set_cmap("gray") else: image_data = image_data + np.array(np.sum(im, axis=2)) - # plt.close(1) - # plt.figure(1) - # plt.imshow(image_data/((imcount-firstim)+1)) imcount = imcount + 1 plt.close( 1 ) # only plot at the end for now: too much overhead to plot every iteration plt.figure(1) - # finalimage=Image.fromarray(image_data/(imcount-firstim+1)) imgplot = plt.imshow(image_data / (imcount - firstim + 1), origin="lower") imgplot.set_cmap("gray") plt.title("summed :" + fp + fn + fnum + "-" + ic) - # finalimage.show() return image_data / (imcount - firstim + 1) @@ -389,7 +374,6 @@ def get_cuts(img, cut=1000, direction="horz"): def show_cut(img, cut=1000, direction="horz", width=10, title=None): - # fig,ax=plt.subplots() img_ = img.copy() if direction == "horz": @@ -445,9 +429,6 @@ def show_img( shape = img.shape dy, dx = shape - # ax.set_xlim( 0,dx) - # ax.set_ylim(0,dy) - if xlim is not None: ax.set_xlim(xlim) if extent is not None: @@ -455,12 +436,10 @@ def show_img( if ylim is None: ylim = [y2, y1] aspect_ = determine_aspect(shape, extent) - # rsx,rsy = [ (x2-x1)/float(dx), (y2-y1)/float(dy)] #rescale x, and rescale y else: aspect_ = None if ylim is None: ylim = [0, dy] - # print ylim ax.set_ylim([ylim[0], ylim[1]]) if not logs: cax = ax.imshow( @@ -509,9 +488,6 @@ def show_img( if ax is None: cbar = fig.colorbar(cax, ticks=[vmin, vmax]) - # if ylim is not None:ax.set_ylim(ylim) - # if xlim is not None:ax.set_xlim(xlim) - if save: if outDir != None: fp = outDir + title + "_.png" @@ -523,7 +499,6 @@ def show_img( def trans_data_to_pd(data, label=None, dtype="list"): - # lists a [ list1, list2...] 
all the list have the same length from numpy import arange, array import pandas as pd @@ -577,7 +552,6 @@ def plot_pv_values( import pandas.tools.plotting as ptg - # from numpy import arange,array import numpy as np if keys is None: @@ -598,9 +572,7 @@ def plot_pv_values( N_data = N_img + 1 sharex = True - # fig, axs = plt.subplots(N,sharex = True) fig, axs = ptg._subplots(N, sharex=sharex, sharey=False, layout=[N, 1]) - # tf=time axs[0].set_xlim(np.min(time), np.max(time)) for n in range(N0, N): i = n - N0 @@ -611,8 +583,6 @@ def plot_pv_values( width = min([ymax - ymean, ymean - ymin]) ymax_ = ymean + width ymin_ = ymean - width - # print ymean, ymax_, ymin_ - # ylim=[ ymin_ - ymean*.2, ymax_ + ymean*.2] ylim = [ymean - width * 5, ymean + width * 5] else: ylim = ylim_tv[i] @@ -637,8 +607,6 @@ def plot_pv_values( for x in m: x.set_visible(False) - # dd =np.array( [datetime.datetime.fromtimestamp( tf[i] ) - # for i in range( 0, len(tf) )] ) if img is not None: dy, dx = img.shape shape = img.shape @@ -655,7 +623,6 @@ def plot_pv_values( m = axs[N_img].get_xticklabels() for x in m: x.set_visible(False) - # axs[N_img].set_xlim( np.min(time), np.max(time) ) axs[N_img].set_ylabel("size, um", fontsize=24) if data is not None: @@ -681,7 +648,6 @@ def plot_pv_values( axs[n].set_xlim(np.min(time), np.max(time)) - # print trans_tf_to_td( [np.min(time), np.max(time) ]) xt = axs[n].get_xticks() xticks = trans_tf_to_td(xt) axs[n].set_xlabel(xlabel, fontsize=24) @@ -689,8 +655,6 @@ def plot_pv_values( axs[n].set_ylabel(ki, fontsize=24) - # axs[0].set_xlim( np.min(time), np.max(time) ) - def make_wave_data2(x, y): import numpy as np @@ -755,8 +719,6 @@ def get_archive_pvlist_values( "http://ca.cs.nsls2.local:8888/cgi-bin/ArchiveDataServer.cgi" ) - # archiver.scan_archives() - dict_tv = {} N = len(PVs) for i in range(N): @@ -807,8 +769,7 @@ def get_archive_pv_value( archiver = [ Archiver("http://xf11id-ca.cs.nsls2.local/cgi-bin/ArchiveDataServer.cgi") ] - # if scan_archives: - # 
archiver.scan_archives() + if label[:3] == "Acc": arch = archiver[1] else: @@ -852,17 +813,10 @@ def read_scan(sid, fill=True): muxer = dm.from_events(ev) data = muxer.to_sparse_dataframe() dt = data.time - # print dt - # print dt[0] - # print dt[len(dt)-1] - # data = list( db.fetch_events(hdr)) t1 = datetime.datetime.fromtimestamp(dt[0]).strftime("%Y-%m-%d %H:%M:%S") t2 = datetime.datetime.fromtimestamp(dt[len(dt) - 1]).strftime("%Y-%m-%d %H:%M:%S") - # t1 = dt[0].strftime('%Y-%m-%d %H:%M:%S') - # t2 = dt[len(dt)-1].strftime('%Y-%m-%d %H:%M:%S') - print("the first scan time is: %s" % t1) print("the last scan time is: %s" % t2) start_time = t1 @@ -870,11 +824,6 @@ def read_scan(sid, fill=True): return data, start_time, end_time -# w,t = get_waterfall( data,direction='vert', cuts=[1000],firstim=0, lastim=1200,detector="xray_cam_img_image_lightfield",) - -# show_img(w[1000].T,aspect=1.2, ylim=[700,1400]) - - def get_waterfall( data, direction="horz", @@ -887,17 +836,13 @@ def get_waterfall( import numpy as np imcount = firstim - # im_time=[] waterfall = {} notime = lastim + 1 - firstim in_time = data.time imgs = data[detector] for n in range(0, notime): t = firstim + n - # im_time.append( data[t].time ) dat = imgs[n] - # print t, data[t].time - # dat = data[t].data[detector] for i in cuts: if t == 0: waterfall[i] = [] @@ -923,17 +868,11 @@ def get_img_waterfall( import numpy as np imcount = firstim - # im_time=[] waterfall = {} notime = lastim + 1 - firstim - # in_time = data.time - # imgs = data[detector] for n in range(0, notime): t = firstim + n - # im_time.append( data[t].time ) dat = imgs[n] - # print t, data[t].time - # dat = data[t].data[detector] for i in cuts: if t == 0: waterfall[i] = [] @@ -1030,7 +969,7 @@ def line_focus( # im.show() # im.show() print("loaded file: ", file_to_open) - except: + except Exception: print("waiting for file: " + file_to_open) # plt.close(1) # use the waiting time to plot some intermediate result -> does not work, as python is 
waiting until the end before printing.... # plt.figure(1) @@ -1044,7 +983,7 @@ def line_focus( ) im_time.append(time.ctime(mtime)) # im.show() - except: + except Exception: get_out = 1 if get_out == 1: return image_data ### THIS NEEDS TO BE CHANGED!!! @@ -1057,12 +996,9 @@ def line_focus( ) # do the analysis im = np.array(im) * 1.0 - # print 'shape of image: ',im.shape - # plt.figure(1) - # plt.imshow(im) try: dat = np.sum(im, axis=2) # sum RGB channels for a color image - except: + except Exception: dat = im for i in cuts: if direction == "horz": @@ -1084,7 +1020,7 @@ def line_focus( coeff, var_matrix = curve_fit( gauss, np.array(xrange(len(cdat))), cdat, p0=p0 ) - except: + except Exception: coeff = [0, 0, 0, 0] baseline.append(coeff[0]) amp.append(coeff[1]) @@ -1103,7 +1039,6 @@ def line_focus( for n in range(0, notime): t = firstim + n - # im_time.append( data[t].time ) im = imgs[t] try: @@ -1149,7 +1084,6 @@ def line_focus( for n in range(0, notime): t = firstim + n - # im_time.append( data[t].time ) im = imgs[:, t] try: dat = np.sum(im, axis=2) # sum RGB channels for a color image @@ -1177,8 +1111,6 @@ def line_focus( center.append(coeff[2]) width.append(coeff[3]) - # df_cen = trans_data_to_pd([center],label=['center'],dtype='list') - return ( np.array(baseline), np.array(amp), @@ -1195,7 +1127,6 @@ def show_fit(data, para, func=gauss, title=None, ax=None): x, y = data x = np.array(x) y = np.array(y) - # print 'here' x0 = np.linspace(x.min(), x.max(), 2000) fit = func(x0, *para) if ax is None: @@ -1204,7 +1135,6 @@ def show_fit(data, para, func=gauss, title=None, ax=None): ax.set_title(title) ax.plot(x, y, "bo") ax.plot(x0, fit, "r", ls="-") - # plt.show() def show_focus_line_fit(waterfall, para, lists=None, title=None): @@ -1213,16 +1143,11 @@ def show_focus_line_fit(waterfall, para, lists=None, title=None): L, t = waterfall.shape if lists is None: lists = [] - # lists.append( [t/4,t/2,3*t/4]) lists.append([t / 5, 2 * t / 5, 3 * t / 5, 4 * t / 5]) - # print 
lists fig, ax = plt.subplots(len(lists[0])) - # if title is not None:plt.title( title ) - # print title for i, l in enumerate(lists[0]): im = waterfall[:, l] p = para[i : i + 1].values[0] - # print i,l show_fit([range(L), im], p, ax=ax[i], title=title + "_time@_%s" % l) @@ -1255,24 +1180,15 @@ def get_fft(t, y): dx = (t[L - 1] - t[0]) / float(L) tm = 2 * L * dx xs = 1 / tm - # print ts,xs, tm - # fx = xs * np.arange( L ) y = np.concatenate((y, np.zeros(L))) FFT = abs(scipy.fft(y))[:L] freqs = scipy.fftpack.fftfreq(y.size, dx)[:L] # - # FFT = np.fft.fft(y) - # freqs = np.fft.fftfreq(len(y), dx )## - # Find the peak in the coefficients idx = np.argmax(np.abs(FFT)) freq = freqs[idx] freq_in_hertz = abs(freq) # * dx) print("The maximum frequency is: %s" % (freq_in_hertz)) ## - # freqs = trans_data_to_pd([freqs], label=['freqs'],dtype='list')## - # 20*scipy.log10(FFT) - # FFT = trans_data_to_pd([ 20*scipy.log10(FFT) ], label=label,dtype='list') - # FFT = trans_data_to_pd([ (FFT) ], label=label,dtype='list')## return np.array(freqs), np.array(FFT) @@ -1312,7 +1228,6 @@ def plot_line_focus2( fig2, axs2 = ptg._subplots(2, sharex=True, sharey=True, layout=[2, 1]) yc = df_res_.center - # _df_res_ = df_res df_res_["cen_pos"] = pix * (yc - yc.mean()) df_res_.plot( x=td, @@ -1333,12 +1248,9 @@ def plot_line_focus2( yfitted = y - polfit df_res_["cen_fitted"] = (yfitted - np.mean(yfitted)) * pix - # print dd.shape,df_res.cen_fitted.shape i = 0 mean = (polfit - np.mean(polfit)) * pix std = mean.std() - # label=str(str(cuts[i]) +' PV: '+str(round(max(polfit)-min(polfit),2))+ 'um rms: +/-'+str( round(std0,2))+'um' ) - label = str( str(cuts[i]) + " Center: " @@ -1367,8 +1279,6 @@ def plot_line_focus2( label=label, ) - # label=str( str(cuts[i]) +' PV: '+ str(round(max(yfitted)-min(yfitted),2)) + 'um rms: +/-'+str(round(( (yfitted-np.mean(yfitted))*pix ).std(),2))+'um' ) - mean = (yfitted - np.mean(yfitted)) * pix std = mean.std() label = str( @@ -1668,7 +1578,7 @@ def 
knife_edge(filename, direction="horz", cuts=[1, 2, 3], firstim=0, lastim=1, try: im = Image.open(file_to_open) print("loaded file: ", file_to_open) - except: + except Exception: print("waiting for file: " + file_to_open) # plt.close(1) # use the waiting time to plot some intermediate result -> does not work, as python is waiting until the end before printing.... # plt.figure(1) @@ -1678,7 +1588,7 @@ def knife_edge(filename, direction="horz", cuts=[1, 2, 3], firstim=0, lastim=1, try: im = Image.open(file_to_open) # im.show() - except: + except Exception: get_out = 1 if get_out == 1: return image_data ### THIS NEEDS TO BE CHANGED!!! @@ -1692,7 +1602,7 @@ def knife_edge(filename, direction="horz", cuts=[1, 2, 3], firstim=0, lastim=1, im = np.array(im) * 1.0 try: dat = np.sum(im, axis=2) # sum RGB channels for a color image - except: + except Exception: dat = im if imcount == firstim: plt.close(4) @@ -1839,7 +1749,7 @@ def get_ID_calibration( outFile.write("% K column is a placeholder! \n") outFile.write("% ID gap [mm] K E_1 [keV] \n") print("successfully created output file: {}".format(dat_file)) - except: + except Exception: raise CHX_utilities_Exception("error: could not create output file") ### do the scanning and data fitting, file writing,.... @@ -1872,10 +1782,6 @@ def get_ID_calibration( else: pass print("moving DCM Bragg angle to: ", B_guess, " deg and ID gap to ", i, " mm") - # dcm.b.timeout=1200 #make sure dcm motions don't timeout... 
- # dcm.en.timeout=1200 - # mov(dcm.b,B_guess) - # mov(ivu_gap,i) print("hurray, made it up to here!") # ascan(dcm.b,float(B_guess-.4),float(B_guess+.4),60) # do the Bragg scan header = db[ @@ -1886,10 +1792,12 @@ def get_ID_calibration( intdat = data.xray_eye1_stats1_total[2:] B = np.array(B) intdat = np.array(intdat) - B = np.array(ss[-1].dcm_b)[ + B = np.array( + ss[-1].dcm_b + )[ # noqa: F821 2: ] # retrieve the data (first data point is often "wrong", so don't use - intdat = np.array(ss[-1].bpm_cam_stats_total1)[2:] + intdat = np.array(ss[-1].bpm_cam_stats_total1)[2:] # noqa: F821 A = np.max(intdat) # initial parameter guess and fitting xc = B[np.argmax(intdat)] w = 0.2 @@ -1917,7 +1825,7 @@ def get_ID_calibration( " ", str(float(xf.get_EBragg(xtal, -coeff[2]) / 5.0)), ) - except: + except Exception: print( "could not evaluate data point for ID gap = ", i, diff --git a/chxtools/chx_wrapper.py b/chxtools/chx_wrapper.py index 274c5ff..c1ba6cc 100644 --- a/chxtools/chx_wrapper.py +++ b/chxtools/chx_wrapper.py @@ -5,6 +5,7 @@ """ from __future__ import print_function +from pyepics import caget, caput def det_select(det): @@ -18,16 +19,16 @@ def det_select(det): calling sequence: det_select(det) """ try: - rm_det = ascan.user_detectors[0].name - ascan.user_detectors.remove(session_mgr[rm_det]) - ascan.default_triggers = [] - ascan.default_detectors = [] + rm_det = ascan.user_detectors[0].name # noqa: F821 + ascan.user_detectors.remove(session_mgr[rm_det]) # noqa: F821 + ascan.default_triggers = [] # noqa: F821 + ascan.default_detectors = [] # noqa: F821 print("removed previous default detector: {}".format(rm_det)) - except: + except Exception: print("list of detectors appeared to be empty...") - ascan.user_detectors.append(det) # add detector - new_def_det = ascan.user_detectors[0].name + ascan.user_detectors.append(det) # noqa: F821 + new_def_det = ascan.user_detectors[0].name # noqa: F821 print("\nnew default detector: {}".format(new_def_det)) @@ -47,19 +48,19 @@ 
def cw_ascan(mot, xmin, xmax, npoints, acqt="default", pos_ret=True): # gather beamline information prior to starting the scan: ini_motpos = caget(mot.record + ".RBV") # current detector: - acq_pv = session_mgr[ascan.user_detectors[0].name].pvname + acq_pv = session_mgr[ascan.user_detectors[0].name].pvname # noqa: F821 # set different exposure time for the scan, if requested: if acqt != "default": try: ini_expt = caget(acq_pv[2]) # initial exposure time - session_mgr[ascan.user_detectors[0].name].acquire_time = acqt + session_mgr[ascan.user_detectors[0].name].acquire_time = acqt # noqa: F821 print("successfully set exposure time to [s]: {}".format(acqt)) - except: + except Exception: print("could not set exposure time to {}".format(acqt)) # execute the scan - ascan(mot, xmin, xmax, npoints) + ascan(mot, xmin, xmax, npoints) # noqa: F821 # put beamline back into initial state if pos_ret: @@ -67,9 +68,9 @@ def cw_ascan(mot, xmin, xmax, npoints, acqt="default", pos_ret=True): print("returned axes to: {}".format(ini_motpos)) if acqt != "default": try: - session_mgr[ascan.user_detectors[0].name].acquire_time = ini_expt + session_mgr[ascan.user_detectors[0].name].acquire_time = ini_expt # noqa: F821 print("successfully reset exposure time to [s]: {}".format(ini_expt)) - except: + except Exception: print("could not reset exposure time to {}".format(ini_expt)) @@ -83,26 +84,26 @@ def cw_dscan(mot, mdx, pdx, npoints, acqt="default", pos_ret=True): WILL NOT WORK FOR A LIST OF DETECTORS! 
""" # current detector: - acq_pv = session_mgr[ascan.user_detectors[0].name].pvname + acq_pv = session_mgr[ascan.user_detectors[0].name].pvname # noqa: F821 # set different exposure time for the scan, if requested: if acqt != "default": try: ini_expt = caget(acq_pv[2]) # initial exposure time - session_mgr[ascan.user_detectors[0].name].acquire_time = acqt + session_mgr[ascan.user_detectors[0].name].acquire_time = acqt # noqa: F821 print("successfully set exposure time to [s]: {}".format(acqt)) - except: + except Exception: print("could not set exposure time to {}".format(acqt)) # execute the scan - dscan(mot, mdx, pdx, npoints) + dscan(mot, mdx, pdx, npoints) # noqa: F821 # print('finished scan') if acqt != "default": try: - session_mgr[ascan.user_detectors[0].name].acquire_time = ini_expt + session_mgr[ascan.user_detectors[0].name].acquire_time = ini_expt # noqa: F821 print("successfully reset exposure time to [s]: {}".format(ini_expt)) - except: + except Exception: print("could not reset exposure time to {}".format(ini_expt)) @@ -171,7 +172,7 @@ def cw_CCDseries( caput(detector.pvname.split("}")[0] + "}TIFF1:FileNumber", startn) # gather information about current camera settings - acq_pv = session_mgr[ascan.user_detectors[0].name].pvname + acq_pv = session_mgr[ascan.user_detectors[0].name].pvname # noqa: F821 ini_acq = caget( acq_pv.split("}")[0] + "}cam1:Acquire" ) # initial state: started or stopped @@ -190,25 +191,25 @@ def cw_CCDseries( try: caput(acq_pv.split("}")[0] + "}cam1:AcquireTime", acqt) print("successfully set exposure time to [s]: {}".format(acqt)) - except: + except Exception: print("could not set exposure time to {}".format(acqt)) # stop camara: try: caput(acq_pv.split("}")[0] + "}cam1:Acquire", 0) print("successfully stopped camera") - except: + except Exception: print("could not stop camera") # try to set image mode to multiple try: caput(acq_pv.split("}")[0] + "}cam1:ImageMode", 1) print('successfully set ImageMode to "multiple"') - except: + 
except Exception: print('could not set ImageMode to "multiple"') if acqperiod != "default": try: caput(acq_pv.split("}")[0] + "}cam1:AcquirePeriod", acqperiod) print("successfully set acquiering period to: {}".format(acqperiod)) - except: + except Exception: print("could not set aquireing period to {}".format(acqperiod)) # set number of images to be taken: @@ -216,7 +217,7 @@ def cw_CCDseries( try: caput(acq_pv.split("}")[0] + "}cam1:NumImages", imnum) print("successfully set number of images to: {}".format(imnum)) - except: + except Exception: print("could not set number of images to {}".format(imnum)) print("going to start the acquisition...") time.sleep(1) @@ -264,17 +265,17 @@ def cw_CCDseries( try: caput(acq_pv.split("}")[0] + "}cam1:AcquireTime", ini_expt) print("successfully reset exposure time to [s]: {}".format(ini_expt)) - except: + except Exception: print("could not reset exposure time to {}".format(ini_expt)) try: caput(acq_pv.split("}")[0] + "}cam1:ImageMode", ini_mode) print("successfully reset ImageMode") - except: + except Exception: print("could not reset ImageMode") try: caput(acq_pv.split("}")[0] + "}cam1:Acquire", ini_acq) print("successfully reset camera acquisition mode") - except: + except Exception: print("could not reset camera acquisition mode") if acqperiod != "default": try: @@ -282,13 +283,13 @@ def cw_CCDseries( print( "successfully reset acquisition period to [s]: {}".format(ini_acqperiod) ) - except: + except Exception: print("could not reset acquisition period to {}".format(ini_acqperiod)) if imnum != "default": try: caput(acq_pv.split("}")[0] + "}cam1:NumImages", ini_imnum) print("successfully reset image numbers to: {}".format(ini_imnum)) - except: + except Exception: print("could not reset image numbers to {}".format(ini_imnum)) time.sleep(0.5) try: @@ -296,5 +297,5 @@ def cw_CCDseries( acq_pv.split("}")[0] + "}cam1:Acquire", ini_acq ) # restart camera if it was running before taking the series print("restarted camera") - except: 
+ except Exception: print("could not restart camera...") diff --git a/chxtools/pv_explorer/model.py b/chxtools/pv_explorer/model.py index 2ad1066..f2c064f 100644 --- a/chxtools/pv_explorer/model.py +++ b/chxtools/pv_explorer/model.py @@ -1,4 +1,4 @@ -from atom.api import * +from atom.api import observe, Atom, Str, Typed, Dict, Bool, Float from matplotlib.figure import Figure import numpy as np import datetime diff --git a/chxtools/transfuncs.py b/chxtools/transfuncs.py index acb6883..f6eefcc 100644 --- a/chxtools/transfuncs.py +++ b/chxtools/transfuncs.py @@ -10,7 +10,7 @@ import numpy as np from chxtools import xfuncs as xf from scipy.optimize import fminbound -from epics import * +from epics import caget def CRL_focalpoint(energy, lens_configuration): @@ -95,24 +95,24 @@ def calc_transsetup(image_pos, E="auto", silent=False): impos_diff = np.zeros(2 ** len(lens_R)) q = np.zeros(2 ** len(lens_R)) real_im = np.zeros(2 ** len(lens_R)) - for l in range(0, 2 ** len(lens_R)): - k = f.format(l) + for idx in range(0, 2 ** len(lens_R)): + k = f.format(idx) a = np.zeros(len(lens_R)) for h in range(0, len(lens_R)): a[h] = int(k[h]) ln = lens_N * a - F[l] = 1 / np.sum(2 * delta * ln / (lens_R * 1e-3)) + F[idx] = 1 / np.sum(2 * delta * ln / (lens_R * 1e-3)) - # print F[l] + # print F[idx] def image_func(x): - return abs(x - image_pos + 1 / (-1 / x + 1 / F[l])) + return abs(x - image_pos + 1 / (-1 / x + 1 / F[idx])) - zmin[l] = fminbound(image_func, zpos[0], zpos[1]) - q[l] = 1 / (-1 / zmin[l] + 1 / F[l]) - if q[l] < 0: - real_im[l] = 0 - q[l] = float("NaN") - impos_diff[l] = q[l] + zmin[l] - image_pos + zmin[idx] = fminbound(image_func, zpos[0], zpos[1]) + q[idx] = 1 / (-1 / zmin[idx] + 1 / F[idx]) + if q[idx] < 0: + real_im[idx] = 0 + q[idx] = float("NaN") + impos_diff[idx] = q[idx] + zmin[idx] - image_pos # looking for the best option index = np.nanargmin(abs(impos_diff)) @@ -131,7 +131,7 @@ def image_func(x): # conf_lens_mat=lens_mat*bin_index conf_lensR = lens_R * 
bin_index conf_lensN = lens_N * bin_index - if silent == False: + if not silent: print(" ") print("optimized transfocator settings for E=" + str(E) + " [eV]") print( From 410b40b14e843cef6fc425a19b70c60c4cd497e4 Mon Sep 17 00:00:00 2001 From: jennmald Date: Thu, 2 Apr 2026 15:11:24 -0400 Subject: [PATCH 6/7] fix linting for utils and add updated numpy calls --- chxtools/chx_utilities.py | 181 ++++++++++++++++---------------------- 1 file changed, 76 insertions(+), 105 deletions(-) diff --git a/chxtools/chx_utilities.py b/chxtools/chx_utilities.py index ad63b23..a2c2982 100644 --- a/chxtools/chx_utilities.py +++ b/chxtools/chx_utilities.py @@ -54,18 +54,18 @@ def E_calibration(file, Edge="Cu", xtal="Si111cryo", B_off=0): for row in filereader: # read data try: Bragg.append(float(row[2])) - except: + except Exception: print("could not convert: ", row[2]) try: Gap.append(float(row[5])) - except: + except Exception: print("could not convert: ", row[5]) try: Intensity.append(float(row[7])) - except: + except Exception: print("could not convert: ", row[8]) B = np.array(Bragg) * -1.0 + B_off - G = np.array(Gap[0 : len(B)]) # not currently used, but converted for future use + # G = np.array(Gap[0 : len(B)]) # not currently used, but converted for future use Int = np.array(Intensity[0 : len(B)]) # normalize and remove background: @@ -200,10 +200,12 @@ def dcm_roll(Bragg, offset, distance, offmode="mm", pixsize=5.0): ) # data fitting - fitfunc = lambda p, x: ( - p[0] + 2 * d * p[1] * np.sin(x / 180.0 * np.pi) - ) # Target function - errfunc = lambda p, x, y: fitfunc(p, Bragg) - y # Distance to the target function + def fitfunc(p, x): # Target function + return p[0] + 2 * d * p[1] * np.sin(x / 180.0 * np.pi) + + def errfunc(p, x, y): # Distance to the target function + return fitfunc(p, Bragg) - y + p0 = [np.mean(offset), -0.5] # Initial guess for the parameters p1, success = optimize.leastsq(errfunc, p0[:], args=(Bragg, offset)) @@ -245,7 +247,7 @@ def sum_image(filename, 
firstim=0, lastim=9999): # extract filename and first image number: a = file_path.split("/") fn = a[len(a) - 1].split(".")[0] # file name - b = fn.split(".") + # b = fn.split(".") # fe=b[len(b)-1] # file ending, e.g .tiff fe = a[len(a) - 1].split(".")[1] c = fn.split("_") @@ -383,7 +385,7 @@ def show_cut(img, cut=1000, direction="horz", width=10, title=None): :, cut - width / 2 : cut + width / 2, ] = 0 - if title == None: + if title is None: title = "Img-&-Cut_%s" % cut show_img(img_, title=title) @@ -422,9 +424,9 @@ def show_img( if ax is None: fig, ax = plt.subplots(nrows=1) - if vmin == None: + if vmin is None: vmin = img.min() - if vmax == None: + if vmax is None: vmax = img.max() shape = img.shape dy, dx = shape @@ -435,23 +437,23 @@ def show_img( x1, x2, y1, y2 = extent if ylim is None: ylim = [y2, y1] - aspect_ = determine_aspect(shape, extent) - else: - aspect_ = None + # aspect_ = determine_aspect(shape, extent) + # else: + # aspect_ = None if ylim is None: ylim = [0, dy] ax.set_ylim([ylim[0], ylim[1]]) - if not logs: - cax = ax.imshow( - img, cmap=cmap, vmin=vmin, vmax=vmax, aspect=aspect_, extent=extent - ) # ,interpolation='none') + # if not logs: + # cax = ax.imshow( + # img, cmap=cmap, vmin=vmin, vmax=vmax, aspect=aspect_, extent=extent + # ) # ,interpolation='none') if logs: img = np.log(img) - if vmin == None: - vmin = img[nonzero(img)].min() # img.min() - cax = ax.imshow( - img, cmap=cmap, vmin=vmin, vmax=vmax, aspect=aspect_, extent=extent - ) # ,interpolation='none') + if vmin is not None: + vmin = img[np.nonzero(img)].min() # img.min() + # cax = ax.imshow( + # img, cmap=cmap, vmin=vmin, vmax=vmax, aspect=aspect_, extent=extent + # ) # ,interpolation='none') if aspect is not None: im = ax.get_images() x1, x2, y1, y2 = im[0].get_extent() @@ -485,11 +487,11 @@ def show_img( plt.step(ax.get_yticklabels(), fontsize=fontsize * 0.8, visible=True) # else: plt.yticks(yticks, fontsize=fontsize * 0.8) - if ax is None: - cbar = fig.colorbar(cax, 
ticks=[vmin, vmax]) + # if ax is None: + # cbar = fig.colorbar(cax, ticks=[vmin, vmax]) if save: - if outDir != None: + if outDir is not None: fp = outDir + title + "_.png" else: fp = title + "_.png" @@ -581,8 +583,6 @@ def plot_pv_values( ymean = d[keys[i]].mean() ymax, ymin = d[keys[i]].max(), d[keys[i]].min() width = min([ymax - ymean, ymean - ymin]) - ymax_ = ymean + width - ymin_ = ymean - width ylim = [ymean - width * 5, ymean + width * 5] else: ylim = ylim_tv[i] @@ -608,8 +608,7 @@ def plot_pv_values( x.set_visible(False) if img is not None: - dy, dx = img.shape - shape = img.shape + dy, _ = img.shape extent = [np.min(time), np.max(time), dy * pixsize, 0] show_img( img, @@ -808,8 +807,8 @@ def read_scan(sid, fill=True): from dataportal import DataMuxer as dm import datetime - hdr = db[sid] - ev = db.fetch_events(hdr, fill=fill) # , fill=True) + hdr = db[sid] # noqa: F821 + ev = db.fetch_events(hdr, fill=fill) # , fill=True) # noqa: F821 muxer = dm.from_events(ev) data = muxer.to_sparse_dataframe() dt = data.time @@ -835,7 +834,6 @@ def get_waterfall( import numpy as np - imcount = firstim waterfall = {} notime = lastim + 1 - firstim in_time = data.time @@ -867,7 +865,6 @@ def get_img_waterfall( import numpy as np - imcount = firstim waterfall = {} notime = lastim + 1 - firstim for n in range(0, notime): @@ -924,7 +921,7 @@ def line_focus( # extract filename and first image number: a = file_path.split("/") fn = a[len(a) - 1].split(".")[0] # file name - b = fn.split(".") + _ = fn.split(".") # fe=b[len(b)-1] # file ending, e.g .tiff fe = a[len(a) - 1].split(".")[1] c = fn.split("_") @@ -1005,7 +1002,6 @@ def line_focus( cdat = dat[i, :] elif direction == "vert": cdat = dat[:, i] - # print cdat; else: raise CHX_utilities_Exception( 'error: cut direction must be either "horz" or "vert".' 
@@ -1018,7 +1014,7 @@ def line_focus( p0 = [yo, A, xc, w] try: coeff, var_matrix = curve_fit( - gauss, np.array(xrange(len(cdat))), cdat, p0=p0 + gauss, np.array(list(range(len(cdat)))), cdat, p0=p0 ) except Exception: coeff = [0, 0, 0, 0] @@ -1043,7 +1039,7 @@ def line_focus( try: dat = np.sum(im, axis=2) # sum RGB channels for a color image - except: + except Exception: dat = im for i in cuts: @@ -1064,9 +1060,9 @@ def line_focus( p0 = [yo, A, xc, w] try: coeff, var_matrix = curve_fit( - gauss, np.array(xrange(len(cdat))), cdat, p0=p0 + gauss, np.array(list(range(len(cdat)))), cdat, p0=p0 ) - except: + except Exception: coeff = [0, 0, 0, 0] baseline.append(coeff[0]) amp.append(coeff[1]) @@ -1087,7 +1083,7 @@ def line_focus( im = imgs[:, t] try: dat = np.sum(im, axis=2) # sum RGB channels for a color image - except: + except Exception: dat = im for i in cuts: @@ -1102,9 +1098,9 @@ def line_focus( p0 = [yo, A, xc, w] try: coeff, var_matrix = curve_fit( - gauss, np.array(xrange(len(cdat))), cdat, p0=p0 + gauss, np.array(list(range(len(cdat)))), cdat, p0=p0 ) - except: + except Exception: coeff = [0, 0, 0, 0] baseline.append(coeff[0]) amp.append(coeff[1]) @@ -1144,18 +1140,18 @@ def show_focus_line_fit(waterfall, para, lists=None, title=None): if lists is None: lists = [] lists.append([t / 5, 2 * t / 5, 3 * t / 5, 4 * t / 5]) - fig, ax = plt.subplots(len(lists[0])) - for i, l in enumerate(lists[0]): - im = waterfall[:, l] + _, ax = plt.subplots(len(lists[0])) + for i, val in enumerate(lists[0]): + im = waterfall[:, val] p = para[i : i + 1].values[0] - show_fit([range(L), im], p, ax=ax[i], title=title + "_time@_%s" % l) + show_fit([range(L), im], p, ax=ax[i], title=title + "_time@_%s" % val) def test_fft(): import matplotlib.pyplot as plt import numpy as np - fig, ax = plt.subplots(2) + _, ax = plt.subplots(2) t = np.linspace(0, 20, 1000) y = ( 0.5 * np.sin(2 * np.pi * 5 * t) @@ -1175,11 +1171,11 @@ def get_fft(t, y): import numpy as np L = len(t) - tp = np.array(t) - 
yp = np.array(y) + _ = np.array(t) + _ = np.array(y) dx = (t[L - 1] - t[0]) / float(L) tm = 2 * L * dx - xs = 1 / tm + _ = 1 / tm y = np.concatenate((y, np.zeros(L))) @@ -1214,7 +1210,7 @@ def plot_line_focus2( td = times_ tf = trans_td_to_tf(td) - if title == None: + if title is None: title = "LF_" fig, axs = ptg._subplots(M, sharex=True, sharey=False, layout=[M, 1]) axs[0].set_title(title) @@ -1241,7 +1237,7 @@ def plot_line_focus2( label=str(cuts[0]), ) - x = xrange(N) + x = list(range(N)) y = yc pol = np.polyfit(x, y, 20) # 20) polfit = np.polyval(pol, x) @@ -1351,7 +1347,7 @@ def plot_line_focus(res, cuts, pix=1, epochtime="false", times=None): epoch = [] plt.close() if times is None: - for i in xrange(len(res[4][:])): + for i in list(range(len(res[4][:]))): dt = parser.parse(res[4][i]) epoch.append(dt.strftime("%s")) else: @@ -1359,11 +1355,11 @@ def plot_line_focus(res, cuts, pix=1, epochtime="false", times=None): epoch = np.array(epoch) plt.figure(1) #### plot and analyze center position - for i in xrange(len(cuts)): + for i in list(range(len(cuts))): y = center[i :: len(cuts)] # print y if epochtime == "false": - x = xrange(len(y)) + x = list(range(len(y))) x_str = "image #" else: x = epoch @@ -1376,15 +1372,13 @@ def plot_line_focus(res, cuts, pix=1, epochtime="false", times=None): plt.figure(5) plt.figure(6) ##### plot and analyze center short time vibrations - for i in xrange(len(cuts)): + for i in list(range(len(cuts))): y = np.array(center[i :: len(cuts)]) - # print y if epochtime == "false": - x = xrange(len(y)) + x = list(range(len(y))) x_str = "image #" else: - # x=epoch - x = xrange(len(y)) + x = list(range(len(y))) x_str = "epoch [s]" pol = np.polyfit(x, y, 20) @@ -1433,9 +1427,8 @@ def plot_line_focus(res, cuts, pix=1, epochtime="false", times=None): plt.figure(6) #### plot and analyze long time drifts plt.figure(2) ############ plot and analyze fwhm - for i in xrange(len(cuts)): + for i in list(range(len(cuts))): y = width[i :: len(cuts)] - # 
print y y = 2.35 * y * pix plt.plot(x, y, "+", label=str(cuts[i])) print( @@ -1454,10 +1447,8 @@ def plot_line_focus(res, cuts, pix=1, epochtime="false", times=None): plt.xlabel(x_str) plt.figure(3) - for i in xrange(len(cuts)): + for i in list(range(len(cuts))): y = amp[i :: len(cuts)] - # print y - # x=xrange(len(y)) plt.plot(x, y, "+", label=str(cuts[i])) plt.legend(loc="best") plt.title("Amplitude") @@ -1465,10 +1456,8 @@ def plot_line_focus(res, cuts, pix=1, epochtime="false", times=None): plt.xlabel(x_str) plt.figure(4) - for i in xrange(len(cuts)): + for i in list(range(len(cuts))): y = base[i :: len(cuts)] - # print y - # x=xrange(len(y)) plt.plot(x, y, "+", label=str(cuts[i])) plt.legend(loc="best") plt.title("Baseline") @@ -1491,18 +1480,13 @@ def read_camonitor(filename, epoch="true"): print("loading camonitor data from: ", file_path) with open(file_path, "r") as cafile: cadata = cafile.readlines() - # datime=[] - # datdate=[] value_camon = [] epoch_camon = [] - for i in xrange(len(cadata)): + for i in list(range(len(cadata))): line = cadata[i].split() value_camon.append(float(line[len(line) - 1])) datdate = line[len(line) - 3] datime = line[len(line) - 2] - # print datdate.split()[0] - # print datime - # print int(datdate.split('-')[0]),int(datdate.split('-')[1]),int(datdate.split('-')[2]),int(datime.split(':')[0]),int(datime.split(':')[1]),int(datime.split(':')[2].split('.')[0]),int(datime.split(':')[2].split('.')[1]) epoch_camon.append( float( datetime.datetime( @@ -1543,7 +1527,7 @@ def knife_edge(filename, direction="horz", cuts=[1, 2, 3], firstim=0, lastim=1, # extract filename and first image number: a = file_path.split("/") fn = a[len(a) - 1].split(".")[0] # file name - b = fn.split(".") + # b = fn.split(".") # fe=b[len(b)-1] # file ending, e.g .tiff fe = a[len(a) - 1].split(".")[1] c = fn.split("_") @@ -1633,35 +1617,24 @@ def knife_edge(filename, direction="horz", cuts=[1, 2, 3], firstim=0, lastim=1, if i >= 100: bp[i] = 0 cdat = sp.ifft(bp) - 
###### try with normalization - # devdat=np.diff(cdat/max(cdat)) - ####### devdat = np.diff(cdat) ########## if imcount == firstim: plt.close(9) plt.figure(9) - plt.plot(xrange(len(devdat)), devdat / max(devdat) * max(cdat)) - plt.plot(xrange(len(cdat)), cdat) + plt.plot(list(range(len(devdat))), devdat / max(devdat) * max(cdat)) + plt.plot(list(range(len(cdat))), cdat) - # print devdat - # np.argmax(devdat) - # center.append(np.argmax(devdat)) #### try helping with using the correct maximum (allows to use more Fourier components) center.append(np.argmax(devdat[900:1500])) #### imcount = imcount + 1 - # plotting section: - # plt.figure(3) - # plt.plot(xrange(len(devdat)),devdat) - # plt.figure(2) - # plt.plot(xrange(len(cdat)),cdat,'o-') plt.close(51) plt.figure(51) - for i in xrange(len(cuts)): + for i in list(range(len(cuts))): y = center[i :: len(cuts)] # print y - x = xrange(len(y)) + x = list(range(len(y))) dy = (y - np.mean(y)) * pix plt.plot(x, dy, "+", label=str(cuts[i])) plt.legend(loc="best") @@ -1671,10 +1644,10 @@ def knife_edge(filename, direction="horz", cuts=[1, 2, 3], firstim=0, lastim=1, print("knife edge position: ", dy.mean(), "um +/- ", dy.std(), "um") plt.close(52) plt.figure(52) - for i in xrange(len(cuts)): + for i in list(range(len(cuts))): y = center[i :: len(cuts)] # print y - x = xrange(len(y)) + x = list(range(len(y))) dy = y plt.plot(x, dy, "+", label=str(cuts[i])) plt.legend(loc="best") @@ -1687,9 +1660,9 @@ def knife_edge(filename, direction="horz", cuts=[1, 2, 3], firstim=0, lastim=1, # copied from 99-bluesky....didn't figure out how to import... 
def detselect(detector_object, suffix="_stats1_total"): """Switch the active detector and set some internal state""" - gs.DETS = [detector_object] - gs.PLOT_Y = detector_object.name + suffix - gs.TABLE_COLS = [gs.PLOT_Y] + gs.DETS = [detector_object] # noqa: F821 + gs.PLOT_Y = detector_object.name + suffix # noqa: F821 + gs.TABLE_COLS = [gs.PLOT_Y] # noqa: F821 def get_ID_calibration( @@ -1717,9 +1690,9 @@ def get_ID_calibration( from scipy.optimize import curve_fit ### just testing passing bluesky objects (motors, detectors) into function - gs.DETS = [xray_eye1] + gs.DETS = [xray_eye1] # noqa: F821 # detselect(xray_eye1) - dscan(diff.xh, gapstart, gapstop, 3) + dscan(diff.xh, gapstart, gapstop, 3) # noqa: F821 ### end of testing section ################# @@ -1784,19 +1757,17 @@ def get_ID_calibration( print("moving DCM Bragg angle to: ", B_guess, " deg and ID gap to ", i, " mm") print("hurray, made it up to here!") # ascan(dcm.b,float(B_guess-.4),float(B_guess+.4),60) # do the Bragg scan - header = db[ + header = db[ # noqa: F821 -1 ] # retrieve the data (first data point is often "wrong", so don't use - data = get_table(header) + data = get_table(header) # noqa: F821 B = data.dcm_b[2:] intdat = data.xray_eye1_stats1_total[2:] B = np.array(B) intdat = np.array(intdat) B = np.array( - ss[-1].dcm_b - )[ # noqa: F821 - 2: - ] # retrieve the data (first data point is often "wrong", so don't use + ss[-1].dcm_b # noqa: F821 + )[2:] # retrieve the data (first data point is often "wrong", so don't use intdat = np.array(ss[-1].bpm_cam_stats_total1)[2:] # noqa: F821 A = np.max(intdat) # initial parameter guess and fitting xc = B[np.argmax(intdat)] From 16346064decc8a56d55d4b389aaa380de125346f Mon Sep 17 00:00:00 2001 From: jennmald Date: Fri, 3 Apr 2026 15:43:11 -0400 Subject: [PATCH 7/7] finished pre-commit refactor --- chxtools/attfuncs.py | 8 ++++---- chxtools/attfuncs2.py | 16 +++++++--------- chxtools/bpm_stability.py | 10 +++++----- 3 files changed, 16 insertions(+), 
18 deletions(-) diff --git a/chxtools/attfuncs.py b/chxtools/attfuncs.py index 1b31ad3..f0d53af 100644 --- a/chxtools/attfuncs.py +++ b/chxtools/attfuncs.py @@ -10,7 +10,7 @@ import numpy as np from . import xfuncs as xf -from epics import * +from epics import caget, caput ##PVs # XF:11IDB-BI{Attn:03}Cmd:In-Cmd @@ -111,14 +111,14 @@ def calc_T(T, E="auto", foil_mode="Si"): sT = np.zeros(len(abs_th)) for m in range(0, len(abs_th)): sT[m] = xf.get_T(abs_mat[m], E / 1000, abs_th[m]) - for l in range(0, 2 ** len(abs_th)): - k = f.format(l) + for num in range(0, 2 ** len(abs_th)): + k = f.format(num) a = np.zeros(len(abs_th)) for h in range(0, len(abs_th)): a[h] = int(k[h]) x = sT * a x[x == 0] = 1 - T_tot[l] = np.product(x) + T_tot[num] = np.product(x) # determine best attenuator configuration diff = np.abs(T_tot - T) diff --git a/chxtools/attfuncs2.py b/chxtools/attfuncs2.py index 0c2a735..0900c71 100644 --- a/chxtools/attfuncs2.py +++ b/chxtools/attfuncs2.py @@ -5,17 +5,15 @@ @author: lwiegart """ -""" -Functions for CHX attenuators in monitor chamber: -physical setup of attenuator system -> att_setup -calculate optimal transmission -> calc_T -set optimal transmission -> set_T -by LW 09/25/2017 -_v2 (this version): added W-edge as T=0 [02/07/2018] -""" +# Functions for CHX attenuators in monitor chamber: +# physical setup of attenuator system -> att_setup +# calculate optimal transmission -> calc_T +# set optimal transmission -> set_T +# by LW 09/25/2017 +# _v2 (this version): added W-edge as T=0 [02/07/2018] import numpy as np from chxtools import xfuncs as xf -from epics import * +from epics import caget, caput def att_setup(): diff --git a/chxtools/bpm_stability.py b/chxtools/bpm_stability.py index c7a2d46..a5fd8b3 100644 --- a/chxtools/bpm_stability.py +++ b/chxtools/bpm_stability.py @@ -47,11 +47,11 @@ def get_fft(t, y): import numpy as np L = len(t) - tp = np.array(t, dtype=float) + # tp = np.array(t, dtype=float) yp = np.array(y, dtype=float) ts = (t[L - 1] - 
t[0]) / float(L) - tm = 2 * L * ts - xs = 1 / tm + # tm = 2 * L * ts + # xs = 1 / tm ps = np.abs(np.fft.fft(yp)) time_step = ts freqs = np.fft.fftfreq(yp.size, time_step) @@ -284,7 +284,7 @@ def bpm_read( else: filename = filename + "_" + times - colms = ["A", "B", "C", "D", "X", "Y", "Sum"] + # colms = ["A", "B", "C", "D", "X", "Y", "Sum"] num_sample *= rate metadata = {} @@ -305,7 +305,7 @@ def bpm_read( if n == 0: try: zero_point = np.where(d == 0)[0][0] - except: + except Exception: zero_point = len(d) data = d[:zero_point] else: