From 0b26c85622a36eb141fe65c2746123024a8daf24 Mon Sep 17 00:00:00 2001 From: C-Achard Date: Mon, 16 Feb 2026 19:41:16 +0100 Subject: [PATCH 01/60] Pin deeplabcut-live to 1.1.0 in pyproject Replace flexible >=2 specifiers with an exact deeplabcut-live==1.1.0 for the main dependency and its [pytorch] and [tf] extras in pyproject.toml. This pins the package (and its included extras like timm and scipy) to a known compatible release to avoid missing-dependency or compatibility issues. --- pyproject.toml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 9dac41d..c7ab122 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -24,7 +24,7 @@ classifiers = [ dynamic = [ "version" ] dependencies = [ "cv2-enumerate-cameras", - "deeplabcut-live>=2", # might be missing timm and scipy + "deeplabcut-live==1.1", "matplotlib", "numpy", "opencv-python", @@ -49,7 +49,7 @@ dev = [ ] gentl = [ "harvesters" ] pytorch = [ - "deeplabcut-live[pytorch]>=2", # this includes timm and scipy + "deeplabcut-live[pytorch]==1.1", # includes timm and scipy ] test = [ "hypothesis>=6", @@ -59,7 +59,7 @@ test = [ "pytest-qt>=4.2", ] tf = [ - "deeplabcut-live[tf]>=2", + "deeplabcut-live[tf]==1.1", ] [project.scripts] dlclivegui = "dlclivegui:main" From 05497e82143c3379d037386e4fa0cc9c65d2a62c Mon Sep 17 00:00:00 2001 From: C-Achard Date: Mon, 16 Feb 2026 20:22:57 +0100 Subject: [PATCH 02/60] Add PyPI release workflow and rename CI file Add a new GitHub Actions workflow (python-package.yml) to build and publish packages to PyPI. The workflow triggers on tag pushes (v*.*.*) and on PR events, sets up Python, caches pip, installs build tooling (build, twine, packaging), builds the package, and uploads artifacts to PyPI using TWINE_API_KEY. Also rename .github/workflows/ci.yml to .github/workflows/testing-ci.yml to clarify CI purpose. 
--- .github/workflows/python-package.yml | 59 ++++++++++++++++++++ .github/workflows/{ci.yml => testing-ci.yml} | 0 2 files changed, 59 insertions(+) create mode 100644 .github/workflows/python-package.yml rename .github/workflows/{ci.yml => testing-ci.yml} (100%) diff --git a/.github/workflows/python-package.yml b/.github/workflows/python-package.yml new file mode 100644 index 0000000..d588e95 --- /dev/null +++ b/.github/workflows/python-package.yml @@ -0,0 +1,59 @@ +name: Update pypi release + +on: + push: + tags: + - 'v*.*.*' + pull_request: + branches: + - main + - public + types: + - labeled + - opened + - edited + - synchronize + - reopened + +jobs: + release: + runs-on: ubuntu-latest + + steps: + - name: Setup Python + id: setup-python + uses: actions/setup-python@v5 + with: + python-version: '3.x' + + - name: Cache dependencies + id: pip-cache + uses: actions/cache@v4 + with: + path: ~/.cache/pip + key: ${{ runner.os }}-pip-${{ steps.setup-python.outputs.python-version }}-${{ hashFiles('pyproject.toml', 'requirements.txt', 'setup.cfg', 'setup.py') }} + restore-keys: | + ${{ runner.os }}-pip-${{ steps.setup-python.outputs.python-version }}- + ${{ runner.os }}-pip- + + - name: Install dependencies + run: | + pip install --upgrade pip + pip install wheel + pip install "packaging>=24.2" + pip install build + pip install twine + + - name: Checkout code + uses: actions/checkout@v4 + + - name: Build and publish to PyPI + if: ${{ github.event_name == 'push' }} + env: + TWINE_USERNAME: __token__ + TWINE_PASSWORD: ${{ secrets.TWINE_API_KEY }} + run: | + python -m build + ls dist/ + tar tvf dist/*.tar.gz + python3 -m twine upload --verbose dist/* diff --git a/.github/workflows/ci.yml b/.github/workflows/testing-ci.yml similarity index 100% rename from .github/workflows/ci.yml rename to .github/workflows/testing-ci.yml From 1cc7801e3bca69c2b613e49aaec6af4667b864e6 Mon Sep 17 00:00:00 2001 From: C-Achard Date: Mon, 16 Feb 2026 20:49:19 +0100 Subject: [PATCH 03/60] CI: 
build, validate & release Python package MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Rename workflow and split into a build/validate job and a gated publish job. Add a Python matrix (3.10–3.12), cache keyed by matrix python-version, and update checkout/setup/cache action usages. Build sdist/wheel, run twine check, inspect dist contents, and perform a smoke install from the built wheel (with a best-effort CLI check). Publish step now runs only for tag pushes like vX.Y.Z and uploads with twine using the TWINE_API_KEY secret. --- .github/workflows/python-package.yml | 91 ++++++++++++++++++++-------- 1 file changed, 67 insertions(+), 24 deletions(-) diff --git a/.github/workflows/python-package.yml b/.github/workflows/python-package.yml index d588e95..8a15aaf 100644 --- a/.github/workflows/python-package.yml +++ b/.github/workflows/python-package.yml @@ -1,4 +1,4 @@ -name: Update pypi release +name: Build, validate & Release on: push: @@ -16,44 +16,87 @@ on: - reopened jobs: - release: + build_check: + name: Build & validate package runs-on: ubuntu-latest - + strategy: + fail-fast: false + matrix: + python-version: [ "3.10", "3.11", "3.12" ] # adjust to what you support steps: - - name: Setup Python - id: setup-python + - name: Checkout + uses: actions/checkout@v4 + + - name: Setup Python ${{ matrix.python-version }} uses: actions/setup-python@v5 with: - python-version: '3.x' + python-version: ${{ matrix.python-version }} - - name: Cache dependencies - id: pip-cache + - name: Cache pip uses: actions/cache@v4 with: path: ~/.cache/pip - key: ${{ runner.os }}-pip-${{ steps.setup-python.outputs.python-version }}-${{ hashFiles('pyproject.toml', 'requirements.txt', 'setup.cfg', 'setup.py') }} + key: ${{ runner.os }}-pip-${{ matrix.python-version }}-${{ hashFiles('pyproject.toml', 'setup.cfg', 'setup.py', 'requirements.txt') }} restore-keys: | - ${{ runner.os }}-pip-${{ steps.setup-python.outputs.python-version }}- + ${{ runner.os 
}}-pip-${{ matrix.python-version }}- ${{ runner.os }}-pip- - - name: Install dependencies + - name: Install build tools run: | - pip install --upgrade pip - pip install wheel - pip install "packaging>=24.2" - pip install build - pip install twine + python -m pip install --upgrade pip + python -m pip install build twine wheel "packaging>=24.2" + + - name: Build distributions (sdist + wheel) + run: python -m build + + - name: Inspect dist + run: | + ls -lah dist/ + echo "sdist contents (first ~200 entries):" + tar -tf dist/*.tar.gz | sed -n '1,200p' + + - name: Twine metadata & README check + run: python -m twine check dist/* + + - name: Install from wheel & smoke test + run: | + # Install from the built wheel (not from the source tree) + python -m pip install --no-deps dist/*.whl + + python - <<'PY' + import importlib + pkg_name = "dlclivegui" # change if your top-level import differs + m = importlib.import_module(pkg_name) + print("Imported:", m.__name__, "version:", getattr(m, "__version__", "n/a")) + PY - - name: Checkout code + # Console entry point best-effort check (adjust name if different) + (command -v dlclivegui && dlclivegui --help) || echo "CLI not available or returned non-zero; continuing." 
+ + publish: + name: Publish to PyPI + runs-on: ubuntu-latest + needs: build_check + if: ${{ startsWith(github.ref, 'refs/tags/v') }} # only on tag pushes like v1.2.3 + steps: + - name: Checkout uses: actions/checkout@v4 - - name: Build and publish to PyPI - if: ${{ github.event_name == 'push' }} + - name: Setup Python + uses: actions/setup-python@v5 + with: + python-version: "3.x" + + - name: Install build tools + run: | + python -m pip install --upgrade pip + python -m pip install build twine + + - name: Build distributions (sdist + wheel) + run: python -m build + + - name: Publish to PyPI env: TWINE_USERNAME: __token__ TWINE_PASSWORD: ${{ secrets.TWINE_API_KEY }} - run: | - python -m build - ls dist/ - tar tvf dist/*.tar.gz - python3 -m twine upload --verbose dist/* + run: python -m twine upload --verbose dist/* From ae9550c5d1d023b6354bc9b3bb28439c9da44c4f Mon Sep 17 00:00:00 2001 From: C-Achard Date: Mon, 16 Feb 2026 20:50:23 +0100 Subject: [PATCH 04/60] Set version in pyproject.toml to 2.0.0rc0 Replace dynamic = ["version"] with an explicit version = "2.0.0rc0" placeholder in pyproject.toml. This makes the package version static (release candidate) for packaging/CI needs; update the placeholder as appropriate for final releases. 
--- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index c7ab122..ba9369a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,6 +4,7 @@ requires = [ "setuptools>=68" ] [project] name = "deeplabcut-live-gui" +version = "2.0.0rc0" # PLACEHOLDER description = "PySide6-based GUI to run real time DeepLabCut experiments" readme = "README.md" keywords = [ "deep learning", "deeplabcut", "gui", "pose estimation", "real-time" ] @@ -21,7 +22,6 @@ classifiers = [ "Programming Language :: Python :: 3.12", "Topic :: Scientific/Engineering :: Artificial Intelligence", ] -dynamic = [ "version" ] dependencies = [ "cv2-enumerate-cameras", "deeplabcut-live==1.1", From 21a7e280f88ce385838806a6fc0f980124006a5c Mon Sep 17 00:00:00 2001 From: C-Achard Date: Mon, 16 Feb 2026 21:02:12 +0100 Subject: [PATCH 05/60] Update project metadata in pyproject.toml Clarify package description to mention pose estimation experiments with DeepLabCut; fix authors by splitting the previous entry into two distinct authors (M-Lab of Adaptive Intelligence and Mathis Group for Computational Neuroscience and AI) with their respective emails; add a FIXME comment to the Documentation URL to mark docs as a placeholder. --- pyproject.toml | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index ba9369a..716079a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -5,7 +5,7 @@ requires = [ "setuptools>=68" ] [project] name = "deeplabcut-live-gui" version = "2.0.0rc0" # PLACEHOLDER -description = "PySide6-based GUI to run real time DeepLabCut experiments" +description = "PySide6-based GUI to run real time pose estimation experiments with DeepLabCut" readme = "README.md" keywords = [ "deep learning", "deeplabcut", "gui", "pose estimation", "real-time" ] license-files = [ "LICENSE" ] @@ -34,8 +34,11 @@ dependencies = [ "vidgear[core]", ] [[project.authors]] -name = "A. & M. 
Mathis Labs" -email = "adim@deeplabcut.org" +name = "M-Lab of Adaptive Intelligence" +email = "mackenzie@deeplabcut.org" +[[project.authors]] +name = "Mathis Group for Computational Neuroscience and AI" +email = "alexander@deeplabcut.org" [project.optional-dependencies] all = [ "harvesters", "pypylon" ] basler = [ "pypylon" ] @@ -65,7 +68,7 @@ tf = [ dlclivegui = "dlclivegui:main" [project.urls] "Bug Tracker" = "https://github.com/DeepLabCut/DeepLabCut-live-GUI/issues" -Documentation = "https://github.com/DeepLabCut/DeepLabCut-live-GUI" +Documentation = "https://github.com/DeepLabCut/DeepLabCut-live-GUI" # FIXME @C-Achard replace once docs are up Homepage = "https://github.com/DeepLabCut/DeepLabCut-live-GUI" Repository = "https://github.com/DeepLabCut/DeepLabCut-live-GUI" From 3ca5d4c80fc40f3626ff9057cbcdf3ad6aa13c9b Mon Sep 17 00:00:00 2001 From: C-Achard Date: Mon, 16 Feb 2026 21:09:30 +0100 Subject: [PATCH 06/60] CI: note lockfile and install wheel with deps Add a comment in the Python CI workflow recommending inclusion of the lock file for caching and validation. Update the smoke-test step to install the built wheel along with its dependencies by removing `--no-deps`. Also include minor whitespace cleanup. 
--- .github/workflows/python-package.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/python-package.yml b/.github/workflows/python-package.yml index 8a15aaf..077d303 100644 --- a/.github/workflows/python-package.yml +++ b/.github/workflows/python-package.yml @@ -32,6 +32,7 @@ jobs: with: python-version: ${{ matrix.python-version }} + # If we use UV later, the lock file should be included here for caching and validation - name: Cache pip uses: actions/cache@v4 with: @@ -61,7 +62,7 @@ jobs: - name: Install from wheel & smoke test run: | # Install from the built wheel (not from the source tree) - python -m pip install --no-deps dist/*.whl + python -m pip install dist/*.whl python - <<'PY' import importlib From 002e06b9cd203b4a20ca9dcc5dc8a1b7e7e5cec3 Mon Sep 17 00:00:00 2001 From: C-Achard Date: Mon, 16 Feb 2026 22:30:11 +0100 Subject: [PATCH 07/60] Add ASCII art API, CLI flag, and fixes Introduce dlclivegui.assets.ascii_art: a cross-platform, CI-safe ASCII/ANSI art generator using OpenCV and numpy (alpha compositing, content cropping, resizing, color modes, and terminal detection). Integrate it into main: add argparse with a --no-art flag, print a startup banner (unless disabled), and use the art helper for help text. Fix QApplication SIGINT keepalive timer handling (avoid duplicate timers and stop existing one before replacing). Change default SHOW_SPLASH to False in theme.py and update tests to restore SHOW_SPLASH=True via an autouse fixture so existing splash-dependent tests continue to pass. 
--- dlclivegui/assets/ascii_art.py | 371 +++++++++++++++++++++++++++++++ dlclivegui/gui/theme.py | 2 +- dlclivegui/main.py | 25 ++- tests/gui/test_app_entrypoint.py | 6 + 4 files changed, 397 insertions(+), 7 deletions(-) create mode 100644 dlclivegui/assets/ascii_art.py diff --git a/dlclivegui/assets/ascii_art.py b/dlclivegui/assets/ascii_art.py new file mode 100644 index 0000000..42d97d0 --- /dev/null +++ b/dlclivegui/assets/ascii_art.py @@ -0,0 +1,371 @@ +""" +Utilities to generate ASCII (optionally ANSI-colored) art for the user's terminal. + +Cross-platform and CI-safe: +- Detects terminal width using shutil.get_terminal_size (portable across OSes). +- Respects NO_COLOR and a color mode (auto|always|never). +- Enables ANSI color on Windows PowerShell/cmd via os.system("") when needed. +- Supports transparent PNGs (alpha) by compositing over a chosen background color. +- Optional crop-to-content using alpha or a background heuristic when no alpha. + +Dependencies: opencv-python, numpy +""" + +# dlclivegui/assets/ascii.py +from __future__ import annotations + +import os +import shutil +import sys +from collections.abc import Iterable +from importlib import resources +from typing import Literal + +import numpy as np + +try: + import cv2 as cv +except Exception as e: # pragma: no cover + raise RuntimeError( + "OpenCV (opencv-python) is required for dlclivegui.assets.ascii.\nInstall with: pip install opencv-python" + ) from e + +# Character ramps (dense -> sparse) +ASCII_RAMP_SIMPLE = "@%#*+=-:. " +ASCII_RAMP_FINE = "@$B%8&WM#*oahkbdpqwmZO0QLCJUYXzcvunxrjft/\\|()1{}[]?-_+~<>i!lI;:,\"^`'. " + +ColorMode = Literal["auto", "always", "never"] + +# ----------------------------- +# Terminal / ANSI capabilities +# ----------------------------- + + +def enable_windows_ansi_support() -> None: + """Enable ANSI escape support in Windows terminals. + Safe to call on any OS; no-op on non-Windows. 
+ """ + if os.name == "nt": + # This call toggles the console mode to enable VT processing in many hosts + os.system("") + + +def get_terminal_width(default: int = 80) -> int: + """Return terminal width in columns, or a fallback if stdout is not a TTY.""" + try: + if not sys.stdout.isatty(): + return default + return shutil.get_terminal_size((default, 24)).columns + except Exception: + return default + + +def should_use_color(mode: ColorMode = "auto") -> bool: + """Determine if colored ANSI output should be emitted. + + - 'never': never use color + - 'always': always use color (even when redirected) + - 'auto': use color only when stdout is a TTY and NO_COLOR is not set + """ + if mode == "never": + return False + if mode == "always": + return True + # auto + if os.environ.get("NO_COLOR"): + return False + return sys.stdout.isatty() + + +def terminal_is_wide_enough(min_width: int = 60) -> bool: + if not sys.stdout.isatty(): + return False + return get_terminal_width() >= min_width + + +# ----------------------------- +# Image helpers +# ----------------------------- + + +def _to_bgr(img: np.ndarray) -> np.ndarray: + """Ensure an image array is 3-channel BGR.""" + if img.ndim == 2: + return cv.cvtColor(img, cv.COLOR_GRAY2BGR) + if img.ndim == 3 and img.shape[2] == 3: + return img + if img.ndim == 3 and img.shape[2] == 4: + # Caller should composite first; keep as-is for now + b, g, r, a = cv.split(img) + return cv.merge((b, g, r)) + raise ValueError(f"Unsupported image shape for BGR conversion: {img.shape!r}") + + +def composite_over_color(img: np.ndarray, bg_bgr: tuple[int, int, int] = (255, 255, 255)) -> np.ndarray: + """If img has alpha (BGRA), alpha-composite over a solid BGR color and return BGR.""" + if img.ndim == 3 and img.shape[2] == 4: + b, g, r, a = cv.split(img) + af = (a.astype(np.float32) / 255.0)[..., None] # (H,W,1) + bgr = cv.merge((b, g, r)).astype(np.float32) + bg = np.empty_like(bgr, dtype=np.float32) + bg[..., 0] = bg_bgr[0] + bg[..., 1] = 
bg_bgr[1] + bg[..., 2] = bg_bgr[2] + out = af * bgr + (1.0 - af) * bg + return np.clip(out, 0, 255).astype(np.uint8) + return _to_bgr(img) + + +def crop_to_content_alpha(img_bgra: np.ndarray, alpha_thresh: int = 1, pad: int = 0) -> np.ndarray: + """Crop to bounding box of pixels where alpha > alpha_thresh. Returns BGRA.""" + if not (img_bgra.ndim == 3 and img_bgra.shape[2] == 4): + return img_bgra + a = img_bgra[..., 3] + mask = a > alpha_thresh + if not mask.any(): + return img_bgra + ys, xs = np.where(mask) + y0, y1 = ys.min(), ys.max() + x0, x1 = xs.min(), xs.max() + if pad: + h, w = a.shape + y0 = max(0, y0 - pad) + x0 = max(0, x0 - pad) + y1 = min(h - 1, y1 + pad) + x1 = min(w - 1, x1 + pad) + return img_bgra[y0 : y1 + 1, x0 : x1 + 1, :] + + +def crop_to_content_bg( + img_bgr: np.ndarray, bg: Literal["white", "black"] = "white", tol: int = 10, pad: int = 0 +) -> np.ndarray: + """Heuristic crop when no alpha: assume uniform white or black background. + Returns BGR. + """ + if not (img_bgr.ndim == 3 and img_bgr.shape[2] == 3): + img_bgr = _to_bgr(img_bgr) + if bg == "white": + dist = 255 - img_bgr.max(axis=2) # darker than white + mask = dist > tol + else: + dist = img_bgr.max(axis=2) # brighter than black + mask = dist > tol + if not mask.any(): + return img_bgr + ys, xs = np.where(mask) + y0, y1 = ys.min(), ys.max() + x0, x1 = xs.min(), xs.max() + if pad: + h, w = mask.shape + y0 = max(0, y0 - pad) + x0 = max(0, x0 - pad) + y1 = min(h - 1, y1 + pad) + x1 = min(w - 1, x1 + pad) + return img_bgr[y0 : y1 + 1, x0 : x1 + 1, :] + + +def resize_for_terminal(img: np.ndarray, width: int | None, aspect: float | None) -> np.ndarray: + """Resize image for terminal display. + + Parameters + ---------- + width: target character width (None -> current terminal width) + aspect: character cell height/width ratio; default 0.5 is good for many fonts. 
+ """ + h, w = img.shape[:2] + if width is None: + width = get_terminal_width(100) + width = max(20, int(width)) + if aspect is None: + # Allow override by env var, else default 0.5 + try: + aspect = float(os.environ.get("DLCLIVE_ASCII_ASPECT", "0.5")) + except ValueError: + aspect = 0.5 + new_h = max(1, int((h / w) * width * aspect)) + return cv.resize(img, (width, new_h), interpolation=cv.INTER_AREA) + + +# ----------------------------- +# Rendering +# ----------------------------- + + +def _map_luminance_to_chars(gray: np.ndarray, fine: bool) -> Iterable[str]: + ramp = ASCII_RAMP_FINE if fine else ASCII_RAMP_SIMPLE + idx = (gray.astype(np.float32) / 255.0 * (len(ramp) - 1)).astype(np.int32) + lines = ["".join(ramp[i] for i in row) for row in idx] + return lines + + +def _color_ascii_lines(img_bgr: np.ndarray, fine: bool, invert: bool) -> Iterable[str]: + ramp = ASCII_RAMP_FINE if fine else ASCII_RAMP_SIMPLE + b, g, r = cv.split(img_bgr) + lum = (0.0722 * b + 0.7152 * g + 0.2126 * r).astype(np.float32) + if invert: + lum = 255.0 - lum + idx = (lum / 255.0 * (len(ramp) - 1)).astype(np.int32) + h, w = idx.shape + lines = [] + for y in range(h): + seg = [] + for x in range(w): + ch = ramp[idx[y, x]] + bb, gg, rr = img_bgr[y, x] + seg.append(f"\x1b[38;2;{rr};{gg};{bb}m{ch}\x1b[0m") + lines.append("".join(seg)) + return lines + + +# ----------------------------- +# Public API +# ----------------------------- + + +def generate_ascii_lines( + image_path: str, + *, + width: int | None = None, + aspect: float | None = None, + color: ColorMode = "auto", + fine: bool = False, + invert: bool = False, + crop_content: bool = False, + crop_bg: Literal["none", "white", "black"] = "none", + alpha_thresh: int = 1, + crop_pad: int = 0, + bg_bgr: tuple[int, int, int] = (255, 255, 255), +) -> Iterable[str]: + """Load an image and return ASCII art lines sized for the user's terminal. 
+ + Parameters + ---------- + image_path: path to the input image + width: output width in characters (None -> detect terminal width) + aspect: character cell height/width ratio (None -> 0.5 or env override) + color: 'auto'|'always'|'never' color mode + fine: use a finer 70+ character ramp + invert: invert luminance mapping + crop_content: crop to non-transparent content (alpha) if present + crop_bg: when no alpha, optionally crop assuming a uniform 'white' or 'black' background + alpha_thresh: threshold for alpha-based crop (0-255) + crop_pad: pixels of padding around detected content + bg_bgr: background color used for alpha compositing (default white) + """ + enable_windows_ansi_support() + + if not os.path.isfile(image_path): + raise FileNotFoundError(image_path) + + # Load preserving alpha if present + img = cv.imread(image_path, cv.IMREAD_UNCHANGED) + if img is None: + raise RuntimeError(f"Failed to load image with OpenCV: {image_path}") + + # Crop prior to compositing/resizing + if crop_content and img.ndim == 3 and img.shape[2] == 4: + img = crop_to_content_alpha(img, alpha_thresh=alpha_thresh, pad=crop_pad) + elif crop_content and (img.ndim != 3 or img.shape[2] != 4) and crop_bg in ("white", "black"): + img = crop_to_content_bg(_to_bgr(img), bg=crop_bg, tol=10, pad=crop_pad) + + # Composite transparency to solid background for correct visual result + img_bgr = composite_over_color(img, bg_bgr=bg_bgr) + + # Resize for terminal cell ratio + img_bgr = resize_for_terminal(img_bgr, width=width, aspect=aspect) + + use_color = should_use_color(color) + + if use_color: + return _color_ascii_lines(img_bgr, fine=fine, invert=invert) + else: + gray = cv.cvtColor(img_bgr, cv.COLOR_BGR2GRAY) + if invert: + gray = 255 - gray + return _map_luminance_to_chars(gray, fine=fine) + + +def print_ascii( + image_path: str, + *, + width: int | None = None, + aspect: float | None = None, + color: ColorMode = "auto", + fine: bool = False, + invert: bool = False, + crop_content: 
bool = False, + crop_bg: Literal["none", "white", "black"] = "none", + alpha_thresh: int = 1, + crop_pad: int = 0, + bg_bgr: tuple[int, int, int] = (255, 255, 255), + output: str | None = None, +) -> None: + """Convenience: generate and print ASCII art; optionally write it to a file.""" + lines = list( + generate_ascii_lines( + image_path, + width=width, + aspect=aspect, + color=color, + fine=fine, + invert=invert, + crop_content=crop_content, + crop_bg=crop_bg, + alpha_thresh=alpha_thresh, + crop_pad=crop_pad, + bg_bgr=bg_bgr, + ) + ) + + # Print to stdout + for line in lines: + print(line) + + # Optionally write raw ANSI/plain text to a file + if output: + with open(output, "w", encoding="utf-8", newline="\n") as f: + for line in lines: + f.write(line) + f.write("\n") + + +# ----------------------------- +# Optional: Help banner helpers +# ----------------------------- +ASCII_IMAGE_PATH = resources.files("dlclivegui.assets") / "logo_transparent.png" + + +def build_help_description( + static_banner: str | None = None, *, desc=None, color: ColorMode = "auto", min_width: int = 60 +) -> str: + """Return a help description string that conditionally includes a colored ASCII banner. + + - If stdout is a TTY and wide enough, returns banner + description. + - Otherwise returns a plain, single-line description. + - If static_banner is None, uses ASCII_BANNER (empty by default). + """ + enable_windows_ansi_support() + desc = "DeepLabCut-Live GUI — launch the graphical interface." 
if desc is None else desc + if static_banner is None: + banner = "\n".join( + generate_ascii_lines( + str(ASCII_IMAGE_PATH), + width=shutil.get_terminal_size((80, 24)).columns - 10, + aspect=0.5, + color=color, + fine=True, + invert=False, + crop_content=True, + crop_bg="white", + alpha_thresh=1, + crop_pad=1, + bg_bgr=(255, 255, 255), + ) + ) + else: + banner = static_banner + if banner and terminal_is_wide_enough(min_width=min_width): + if should_use_color(color): + banner = f"\x1b[36m{banner}\x1b[0m" + return banner + "\n" + desc + return desc diff --git a/dlclivegui/gui/theme.py b/dlclivegui/gui/theme.py index cdc9ace..cc6db38 100644 --- a/dlclivegui/gui/theme.py +++ b/dlclivegui/gui/theme.py @@ -10,7 +10,7 @@ from PySide6.QtWidgets import QApplication # ---- Splash screen config ---- -SHOW_SPLASH = True +SHOW_SPLASH = False SPLASH_SCREEN_WIDTH = 600 SPLASH_SCREEN_HEIGHT = 400 SPLASH_SCREEN_DURATION_MS = 1000 diff --git a/dlclivegui/main.py b/dlclivegui/main.py index 35494f7..0f05966 100644 --- a/dlclivegui/main.py +++ b/dlclivegui/main.py @@ -1,6 +1,7 @@ # dlclivegui/gui/main.py from __future__ import annotations +import argparse import logging import signal import sys @@ -9,6 +10,7 @@ from PySide6.QtGui import QIcon from PySide6.QtWidgets import QApplication +from dlclivegui.assets import ascii_art as art from dlclivegui.gui.main_window import DLCLiveMainWindow from dlclivegui.gui.misc.splash import SplashConfig, show_splash from dlclivegui.gui.theme import ( @@ -42,22 +44,33 @@ def _sigint_handler(_signum, _frame) -> None: signal.signal(signal.SIGINT, _sigint_handler) # Keepalive timer to allow Python to handle signals while Qt is running. 
- sig_timer = QTimer(app) + sig_timer = QTimer() sig_timer.setInterval(100) # 50–200ms typical; keep low overhead sig_timer.timeout.connect(lambda: None) sig_timer.start() - if not hasattr(app, "_sig_timer"): - app._sig_timer = sig_timer - else: - raise RuntimeError("QApplication already has _sig_timer attribute, which is reserved for SIGINT handling.") + if hasattr(app, "_sig_timer"): + app._sig_timer.stop() # Stop any existing timer to avoid duplicates + app._sig_timer = sig_timer # Store on app to keep it alive and allow cleanup on exit + + +def parse_args(argv=None): + parser = argparse.ArgumentParser( + description=art.build_help_description(), + formatter_class=argparse.RawDescriptionHelpFormatter, + ) + parser.add_argument("--no-art", action="store_true", help="Disable ASCII art in help and when launching.") + return parser.parse_known_args(argv) def main() -> None: - # signal.signal(signal.SIGINT, signal.SIG_DFL) + args, _unknown = parse_args() # HiDPI pixmaps - always enabled in Qt 6 so no need to set it explicitly # QApplication.setAttribute(Qt.ApplicationAttribute.AA_UseHighDpiPixmaps, True) + print("Starting DeepLabCut-Live GUI...") + if not args.no_art: + print(art.build_help_description(desc="Welcome to DeepLabCut-Live GUI!")) app = QApplication(sys.argv) app.setWindowIcon(QIcon(LOGO)) diff --git a/tests/gui/test_app_entrypoint.py b/tests/gui/test_app_entrypoint.py index f28ddc3..bd3d4c5 100644 --- a/tests/gui/test_app_entrypoint.py +++ b/tests/gui/test_app_entrypoint.py @@ -16,6 +16,12 @@ def _import_fresh(): return importlib.import_module(MODULE_UNDER_TEST) +@pytest.fixture(autouse=True) +def set_use_splash_true(monkeypatch): + # Ensure thems.py SHOW_SPLASH is True for tests that rely on it, without affecting other tests + monkeypatch.setattr("dlclivegui.gui.theme.SHOW_SPLASH", True) + + @pytest.mark.gui def test_main_with_splash(monkeypatch): appmod = _import_fresh() From 36117740b0855ff0e2eb7bd8c8fc38c2c39da8f4 Mon Sep 17 00:00:00 2001 From: 
C-Achard Date: Mon, 16 Feb 2026 22:35:39 +0100 Subject: [PATCH 08/60] Use develop mode in tox testenv --- tox.ini | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tox.ini b/tox.ini index 3ac6e62..e16c20b 100644 --- a/tox.ini +++ b/tox.ini @@ -8,7 +8,7 @@ skip_missing_interpreters = true [testenv] description = Unit + smoke tests (exclude hardware) with coverage -package = wheel +use_develop = true extras = test # Keep behavior aligned with your GitHub Actions job: From 502e16e05a1040f8cc2e32221a36463e3f305faa Mon Sep 17 00:00:00 2001 From: C-Achard Date: Mon, 16 Feb 2026 22:50:15 +0100 Subject: [PATCH 09/60] Add ascii_art tests and update coverage omit Fix a small header comment in dlclivegui/assets/ascii_art.py and add dlclivegui/assets/* to coverage omit. Expand tests/gui/test_app_entrypoint.py with extensive unit tests for dlclivegui.assets.ascii_art (terminal/ANSI behavior, image helpers, rendering, generate/print API, and help banner), including TTY/NOTTY fixtures, image fixtures (using OpenCV, skipping if missing), and helpers for deterministic terminal sizing. Also adjust imports in the test to reference dlclivegui.assets.ascii_art. 
--- dlclivegui/assets/ascii_art.py | 2 +- pyproject.toml | 1 + tests/gui/test_app_entrypoint.py | 280 +++++++++++++++++++++++++++++++ 3 files changed, 282 insertions(+), 1 deletion(-) diff --git a/dlclivegui/assets/ascii_art.py b/dlclivegui/assets/ascii_art.py index 42d97d0..9f3ca76 100644 --- a/dlclivegui/assets/ascii_art.py +++ b/dlclivegui/assets/ascii_art.py @@ -11,7 +11,7 @@ Dependencies: opencv-python, numpy """ -# dlclivegui/assets/ascii.py +# dlclivegui/assets/ascii_art.py from __future__ import annotations import os diff --git a/pyproject.toml b/pyproject.toml index 716079a..ce12d5e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -133,6 +133,7 @@ exclude_lines = [ ] omit = [ "tests/*", + "dlclivegui/assets/*", ] [tool.coverage.run] branch = true diff --git a/tests/gui/test_app_entrypoint.py b/tests/gui/test_app_entrypoint.py index bd3d4c5..0d34a30 100644 --- a/tests/gui/test_app_entrypoint.py +++ b/tests/gui/test_app_entrypoint.py @@ -2,11 +2,19 @@ from __future__ import annotations import importlib +import os import sys +from pathlib import Path from unittest.mock import MagicMock +import numpy as np import pytest +# Import the module under test +# Adjust the import to your package layout as needed: +# from dlclivegui.assets import ascii as ascii_mod +import dlclivegui.assets.ascii_art as ascii_mod + MODULE_UNDER_TEST = "dlclivegui.main" @@ -128,3 +136,275 @@ def test_main_without_splash(monkeypatch): show_splash_mock.assert_not_called() assert calls["count"] == 0 win_instance.show.assert_called_once() + + +try: + import cv2 as cv +except Exception: + pytest.skip("OpenCV (opencv-python) is required for these tests.", allow_module_level=True) + + +# ------------------------- +# Fixtures & small helpers +# ------------------------- + + +@pytest.fixture +def tmp_png_gray(tmp_path: Path): + """Create a simple 16x8 gray gradient PNG without alpha.""" + h, w = 8, 16 + # Horizontal gradient from black to white in BGR + x = np.linspace(0, 255, w, 
dtype=np.uint8) + img = np.tile(x, (h, 1)) + bgr = cv.cvtColor(img, cv.COLOR_GRAY2BGR) + p = tmp_path / "gray.png" + assert cv.imwrite(str(p), bgr) + return p + + +@pytest.fixture +def tmp_png_bgra_logo(tmp_path: Path): + """Create a small BGRA image with a transparent border and opaque center.""" + h, w = 10, 20 + bgra = np.zeros((h, w, 4), dtype=np.uint8) + # Opaque blue rectangle in center + bgra[2:8, 5:15, 0] = 255 # B + bgra[2:8, 5:15, 3] = 255 # A + p = tmp_path / "logo_bgra.png" + assert cv.imwrite(str(p), bgra) + return p + + +def _force_isatty(monkeypatch, obj, value: bool): + """ + Ensure obj.isatty() returns value. + Try instance patch first; if the object disallows attribute assignment, + patch the method on its class. + """ + try: + # Try patching the instance directly + monkeypatch.setattr(obj, "isatty", lambda: value, raising=False) + except Exception: + # Fallback: patch the class method + cls = type(obj) + monkeypatch.setattr(cls, "isatty", lambda self: value, raising=True) + + +@pytest.fixture +def force_tty(monkeypatch): + """ + Pretend stdout is a TTY and provide a stable terminal size inside the + module-under-test namespace (and the actual sys). + """ + # NO_COLOR must be unset for should_use_color("auto") + monkeypatch.delenv("NO_COLOR", raising=False) + + # Make whatever stdout object exists report TTY=True + _force_isatty(monkeypatch, sys.stdout, True) + _force_isatty(monkeypatch, ascii_mod.sys.stdout, True) + + # Ensure terminal size used by the module is deterministic + monkeypatch.setattr( + ascii_mod.shutil, + "get_terminal_size", + lambda fallback=None: os.terminal_size((80, 24)), + raising=True, + ) + return sys.stdout # not used directly, but handy + + +@pytest.fixture +def force_notty(monkeypatch): + """ + Pretend stdout is not a TTY. 
+ """ + _force_isatty(monkeypatch, sys.stdout, False) + _force_isatty(monkeypatch, ascii_mod.sys.stdout, False) + return sys.stdout + + +# ------------------------- +# Terminal / ANSI behavior +# ------------------------- + + +def test_get_terminal_width_tty(force_tty): + width = ascii_mod.get_terminal_width(default=123) + assert width == 80 + + +def test_get_terminal_width_notty(force_notty): + width = ascii_mod.get_terminal_width(default=123) + assert width == 123 + + +def test_should_use_color_auto_tty(force_tty, monkeypatch): + monkeypatch.delenv("NO_COLOR", raising=False) + assert ascii_mod.should_use_color("auto") is True + + +def test_should_use_color_auto_no_color(force_tty, monkeypatch): + monkeypatch.setenv("NO_COLOR", "1") + assert ascii_mod.should_use_color("auto") is False + + +def test_should_use_color_modes(force_notty): + assert ascii_mod.should_use_color("never") is False + assert ascii_mod.should_use_color("always") is True + + +def test_terminal_is_wide_enough(force_tty): + assert ascii_mod.terminal_is_wide_enough(60) is True + assert ascii_mod.terminal_is_wide_enough(100) is False + + +# ------------------------- +# Image helpers +# ------------------------- + + +def test__to_bgr_converts_gray(): + gray = np.zeros((5, 7), dtype=np.uint8) + bgr = ascii_mod._to_bgr(gray) + assert bgr.shape == (5, 7, 3) + assert bgr.dtype == np.uint8 + + +def test_composite_over_color_bgra(tmp_png_bgra_logo): + img = cv.imread(str(tmp_png_bgra_logo), cv.IMREAD_UNCHANGED) + assert img.shape[2] == 4 + bgr = ascii_mod.composite_over_color(img, bg_bgr=(10, 20, 30)) + assert bgr.shape[2] == 3 + # Transparent border should become the bg color + assert tuple(bgr[0, 0]) == (10, 20, 30) + # Opaque center should keep blue channel high + assert bgr[5, 10, 0] == 255 + + +def test_crop_to_content_alpha(tmp_png_bgra_logo): + img = cv.imread(str(tmp_png_bgra_logo), cv.IMREAD_UNCHANGED) + cropped = ascii_mod.crop_to_content_alpha(img, alpha_thresh=1, pad=0) + h, w = 
cropped.shape[:2] + assert h == 6 # 2..7 -> 6 rows + assert w == 10 # 5..14 -> 10 cols + assert cropped[..., 3].min() == 255 + + +def test_crop_to_content_bg_white(tmp_path): + # Create white background with a black rectangle + h, w = 12, 20 + bgr = np.full((h, w, 3), 255, dtype=np.uint8) + bgr[3:10, 4:15] = 0 + p = tmp_path / "white_bg.png" + assert cv.imwrite(str(p), bgr) + cropped = ascii_mod.crop_to_content_bg(bgr, bg="white", tol=10, pad=0) + assert cropped.shape[0] == 7 # 3..9 -> 7 rows + assert cropped.shape[1] == 11 # 4..14 -> 11 cols + + +def test_resize_for_terminal_aspect_env(monkeypatch): + img = np.zeros((100, 200, 3), dtype=np.uint8) + monkeypatch.setenv("DLCLIVE_ASCII_ASPECT", "0.25") + resized = ascii_mod.resize_for_terminal(img, width=80, aspect=None) + # new_h = (h/w) * width * aspect = (100/200)*80*0.25 = 10 + assert resized.shape[:2] == (10, 80) + + +# ------------------------- +# Rendering +# ------------------------- + + +def test_map_luminance_to_chars_simple(): + gray = np.array([[0, 127, 255]], dtype=np.uint8) + lines = list(ascii_mod._map_luminance_to_chars(gray, fine=False)) + assert len(lines) == 1 + # First char should be the densest in the simple ramp '@', last should be space + assert lines[0][0] == ascii_mod.ASCII_RAMP_SIMPLE[0] + assert lines[0][-1] == ascii_mod.ASCII_RAMP_SIMPLE[-1] + + +def test_color_ascii_lines_basic(): + # Small 2x3 color blocks + img = np.zeros((2, 3, 3), dtype=np.uint8) + img[:] = (10, 20, 30) + lines = list(ascii_mod._color_ascii_lines(img, fine=False, invert=False)) + assert len(lines) == 2 + # Expect ANSI 24-bit color sequence present + assert "\x1b[38;2;" in lines[0] + # Reset code present + assert lines[0].endswith("\x1b[0m" * 3) is False # individual chars have resets, but line won't end with triple + + +# ------------------------- +# Public API: generate & print +# ------------------------- + + +@pytest.mark.parametrize("use_color", ["never", "always"]) +def test_generate_ascii_lines_gray(tmp_png_gray, 
use_color, force_tty): + lines = list( + ascii_mod.generate_ascii_lines( + str(tmp_png_gray), + width=40, + aspect=0.5, + color=use_color, + fine=False, + invert=False, + crop_content=False, + crop_bg="none", + ) + ) + assert len(lines) > 0 + # Width equals number of characters per line + assert all(len(line) == 40 or ("\x1b[38;2;" in line and len(_strip_ansi(line)) == 40) for line in lines) + + +def _strip_ansi(s: str) -> str: + import re + + return re.sub(r"\x1b\[[0-9;]*m", "", s) + + +def test_generate_ascii_lines_crop_alpha(tmp_png_bgra_logo, force_tty): + lines_no_crop = list( + ascii_mod.generate_ascii_lines(str(tmp_png_bgra_logo), width=40, aspect=0.5, color="never", crop_content=False) + ) + lines_crop = list( + ascii_mod.generate_ascii_lines(str(tmp_png_bgra_logo), width=40, aspect=0.5, color="never", crop_content=True) + ) + # Both are non-empty; height may change either way depending on aspect ratio + assert len(lines_no_crop) > 0 and len(lines_crop) > 0 + # Optional: assert they differ (most likely); comment out if flakiness observed + assert len(lines_crop) != len(lines_no_crop) + + +def test_print_ascii_writes_file(tmp_png_gray, force_tty, tmp_path): + out_path = tmp_path / "out.txt" + ascii_mod.print_ascii( + str(tmp_png_gray), + width=30, + aspect=0.5, + color="never", + output=str(out_path), + ) + assert out_path.exists() + text = out_path.read_text(encoding="utf-8") + # Expect multiple lines of length 30 + lines = [ln for ln in text.splitlines() if ln] + assert len(lines) > 0 + assert all(len(ln) == 30 for ln in lines) + + +def test_build_help_description_tty(tmp_png_bgra_logo, monkeypatch, force_tty): + monkeypatch.setattr(ascii_mod, "ASCII_IMAGE_PATH", Path(tmp_png_bgra_logo)) + desc = ascii_mod.build_help_description(static_banner=None, color="always", min_width=60) + assert "DeepLabCut-Live GUI" in desc + assert "\x1b[36m" in desc # cyan wrapper now present since TTY is mocked correctly + + +def 
test_build_help_description_notty(tmp_png_bgra_logo, monkeypatch, force_notty): + monkeypatch.setattr(ascii_mod, "ASCII_IMAGE_PATH", Path(tmp_png_bgra_logo)) + desc = ascii_mod.build_help_description(static_banner=None, color="always", min_width=60) + # Not a TTY -> no banner, just the plain description + assert desc.strip() == "DeepLabCut-Live GUI — launch the graphical interface." From d79eec4a9f2afb97dc194fb28781dbad08745996 Mon Sep 17 00:00:00 2001 From: C-Achard Date: Tue, 17 Feb 2026 07:57:24 +0100 Subject: [PATCH 10/60] Revert "Use develop mode in tox testenv" This reverts commit 36117740b0855ff0e2eb7bd8c8fc38c2c39da8f4. --- tox.ini | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tox.ini b/tox.ini index e16c20b..3ac6e62 100644 --- a/tox.ini +++ b/tox.ini @@ -8,7 +8,7 @@ skip_missing_interpreters = true [testenv] description = Unit + smoke tests (exclude hardware) with coverage -use_develop = true +package = wheel extras = test # Keep behavior aligned with your GitHub Actions job: From de9de7f423af4a831a6f2d752d2e423bac68f0c3 Mon Sep 17 00:00:00 2001 From: C-Achard Date: Tue, 17 Feb 2026 08:17:20 +0100 Subject: [PATCH 11/60] Bump actions and harden CLI smoke test MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Update GitHub Actions usages to actions/checkout@v6 and actions/setup-python@v6 in format and python-package workflows for consistency and newer runner support. Improve the python-package workflow's CLI smoke test to explicitly check for the 'dlclivegui' entry point, run its --help when present, and fail the job if the help command returns non-zero—providing clearer diagnostics instead of silently continuing. 
--- .github/workflows/format.yml | 4 ++-- .github/workflows/python-package.yml | 15 +++++++++++---- 2 files changed, 13 insertions(+), 6 deletions(-) diff --git a/.github/workflows/format.yml b/.github/workflows/format.yml index a226ae0..9858a21 100644 --- a/.github/workflows/format.yml +++ b/.github/workflows/format.yml @@ -10,12 +10,12 @@ jobs: pre_commit_checks: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v6 with: fetch-depth: 0 ref: ${{ github.head_ref }} - - uses: actions/setup-python@v4 + - uses: actions/setup-python@v6 with: python-version: '3.10' diff --git a/.github/workflows/python-package.yml b/.github/workflows/python-package.yml index 077d303..c354f24 100644 --- a/.github/workflows/python-package.yml +++ b/.github/workflows/python-package.yml @@ -25,10 +25,10 @@ jobs: python-version: [ "3.10", "3.11", "3.12" ] # adjust to what you support steps: - name: Checkout - uses: actions/checkout@v4 + uses: actions/checkout@v6 - name: Setup Python ${{ matrix.python-version }} - uses: actions/setup-python@v5 + uses: actions/setup-python@v6 with: python-version: ${{ matrix.python-version }} @@ -71,8 +71,15 @@ jobs: print("Imported:", m.__name__, "version:", getattr(m, "__version__", "n/a")) PY - # Console entry point best-effort check (adjust name if different) - (command -v dlclivegui && dlclivegui --help) || echo "CLI not available or returned non-zero; continuing." + if ! command -v dlclivegui >/dev/null 2>&1; then + echo "CLI entry point 'dlclivegui' not found in PATH; skipping CLI smoke test." + else + echo "Running 'dlclivegui --help' smoke test..." + if ! dlclivegui --help >/dev/null 2>&1; then + echo "::error::'dlclivegui --help' failed; this indicates a problem with the installed CLI package." 
+ exit 1 + fi + fi publish: name: Publish to PyPI From 7f5b75b66bb6a3d083032bd4f51237648f64cb40 Mon Sep 17 00:00:00 2001 From: C-Achard Date: Tue, 17 Feb 2026 08:17:55 +0100 Subject: [PATCH 12/60] Move version to package and mark pyproject dynamic Add a __version__ = "2.0.0rc0" placeholder in dlclivegui/__init__.py and update pyproject.toml to use dynamic = ["version"] instead of a hardcoded version field. This makes the project metadata pull the version from the package source rather than storing it twice. --- dlclivegui/__init__.py | 1 + pyproject.toml | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/dlclivegui/__init__.py b/dlclivegui/__init__.py index 9cc2640..f302f5b 100644 --- a/dlclivegui/__init__.py +++ b/dlclivegui/__init__.py @@ -24,3 +24,4 @@ "CameraConfigDialog", "main", ] +__version__ = "2.0.0rc0" # PLACEHOLDER diff --git a/pyproject.toml b/pyproject.toml index ce12d5e..8b28f5d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,6 @@ requires = [ "setuptools>=68" ] [project] name = "deeplabcut-live-gui" -version = "2.0.0rc0" # PLACEHOLDER description = "PySide6-based GUI to run real time pose estimation experiments with DeepLabCut" readme = "README.md" keywords = [ "deep learning", "deeplabcut", "gui", "pose estimation", "real-time" ] @@ -22,6 +21,7 @@ classifiers = [ "Programming Language :: Python :: 3.12", "Topic :: Scientific/Engineering :: Artificial Intelligence", ] +dynamic = [ "version" ] # version is set in dlclivegui/__init__.py dependencies = [ "cv2-enumerate-cameras", "deeplabcut-live==1.1", From 6d21bd2215f535d62d6d7c35475c9c4ff48a9e89 Mon Sep 17 00:00:00 2001 From: C-Achard Date: Tue, 17 Feb 2026 08:18:27 +0100 Subject: [PATCH 13/60] Handle ASCII art errors & use logging Wrap ASCII-art generation in a try/except to avoid crashing when the asset or terminal ops fail (catch FileNotFoundError, RuntimeError, OSError and fall back to no banner). Add a clarifying comment for the Windows ANSI os.system("") workaround. 
In argument parsing, allow parse_args to accept argv, detect --no-art early, safely build the help description with a fallback default and log a warning on failure. Replace startup prints with logging.info and use the computed description for argparse. These changes make startup more robust and avoid surprising exceptions when ASCII art cannot be produced. --- dlclivegui/assets/ascii_art.py | 34 +++++++++++++++++++--------------- dlclivegui/main.py | 20 +++++++++++++++++--- 2 files changed, 36 insertions(+), 18 deletions(-) diff --git a/dlclivegui/assets/ascii_art.py b/dlclivegui/assets/ascii_art.py index 9f3ca76..2009734 100644 --- a/dlclivegui/assets/ascii_art.py +++ b/dlclivegui/assets/ascii_art.py @@ -47,7 +47,8 @@ def enable_windows_ansi_support() -> None: """ if os.name == "nt": # This call toggles the console mode to enable VT processing in many hosts - os.system("") + # Always leave the string empty. + os.system("") # This is a known, safe workaround to enable ANSI support on Windows. def get_terminal_width(default: int = 80) -> int: @@ -347,21 +348,24 @@ def build_help_description( enable_windows_ansi_support() desc = "DeepLabCut-Live GUI — launch the graphical interface." 
if desc is None else desc if static_banner is None: - banner = "\n".join( - generate_ascii_lines( - str(ASCII_IMAGE_PATH), - width=shutil.get_terminal_size((80, 24)).columns - 10, - aspect=0.5, - color=color, - fine=True, - invert=False, - crop_content=True, - crop_bg="white", - alpha_thresh=1, - crop_pad=1, - bg_bgr=(255, 255, 255), + try: + banner = "\n".join( + generate_ascii_lines( + str(ASCII_IMAGE_PATH), + width=shutil.get_terminal_size((80, 24)).columns - 10, + aspect=0.5, + color=color, + fine=True, + invert=False, + crop_content=True, + crop_bg="white", + alpha_thresh=1, + crop_pad=1, + bg_bgr=(255, 255, 255), + ) ) - ) + except (FileNotFoundError, RuntimeError, OSError): + banner = None else: banner = static_banner if banner and terminal_is_wide_enough(min_width=min_width): diff --git a/dlclivegui/main.py b/dlclivegui/main.py index 0f05966..04d55cf 100644 --- a/dlclivegui/main.py +++ b/dlclivegui/main.py @@ -55,8 +55,22 @@ def _sigint_handler(_signum, _frame) -> None: def parse_args(argv=None): + if argv is None: + argv = sys.argv[1:] + + default_desc = "Welcome to DeepLabCut-Live GUI!" 
+ no_art_flag = "--no-art" in argv + if not no_art_flag: + try: + desc = art.build_help_description() + except Exception as e: + logging.warning(f"Failed to build ASCII art for help description: {e}") + desc = default_desc + else: + desc = default_desc + parser = argparse.ArgumentParser( - description=art.build_help_description(), + description=desc, formatter_class=argparse.RawDescriptionHelpFormatter, ) parser.add_argument("--no-art", action="store_true", help="Disable ASCII art in help and when launching.") @@ -68,9 +82,9 @@ def main() -> None: # HiDPI pixmaps - always enabled in Qt 6 so no need to set it explicitly # QApplication.setAttribute(Qt.ApplicationAttribute.AA_UseHighDpiPixmaps, True) - print("Starting DeepLabCut-Live GUI...") + logging.info("Starting DeepLabCut-Live GUI...") if not args.no_art: - print(art.build_help_description(desc="Welcome to DeepLabCut-Live GUI!")) + logging.info(art.build_help_description(desc="Welcome to DeepLabCut-Live GUI!")) app = QApplication(sys.argv) app.setWindowIcon(QIcon(LOGO)) From 756836ba05610d2472cda22d8c3f5d861fe57aa9 Mon Sep 17 00:00:00 2001 From: C-Achard Date: Tue, 17 Feb 2026 08:19:05 +0100 Subject: [PATCH 14/60] Use fixtures to control SHOW_SPLASH in tests Replace the autouse fixture with explicit fixtures to toggle dlclivegui.gui.theme.SHOW_SPLASH per-test. Adds set_use_splash_true and set_use_splash_false fixtures, updates tests to accept them, and removes the manual SHOW_SPLASH override in test_main_without_splash. Also fixes a comment typo (thems.py -> theme.py) to make splash behavior explicit and avoid cross-test side effects. 
--- tests/gui/test_app_entrypoint.py | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/tests/gui/test_app_entrypoint.py b/tests/gui/test_app_entrypoint.py index 0d34a30..26b3315 100644 --- a/tests/gui/test_app_entrypoint.py +++ b/tests/gui/test_app_entrypoint.py @@ -24,14 +24,20 @@ def _import_fresh(): return importlib.import_module(MODULE_UNDER_TEST) -@pytest.fixture(autouse=True) +@pytest.fixture def set_use_splash_true(monkeypatch): - # Ensure thems.py SHOW_SPLASH is True for tests that rely on it, without affecting other tests + # Ensure theme.py SHOW_SPLASH is True for tests that rely on it, without affecting other tests monkeypatch.setattr("dlclivegui.gui.theme.SHOW_SPLASH", True) +@pytest.fixture +def set_use_splash_false(monkeypatch): + # Ensure theme.py SHOW_SPLASH is False for tests that rely on it, without affecting other tests + monkeypatch.setattr("dlclivegui.gui.theme.SHOW_SPLASH", False) + + @pytest.mark.gui -def test_main_with_splash(monkeypatch): +def test_main_with_splash(monkeypatch, set_use_splash_true): appmod = _import_fresh() # --- Patch Qt app & icon in the entry module's namespace --- @@ -101,7 +107,7 @@ def immediate_single_shot(ms, fn): @pytest.mark.gui -def test_main_without_splash(monkeypatch): +def test_main_without_splash(monkeypatch, set_use_splash_false): appmod = _import_fresh() # Patch Qt app creation & window icon @@ -111,9 +117,6 @@ def test_main_without_splash(monkeypatch): monkeypatch.setattr(appmod, "QApplication", QApplication_cls) monkeypatch.setattr(appmod, "QIcon", MagicMock(name="QIcon")) - # Force the no-splash branch - appmod.SHOW_SPLASH = False - # show_splash should not be called show_splash_mock = MagicMock(name="show_splash") monkeypatch.setattr(appmod, "show_splash", show_splash_mock) From 6893b69ab3ad93eb699c8e0846da83fc0ec0da11 Mon Sep 17 00:00:00 2001 From: C-Achard Date: Tue, 17 Feb 2026 08:28:54 +0100 Subject: [PATCH 15/60] Vectorize mapping and add color cache for ASCII 
Improve ASCII art rendering performance by replacing per-pixel Python loops with vectorized operations and a byte-level cache. _map_luminance_to_chars now uses a numpy char LUT and joins rows from a (H,W) array of 1-char strings. _color_ascii_lines avoids cv.split and per-pixel f-strings: it computes luminance in float, uses a uint16 index map, packs RGB into a uint32 color key, and caches formatted ANSI-colored byte sequences keyed by (color<<8)|idx. Lines are built as bytearrays and decoded once. Functionality/formatting is preserved while reducing Python-level overhead. --- dlclivegui/assets/ascii_art.py | 65 ++++++++++++++++++++++++++++------ 1 file changed, 54 insertions(+), 11 deletions(-) diff --git a/dlclivegui/assets/ascii_art.py b/dlclivegui/assets/ascii_art.py index 2009734..039f9a0 100644 --- a/dlclivegui/assets/ascii_art.py +++ b/dlclivegui/assets/ascii_art.py @@ -194,27 +194,70 @@ def resize_for_terminal(img: np.ndarray, width: int | None, aspect: float | None def _map_luminance_to_chars(gray: np.ndarray, fine: bool) -> Iterable[str]: ramp = ASCII_RAMP_FINE if fine else ASCII_RAMP_SIMPLE + ramp_arr = np.array(list(ramp), dtype=" Iterable[str]: ramp = ASCII_RAMP_FINE if fine else ASCII_RAMP_SIMPLE - b, g, r = cv.split(img_bgr) - lum = (0.0722 * b + 0.7152 * g + 0.2126 * r).astype(np.float32) + ramp_bytes = [c.encode("utf-8") for c in ramp] # 1-byte ASCII in practice + reset = b"\x1b[0m" + + # luminance in float32 like your current code + b = img_bgr[..., 0].astype(np.float32) + g = img_bgr[..., 1].astype(np.float32) + r = img_bgr[..., 2].astype(np.float32) + lum = 0.0722 * b + 0.7152 * g + 0.2126 * r if invert: lum = 255.0 - lum - idx = (lum / 255.0 * (len(ramp) - 1)).astype(np.int32) + + idx = (lum / 255.0 * (len(ramp) - 1)).astype(np.uint16) # small dtype is fine + + # Pack color into one int: 0xRRGGBB (faster dict key than tuple) + rr = img_bgr[..., 2].astype(np.uint32) + gg = img_bgr[..., 1].astype(np.uint32) + bb = img_bgr[..., 
0].astype(np.uint32) + color_key = (rr << 16) | (gg << 8) | bb # (H,W) uint32 + + # Cache: (color_key<<8)|idx -> bytes for full colored char INCLUDING reset + cache: dict[int, bytes] = {} + h, w = idx.shape - lines = [] + lines: list[str] = [] + for y in range(h): - seg = [] + ba = bytearray() + ck_row = color_key[y] + idx_row = idx[y] + img_bgr[y] # for extracting r/g/b when cache miss + for x in range(w): - ch = ramp[idx[y, x]] - bb, gg, rr = img_bgr[y, x] - seg.append(f"\x1b[38;2;{rr};{gg};{bb}m{ch}\x1b[0m") - lines.append("".join(seg)) + ik = int(idx_row[x]) + ck = int(ck_row[x]) + subkey = (ck << 8) | ik + + piece = cache.get(subkey) + if piece is None: + # Decode r,g,b from packed key (same as current rr,gg,bb) + rr_i = (ck >> 16) & 255 + gg_i = (ck >> 8) & 255 + bb_i = ck & 255 + + # EXACT same formatting as before + # \x1b[38;2;{rr};{gg};{bb}m{ch}\x1b[0m + prefix = f"\x1b[38;2;{rr_i};{gg_i};{bb_i}m".encode("ascii") + piece = prefix + ramp_bytes[ik] + reset + cache[subkey] = piece + + ba.extend(piece) + + lines.append(ba.decode("utf-8", errors="strict")) + return lines From 6d7a3e683b1433c74ab8c566d24000298d73b3da Mon Sep 17 00:00:00 2001 From: C-Achard Date: Tue, 17 Feb 2026 09:31:29 +0100 Subject: [PATCH 16/60] Add setuptools dynamic version & refine tests Add a setuptools dynamic version entry in pyproject.toml so package version is taken from dlclivegui.__version__. Also include package-data for dlclivegui.assets. Refactor tests/gui/test_app_entrypoint.py to import cv2 at module level and skip the test module early when OpenCV is not available (removed a duplicated try/except). Tighten the assertion in test_generate_ascii_lines_crop_alpha to compare the actual generated ASCII content (lines) rather than only lengths, and clean up related comments. 
--- pyproject.toml | 4 +++- tests/gui/test_app_entrypoint.py | 18 +++++++----------- 2 files changed, 10 insertions(+), 12 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 8b28f5d..6241705 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -72,9 +72,11 @@ Documentation = "https://github.com/DeepLabCut/DeepLabCut-live-GUI" # FIXME @C- Homepage = "https://github.com/DeepLabCut/DeepLabCut-live-GUI" Repository = "https://github.com/DeepLabCut/DeepLabCut-live-GUI" +[tool.setuptools.dynamic] +version = { attr = "dlclivegui.__version__" } + # [tool.setuptools] # include-package-data = true - [tool.setuptools.package-data] "dlclivegui.assets" = [ "*.png" ] [tool.setuptools.packages] diff --git a/tests/gui/test_app_entrypoint.py b/tests/gui/test_app_entrypoint.py index 26b3315..12007df 100644 --- a/tests/gui/test_app_entrypoint.py +++ b/tests/gui/test_app_entrypoint.py @@ -10,9 +10,11 @@ import numpy as np import pytest -# Import the module under test -# Adjust the import to your package layout as needed: -# from dlclivegui.assets import ascii as ascii_mod +try: + import cv2 as cv +except Exception: + pytest.skip("OpenCV (opencv-python) is required for these tests.", allow_module_level=True) + import dlclivegui.assets.ascii_art as ascii_mod MODULE_UNDER_TEST = "dlclivegui.main" @@ -141,12 +143,6 @@ def test_main_without_splash(monkeypatch, set_use_splash_false): win_instance.show.assert_called_once() -try: - import cv2 as cv -except Exception: - pytest.skip("OpenCV (opencv-python) is required for these tests.", allow_module_level=True) - - # ------------------------- # Fixtures & small helpers # ------------------------- @@ -378,8 +374,8 @@ def test_generate_ascii_lines_crop_alpha(tmp_png_bgra_logo, force_tty): ) # Both are non-empty; height may change either way depending on aspect ratio assert len(lines_no_crop) > 0 and len(lines_crop) > 0 - # Optional: assert they differ (most likely); comment out if flakiness observed - assert len(lines_crop) != 
len(lines_no_crop) + # Cropping should affect the generated ASCII content + assert lines_crop != lines_no_crop def test_print_ascii_writes_file(tmp_png_gray, force_tty, tmp_path): From 7230f696d1420e4ec3d22747a0293078fa7787c3 Mon Sep 17 00:00:00 2001 From: C-Achard Date: Tue, 17 Feb 2026 09:34:44 +0100 Subject: [PATCH 17/60] Optimize color ASCII rendering and fix module name Fix the OpenCV error message to reference dlclivegui.assets.ascii_art and refactor _color_ascii_lines for performance and clarity. Changes include: encode ASCII ramp once, clarify luminance comment, pack colors into 0xRRGGBB, use a prefix_cache for ANSI SGR color prefixes, emit a color sequence only when the color changes (using prev_ck), use memoryview for row access, append a single reset per line, and simplify character emission. Functionality and ANSI formatting are preserved while reducing per-character allocations and cache complexity. --- dlclivegui/assets/ascii_art.py | 59 ++++++++++++++++++---------------- 1 file changed, 32 insertions(+), 27 deletions(-) diff --git a/dlclivegui/assets/ascii_art.py b/dlclivegui/assets/ascii_art.py index 039f9a0..6359134 100644 --- a/dlclivegui/assets/ascii_art.py +++ b/dlclivegui/assets/ascii_art.py @@ -27,7 +27,7 @@ import cv2 as cv except Exception as e: # pragma: no cover raise RuntimeError( - "OpenCV (opencv-python) is required for dlclivegui.assets.ascii.\nInstall with: pip install opencv-python" + "OpenCV (opencv-python) is required for dlclivegui.assets.ascii_art.\nInstall with: pip install opencv-python" ) from e # Character ramps (dense -> sparse) @@ -205,10 +205,12 @@ def _map_luminance_to_chars(gray: np.ndarray, fine: bool) -> Iterable[str]: def _color_ascii_lines(img_bgr: np.ndarray, fine: bool, invert: bool) -> Iterable[str]: ramp = ASCII_RAMP_FINE if fine else ASCII_RAMP_SIMPLE - ramp_bytes = [c.encode("utf-8") for c in ramp] # 1-byte ASCII in practice + # ramp is ASCII; encode once + ramp_bytes = [c.encode("utf-8") for c in ramp] + reset 
= b"\x1b[0m" - # luminance in float32 like your current code + # Luminance (same coefficients you used; keep exact behavior) b = img_bgr[..., 0].astype(np.float32) g = img_bgr[..., 1].astype(np.float32) r = img_bgr[..., 2].astype(np.float32) @@ -216,45 +218,48 @@ def _color_ascii_lines(img_bgr: np.ndarray, fine: bool, invert: bool) -> Iterabl if invert: lum = 255.0 - lum - idx = (lum / 255.0 * (len(ramp) - 1)).astype(np.uint16) # small dtype is fine + idx = (lum / 255.0 * (len(ramp) - 1)).astype(np.uint16) - # Pack color into one int: 0xRRGGBB (faster dict key than tuple) + # Pack color into 0xRRGGBB for fast comparisons rr = img_bgr[..., 2].astype(np.uint32) gg = img_bgr[..., 1].astype(np.uint32) bb = img_bgr[..., 0].astype(np.uint32) color_key = (rr << 16) | (gg << 8) | bb # (H,W) uint32 - # Cache: (color_key<<8)|idx -> bytes for full colored char INCLUDING reset - cache: dict[int, bytes] = {} + # Cache SGR prefixes by packed color + # e.g. 0xRRGGBB -> b"\x1b[38;2;R;G;Bm" + prefix_cache: dict[int, bytes] = {} h, w = idx.shape lines: list[str] = [] for y in range(h): ba = bytearray() - ck_row = color_key[y] - idx_row = idx[y] - img_bgr[y] # for extracting r/g/b when cache miss + + ck_row = memoryview(color_key[y]) + idx_row = memoryview(idx[y]) + + prev_ck: int | None = None for x in range(w): - ik = int(idx_row[x]) ck = int(ck_row[x]) - subkey = (ck << 8) | ik - - piece = cache.get(subkey) - if piece is None: - # Decode r,g,b from packed key (same as current rr,gg,bb) - rr_i = (ck >> 16) & 255 - gg_i = (ck >> 8) & 255 - bb_i = ck & 255 - - # EXACT same formatting as before - # \x1b[38;2;{rr};{gg};{bb}m{ch}\x1b[0m - prefix = f"\x1b[38;2;{rr_i};{gg_i};{bb_i}m".encode("ascii") - piece = prefix + ramp_bytes[ik] + reset - cache[subkey] = piece - - ba.extend(piece) + + # Emit new color code only when color changes + if ck != prev_ck: + prefix = prefix_cache.get(ck) + if prefix is None: + rr_i = (ck >> 16) & 255 + gg_i = (ck >> 8) & 255 + bb_i = ck & 255 + prefix = 
f"\x1b[38;2;{rr_i};{gg_i};{bb_i}m".encode("ascii") + prefix_cache[ck] = prefix + ba.extend(prefix) + prev_ck = ck + + ba.extend(ramp_bytes[int(idx_row[x])]) + + # Reset once per line to prevent color bleed into subsequent terminal output + ba.extend(reset) lines.append(ba.decode("utf-8", errors="strict")) From 58bb304354c0ae0c582706eb2f03ad39eb041b53 Mon Sep 17 00:00:00 2001 From: C-Achard Date: Tue, 17 Feb 2026 10:58:59 +0100 Subject: [PATCH 18/60] Refactor GenTL CTI discovery and loading Introduce a dedicated gentl_discovery utility to locate .cti GenTL producer files (from explicit files, env vars, glob patterns and extra dirs) and add selection helpers. Refactor GenTLCameraBackend to use the new discovery logic: resolve multiple CTIs, load all viable producers, persist CTI diagnostics into settings (cti_files, cti_files_loaded, cti_files_failed), and provide robust class-level Harvester building for discovery/quick_ping/get_device_count/rebind_settings. Remove legacy glob/search helpers and change behavior to prefer explicit user config while failing only when no producers can be found/loaded. Update tests and fixtures to use tmp_path-created dummy .cti files, adjust patched Harvester setup, and add unit tests covering CTI discovery and selection policies (including newest/raise-if-multiple behavior). 
--- dlclivegui/cameras/backends/gentl_backend.py | 382 ++++++++++++------ .../cameras/backends/utils/gentl_discovery.py | 244 +++++++++++ tests/cameras/backends/conftest.py | 24 +- tests/cameras/backends/test_gentl_backend.py | 137 ++++++- 4 files changed, 651 insertions(+), 136 deletions(-) create mode 100644 dlclivegui/cameras/backends/utils/gentl_discovery.py diff --git a/dlclivegui/cameras/backends/gentl_backend.py b/dlclivegui/cameras/backends/gentl_backend.py index c74c0ab..e4db409 100644 --- a/dlclivegui/cameras/backends/gentl_backend.py +++ b/dlclivegui/cameras/backends/gentl_backend.py @@ -3,17 +3,16 @@ # dlclivegui/cameras/backends/gentl_backend.py from __future__ import annotations -import glob import logging -import os import time -from collections.abc import Iterable from typing import ClassVar import cv2 import numpy as np from ..base import CameraBackend, SupportLevel, register_backend +from ..factory import DetectedCamera +from .utils import gentl_discovery as cti_finder LOG = logging.getLogger(__name__) @@ -50,12 +49,6 @@ def __init__(self, settings): if not isinstance(ns, dict): ns = {} - # --- CTI / transport configuration --- - self._cti_file: str | None = ns.get("cti_file") or props.get("cti_file") - self._cti_search_paths: tuple[str, ...] = self._parse_cti_paths( - ns.get("cti_search_paths", props.get("cti_search_paths")) - ) - # --- Fast probe mode (CameraProbeWorker sets this) --- # When fast_start=True, open() should avoid starting acquisition if possible. self._fast_start: bool = bool(ns.get("fast_start", False)) @@ -171,7 +164,7 @@ def static_capabilities(cls) -> dict[str, SupportLevel]: @classmethod def get_device_count(cls) -> int: - """Get the actual number of GenTL devices detected by Harvester. + """Get the number of GenTL devices detected by Harvester. Returns the number of devices found, or -1 if detection fails. 
""" @@ -180,16 +173,11 @@ def get_device_count(cls) -> int: harvester = None try: - harvester = Harvester() - # Use the static helper to find CTI file with default patterns - cti_file = cls._search_cti_file(cls._DEFAULT_CTI_PATTERNS) - - if not cti_file: + harvester, _, _ = cls._build_harvester_for_discovery(strict_single=False) + if harvester is None: return -1 - - harvester.add_file(cti_file) - harvester.update() - return len(harvester.device_info_list) + infos = harvester.device_info_list or [] + return len(infos) except Exception: return -1 finally: @@ -199,8 +187,150 @@ def get_device_count(cls) -> int: except Exception: pass + def _resolve_cti_files_for_settings(self) -> list[str]: + """ + Resolve CTI files to load. + + Policy: + - If the user explicitly provides ctis (cti_files / cti_file), use only those. + - Otherwise, discover all CTIs (env vars + configured patterns/dirs) and return all. + - Never raise just because multiple CTIs exist. + - Raise only when none are found. + """ + props = self.settings.properties if isinstance(self.settings.properties, dict) else {} + ns = props.get(self.OPTIONS_KEY, {}) + if not isinstance(ns, dict): + ns = {} + + # Explicit CTIs (namespace first, then legacy top-level) + ns_cti_files = ns.get("cti_files") + ns_cti_file = ns.get("cti_file") + legacy_cti_files = props.get("cti_files") + legacy_cti_file = props.get("cti_file") + + # 1) If user provided explicit list/file in namespace, use that only + if ns_cti_files or ns_cti_file: + candidates, diag = cti_finder.discover_cti_files( + cti_file=str(ns_cti_file) if ns_cti_file else None, + cti_files=cti_finder.cti_files_as_list(ns_cti_files) if ns_cti_files else None, + include_env=False, + must_exist=True, + ) + if not candidates: + raise RuntimeError( + "No valid GenTL producer (.cti) found from properties.gentl.cti_file/cti_files.\n\n" + f"Discovery details:\n{diag.summarize()}" + ) + return list(candidates) + + # 2) If user provided explicit list/file in legacy 
top-level, use that only + if legacy_cti_files or legacy_cti_file: + candidates, diag = cti_finder.discover_cti_files( + cti_file=str(legacy_cti_file) if legacy_cti_file else None, + cti_files=cti_finder.cti_files_as_list(legacy_cti_files) if legacy_cti_files else None, + include_env=False, + must_exist=True, + ) + if not candidates: + raise RuntimeError( + "No valid GenTL producer (.cti) found from properties.cti_file/cti_files.\n\n" + f"Discovery details:\n{diag.summarize()}" + ) + return list(candidates) + + # 3) Discovery path: find all CTIs from env vars + configured patterns/dirs + search_paths = ns.get("cti_search_paths", props.get("cti_search_paths")) + extra_dirs = ns.get("cti_dirs", props.get("cti_dirs")) + + candidates, diag = cti_finder.discover_cti_files( + cti_search_paths=cti_finder.cti_files_as_list(search_paths) if search_paths is not None else None, + include_env=True, + extra_dirs=cti_finder.cti_files_as_list(extra_dirs) if extra_dirs is not None else None, + recursive_env_search=False, + recursive_extra_search=False, + must_exist=True, + ) + + if not candidates: + raise RuntimeError( + "Could not locate any GenTL producer (.cti) file.\n\n" + "Fix options:\n" + " - Set camera.properties.gentl.cti_file to the full path of a .cti file\n" + " - Or set GENICAM_GENTL64_PATH / GENICAM_GENTL32_PATH to include the producer directory\n" + " - Or provide camera.properties.gentl.cti_search_paths with glob patterns\n\n" + f"Discovery details:\n{diag.summarize()}" + ) + + # Default: try to load ALL discovered producers + return list(candidates) + + @classmethod + def _build_harvester_for_discovery( + cls, + *, + strict_single: bool = False, # retained for optional future use + ): + """ + Build a Harvester instance and load CTI producers for class-level operations + (discover_devices, quick_ping, get_device_count, rebind_settings). + + Default policy: try to load ALL discovered producers. 
+ """ + if Harvester is None: + return None, [], None + + candidates, diag = cti_finder.discover_cti_files( + include_env=True, + cti_search_paths=list(cls._DEFAULT_CTI_PATTERNS), + must_exist=True, + ) + + if not candidates: + return None, [], diag + + # Default: load all candidates + cti_files = list(candidates) + + # Optional strict mode (off by default) + if strict_single: + # If you ever want strict, use choose_cti_files here; otherwise ignore. + cti_files = cti_finder.choose_cti_files( + cti_files, policy=cti_finder.GenTLDiscoveryPolicy.RAISE_IF_MULTIPLE, max_files=1 + ) + + harvester = Harvester() + loaded: list[str] = [] + failures: list[tuple[str, str]] = [] + + for cti in cti_files: + try: + harvester.add_file(cti) + loaded.append(cti) + except Exception as exc: + failures.append((cti, str(exc))) + LOG.warning("Failed to load CTI '%s': %s", cti, exc) + + if not loaded: + try: + harvester.reset() + except Exception: + pass + return None, [], diag + + try: + harvester.update() + except Exception as exc: + LOG.warning("Harvester.update() failed during discovery: %s", exc) + try: + harvester.reset() + except Exception: + pass + return None, loaded, diag + + return harvester, loaded, diag + def open(self) -> None: - if Harvester is None: # pragma: no cover - optional dependency + if Harvester is None: # pragma: no cover raise RuntimeError( "The 'harvesters' package is required for the GenTL backend. Install it via 'pip install harvesters'." ) @@ -214,23 +344,60 @@ def open(self) -> None: ns = {} props[self.OPTIONS_KEY] = ns + # Resolve CTIs (may return many). This no longer raises just because there are multiple. 
+ cti_files = self._resolve_cti_files_for_settings() + self._harvester = Harvester() - # Resolve CTI file: explicit > configured > search - cti_file = self._cti_file or ns.get("cti_file") or props.get("cti_file") or self._find_cti_file() - self._harvester.add_file(cti_file) + loaded: list[str] = [] + failed: list[tuple[str, str]] = [] + + for cti in cti_files: + try: + self._harvester.add_file(cti) + loaded.append(cti) + except Exception as exc: + failed.append((cti, str(exc))) + LOG.warning("Failed to load CTI '%s': %s", cti, exc) + + # Persist diagnostics for UI / debugging + ns["cti_files"] = [str(p) for p in cti_files] # all resolved candidates + ns["cti_files_loaded"] = [str(p) for p in loaded] # successfully added to harvester + ns["cti_files_failed"] = [{"cti": c, "error": e} for c, e in failed] # load failures + + # Keep single-cti convenience key for backward compatibility / display + if loaded: + ns["cti_file"] = str(loaded[0]) + elif cti_files: + ns["cti_file"] = str(cti_files[0]) # best effort + + if not loaded: + raise RuntimeError( + "No GenTL producer (.cti) could be loaded.\n\n" + f"Resolved CTIs: {cti_files}\n" + f"Failures: {failed}\n" + "Fix: remove/repair incompatible producers or" + " set properties.gentl.cti_file to a known working producer." + ) + + # Update device list after loading producers self._harvester.update() if not self._harvester.device_info_list: - raise RuntimeError("No GenTL cameras detected via Harvesters") + raise RuntimeError( + "No GenTL cameras detected via Harvesters after loading producers.\n\n" + f"Loaded CTIs: {loaded}\n" + f"Failed CTIs: {failed}\n" + "Fix: ensure your camera vendor's GenTL producer is installed and working." 
+ ) infos = list(self._harvester.device_info_list) - # Helper: robustly read device_info fields (supports dict-like or attribute-like entries) + # Helper: robustly read device_info fields (dict-like or attribute-like) def _info_get(info, key: str, default=None): try: if hasattr(info, "get"): - v = info.get(key) # type: ignore[attr-defined] + v = info.get(key) if v is not None: return v except Exception: @@ -250,12 +417,11 @@ def _info_get(info, key: str, default=None): selected_index: int | None = None selected_serial: str | None = None - # 1) Try stable device_id first (supports "serial:..." and "fp:...") target_device_id = self._device_id or ns.get("device_id") or props.get("device_id") if target_device_id: target_device_id = str(target_device_id).strip() - # Match exact against computed device_id_from_info(info) + # Exact match against computed device_id for idx, info in enumerate(infos): try: did = self._device_id_from_info(info) @@ -296,7 +462,7 @@ def _info_get(info, key: str, default=None): f"Ambiguous GenTL serial match for '{serial_target}'. Candidates: {candidates}" ) - # 2) Try legacy serial selection if still not selected + # Legacy serial selection fallback if selected_index is None: serial = self._serial_number if serial: @@ -327,7 +493,7 @@ def _info_get(info, key: str, default=None): available = [str(_info_get(i, "serial_number", "")).strip() for i in infos] raise RuntimeError(f"Camera with serial '{serial}' not found. 
Available cameras: {available}") - # 3) Fallback to index selection + # Index fallback if selected_index is None: device_count = len(infos) if requested_index < 0 or requested_index >= device_count: @@ -336,20 +502,17 @@ def _info_get(info, key: str, default=None): sn = _info_get(infos[selected_index], "serial_number", "") selected_serial = str(sn).strip() if sn else None - # Update settings.index to the actual selected index (important for UI merge-back + stability) + # Update settings.index to actual selected index (UI stability) self.settings.index = int(selected_index) selected_info = infos[int(selected_index)] - # ------------------------------------------------------------------ - # Create ImageAcquirer using the latest Harvesters API: Harvester.create(...) - # ------------------------------------------------------------------ + # Create ImageAcquirer via Harvester.create(...) try: if selected_serial: self._acquirer = self._harvester.create({"serial_number": str(selected_serial)}) else: self._acquirer = self._harvester.create(int(selected_index)) except TypeError: - # Some versions accept keyword argument; keep as a safety net without reintroducing legacy API. 
if selected_serial: self._acquirer = self._harvester.create({"serial_number": str(selected_serial)}) else: @@ -358,21 +521,16 @@ def _info_get(info, key: str, default=None): remote = self._acquirer.remote_device node_map = remote.node_map - # Resolve human label for UI self._device_label = self._resolve_device_label(node_map) - # ------------------------------------------------------------------ - # Apply configuration (existing behavior) - # ------------------------------------------------------------------ + # Apply configuration self._configure_pixel_format(node_map) self._configure_resolution(node_map) self._configure_exposure(node_map) self._configure_gain(node_map) self._configure_frame_rate(node_map) - # ------------------------------------------------------------------ - # Capture "actual" telemetry for GUI (existing behavior) - # ------------------------------------------------------------------ + # Read back telemetry try: self._actual_width = int(node_map.Width.value) self._actual_height = int(node_map.Height.value) @@ -394,9 +552,7 @@ def _info_get(info, key: str, default=None): except Exception: self._actual_gain = None - # ------------------------------------------------------------------ - # Persist identity + richer device metadata back into settings for UI merge-back - # ------------------------------------------------------------------ + # Persist identity + metadata computed_id = None try: computed_id = self._device_id_from_info(selected_info) @@ -408,16 +564,13 @@ def _info_get(info, key: str, default=None): elif selected_serial: ns["device_id"] = f"serial:{selected_serial}" - # Canonical serial storage if selected_serial: ns["serial_number"] = str(selected_serial) ns["device_serial_number"] = str(selected_serial) - # UI-friendly name if self._device_label: ns["device_name"] = str(self._device_label) - # Extra metadata from discovery info (helps debugging and stable identity fallbacks) ns["device_display_name"] = str(_info_get(selected_info, 
"display_name", "") or "") ns["device_info_id"] = str(_info_get(selected_info, "id_", "") or "") ns["device_vendor"] = str(_info_get(selected_info, "vendor", "") or "") @@ -427,12 +580,7 @@ def _info_get(info, key: str, default=None): ns["device_version"] = str(_info_get(selected_info, "version", "") or "") ns["device_access_status"] = _info_get(selected_info, "access_status", None) - # Preserve CTI used (useful for stable operation) - ns["cti_file"] = str(cti_file) - - # ------------------------------------------------------------------ - # Start streaming unless fast_start probe mode is requested - # ------------------------------------------------------------------ + # Start acquisition unless fast_start if getattr(self, "_fast_start", False): LOG.info("GenTL open() in fast_start probe mode: acquisition not started.") return @@ -505,13 +653,15 @@ def discover_devices( """ Rich discovery path for CameraFactory.detect_cameras(). Returns a list of DetectedCamera with device_id filled when possible. + + Cross-platform CTI discovery: + - Uses GENICAM_GENTL64_PATH / GENICAM_GENTL32_PATH when available + - Falls back to built-in Windows patterns + - Best-effort loads multiple CTI producers """ if Harvester is None: return [] - # Local import to avoid circulars at import time - from ..factory import DetectedCamera - def _canceled() -> bool: return bool(should_cancel and should_cancel()) @@ -520,17 +670,15 @@ def _canceled() -> bool: if progress_cb: progress_cb("Initializing GenTL discovery…") - harvester = Harvester() + harvester, loaded, _ = cls._build_harvester_for_discovery(strict_single=False) - # Use default CTI search; we don't have per-camera settings here. 
- cti_file = cls._search_cti_file(cls._DEFAULT_CTI_PATTERNS) - if not cti_file: + if harvester is None or not loaded: if progress_cb: - progress_cb("No .cti found (GenTL producer missing).") + progress_cb("No GenTL producers could be loaded.") return [] - harvester.add_file(cti_file) - harvester.update() + if progress_cb: + progress_cb(f"Loaded {len(loaded)} GenTL producer(s). Scanning devices…") infos = list(harvester.device_info_list or []) if not infos: @@ -543,8 +691,8 @@ def _canceled() -> bool: if _canceled(): break - # Create a label for the UI, using display_name if available, otherwise vendor/model/serial. info = infos[idx] + display_name = None try: display_name = ( @@ -565,10 +713,7 @@ def _canceled() -> bool: or (info.get("serial_number") if hasattr(info, "get") else None) or "" ) - vendor = str(vendor).strip() - model = str(model).strip() - serial = str(serial).strip() - + vendor, model, serial = str(vendor).strip(), str(model).strip(), str(serial).strip() label = f"{vendor} {model}".strip() if (vendor or model) else f"GenTL device {idx}" if serial: label = f"{label} ({serial})" @@ -580,7 +725,6 @@ def _canceled() -> bool: index=idx, label=label, device_id=device_id, - # GenTL usually doesn't expose vid/pid/path consistently; leave None unless you have it vid=None, pid=None, path=None, @@ -595,8 +739,6 @@ def _canceled() -> bool: return out except Exception: - # Returning None would trigger probing fallback; but since you declared discovery supported, - # returning [] is usually less surprising than a slow probe storm. return [] finally: if harvester is not None: @@ -610,6 +752,10 @@ def rebind_settings(cls, settings): """ If a stable identity exists in settings.properties['gentl'], map it to the correct current index (and serial_number if available). + + Strategy: + - If ctis were previously persisted (cti_files/cti_file), prefer those. + - Otherwise, fall back to env-var + pattern discovery (best-effort). 
""" if Harvester is None: return settings @@ -625,25 +771,48 @@ def rebind_settings(cls, settings): harvester = None try: - harvester = Harvester() - cti_file = ns.get("cti_file") or props.get("cti_file") or cls._search_cti_file(cls._DEFAULT_CTI_PATTERNS) - if not cti_file: - return settings - - harvester.add_file(cti_file) - harvester.update() + # Prefer persisted CTIs for stability if present + explicit_files = ns.get("cti_files") or props.get("cti_files") + explicit_file = ns.get("cti_file") or props.get("cti_file") + + if explicit_files or explicit_file: + candidates, diag = cti_finder.discover_cti_files( + cti_file=explicit_file, + cti_files=cti_finder.cti_files_as_list(explicit_files), + include_env=False, + must_exist=True, + ) + if not candidates: + return settings + + harvester = Harvester() + loaded: list[str] = [] + for cti in candidates: + try: + harvester.add_file(cti) + loaded.append(cti) + except Exception: + continue + + if not loaded: + return settings + + harvester.update() + else: + harvester, loaded, diag = cls._build_harvester_for_discovery(strict_single=False) + if harvester is None: + return settings infos = list(harvester.device_info_list or []) if not infos: return settings - # Try exact match by computed device_id first + target_id_str = str(target_id).strip() + match_index = None match_serial = None - # Normalize - target_id_str = str(target_id).strip() - + # 1) Exact match by computed device_id for idx, info in enumerate(infos): dev_id = cls._device_id_from_info(info) if dev_id and dev_id == target_id_str: @@ -651,7 +820,7 @@ def rebind_settings(cls, settings): match_serial = getattr(info, "serial_number", None) break - # If not found, fallback: treat target as serial-ish substring (legacy behavior) + # 2) Fallback: treat target as serial-ish substring if match_index is None: for idx, info in enumerate(infos): serial = getattr(info, "serial_number", None) @@ -666,7 +835,7 @@ def rebind_settings(cls, settings): # Apply rebinding 
settings.index = int(match_index) - # Keep namespace consistent for open() + # Ensure namespace exists if not isinstance(settings.properties, dict): settings.properties = {} ns2 = settings.properties.setdefault(cls.OPTIONS_KEY, {}) @@ -674,7 +843,6 @@ def rebind_settings(cls, settings): ns2 = {} settings.properties[cls.OPTIONS_KEY] = ns2 - # If we got a serial, save it for open() selection (backward compatible) if match_serial: ns2["serial_number"] = str(match_serial) ns2["device_id"] = target_id_str @@ -682,7 +850,6 @@ def rebind_settings(cls, settings): return settings except Exception: - # Any failure should not prevent fallback to index-based open return settings finally: if harvester is not None: @@ -702,12 +869,9 @@ def quick_ping(cls, index: int, _unused=None) -> bool: harvester = None try: - harvester = Harvester() - cti_file = cls._search_cti_file(cls._DEFAULT_CTI_PATTERNS) - if not cti_file: + harvester, _, _ = cls._build_harvester_for_discovery(strict_single=False) + if harvester is None: return False - harvester.add_file(cti_file) - harvester.update() infos = harvester.device_info_list or [] return 0 <= int(index) < len(infos) except Exception: @@ -795,15 +959,6 @@ def close(self) -> None: # Helpers # ------------------------------------------------------------------ - def _parse_cti_paths(self, value) -> tuple[str, ...]: - if value is None: - return self._DEFAULT_CTI_PATTERNS - if isinstance(value, str): - return (value,) - if isinstance(value, Iterable): - return tuple(str(item) for item in value) - return self._DEFAULT_CTI_PATTERNS - def _parse_crop(self, crop) -> tuple[int, int, int, int] | None: if isinstance(crop, (list, tuple)) and len(crop) == 4: return tuple(int(v) for v in crop) @@ -881,31 +1036,6 @@ def _configure_resolution(self, node_map) -> None: else: LOG.info(f"Resolution set to {actual_width}x{actual_height}") - @staticmethod - def _search_cti_file(patterns: tuple[str, ...]) -> str | None: - """Search for a CTI file using the given 
patterns. - - Returns the first CTI file found, or None if none found. - """ - for pattern in patterns: - for file_path in glob.glob(pattern): - if os.path.isfile(file_path): - return file_path - return None - - def _find_cti_file(self) -> str: - """Find a CTI file using configured or default search paths. - - Raises RuntimeError if no CTI file is found. - """ - cti_file = self._search_cti_file(self._cti_search_paths) - if cti_file is None: - raise RuntimeError( - "Could not locate a GenTL producer (.cti) file. Set 'cti_file' in " - "camera.properties or provide search paths via 'cti_search_paths'." - ) - return cti_file - def _available_serials(self) -> list[str]: assert self._harvester is not None serials: list[str] = [] diff --git a/dlclivegui/cameras/backends/utils/gentl_discovery.py b/dlclivegui/cameras/backends/utils/gentl_discovery.py new file mode 100644 index 0000000..ca73662 --- /dev/null +++ b/dlclivegui/cameras/backends/utils/gentl_discovery.py @@ -0,0 +1,244 @@ +"""Helpers to locate .cti GenTL producer files from various sources +(explicit, env vars, glob patterns, etc.) 
for GenTL-based camera backends.""" + +# dlclivegui/cameras/backends/utils/gentl_discovery.py +from __future__ import annotations + +import glob +import os +from collections.abc import Iterable, Sequence +from dataclasses import dataclass, field +from enum import Enum, auto +from pathlib import Path + + +class GenTLDiscoveryPolicy(Enum): + FIRST = auto() # default: take first N candidates in order found + NEWEST = auto() # take N candidates with most recent modification time (mtime) + RAISE_IF_MULTIPLE = auto() # if > N candidates, raise an error to avoid ambiguity (forces explicit config) + + +@dataclass +class CTIDiscoveryDiagnostics: + explicit_files: list[str] = field(default_factory=list) + glob_patterns: list[str] = field(default_factory=list) + env_vars_used: dict[str, str] = field(default_factory=dict) # name -> raw value + env_paths_expanded: list[str] = field(default_factory=list) # directories/files derived from env vars + extra_dirs: list[str] = field(default_factory=list) + + candidates: list[str] = field(default_factory=list) + rejected: list[tuple[str, str]] = field(default_factory=list) # (path, reason) + + def summarize(self) -> str: + lines = [] + if self.explicit_files: + lines.append(f"Explicit CTI file(s): {self.explicit_files}") + if self.glob_patterns: + lines.append(f"CTI glob pattern(s): {self.glob_patterns}") + if self.env_vars_used: + # Keep raw env var values in summary; you can redact if needed + lines.append(f"Env vars: {self.env_vars_used}") + if self.env_paths_expanded: + lines.append(f"Env-derived path entries: {self.env_paths_expanded}") + if self.extra_dirs: + lines.append(f"Extra CTI dirs: {self.extra_dirs}") + if self.candidates: + lines.append(f"CTI candidate(s) ({len(self.candidates)}): {self.candidates}") + if self.rejected: + lines.append(f"Rejected ({len(self.rejected)}): " + "; ".join([f"{p} ({r})" for p, r in self.rejected])) + return "\n".join(lines) + + +def cti_files_as_list(value) -> list[str]: + if value is None: + 
return [] + if isinstance(value, (list, tuple, set)): + return [str(v) for v in value if v is not None and str(v).strip()] + s = str(value).strip() + return [s] if s else [] + + +def _normalize_path(p: str) -> str: + """ + Normalize a filesystem path in a cross-platform way: + - expands ~ and environment variables + - resolves to absolute where possible (without requiring existence) + """ + pp = Path(os.path.expandvars(os.path.expanduser(p))) + try: + # resolve(False) avoids raising if parts don't exist (py>=3.9) + return str(pp.resolve(strict=False)) + except Exception: + return str(pp.absolute()) + + +def _iter_cti_files_in_dir(directory: str, recursive: bool = False) -> Iterable[str]: + """ + Yield *.cti files in directory. Non-recursive by default (faster, safer). + """ + d = Path(directory) + if not d.is_dir(): + return + if recursive: + yield from (str(p) for p in d.rglob("*.cti")) + else: + yield from (str(p) for p in d.glob("*.cti")) + + +def _split_env_paths(raw: str) -> list[str]: + """ + Split environment variable paths using os.pathsep (cross-platform). + Also trims whitespace and strips surrounding quotes. + """ + out: list[str] = [] + for item in (raw or "").split(os.pathsep): + s = item.strip().strip('"').strip("'") + if s: + out.append(s) + return out + + +def discover_cti_files( + *, + cti_file: str | None = None, + cti_files: Sequence[str] | None = None, + cti_search_paths: Sequence[str] | None = None, + include_env: bool = True, + env_vars: Sequence[str] = ("GENICAM_GENTL64_PATH", "GENICAM_GENTL32_PATH"), + extra_dirs: Sequence[str] | None = None, + recursive_env_search: bool = False, + recursive_extra_search: bool = False, + must_exist: bool = True, +) -> tuple[list[str], CTIDiscoveryDiagnostics]: + """ + Discover candidate GenTL producer (.cti) files from multiple sources. + + Returns: + (candidates, diagnostics) + + Notes: + - If must_exist=True (recommended), only existing files are returned. 
+ - Env vars are parsed as path lists; each entry may be a directory OR a .cti file. + """ + diag = CTIDiscoveryDiagnostics() + + # 1) Explicit CTI file(s) + explicit = [] + explicit += cti_files_as_list(cti_file) + explicit += cti_files_as_list(cti_files) + diag.explicit_files = explicit[:] + + # 2) Glob patterns + patterns = cti_files_as_list(cti_search_paths) + diag.glob_patterns = patterns[:] + + # 3) Env var paths + env_entries: list[str] = [] + if include_env: + for name in env_vars: + raw = os.environ.get(name, "") + if raw: + diag.env_vars_used[name] = raw + env_entries.extend(_split_env_paths(raw)) + diag.env_paths_expanded = env_entries[:] + + # 4) Extra directories + extras = cti_files_as_list(extra_dirs) + diag.extra_dirs = extras[:] + + candidates: list[str] = [] + rejected: list[tuple[str, str]] = [] + + def _add_candidate(path: str, reason_ctx: str) -> None: + norm = _normalize_path(path) + if must_exist and not Path(norm).is_file(): + rejected.append((norm, f"not a file ({reason_ctx})")) + return + if not norm.lower().endswith(".cti"): + rejected.append((norm, f"not a .cti ({reason_ctx})")) + return + candidates.append(norm) + + # Process explicit files + for p in explicit: + _add_candidate(p, "explicit") + + # Process glob patterns + for pat in patterns: + # Normalize only for readability; glob needs pattern semantics, so we expanduser/vars but keep globbing + expanded_pat = os.path.expandvars(os.path.expanduser(pat)) + for hit in glob.glob(expanded_pat): + _add_candidate(hit, f"glob:{pat}") + + # Process env var entries + for entry in env_entries: + norm_entry = _normalize_path(entry) + p = Path(norm_entry) + if p.is_file() and p.suffix.lower() == ".cti": + _add_candidate(norm_entry, "env:file") + elif p.is_dir(): + for f in _iter_cti_files_in_dir(norm_entry, recursive=recursive_env_search): + _add_candidate(f, "env:dir") + else: + rejected.append((norm_entry, "env entry missing (not file/dir)")) + + # Process extra dirs + for d in extras: + 
norm_d = _normalize_path(d) + if Path(norm_d).is_dir(): + for f in _iter_cti_files_in_dir(norm_d, recursive=recursive_extra_search): + _add_candidate(f, "extra:dir") + elif Path(norm_d).is_file(): + _add_candidate(norm_d, "extra:file") + else: + rejected.append((norm_d, "extra entry missing (not file/dir)")) + + # Deduplicate while preserving order + seen = set() + unique: list[str] = [] + for c in candidates: + key = c.lower() if os.name == "nt" else c # Windows case-insensitive + if key in seen: + continue + seen.add(key) + unique.append(c) + + diag.candidates = unique[:] + diag.rejected = rejected[:] + return unique, diag + + +def choose_cti_files( + candidates: Sequence[str], + *, + policy: GenTLDiscoveryPolicy = GenTLDiscoveryPolicy.FIRST, + max_files: int = 1, +) -> list[str]: + """ + Choose which CTI file(s) to load from candidates. + + policy: + - FIRST: take the first N candidates (default) + - NEWEST: take the N most recently modified candidates + - RAISE_IF_MULTIPLE: if more than N candidates, raise an error (to avoid ambiguity) + """ + cand = [str(c) for c in candidates if c] + if not cand: + return [] + + if policy == GenTLDiscoveryPolicy.NEWEST: + cand_sorted = sorted(cand, key=lambda p: Path(p).stat().st_mtime if Path(p).exists() else 0.0, reverse=True) + return cand_sorted[:max_files] + + if policy == GenTLDiscoveryPolicy.FIRST: + return cand[:max_files] + + if policy == GenTLDiscoveryPolicy.RAISE_IF_MULTIPLE: + if len(cand) > max_files: + raise RuntimeError( + f"Multiple GenTL producers (.cti) found ({len(cand)}). " + f"Please set properties.gentl.cti_file explicitly. 
Candidates: {cand}" + ) + return cand[:max_files] + + raise ValueError(f"Unknown policy: {policy!r}") diff --git a/tests/cameras/backends/conftest.py b/tests/cameras/backends/conftest.py index 0fb3ded..57a049a 100644 --- a/tests/cameras/backends/conftest.py +++ b/tests/cameras/backends/conftest.py @@ -830,10 +830,11 @@ def _make(): @pytest.fixture() -def patch_gentl_sdk(monkeypatch, fake_harvester_factory): +def patch_gentl_sdk(monkeypatch, fake_harvester_factory, tmp_path): """ Patch dlclivegui.cameras.backends.gentl_backend to use FakeHarvester + Fake timeout. - Also bypass CTI search logic so tests never hit filesystem/SDK paths. + Also ensure CTI discovery succeeds for classmethods (discover_devices/quick_ping) + by creating a real dummy .cti and exposing it via GENICAM_GENTL64_PATH. """ import dlclivegui.cameras.backends.gentl_backend as gb @@ -843,17 +844,19 @@ def patch_gentl_sdk(monkeypatch, fake_harvester_factory): # Keep your backend timeout contract as-is: it catches HarvesterTimeoutError monkeypatch.setattr(gb, "HarvesterTimeoutError", FakeGenTLTimeoutException, raising=False) - # Avoid filesystem CTI searching - monkeypatch.setattr(gb.GenTLCameraBackend, "_find_cti_file", lambda self: "dummy.cti", raising=False) - monkeypatch.setattr( - gb.GenTLCameraBackend, "_search_cti_file", staticmethod(lambda patterns: "dummy.cti"), raising=False - ) + # Create a real CTI file and advertise it via env var (cross-platform via os.pathsep) + cti_file = tmp_path / "dummy.cti" + if not cti_file.exists(): + cti_file.write_text("fake", encoding="utf-8") + + monkeypatch.setenv("GENICAM_GENTL64_PATH", str(tmp_path)) + monkeypatch.delenv("GENICAM_GENTL32_PATH", raising=False) return gb @pytest.fixture() -def gentl_settings_factory(): +def gentl_settings_factory(tmp_path): """ Convenience factory for CameraSettings for gentl backend tests. 
""" @@ -871,8 +874,13 @@ def _make( enabled=True, properties=None, ): + cti = tmp_path / "dummy.cti" + if not cti.exists(): + cti.write_text("fake", encoding="utf-8") props = properties if isinstance(properties, dict) else {} props.setdefault("gentl", {}) + props["gentl"] = dict(props["gentl"]) + props["gentl"].setdefault("cti_file", str(cti)) return CameraSettings( name=name, index=index, diff --git a/tests/cameras/backends/test_gentl_backend.py b/tests/cameras/backends/test_gentl_backend.py index d31bc9c..7ee8ac1 100644 --- a/tests/cameras/backends/test_gentl_backend.py +++ b/tests/cameras/backends/test_gentl_backend.py @@ -1,11 +1,20 @@ # tests/cameras/backends/test_gentl_backend.py from __future__ import annotations +import os +import time import types +from pathlib import Path import numpy as np import pytest +from dlclivegui.cameras.backends.utils.gentl_discovery import ( + GenTLDiscoveryPolicy, + choose_cti_files, + discover_cti_files, +) + # --------------------------------------------------------------------- # Core lifecycle + strict transaction model @@ -235,10 +244,12 @@ def test_open_persists_rich_metadata_in_namespace(patch_gentl_sdk, gentl_setting be.close() -def test_open_persists_cti_file_even_when_provided_in_props(patch_gentl_sdk, gentl_settings_factory): +def test_open_persists_cti_file_even_when_provided_in_props(patch_gentl_sdk, gentl_settings_factory, tmp_path): gb = patch_gentl_sdk - settings = gentl_settings_factory(properties={"cti_file": "from-props.cti", "gentl": {}}) + cti = tmp_path / "from-props.cti" + cti.write_text("dummy", encoding="utf-8") + settings = gentl_settings_factory(properties={"cti_file": str(cti), "gentl": {}}) be = gb.GenTLCameraBackend(settings) be.open() @@ -529,3 +540,125 @@ def create_image_acquirer(self, *args, **kwargs): # Error message should include some context about attempted creation methods msg = str(ei.value).lower() assert "failed to initialise gentl image acquirer" in msg + + +# 
---------------------------------- +# CTI discovery and selection logic +# ---------------------------------- + + +def _make_cti(tmp_path: Path, name: str = "Producer.cti") -> Path: + p = tmp_path / name + p.write_text("dummy", encoding="utf-8") + return p + + +def test_discover_explicit_cti_file(tmp_path): + cti = _make_cti(tmp_path, "A.cti") + + candidates, diag = discover_cti_files(cti_file=str(cti), include_env=False, must_exist=True) + + assert len(candidates) == 1 + assert Path(candidates[0]).name == "A.cti" + assert diag.explicit_files == [str(cti)] + + +def test_discover_missing_explicit_cti_is_rejected(tmp_path): + missing = tmp_path / "Missing.cti" + candidates, diag = discover_cti_files(cti_file=str(missing), include_env=False, must_exist=True) + + assert candidates == [] + assert any("not a file" in reason for _, reason in diag.rejected) + + +def test_discover_glob_patterns(tmp_path): + _make_cti(tmp_path, "One.cti") + _make_cti(tmp_path, "Two.cti") + + pattern = str(tmp_path / "*.cti") + candidates, diag = discover_cti_files(cti_search_paths=[pattern], include_env=False, must_exist=True) + + names = sorted(Path(c).name for c in candidates) + assert names == ["One.cti", "Two.cti"] + assert pattern in diag.glob_patterns + + +def test_discover_env_var_directory(monkeypatch, tmp_path): + _make_cti(tmp_path, "Env.cti") + + monkeypatch.setenv("GENICAM_GENTL64_PATH", str(tmp_path)) + + candidates, diag = discover_cti_files(include_env=True, must_exist=True) + + assert any(Path(c).name == "Env.cti" for c in candidates) + assert "GENICAM_GENTL64_PATH" in diag.env_vars_used + + +def test_discover_env_var_direct_file(monkeypatch, tmp_path): + cti = _make_cti(tmp_path, "Direct.cti") + + monkeypatch.setenv("GENICAM_GENTL64_PATH", str(cti)) + + candidates, diag = discover_cti_files(include_env=True, must_exist=True) + + assert candidates == [str(cti.resolve())] or Path(candidates[0]).name == "Direct.cti" + assert diag.env_paths_expanded # should include the raw env 
entry + + +def test_discover_env_var_multiple_entries(monkeypatch, tmp_path): + d1 = tmp_path / "d1" + d2 = tmp_path / "d2" + d1.mkdir() + d2.mkdir() + + _make_cti(d1, "A.cti") + _make_cti(d2, "B.cti") + + combined = f"{d1}{os.pathsep}{d2}" + monkeypatch.setenv("GENICAM_GENTL64_PATH", combined) + + candidates, _ = discover_cti_files(include_env=True, must_exist=True) + names = sorted(Path(c).name for c in candidates) + + assert names == ["A.cti", "B.cti"] + + +def test_discover_deduplicates_same_file_from_multiple_sources(monkeypatch, tmp_path): + cti = _make_cti(tmp_path, "Dup.cti") + + # Discover it twice: explicit + env dir + monkeypatch.setenv("GENICAM_GENTL64_PATH", str(tmp_path)) + + candidates, _ = discover_cti_files( + cti_file=str(cti), + include_env=True, + must_exist=True, + ) + + # Should appear only once + assert len(candidates) == 1 + assert Path(candidates[0]).name == "Dup.cti" + + +def test_choose_cti_files_raises_if_multiple_candidates(tmp_path): + c1 = _make_cti(tmp_path, "One.cti") + c2 = _make_cti(tmp_path, "Two.cti") + + # This is the key test: "duplicates should raise" (i.e. 
>1 CTI found) + with pytest.raises(RuntimeError) as exc: + choose_cti_files([str(c1), str(c2)], policy=GenTLDiscoveryPolicy.RAISE_IF_MULTIPLE, max_files=1) + + assert "Multiple GenTL producers" in str(exc.value) + + +def test_choose_cti_files_newest_policy(tmp_path): + old = _make_cti(tmp_path, "Old.cti") + new = _make_cti(tmp_path, "New.cti") + + # Ensure distinct mtimes + time.sleep(0.01) + new.write_text("dummy2", encoding="utf-8") + + selected = choose_cti_files([str(old), str(new)], policy=GenTLDiscoveryPolicy.NEWEST, max_files=1) + assert len(selected) == 1 + assert Path(selected[0]).name == "New.cti" From d21cd3e5200e378d6761d8e47170ae557389765b Mon Sep 17 00:00:00 2001 From: C-Achard Date: Thu, 19 Feb 2026 09:40:00 +0100 Subject: [PATCH 19/60] Address review comments --- dlclivegui/cameras/backends/gentl_backend.py | 2 ++ .../cameras/backends/utils/gentl_discovery.py | 13 +++++++++++-- tests/cameras/backends/test_gentl_backend.py | 8 +++++--- 3 files changed, 18 insertions(+), 5 deletions(-) diff --git a/dlclivegui/cameras/backends/gentl_backend.py b/dlclivegui/cameras/backends/gentl_backend.py index e4db409..2dd6551 100644 --- a/dlclivegui/cameras/backends/gentl_backend.py +++ b/dlclivegui/cameras/backends/gentl_backend.py @@ -372,6 +372,7 @@ def open(self) -> None: ns["cti_file"] = str(cti_files[0]) # best effort if not loaded: + self._harvester = None raise RuntimeError( "No GenTL producer (.cti) could be loaded.\n\n" f"Resolved CTIs: {cti_files}\n" @@ -384,6 +385,7 @@ def open(self) -> None: self._harvester.update() if not self._harvester.device_info_list: + self._harvester = None raise RuntimeError( "No GenTL cameras detected via Harvesters after loading producers.\n\n" f"Loaded CTIs: {loaded}\n" diff --git a/dlclivegui/cameras/backends/utils/gentl_discovery.py b/dlclivegui/cameras/backends/utils/gentl_discovery.py index ca73662..a8c2006 100644 --- a/dlclivegui/cameras/backends/utils/gentl_discovery.py +++ 
b/dlclivegui/cameras/backends/utils/gentl_discovery.py @@ -174,7 +174,7 @@ def _add_candidate(path: str, reason_ctx: str) -> None: for entry in env_entries: norm_entry = _normalize_path(entry) p = Path(norm_entry) - if p.is_file() and p.suffix.lower() == ".cti": + if p.is_file(): # let _add_candidate check .cti extension and existence _add_candidate(norm_entry, "env:file") elif p.is_dir(): for f in _iter_cti_files_in_dir(norm_entry, recursive=recursive_env_search): @@ -227,7 +227,16 @@ def choose_cti_files( return [] if policy == GenTLDiscoveryPolicy.NEWEST: - cand_sorted = sorted(cand, key=lambda p: Path(p).stat().st_mtime if Path(p).exists() else 0.0, reverse=True) + + def _newest_mtime(p: str) -> float: + try: + if not Path(p).exists(): + return 0.0 + return Path(p).stat().st_mtime + except OSError: + return 0.0 + + cand_sorted = sorted(cand, key=_newest_mtime, reverse=True) return cand_sorted[:max_files] if policy == GenTLDiscoveryPolicy.FIRST: diff --git a/tests/cameras/backends/test_gentl_backend.py b/tests/cameras/backends/test_gentl_backend.py index 7ee8ac1..8e9a40f 100644 --- a/tests/cameras/backends/test_gentl_backend.py +++ b/tests/cameras/backends/test_gentl_backend.py @@ -2,7 +2,6 @@ from __future__ import annotations import os -import time import types from pathlib import Path @@ -655,9 +654,12 @@ def test_choose_cti_files_newest_policy(tmp_path): old = _make_cti(tmp_path, "Old.cti") new = _make_cti(tmp_path, "New.cti") - # Ensure distinct mtimes - time.sleep(0.01) + # Ensure distinct mtimes (platform agnostic) new.write_text("dummy2", encoding="utf-8") + old_stat = old.stat() + new_stat = new.stat() + if new_stat.st_mtime <= old_stat.st_mtime: + os.utime(new, (new_stat.st_atime, old_stat.st_mtime + 1)) selected = choose_cti_files([str(old), str(new)], policy=GenTLDiscoveryPolicy.NEWEST, max_files=1) assert len(selected) == 1 From 70284dbc4aab56defbe00f0c51a6412022f51d7e Mon Sep 17 00:00:00 2001 From: C-Achard Date: Thu, 19 Feb 2026 16:08:10 +0100 
Subject: [PATCH 20/60] gentl: reset harvester and CTI failure tests Add a _reset_harvester helper on GenTLCameraBackend and call it where the harvester must be torn down on failure to ensure proper cleanup. Enhance FakeHarvester to support deterministic add_file failures and to record reset/add/update/create calls; keep create_image_acquirer for compatibility. Update test fixtures (conftest) to expose gentl_fail_add_file_for control, inject a dummy CTI only when none are explicitly provided, and expose gb.fail_add_file_for from patch_gentl_sdk. Add test helpers and new tests to isolate GENICAM env vars and verify CTI load diagnostics for all-success, partial-failure, and complete-failure scenarios. Also adjust some discovery tests to explicitly isolate the environment and tighten assertions. --- dlclivegui/cameras/backends/gentl_backend.py | 19 +++- tests/cameras/backends/conftest.py | 68 ++++++++--- tests/cameras/backends/test_gentl_backend.py | 114 ++++++++++++++++++- 3 files changed, 178 insertions(+), 23 deletions(-) diff --git a/dlclivegui/cameras/backends/gentl_backend.py b/dlclivegui/cameras/backends/gentl_backend.py index 2dd6551..405a7ab 100644 --- a/dlclivegui/cameras/backends/gentl_backend.py +++ b/dlclivegui/cameras/backends/gentl_backend.py @@ -372,7 +372,7 @@ def open(self) -> None: ns["cti_file"] = str(cti_files[0]) # best effort if not loaded: - self._harvester = None + self._reset_harvester() raise RuntimeError( "No GenTL producer (.cti) could be loaded.\n\n" f"Resolved CTIs: {cti_files}\n" @@ -385,7 +385,7 @@ def open(self) -> None: self._harvester.update() if not self._harvester.device_info_list: - self._harvester = None + self._reset_harvester() raise RuntimeError( "No GenTL cameras detected via Harvesters after loading producers.\n\n" f"Loaded CTIs: {loaded}\n" @@ -797,6 +797,7 @@ def rebind_settings(cls, settings): continue if not loaded: + cls._reset_select_harvester(harvester) return settings harvester.update() @@ -936,6 +937,20 @@ def 
stop(self) -> None: except Exception: pass + @staticmethod + def _reset_select_harvester(harvester) -> None: + if harvester is not None: + try: + harvester.reset() + except Exception: + pass + + def _reset_harvester(self) -> None: + try: + self._reset_select_harvester(self._harvester) + finally: + self._harvester = None + def close(self) -> None: if self._acquirer is not None: try: diff --git a/tests/cameras/backends/conftest.py b/tests/cameras/backends/conftest.py index 57a049a..2d965a2 100644 --- a/tests/cameras/backends/conftest.py +++ b/tests/cameras/backends/conftest.py @@ -2,6 +2,7 @@ from __future__ import annotations import importlib +import logging import os from dataclasses import dataclass from typing import Any @@ -9,6 +10,8 @@ import numpy as np import pytest +logger = logging.getLogger(__name__) + # ----------------------------- # Dependency detection helpers @@ -720,21 +723,20 @@ class FakeHarvester: Inventory-driven so tests can control enumeration. """ - def __init__(self, inventory: list[dict[str, Any]] | None = None): + def __init__(self, inventory: list[dict[str, Any]] | None = None, *, fail_add_file_for: set[str] | None = None): self._files: list[str] = [] self._inventory: list[dict[str, Any]] = list(inventory or []) self.device_info_list: list[Any] = [] + # NEW: failure control + self._fail_add_file_for = set(fail_add_file_for or []) + # Call tracing self.add_file_calls: list[str] = [] self.update_calls = 0 self.reset_calls = 0 self.create_calls: list[Any] = [] - def add_file(self, file_path: str): - self._files.append(str(file_path)) - self.add_file_calls.append(str(file_path)) - def update(self): self.update_calls += 1 # If not provided, default to a single fake device @@ -788,6 +790,16 @@ def create(self, selector=None, index: int | None = None, *args, **kwargs): def create_image_acquirer(self, *args, **kwargs): return self.create(*args, **kwargs) + def add_file(self, file_path: str): + p = str(file_path) + self.add_file_calls.append(p) + + 
# NEW: fail deterministically if requested + if p in self._fail_add_file_for: + raise RuntimeError(f"Simulated CTI load failure for: {p}") + + self._files.append(p) + # ----------------------------------------------------------------------------- # GentL fixtures: inventory, patching, settings factory @@ -817,34 +829,43 @@ def gentl_inventory(): @pytest.fixture() -def fake_harvester_factory(gentl_inventory): +def fake_harvester_factory(gentl_inventory, gentl_fail_add_file_for): """ - Factory that returns a FakeHarvester bound to the current gentl_inventory. - Allows tests to mutate gentl_inventory before calling backend.open(). + Factory that returns a FakeHarvester bound to the current gentl_inventory and + gentl_fail_add_file_for. Tests can mutate both before calling backend.open(). """ def _make(): - return FakeHarvester(inventory=gentl_inventory) + return FakeHarvester(inventory=gentl_inventory, fail_add_file_for=gentl_fail_add_file_for) return _make @pytest.fixture() -def patch_gentl_sdk(monkeypatch, fake_harvester_factory, tmp_path): +def gentl_fail_add_file_for(): + """ + Mutable set of CTI file paths that FakeHarvester.add_file should fail on. + Tests can add/remove paths to simulate partial/complete CTI load failures. + """ + return set() + + +@pytest.fixture() +def patch_gentl_sdk(monkeypatch, fake_harvester_factory, gentl_fail_add_file_for, tmp_path): """ Patch dlclivegui.cameras.backends.gentl_backend to use FakeHarvester + Fake timeout. - Also ensure CTI discovery succeeds for classmethods (discover_devices/quick_ping) - by creating a real dummy .cti and exposing it via GENICAM_GENTL64_PATH. + Ensure CTI discovery succeeds for classmethods by creating a real dummy .cti and + exposing it via GENICAM_GENTL64_PATH. 
""" import dlclivegui.cameras.backends.gentl_backend as gb # Patch Harvester symbol (the backend calls Harvester() directly) monkeypatch.setattr(gb, "Harvester", lambda: fake_harvester_factory(), raising=False) - # Keep your backend timeout contract as-is: it catches HarvesterTimeoutError + # Keep timeout contract monkeypatch.setattr(gb, "HarvesterTimeoutError", FakeGenTLTimeoutException, raising=False) - # Create a real CTI file and advertise it via env var (cross-platform via os.pathsep) + # Create a real CTI file and advertise it via env var cti_file = tmp_path / "dummy.cti" if not cti_file.exists(): cti_file.write_text("fake", encoding="utf-8") @@ -852,6 +873,9 @@ def patch_gentl_sdk(monkeypatch, fake_harvester_factory, tmp_path): monkeypatch.setenv("GENICAM_GENTL64_PATH", str(tmp_path)) monkeypatch.delenv("GENICAM_GENTL32_PATH", raising=False) + # OPTIONAL: expose failure control so tests can do gb.fail_add_file_for.add(...) + gb.fail_add_file_for = gentl_fail_add_file_for + return gb @@ -877,10 +901,22 @@ def _make( cti = tmp_path / "dummy.cti" if not cti.exists(): cti.write_text("fake", encoding="utf-8") + props = properties if isinstance(properties, dict) else {} props.setdefault("gentl", {}) - props["gentl"] = dict(props["gentl"]) - props["gentl"].setdefault("cti_file", str(cti)) + props["gentl"] = dict(props["gentl"]) # copy to avoid mutating caller dict unexpectedly + + ns = props["gentl"] + + # Detect whether CTIs were explicitly provided in *either* namespace or legacy keys + explicit_ns = bool(ns.get("cti_file") or ns.get("cti_files")) + explicit_legacy = bool(props.get("cti_file") or props.get("cti_files")) + + # Only inject a default dummy.cti if nothing explicit was provided + if not explicit_ns and not explicit_legacy: + logger.debug("No CTI file(s) explicitly provided in settings; injecting dummy CTI for gentl tests.") + ns.setdefault("cti_file", str(cti)) + return CameraSettings( name=name, index=index, diff --git 
a/tests/cameras/backends/test_gentl_backend.py b/tests/cameras/backends/test_gentl_backend.py index 8e9a40f..8d4094d 100644 --- a/tests/cameras/backends/test_gentl_backend.py +++ b/tests/cameras/backends/test_gentl_backend.py @@ -14,6 +14,31 @@ discover_cti_files, ) +# ---------------------------------------------------------------------- +# Helper functions +# ---------------------------------------------------------------------- + + +@pytest.fixture +def isolate_gentl_env(monkeypatch): + monkeypatch.delenv("GENICAM_GENTL64_PATH", raising=False) + monkeypatch.delenv("GENICAM_GENTL32_PATH", raising=False) + yield + + +def _force_only_these_ctis(settings, ctis: list[str]) -> None: + # Ensure namespace exists + props = settings.properties + props.setdefault("gentl", {}) + ns = props["gentl"] + + # Make sure no default single-cti sneaks in (dummy.cti) + ns.pop("cti_file", None) + props.pop("cti_file", None) + + # Explicit list + ns["cti_files"] = ctis + # --------------------------------------------------------------------- # Core lifecycle + strict transaction model @@ -582,7 +607,7 @@ def test_discover_glob_patterns(tmp_path): assert pattern in diag.glob_patterns -def test_discover_env_var_directory(monkeypatch, tmp_path): +def test_discover_env_var_directory(monkeypatch, tmp_path, isolate_gentl_env): _make_cti(tmp_path, "Env.cti") monkeypatch.setenv("GENICAM_GENTL64_PATH", str(tmp_path)) @@ -593,18 +618,19 @@ def test_discover_env_var_directory(monkeypatch, tmp_path): assert "GENICAM_GENTL64_PATH" in diag.env_vars_used -def test_discover_env_var_direct_file(monkeypatch, tmp_path): +def test_discover_env_var_direct_file(monkeypatch, tmp_path, isolate_gentl_env): cti = _make_cti(tmp_path, "Direct.cti") monkeypatch.setenv("GENICAM_GENTL64_PATH", str(cti)) candidates, diag = discover_cti_files(include_env=True, must_exist=True) - assert candidates == [str(cti.resolve())] or Path(candidates[0]).name == "Direct.cti" + assert len(candidates) == 1 + assert 
Path(candidates[0]).name == "Direct.cti" assert diag.env_paths_expanded # should include the raw env entry -def test_discover_env_var_multiple_entries(monkeypatch, tmp_path): +def test_discover_env_var_multiple_entries(monkeypatch, tmp_path, isolate_gentl_env): d1 = tmp_path / "d1" d2 = tmp_path / "d2" d1.mkdir() @@ -622,7 +648,7 @@ def test_discover_env_var_multiple_entries(monkeypatch, tmp_path): assert names == ["A.cti", "B.cti"] -def test_discover_deduplicates_same_file_from_multiple_sources(monkeypatch, tmp_path): +def test_discover_deduplicates_same_file_from_multiple_sources(monkeypatch, tmp_path, isolate_gentl_env): cti = _make_cti(tmp_path, "Dup.cti") # Discover it twice: explicit + env dir @@ -664,3 +690,81 @@ def test_choose_cti_files_newest_policy(tmp_path): selected = choose_cti_files([str(old), str(new)], policy=GenTLDiscoveryPolicy.NEWEST, max_files=1) assert len(selected) == 1 assert Path(selected[0]).name == "New.cti" + + +def test_open_persists_cti_load_diagnostics_all_success(patch_gentl_sdk, gentl_settings_factory, tmp_path): + gb = patch_gentl_sdk + + c1 = _make_cti(tmp_path, "A.cti") + c2 = _make_cti(tmp_path, "B.cti") + + # Provide multiple CTIs (how your backend reads these may vary) + settings = gentl_settings_factory(properties={"gentl": {"cti_files": [str(c1), str(c2)]}}) + _force_only_these_ctis(settings, [str(c1), str(c2)]) + be = gb.GenTLCameraBackend(settings) + + be.open() + + ns = settings.properties["gentl"] + assert ns["cti_files"] == [str(c1), str(c2)] + assert ns["cti_files_loaded"] == [str(c1), str(c2)] + # If dict: + assert ns["cti_files_failed"] == [] + + be.close() + + +def test_open_persists_cti_load_diagnostics_partial_failure(patch_gentl_sdk, gentl_settings_factory, tmp_path): + gb = patch_gentl_sdk + + ok = _make_cti(tmp_path, "OK.cti") + bad = _make_cti(tmp_path, "BAD.cti") + + gb.fail_add_file_for.clear() + gb.fail_add_file_for.add(str(bad)) + + settings = gentl_settings_factory(properties={"gentl": {"cti_files": 
[str(ok), str(bad)]}}) + _force_only_these_ctis(settings, [str(ok), str(bad)]) + + be = gb.GenTLCameraBackend(settings) + be.open() + + ns = settings.properties["gentl"] + assert ns["cti_files"] == [str(ok), str(bad)] + assert ns["cti_files_loaded"] == [str(ok)] + + failed = ns["cti_files_failed"] + assert isinstance(failed, list) + assert len(failed) == 1 + assert failed[0]["cti"] == str(bad) + assert isinstance(failed[0]["error"], str) and failed[0]["error"] + + be.close() + + +def test_open_persists_cti_load_diagnostics_complete_failure(patch_gentl_sdk, gentl_settings_factory, tmp_path): + gb = patch_gentl_sdk + + b1 = _make_cti(tmp_path, "B1.cti") + b2 = _make_cti(tmp_path, "B2.cti") + + gb.fail_add_file_for.clear() + gb.fail_add_file_for.update({str(b1), str(b2)}) + + settings = gentl_settings_factory(properties={"gentl": {"cti_files": [str(b1), str(b2)]}}) + _force_only_these_ctis(settings, [str(b1), str(b2)]) + be = gb.GenTLCameraBackend(settings) + + with pytest.raises(RuntimeError): + be.open() + + # Keys should still be persisted for debugging even though open failed. + ns = settings.properties.get("gentl", {}) + assert ns.get("cti_files") == [str(b1), str(b2)] + assert ns.get("cti_files_loaded") == [] + + failed = ns.get("cti_files_failed") + assert isinstance(failed, list) + assert sorted(d["cti"] for d in failed) == sorted([str(b1), str(b2)]) + for d in failed: + assert isinstance(d.get("error"), str) and d["error"] From 4c5f8e114da65630affcf88aa06d71a7b600c610 Mon Sep 17 00:00:00 2001 From: C-Achard Date: Thu, 19 Feb 2026 16:23:59 +0100 Subject: [PATCH 21/60] Bump GitHub Actions and refine help banner Update CI steps to use actions/checkout@v6 and actions/setup-python@v6. Revise ASCII help banner generation: import LOGO_ALPHA from the theme, remove static resource path, add max_width parameter, compute banner width from terminal size (clamped between min_width and max_width), and skip rendering the banner when stdout is not a TTY. 
Simplifies banner selection logic and keeps colored banner output when available. --- .github/workflows/python-package.yml | 4 ++-- dlclivegui/assets/ascii_art.py | 25 ++++++++++++++++--------- 2 files changed, 18 insertions(+), 11 deletions(-) diff --git a/.github/workflows/python-package.yml b/.github/workflows/python-package.yml index c354f24..3640f00 100644 --- a/.github/workflows/python-package.yml +++ b/.github/workflows/python-package.yml @@ -88,10 +88,10 @@ jobs: if: ${{ startsWith(github.ref, 'refs/tags/v') }} # only on tag pushes like v1.2.3 steps: - name: Checkout - uses: actions/checkout@v4 + uses: actions/checkout@v6 - name: Setup Python - uses: actions/setup-python@v5 + uses: actions/setup-python@v6 with: python-version: "3.x" diff --git a/dlclivegui/assets/ascii_art.py b/dlclivegui/assets/ascii_art.py index 6359134..51a8d2f 100644 --- a/dlclivegui/assets/ascii_art.py +++ b/dlclivegui/assets/ascii_art.py @@ -18,11 +18,12 @@ import shutil import sys from collections.abc import Iterable -from importlib import resources from typing import Literal import numpy as np +from dlclivegui.gui.theme import LOGO_ALPHA + try: import cv2 as cv except Exception as e: # pragma: no cover @@ -381,11 +382,10 @@ def print_ascii( # ----------------------------- # Optional: Help banner helpers # ----------------------------- -ASCII_IMAGE_PATH = resources.files("dlclivegui.assets") / "logo_transparent.png" def build_help_description( - static_banner: str | None = None, *, desc=None, color: ColorMode = "auto", min_width: int = 60 + static_banner: str | None = None, *, desc=None, color: ColorMode = "auto", min_width: int = 60, max_width: int = 120 ) -> str: """Return a help description string that conditionally includes a colored ASCII banner. @@ -395,12 +395,20 @@ def build_help_description( """ enable_windows_ansi_support() desc = "DeepLabCut-Live GUI — launch the graphical interface." 
if desc is None else desc - if static_banner is None: + if not sys.stdout.isatty() and terminal_is_wide_enough(min_width=min_width): + return desc + + banner: str | None + if static_banner is not None: + banner = static_banner + else: try: + term_width = get_terminal_width(default=max_width) + width = max(min(term_width, max_width), min_width) banner = "\n".join( generate_ascii_lines( - str(ASCII_IMAGE_PATH), - width=shutil.get_terminal_size((80, 24)).columns - 10, + str(LOGO_ALPHA), + width=width, aspect=0.5, color=color, fine=True, @@ -414,9 +422,8 @@ def build_help_description( ) except (FileNotFoundError, RuntimeError, OSError): banner = None - else: - banner = static_banner - if banner and terminal_is_wide_enough(min_width=min_width): + + if banner: if should_use_color(color): banner = f"\x1b[36m{banner}\x1b[0m" return banner + "\n" + desc From 0a72842e0747d4f77c1f98a9b369670a903a4ec2 Mon Sep 17 00:00:00 2001 From: C-Achard Date: Thu, 19 Feb 2026 16:28:30 +0100 Subject: [PATCH 22/60] Show banner only for help in TTY Avoid building the ASCII/banner art unless help is being requested: parse_args now detects -h/--help and only constructs the help description in that case. At startup, the banner is printed (not logged) only when stdout is a TTY and the terminal is wide enough; failures while building/printing the banner are caught to keep startup robust. Also removed dlclivegui/assets/* from the coverage omit list so those files will be considered by coverage tooling. --- dlclivegui/main.py | 17 ++++++++++++----- pyproject.toml | 1 - 2 files changed, 12 insertions(+), 6 deletions(-) diff --git a/dlclivegui/main.py b/dlclivegui/main.py index 04d55cf..eb444aa 100644 --- a/dlclivegui/main.py +++ b/dlclivegui/main.py @@ -60,7 +60,10 @@ def parse_args(argv=None): default_desc = "Welcome to DeepLabCut-Live GUI!" 
no_art_flag = "--no-art" in argv - if not no_art_flag: + wants_help = any(a in ("-h", "--help") for a in argv) + + # Only build banner description if we're about to print help + if wants_help and not no_art_flag: try: desc = art.build_help_description() except Exception as e: @@ -80,11 +83,15 @@ def parse_args(argv=None): def main() -> None: args, _unknown = parse_args() - # HiDPI pixmaps - always enabled in Qt 6 so no need to set it explicitly - # QApplication.setAttribute(Qt.ApplicationAttribute.AA_UseHighDpiPixmaps, True) logging.info("Starting DeepLabCut-Live GUI...") - if not args.no_art: - logging.info(art.build_help_description(desc="Welcome to DeepLabCut-Live GUI!")) + + # If you want a startup banner, PRINT it (not log), and only in TTY contexts. + if not args.no_art and sys.stdout.isatty() and art.terminal_is_wide_enough(): + try: + print(art.build_help_description(desc="Welcome to DeepLabCut-Live GUI!")) + except Exception: + # Keep startup robust; don't fail if banner fails + pass app = QApplication(sys.argv) app.setWindowIcon(QIcon(LOGO)) diff --git a/pyproject.toml b/pyproject.toml index 6241705..7fba79a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -135,7 +135,6 @@ exclude_lines = [ ] omit = [ "tests/*", - "dlclivegui/assets/*", ] [tool.coverage.run] branch = true From cf51c1f66201859fddbd4f772e94ba54a3f2dcac Mon Sep 17 00:00:00 2001 From: C-Achard Date: Thu, 19 Feb 2026 16:33:23 +0100 Subject: [PATCH 23/60] Move ascii_art tests to new file and require cv2 Extract the ASCII-art-related fixtures and tests from tests/gui/test_app_entrypoint.py into a new tests/gui/test_ascii_art.py to separate concerns and improve test organization. Replace pytest.skip on missing OpenCV with raising ImportError so tests fail fast when cv2 is not installed (ensures main deps are present). Keep a small assertion fix in the original file (win_instance.show.assert_called_once()). 
--- tests/gui/test_app_entrypoint.py | 276 ------------------------------ tests/gui/test_ascii_art.py | 278 +++++++++++++++++++++++++++++++ 2 files changed, 278 insertions(+), 276 deletions(-) create mode 100644 tests/gui/test_ascii_art.py diff --git a/tests/gui/test_app_entrypoint.py b/tests/gui/test_app_entrypoint.py index 12007df..0a68bb2 100644 --- a/tests/gui/test_app_entrypoint.py +++ b/tests/gui/test_app_entrypoint.py @@ -2,21 +2,11 @@ from __future__ import annotations import importlib -import os import sys -from pathlib import Path from unittest.mock import MagicMock -import numpy as np import pytest -try: - import cv2 as cv -except Exception: - pytest.skip("OpenCV (opencv-python) is required for these tests.", allow_module_level=True) - -import dlclivegui.assets.ascii_art as ascii_mod - MODULE_UNDER_TEST = "dlclivegui.main" @@ -141,269 +131,3 @@ def test_main_without_splash(monkeypatch, set_use_splash_false): show_splash_mock.assert_not_called() assert calls["count"] == 0 win_instance.show.assert_called_once() - - -# ------------------------- -# Fixtures & small helpers -# ------------------------- - - -@pytest.fixture -def tmp_png_gray(tmp_path: Path): - """Create a simple 16x8 gray gradient PNG without alpha.""" - h, w = 8, 16 - # Horizontal gradient from black to white in BGR - x = np.linspace(0, 255, w, dtype=np.uint8) - img = np.tile(x, (h, 1)) - bgr = cv.cvtColor(img, cv.COLOR_GRAY2BGR) - p = tmp_path / "gray.png" - assert cv.imwrite(str(p), bgr) - return p - - -@pytest.fixture -def tmp_png_bgra_logo(tmp_path: Path): - """Create a small BGRA image with a transparent border and opaque center.""" - h, w = 10, 20 - bgra = np.zeros((h, w, 4), dtype=np.uint8) - # Opaque blue rectangle in center - bgra[2:8, 5:15, 0] = 255 # B - bgra[2:8, 5:15, 3] = 255 # A - p = tmp_path / "logo_bgra.png" - assert cv.imwrite(str(p), bgra) - return p - - -def _force_isatty(monkeypatch, obj, value: bool): - """ - Ensure obj.isatty() returns value. 
- Try instance patch first; if the object disallows attribute assignment, - patch the method on its class. - """ - try: - # Try patching the instance directly - monkeypatch.setattr(obj, "isatty", lambda: value, raising=False) - except Exception: - # Fallback: patch the class method - cls = type(obj) - monkeypatch.setattr(cls, "isatty", lambda self: value, raising=True) - - -@pytest.fixture -def force_tty(monkeypatch): - """ - Pretend stdout is a TTY and provide a stable terminal size inside the - module-under-test namespace (and the actual sys). - """ - # NO_COLOR must be unset for should_use_color("auto") - monkeypatch.delenv("NO_COLOR", raising=False) - - # Make whatever stdout object exists report TTY=True - _force_isatty(monkeypatch, sys.stdout, True) - _force_isatty(monkeypatch, ascii_mod.sys.stdout, True) - - # Ensure terminal size used by the module is deterministic - monkeypatch.setattr( - ascii_mod.shutil, - "get_terminal_size", - lambda fallback=None: os.terminal_size((80, 24)), - raising=True, - ) - return sys.stdout # not used directly, but handy - - -@pytest.fixture -def force_notty(monkeypatch): - """ - Pretend stdout is not a TTY. 
- """ - _force_isatty(monkeypatch, sys.stdout, False) - _force_isatty(monkeypatch, ascii_mod.sys.stdout, False) - return sys.stdout - - -# ------------------------- -# Terminal / ANSI behavior -# ------------------------- - - -def test_get_terminal_width_tty(force_tty): - width = ascii_mod.get_terminal_width(default=123) - assert width == 80 - - -def test_get_terminal_width_notty(force_notty): - width = ascii_mod.get_terminal_width(default=123) - assert width == 123 - - -def test_should_use_color_auto_tty(force_tty, monkeypatch): - monkeypatch.delenv("NO_COLOR", raising=False) - assert ascii_mod.should_use_color("auto") is True - - -def test_should_use_color_auto_no_color(force_tty, monkeypatch): - monkeypatch.setenv("NO_COLOR", "1") - assert ascii_mod.should_use_color("auto") is False - - -def test_should_use_color_modes(force_notty): - assert ascii_mod.should_use_color("never") is False - assert ascii_mod.should_use_color("always") is True - - -def test_terminal_is_wide_enough(force_tty): - assert ascii_mod.terminal_is_wide_enough(60) is True - assert ascii_mod.terminal_is_wide_enough(100) is False - - -# ------------------------- -# Image helpers -# ------------------------- - - -def test__to_bgr_converts_gray(): - gray = np.zeros((5, 7), dtype=np.uint8) - bgr = ascii_mod._to_bgr(gray) - assert bgr.shape == (5, 7, 3) - assert bgr.dtype == np.uint8 - - -def test_composite_over_color_bgra(tmp_png_bgra_logo): - img = cv.imread(str(tmp_png_bgra_logo), cv.IMREAD_UNCHANGED) - assert img.shape[2] == 4 - bgr = ascii_mod.composite_over_color(img, bg_bgr=(10, 20, 30)) - assert bgr.shape[2] == 3 - # Transparent border should become the bg color - assert tuple(bgr[0, 0]) == (10, 20, 30) - # Opaque center should keep blue channel high - assert bgr[5, 10, 0] == 255 - - -def test_crop_to_content_alpha(tmp_png_bgra_logo): - img = cv.imread(str(tmp_png_bgra_logo), cv.IMREAD_UNCHANGED) - cropped = ascii_mod.crop_to_content_alpha(img, alpha_thresh=1, pad=0) - h, w = 
cropped.shape[:2] - assert h == 6 # 2..7 -> 6 rows - assert w == 10 # 5..14 -> 10 cols - assert cropped[..., 3].min() == 255 - - -def test_crop_to_content_bg_white(tmp_path): - # Create white background with a black rectangle - h, w = 12, 20 - bgr = np.full((h, w, 3), 255, dtype=np.uint8) - bgr[3:10, 4:15] = 0 - p = tmp_path / "white_bg.png" - assert cv.imwrite(str(p), bgr) - cropped = ascii_mod.crop_to_content_bg(bgr, bg="white", tol=10, pad=0) - assert cropped.shape[0] == 7 # 3..9 -> 7 rows - assert cropped.shape[1] == 11 # 4..14 -> 11 cols - - -def test_resize_for_terminal_aspect_env(monkeypatch): - img = np.zeros((100, 200, 3), dtype=np.uint8) - monkeypatch.setenv("DLCLIVE_ASCII_ASPECT", "0.25") - resized = ascii_mod.resize_for_terminal(img, width=80, aspect=None) - # new_h = (h/w) * width * aspect = (100/200)*80*0.25 = 10 - assert resized.shape[:2] == (10, 80) - - -# ------------------------- -# Rendering -# ------------------------- - - -def test_map_luminance_to_chars_simple(): - gray = np.array([[0, 127, 255]], dtype=np.uint8) - lines = list(ascii_mod._map_luminance_to_chars(gray, fine=False)) - assert len(lines) == 1 - # First char should be the densest in the simple ramp '@', last should be space - assert lines[0][0] == ascii_mod.ASCII_RAMP_SIMPLE[0] - assert lines[0][-1] == ascii_mod.ASCII_RAMP_SIMPLE[-1] - - -def test_color_ascii_lines_basic(): - # Small 2x3 color blocks - img = np.zeros((2, 3, 3), dtype=np.uint8) - img[:] = (10, 20, 30) - lines = list(ascii_mod._color_ascii_lines(img, fine=False, invert=False)) - assert len(lines) == 2 - # Expect ANSI 24-bit color sequence present - assert "\x1b[38;2;" in lines[0] - # Reset code present - assert lines[0].endswith("\x1b[0m" * 3) is False # individual chars have resets, but line won't end with triple - - -# ------------------------- -# Public API: generate & print -# ------------------------- - - -@pytest.mark.parametrize("use_color", ["never", "always"]) -def test_generate_ascii_lines_gray(tmp_png_gray, 
use_color, force_tty): - lines = list( - ascii_mod.generate_ascii_lines( - str(tmp_png_gray), - width=40, - aspect=0.5, - color=use_color, - fine=False, - invert=False, - crop_content=False, - crop_bg="none", - ) - ) - assert len(lines) > 0 - # Width equals number of characters per line - assert all(len(line) == 40 or ("\x1b[38;2;" in line and len(_strip_ansi(line)) == 40) for line in lines) - - -def _strip_ansi(s: str) -> str: - import re - - return re.sub(r"\x1b\[[0-9;]*m", "", s) - - -def test_generate_ascii_lines_crop_alpha(tmp_png_bgra_logo, force_tty): - lines_no_crop = list( - ascii_mod.generate_ascii_lines(str(tmp_png_bgra_logo), width=40, aspect=0.5, color="never", crop_content=False) - ) - lines_crop = list( - ascii_mod.generate_ascii_lines(str(tmp_png_bgra_logo), width=40, aspect=0.5, color="never", crop_content=True) - ) - # Both are non-empty; height may change either way depending on aspect ratio - assert len(lines_no_crop) > 0 and len(lines_crop) > 0 - # Cropping should affect the generated ASCII content - assert lines_crop != lines_no_crop - - -def test_print_ascii_writes_file(tmp_png_gray, force_tty, tmp_path): - out_path = tmp_path / "out.txt" - ascii_mod.print_ascii( - str(tmp_png_gray), - width=30, - aspect=0.5, - color="never", - output=str(out_path), - ) - assert out_path.exists() - text = out_path.read_text(encoding="utf-8") - # Expect multiple lines of length 30 - lines = [ln for ln in text.splitlines() if ln] - assert len(lines) > 0 - assert all(len(ln) == 30 for ln in lines) - - -def test_build_help_description_tty(tmp_png_bgra_logo, monkeypatch, force_tty): - monkeypatch.setattr(ascii_mod, "ASCII_IMAGE_PATH", Path(tmp_png_bgra_logo)) - desc = ascii_mod.build_help_description(static_banner=None, color="always", min_width=60) - assert "DeepLabCut-Live GUI" in desc - assert "\x1b[36m" in desc # cyan wrapper now present since TTY is mocked correctly - - -def test_build_help_description_notty(tmp_png_bgra_logo, monkeypatch, force_notty): - 
monkeypatch.setattr(ascii_mod, "ASCII_IMAGE_PATH", Path(tmp_png_bgra_logo)) - desc = ascii_mod.build_help_description(static_banner=None, color="always", min_width=60) - # Not a TTY -> no banner, just the plain description - assert desc.strip() == "DeepLabCut-Live GUI — launch the graphical interface." diff --git a/tests/gui/test_ascii_art.py b/tests/gui/test_ascii_art.py new file mode 100644 index 0000000..50d702c --- /dev/null +++ b/tests/gui/test_ascii_art.py @@ -0,0 +1,278 @@ +import os +import sys +from pathlib import Path + +import numpy as np +import pytest + +try: + import cv2 as cv +except Exception as e: + raise ImportError("OpenCV (cv2) is required for these tests. Please install the main package dependencies.") from e + +import dlclivegui.assets.ascii_art as ascii_mod + +# ------------------------- +# Fixtures & small helpers +# ------------------------- + + +@pytest.fixture +def tmp_png_gray(tmp_path: Path): + """Create a simple 16x8 gray gradient PNG without alpha.""" + h, w = 8, 16 + # Horizontal gradient from black to white in BGR + x = np.linspace(0, 255, w, dtype=np.uint8) + img = np.tile(x, (h, 1)) + bgr = cv.cvtColor(img, cv.COLOR_GRAY2BGR) + p = tmp_path / "gray.png" + assert cv.imwrite(str(p), bgr) + return p + + +@pytest.fixture +def tmp_png_bgra_logo(tmp_path: Path): + """Create a small BGRA image with a transparent border and opaque center.""" + h, w = 10, 20 + bgra = np.zeros((h, w, 4), dtype=np.uint8) + # Opaque blue rectangle in center + bgra[2:8, 5:15, 0] = 255 # B + bgra[2:8, 5:15, 3] = 255 # A + p = tmp_path / "logo_bgra.png" + assert cv.imwrite(str(p), bgra) + return p + + +def _force_isatty(monkeypatch, obj, value: bool): + """ + Ensure obj.isatty() returns value. + Try instance patch first; if the object disallows attribute assignment, + patch the method on its class. 
+ """ + try: + # Try patching the instance directly + monkeypatch.setattr(obj, "isatty", lambda: value, raising=False) + except Exception: + # Fallback: patch the class method + cls = type(obj) + monkeypatch.setattr(cls, "isatty", lambda self: value, raising=True) + + +@pytest.fixture +def force_tty(monkeypatch): + """ + Pretend stdout is a TTY and provide a stable terminal size inside the + module-under-test namespace (and the actual sys). + """ + # NO_COLOR must be unset for should_use_color("auto") + monkeypatch.delenv("NO_COLOR", raising=False) + + # Make whatever stdout object exists report TTY=True + _force_isatty(monkeypatch, sys.stdout, True) + _force_isatty(monkeypatch, ascii_mod.sys.stdout, True) + + # Ensure terminal size used by the module is deterministic + monkeypatch.setattr( + ascii_mod.shutil, + "get_terminal_size", + lambda fallback=None: os.terminal_size((80, 24)), + raising=True, + ) + return sys.stdout # not used directly, but handy + + +@pytest.fixture +def force_notty(monkeypatch): + """ + Pretend stdout is not a TTY. 
+ """ + _force_isatty(monkeypatch, sys.stdout, False) + _force_isatty(monkeypatch, ascii_mod.sys.stdout, False) + return sys.stdout + + +# ------------------------- +# Terminal / ANSI behavior +# ------------------------- + + +def test_get_terminal_width_tty(force_tty): + width = ascii_mod.get_terminal_width(default=123) + assert width == 80 + + +def test_get_terminal_width_notty(force_notty): + width = ascii_mod.get_terminal_width(default=123) + assert width == 123 + + +def test_should_use_color_auto_tty(force_tty, monkeypatch): + monkeypatch.delenv("NO_COLOR", raising=False) + assert ascii_mod.should_use_color("auto") is True + + +def test_should_use_color_auto_no_color(force_tty, monkeypatch): + monkeypatch.setenv("NO_COLOR", "1") + assert ascii_mod.should_use_color("auto") is False + + +def test_should_use_color_modes(force_notty): + assert ascii_mod.should_use_color("never") is False + assert ascii_mod.should_use_color("always") is True + + +def test_terminal_is_wide_enough(force_tty): + assert ascii_mod.terminal_is_wide_enough(60) is True + assert ascii_mod.terminal_is_wide_enough(100) is False + + +# ------------------------- +# Image helpers +# ------------------------- + + +def test__to_bgr_converts_gray(): + gray = np.zeros((5, 7), dtype=np.uint8) + bgr = ascii_mod._to_bgr(gray) + assert bgr.shape == (5, 7, 3) + assert bgr.dtype == np.uint8 + + +def test_composite_over_color_bgra(tmp_png_bgra_logo): + img = cv.imread(str(tmp_png_bgra_logo), cv.IMREAD_UNCHANGED) + assert img.shape[2] == 4 + bgr = ascii_mod.composite_over_color(img, bg_bgr=(10, 20, 30)) + assert bgr.shape[2] == 3 + # Transparent border should become the bg color + assert tuple(bgr[0, 0]) == (10, 20, 30) + # Opaque center should keep blue channel high + assert bgr[5, 10, 0] == 255 + + +def test_crop_to_content_alpha(tmp_png_bgra_logo): + img = cv.imread(str(tmp_png_bgra_logo), cv.IMREAD_UNCHANGED) + cropped = ascii_mod.crop_to_content_alpha(img, alpha_thresh=1, pad=0) + h, w = 
cropped.shape[:2] + assert h == 6 # 2..7 -> 6 rows + assert w == 10 # 5..14 -> 10 cols + assert cropped[..., 3].min() == 255 + + +def test_crop_to_content_bg_white(tmp_path): + # Create white background with a black rectangle + h, w = 12, 20 + bgr = np.full((h, w, 3), 255, dtype=np.uint8) + bgr[3:10, 4:15] = 0 + p = tmp_path / "white_bg.png" + assert cv.imwrite(str(p), bgr) + cropped = ascii_mod.crop_to_content_bg(bgr, bg="white", tol=10, pad=0) + assert cropped.shape[0] == 7 # 3..9 -> 7 rows + assert cropped.shape[1] == 11 # 4..14 -> 11 cols + + +def test_resize_for_terminal_aspect_env(monkeypatch): + img = np.zeros((100, 200, 3), dtype=np.uint8) + monkeypatch.setenv("DLCLIVE_ASCII_ASPECT", "0.25") + resized = ascii_mod.resize_for_terminal(img, width=80, aspect=None) + # new_h = (h/w) * width * aspect = (100/200)*80*0.25 = 10 + assert resized.shape[:2] == (10, 80) + + +# ------------------------- +# Rendering +# ------------------------- + + +def test_map_luminance_to_chars_simple(): + gray = np.array([[0, 127, 255]], dtype=np.uint8) + lines = list(ascii_mod._map_luminance_to_chars(gray, fine=False)) + assert len(lines) == 1 + # First char should be the densest in the simple ramp '@', last should be space + assert lines[0][0] == ascii_mod.ASCII_RAMP_SIMPLE[0] + assert lines[0][-1] == ascii_mod.ASCII_RAMP_SIMPLE[-1] + + +def test_color_ascii_lines_basic(): + # Small 2x3 color blocks + img = np.zeros((2, 3, 3), dtype=np.uint8) + img[:] = (10, 20, 30) + lines = list(ascii_mod._color_ascii_lines(img, fine=False, invert=False)) + assert len(lines) == 2 + # Expect ANSI 24-bit color sequence present + assert "\x1b[38;2;" in lines[0] + # Reset code present + assert lines[0].endswith("\x1b[0m" * 3) is False # individual chars have resets, but line won't end with triple + + +# ------------------------- +# Public API: generate & print +# ------------------------- + + +@pytest.mark.parametrize("use_color", ["never", "always"]) +def test_generate_ascii_lines_gray(tmp_png_gray, 
use_color, force_tty): + lines = list( + ascii_mod.generate_ascii_lines( + str(tmp_png_gray), + width=40, + aspect=0.5, + color=use_color, + fine=False, + invert=False, + crop_content=False, + crop_bg="none", + ) + ) + assert len(lines) > 0 + # Width equals number of characters per line + assert all(len(line) == 40 or ("\x1b[38;2;" in line and len(_strip_ansi(line)) == 40) for line in lines) + + +def _strip_ansi(s: str) -> str: + import re + + return re.sub(r"\x1b\[[0-9;]*m", "", s) + + +def test_generate_ascii_lines_crop_alpha(tmp_png_bgra_logo, force_tty): + lines_no_crop = list( + ascii_mod.generate_ascii_lines(str(tmp_png_bgra_logo), width=40, aspect=0.5, color="never", crop_content=False) + ) + lines_crop = list( + ascii_mod.generate_ascii_lines(str(tmp_png_bgra_logo), width=40, aspect=0.5, color="never", crop_content=True) + ) + # Both are non-empty; height may change either way depending on aspect ratio + assert len(lines_no_crop) > 0 and len(lines_crop) > 0 + # Cropping should affect the generated ASCII content + assert lines_crop != lines_no_crop + + +def test_print_ascii_writes_file(tmp_png_gray, force_tty, tmp_path): + out_path = tmp_path / "out.txt" + ascii_mod.print_ascii( + str(tmp_png_gray), + width=30, + aspect=0.5, + color="never", + output=str(out_path), + ) + assert out_path.exists() + text = out_path.read_text(encoding="utf-8") + # Expect multiple lines of length 30 + lines = [ln for ln in text.splitlines() if ln] + assert len(lines) > 0 + assert all(len(ln) == 30 for ln in lines) + + +def test_build_help_description_tty(tmp_png_bgra_logo, monkeypatch, force_tty): + monkeypatch.setattr(ascii_mod, "ASCII_IMAGE_PATH", Path(tmp_png_bgra_logo)) + desc = ascii_mod.build_help_description(static_banner=None, color="always", min_width=60) + assert "DeepLabCut-Live GUI" in desc + assert "\x1b[36m" in desc # cyan wrapper now present since TTY is mocked correctly + + +def test_build_help_description_notty(tmp_png_bgra_logo, monkeypatch, force_notty): + 
monkeypatch.setattr(ascii_mod, "ASCII_IMAGE_PATH", Path(tmp_png_bgra_logo)) + desc = ascii_mod.build_help_description(static_banner=None, color="always", min_width=60) + # Not a TTY -> no banner, just the plain description + assert desc.strip() == "DeepLabCut-Live GUI — launch the graphical interface." From 09a836442768430117531309fd57af90c1d5bea1 Mon Sep 17 00:00:00 2001 From: C-Achard Date: Thu, 19 Feb 2026 17:06:48 +0100 Subject: [PATCH 24/60] Update python-package.yml --- .github/workflows/python-package.yml | 98 +++++++++++++--------------- 1 file changed, 45 insertions(+), 53 deletions(-) diff --git a/.github/workflows/python-package.yml b/.github/workflows/python-package.yml index 3640f00..fa4a699 100644 --- a/.github/workflows/python-package.yml +++ b/.github/workflows/python-package.yml @@ -2,47 +2,26 @@ name: Build, validate & Release on: push: - tags: - - 'v*.*.*' + tags: [ 'v*.*.*' ] pull_request: - branches: - - main - - public - types: - - labeled - - opened - - edited - - synchronize - - reopened + branches: [ main, public ] + types: [ labeled, opened, edited, synchronize, reopened ] jobs: - build_check: - name: Build & validate package + test: + name: Test / smoke (matrix) runs-on: ubuntu-latest strategy: fail-fast: false matrix: - python-version: [ "3.10", "3.11", "3.12" ] # adjust to what you support + python-version: [ "3.10", "3.11", "3.12" ] steps: - - name: Checkout - uses: actions/checkout@v6 - - - name: Setup Python ${{ matrix.python-version }} - uses: actions/setup-python@v6 + - uses: actions/checkout@v6 + - uses: actions/setup-python@v6 with: python-version: ${{ matrix.python-version }} - # If we use UV later, the lock file should be included here for caching and validation - - name: Cache pip - uses: actions/cache@v4 - with: - path: ~/.cache/pip - key: ${{ runner.os }}-pip-${{ matrix.python-version }}-${{ hashFiles('pyproject.toml', 'setup.cfg', 'setup.py', 'requirements.txt') }} - restore-keys: | - ${{ runner.os }}-pip-${{ 
matrix.python-version }}- - ${{ runner.os }}-pip- - - - name: Install build tools + - name: Install tools run: | python -m pip install --upgrade pip python -m pip install build twine wheel "packaging>=24.2" @@ -61,50 +40,63 @@ jobs: - name: Install from wheel & smoke test run: | - # Install from the built wheel (not from the source tree) python -m pip install dist/*.whl - python - <<'PY' import importlib - pkg_name = "dlclivegui" # change if your top-level import differs + pkg_name = "dlclivegui" m = importlib.import_module(pkg_name) print("Imported:", m.__name__, "version:", getattr(m, "__version__", "n/a")) PY if ! command -v dlclivegui >/dev/null 2>&1; then - echo "CLI entry point 'dlclivegui' not found in PATH; skipping CLI smoke test." + echo "CLI entry point 'dlclivegui' not found in PATH; skipping CLI smoke test." else + if command -v dlclivegui >/dev/null 2>&1; then echo "Running 'dlclivegui --help' smoke test..." if ! dlclivegui --help >/dev/null 2>&1; then echo "::error::'dlclivegui --help' failed; this indicates a problem with the installed CLI package." 
exit 1 fi - fi - publish: - name: Publish to PyPI + build: + name: Build release artifacts (single) runs-on: ubuntu-latest - needs: build_check - if: ${{ startsWith(github.ref, 'refs/tags/v') }} # only on tag pushes like v1.2.3 + needs: test + if: ${{ startsWith(github.ref, 'refs/tags/v') }} steps: - - name: Checkout - uses: actions/checkout@v6 - - - name: Setup Python - uses: actions/setup-python@v6 + - uses: actions/checkout@v6 + - uses: actions/setup-python@v6 with: - python-version: "3.x" + python-version: "3.12" - - name: Install build tools + - name: Build distributions (sdist + wheel) run: | python -m pip install --upgrade pip - python -m pip install build twine + python -m pip install build twine wheel "packaging>=24.2" + python -m build + python -m twine check dist/* - - name: Build distributions (sdist + wheel) - run: python -m build + - name: Upload dist artifacts + uses: actions/upload-artifact@v4 + with: + name: dist + path: dist/* + if-no-files-found: error + + publish: + name: Publish to PyPI (OIDC) + runs-on: ubuntu-latest + needs: build + if: ${{ startsWith(github.ref, 'refs/tags/v') }} + environment: pypi + permissions: + id-token: write + steps: + - name: Download dist artifacts + uses: actions/download-artifact@v4 + with: + name: dist + path: dist - name: Publish to PyPI - env: - TWINE_USERNAME: __token__ - TWINE_PASSWORD: ${{ secrets.TWINE_API_KEY }} - run: python -m twine upload --verbose dist/* + uses: pypa/gh-action-pypi-publish@release/v1 From a42d4961b43024e77f3d3ff4dbdf915617648b42 Mon Sep 17 00:00:00 2001 From: Cyril Achard Date: Thu, 19 Feb 2026 17:23:33 +0100 Subject: [PATCH 25/60] Apply suggestions from code review Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- dlclivegui/cameras/backends/gentl_backend.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/dlclivegui/cameras/backends/gentl_backend.py b/dlclivegui/cameras/backends/gentl_backend.py index 405a7ab..ba08b6a 100644 --- 
a/dlclivegui/cameras/backends/gentl_backend.py +++ b/dlclivegui/cameras/backends/gentl_backend.py @@ -377,8 +377,7 @@ def open(self) -> None: "No GenTL producer (.cti) could be loaded.\n\n" f"Resolved CTIs: {cti_files}\n" f"Failures: {failed}\n" - "Fix: remove/repair incompatible producers or" - " set properties.gentl.cti_file to a known working producer." + "Fix: remove/repair incompatible producers or set properties.gentl.cti_file to a known working producer." ) # Update device list after loading producers From caef5a893788e2cb74eaeca1342c2b19483965b1 Mon Sep 17 00:00:00 2001 From: C-Achard Date: Fri, 20 Feb 2026 10:00:33 +0100 Subject: [PATCH 26/60] Refine pre-commit config: stages and ruff rules Add default_stages and explicitly assign stages to hooks, separating local mutating hooks from CI-only checks. Configure check-* hooks (check-added-large-files, check-yaml, check-toml, check-merge-conflict) to run in pre-commit and manual where appropriate; mark end-of-file-fixer, trailing-whitespace and pyproject-fmt as local-only. Add validate-pyproject to pre-commit/manual. Rework ruff hooks into distinct local autofix entries (ruff-check with --fix/--unsafe-fixes and ruff-format write) and CI check-only entries (named ruff-check (ci) and ruff-format (ci) with non-writing args). Remove the prior name-tests-test entry and add clarifying comments and names for maintainability. 
--- .pre-commit-config.yaml | 45 +++++++++++++++++++++++++++++++++++------ 1 file changed, 39 insertions(+), 6 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 3d846cf..b69e80a 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,28 +1,61 @@ +default_stages: [pre-commit] + repos: - repo: https://github.com/pre-commit/pre-commit-hooks rev: v6.0.0 hooks: + # These are safe to run in both local & CI (they don't require "fix vs check" split) - id: check-added-large-files + stages: [pre-commit, manual] - id: check-yaml + stages: [pre-commit, manual] - id: check-toml + stages: [pre-commit, manual] + - id: check-merge-conflict + stages: [pre-commit, manual] + + # These modify files. Run locally only (pre-commit stage). - id: end-of-file-fixer - - id: name-tests-test - args: [--pytest-test-first] + stages: [pre-commit] - id: trailing-whitespace - - id: check-merge-conflict + stages: [pre-commit] + - repo: https://github.com/tox-dev/pyproject-fmt rev: v2.15.2 hooks: - id: pyproject-fmt + stages: [pre-commit] # modifies -> local only + - repo: https://github.com/abravalheri/validate-pyproject rev: v0.25 hooks: - id: validate-pyproject + stages: [pre-commit, manual] + - repo: https://github.com/astral-sh/ruff-pre-commit rev: v0.15.0 hooks: - # Run the formatter. + # -------------------------- + # LOCAL AUTOFIX (developers) + # -------------------------- + - id: ruff-check + name: ruff-check (fix) + args: [--fix, --unsafe-fixes] + stages: [pre-commit] + - id: ruff-format - # Run the linter. 
+ name: ruff-format (write) + stages: [pre-commit] + + # -------------------------- + # CI CHECK-ONLY (no writes) + # -------------------------- - id: ruff-check - args: [--fix,--unsafe-fixes] + name: ruff-check (ci) + args: [--output-format=github] + stages: [manual] + + - id: ruff-format + name: ruff-format (ci) + args: [--check, --diff] + stages: [manual] From 892da6a6a268d7caf5f7a6f1439db1f28bb8c529 Mon Sep 17 00:00:00 2001 From: C-Achard Date: Fri, 20 Feb 2026 10:01:24 +0100 Subject: [PATCH 27/60] Run pre-commit on PR changed files only Switch workflow to run pre-commit only for pull requests and only on files changed in the PR. Adds a detect_changes job to fetch the base ref and output changed file list, then a conditional precommit job that runs only when there are changed files. Updates checkout and setup actions (actions/checkout@v4, setup-python@v5), bumps Python to 3.12, installs pre-commit, and runs pre-commit in a CI check-only mode on the changed files to speed up CI and avoid running on the whole repo. 
---
 .github/workflows/format.yml | 58 +++++++++++++++++++++++++++++-------
 1 file changed, 48 insertions(+), 10 deletions(-)

diff --git a/.github/workflows/format.yml b/.github/workflows/format.yml
index 9858a21..95450e5 100644
--- a/.github/workflows/format.yml
+++ b/.github/workflows/format.yml
@@ -1,23 +1,61 @@
-name: pre-commit-format
+name: pre-commit (PR only on changed files)

 on:
-  push:
-    branches: [main]
   pull_request:
-    branches: [main]
+    types: [opened, synchronize, reopened]

 jobs:
-  pre_commit_checks:
+  detect_changes:
     runs-on: ubuntu-latest
+    outputs:
+      changed: ${{ steps.changed_files.outputs.changed }}
+
     steps:
-      - uses: actions/checkout@v6
+      - name: Checkout full history
+        uses: actions/checkout@v4
+        with:
+          fetch-depth: 0
+
+      - name: Detect changed files
+        id: changed_files
+        run: |
+          git fetch origin ${{ github.base_ref }}
+          CHANGED_FILES=$(git diff --name-only origin/${{ github.base_ref }}...HEAD)
+
+          {
+            echo "changed<<EOF"
+            echo "$CHANGED_FILES"
+            echo "EOF"
+          } >> "$GITHUB_OUTPUT"
+
+      - name: Show changed files
+        run: |
+          echo "Changed files:"
+          echo "${{ steps.changed_files.outputs.changed }}"
+
+  precommit:
+    needs: detect_changes
+    runs-on: ubuntu-latest
+    if: ${{ needs.detect_changes.outputs.changed != '' }}
+
+    steps:
+      - name: Checkout PR branch
+        uses: actions/checkout@v4
         with:
           fetch-depth: 0
           ref: ${{ github.head_ref }}

-      - uses: actions/setup-python@v6
+      - name: Set up Python
+        uses: actions/setup-python@v5
         with:
-          python-version: '3.10'
+          python-version: "3.12"
+
+      - name: Install pre-commit
+        run: pip install pre-commit

-      - run: pip install pre-commit
-      - run: pre-commit run --all-files
+      - name: Run pre-commit (CI check-only stage) on changed files
+        env:
+          CHANGED_FILES: ${{ needs.detect_changes.outputs.changed }}
+        run: |
+          mapfile -t files <<< "$CHANGED_FILES"
+          pre-commit run --hook-stage manual --files "${files[@]}" --show-diff-on-failure

From 5dd22baa3fc609f4df548e6a83cf2c1577abbf6c Mon Sep 17 00:00:00 2001
From: Cyril Achard
Date: Fri, 20 Feb 2026 18:12:00 +0100
Subject: 
[PATCH 28/60] Merge pull request #53 from DeepLabCut/cy/lint-config Add standardized linting workflow and config From 3dfb1deedba15e147a37fa01dd672551598af53f Mon Sep 17 00:00:00 2001 From: C-Achard Date: Sun, 22 Feb 2026 10:00:21 +0100 Subject: [PATCH 29/60] Include 'master' branch in CI triggers Update .github/workflows/ci.yml to also trigger CI on the 'master' branch for both push and pull_request events. This ensures CI runs for repositories or workflows that still use 'master' alongside 'main'. --- .github/workflows/testing-ci.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/testing-ci.yml b/.github/workflows/testing-ci.yml index a1b29ed..27122c3 100644 --- a/.github/workflows/testing-ci.yml +++ b/.github/workflows/testing-ci.yml @@ -4,9 +4,11 @@ on: pull_request: branches: - main + - master push: branches: - main + - master jobs: unit: From e066d9bf739e0c7492c5a3915db7eb3a51a8bdc4 Mon Sep 17 00:00:00 2001 From: C-Achard Date: Sun, 22 Feb 2026 10:16:28 +0100 Subject: [PATCH 30/60] Bump actions versions and refine CI triggers Upgrade GitHub Action usages in workflows: actions/checkout bumped to v6 and actions/setup-python to v6 in format.yml. Simplify testing-ci.yml triggers by replacing branch filters with pull_request types (opened, synchronize, reopened) and remove the explicit push/branches section. Also remove the Codecov token entry (relying on tokenless or other auth), and keep codecov action configuration intact. These changes modernize action versions and streamline CI trigger/auth configuration. 
--- .github/workflows/format.yml | 6 +++--- .github/workflows/testing-ci.yml | 9 +-------- 2 files changed, 4 insertions(+), 11 deletions(-) diff --git a/.github/workflows/format.yml b/.github/workflows/format.yml index 95450e5..244986e 100644 --- a/.github/workflows/format.yml +++ b/.github/workflows/format.yml @@ -12,7 +12,7 @@ jobs: steps: - name: Checkout full history - uses: actions/checkout@v4 + uses: actions/checkout@v6 with: fetch-depth: 0 @@ -40,13 +40,13 @@ jobs: steps: - name: Checkout PR branch - uses: actions/checkout@v4 + uses: actions/checkout@v6 with: fetch-depth: 0 ref: ${{ github.head_ref }} - name: Set up Python - uses: actions/setup-python@v5 + uses: actions/setup-python@v6 with: python-version: "3.12" diff --git a/.github/workflows/testing-ci.yml b/.github/workflows/testing-ci.yml index 27122c3..1c2f91e 100644 --- a/.github/workflows/testing-ci.yml +++ b/.github/workflows/testing-ci.yml @@ -2,13 +2,7 @@ name: CI on: pull_request: - branches: - - main - - master - push: - branches: - - main - - master + types: [opened, synchronize, reopened] jobs: unit: @@ -63,5 +57,4 @@ jobs: uses: codecov/codecov-action@v5 with: files: ./coverage.xml - token: ${{ secrets.CODECOV_TOKEN }} fail_ci_if_error: true From fa49240e2729d3922787a6669b20c5fa1a282fda Mon Sep 17 00:00:00 2001 From: C-Achard Date: Sun, 22 Feb 2026 10:23:57 +0100 Subject: [PATCH 31/60] Trigger CI From 03dcb8e7403d6b302ef92c568a8ec3d8a9b5cb74 Mon Sep 17 00:00:00 2001 From: C-Achard Date: Sun, 22 Feb 2026 11:37:17 +0100 Subject: [PATCH 32/60] Trigger CI 2: Electric Boogaloo From 2784bd5395deafeebf5c449fcd1949f76a25a7d0 Mon Sep 17 00:00:00 2001 From: C-Achard Date: Fri, 20 Feb 2026 11:08:51 +0100 Subject: [PATCH 33/60] Remove module-level debug logging overrides Remove explicit logger.setLevel / basicConfig calls that force debug/info levels in several modules so the application-wide logging configuration can control log levels (preparing for release). 
Affected files: dlclivegui/cameras/backends/opencv_backend.py, dlclivegui/gui/camera_config/camera_config_dialog.py, dlclivegui/gui/camera_config/loaders.py, dlclivegui/gui/main_window.py, dlclivegui/processors/dlc_processor_socket.py. This avoids forcing verbose output and prevents accidental global logging reconfiguration. --- dlclivegui/cameras/backends/opencv_backend.py | 1 - dlclivegui/gui/camera_config/camera_config_dialog.py | 1 - dlclivegui/gui/camera_config/loaders.py | 1 - dlclivegui/gui/main_window.py | 2 -- dlclivegui/processors/dlc_processor_socket.py | 1 - 5 files changed, 6 deletions(-) diff --git a/dlclivegui/cameras/backends/opencv_backend.py b/dlclivegui/cameras/backends/opencv_backend.py index 5a742dc..d32736d 100644 --- a/dlclivegui/cameras/backends/opencv_backend.py +++ b/dlclivegui/cameras/backends/opencv_backend.py @@ -25,7 +25,6 @@ ) logger = logging.getLogger(__name__) -logger.setLevel(logging.DEBUG) # FIXME @C-Achard remove before release if TYPE_CHECKING: from dlclivegui.config import CameraSettings diff --git a/dlclivegui/gui/camera_config/camera_config_dialog.py b/dlclivegui/gui/camera_config/camera_config_dialog.py index aba582b..ac9ac88 100644 --- a/dlclivegui/gui/camera_config/camera_config_dialog.py +++ b/dlclivegui/gui/camera_config/camera_config_dialog.py @@ -24,7 +24,6 @@ from .ui_blocks import setup_camera_config_dialog_ui LOGGER = logging.getLogger(__name__) -LOGGER.setLevel(logging.DEBUG) # TODO @C-Achard remove for release class CameraConfigDialog(QDialog): diff --git a/dlclivegui/gui/camera_config/loaders.py b/dlclivegui/gui/camera_config/loaders.py index 6305b8d..5cc7ed8 100644 --- a/dlclivegui/gui/camera_config/loaders.py +++ b/dlclivegui/gui/camera_config/loaders.py @@ -11,7 +11,6 @@ from ...cameras.factory import CameraBackend, CameraFactory LOGGER = logging.getLogger(__name__) -LOGGER.setLevel(logging.DEBUG) # ------------------------------- diff --git a/dlclivegui/gui/main_window.py b/dlclivegui/gui/main_window.py 
index 9f738a6..6aab34f 100644 --- a/dlclivegui/gui/main_window.py +++ b/dlclivegui/gui/main_window.py @@ -82,8 +82,6 @@ from .recording_manager import RecordingManager from .theme import LOGO, LOGO_ALPHA, AppStyle, apply_theme -# logging.basicConfig(level=logging.INFO) -logging.basicConfig(level=logging.DEBUG) # FIXME @C-Achard set back to INFO for release logger = logging.getLogger("DLCLiveGUI") diff --git a/dlclivegui/processors/dlc_processor_socket.py b/dlclivegui/processors/dlc_processor_socket.py index 0974002..d999690 100644 --- a/dlclivegui/processors/dlc_processor_socket.py +++ b/dlclivegui/processors/dlc_processor_socket.py @@ -17,7 +17,6 @@ from dlclive import Processor # type: ignore logger = logging.getLogger("dlc_processor_socket") -logger.setLevel(logging.INFO) # Avoid duplicate handlers if module is imported multiple times if not any(isinstance(h, logging.StreamHandler) for h in logger.handlers): From 9fd192ec5ea9e204056ffd32ea3b1fa5d0b50e0e Mon Sep 17 00:00:00 2001 From: C-Achard Date: Fri, 20 Feb 2026 11:14:17 +0100 Subject: [PATCH 34/60] Reduce log verbosity and remove auto-load message Lowered several log statements from INFO to DEBUG in the OpenCV camera backend and camera config preview to reduce noisy logs during normal operation. Removed the deprecated auto-loaded myconfig.json statusBar message in the main window. Also clarified a closeEvent comment and updated a test comment to mark a flakiness note as addressed. 
--- dlclivegui/cameras/backends/opencv_backend.py | 6 +++--- dlclivegui/gui/camera_config/camera_config_dialog.py | 2 +- dlclivegui/gui/main_window.py | 7 +------ tests/services/test_dlc_processor.py | 3 ++- 4 files changed, 7 insertions(+), 11 deletions(-) diff --git a/dlclivegui/cameras/backends/opencv_backend.py b/dlclivegui/cameras/backends/opencv_backend.py index d32736d..74fdede 100644 --- a/dlclivegui/cameras/backends/opencv_backend.py +++ b/dlclivegui/cameras/backends/opencv_backend.py @@ -168,7 +168,7 @@ def open(self) -> None: ns["device_pid"] = int(chosen.pid) if chosen.name: ns["device_name"] = chosen.name - logger.info("Persisted OpenCV device_id=%s", chosen.stable_id) + logger.debug("Persisted OpenCV device_id=%s", chosen.stable_id) self._capture, spec = open_with_fallbacks(index, backend_flag) @@ -398,7 +398,7 @@ def _configure_capture(self) -> None: self._actual_fps = float(self._capture.get(cv2.CAP_PROP_FPS) or 0.0) # For clarity in logs - logger.info("Resolution requested=Auto, actual=%sx%s", self._actual_width, self._actual_height) + logger.debug("Resolution requested=Auto, actual=%sx%s", self._actual_width, self._actual_height) elif not self._fast_start: # Verified, robust path (tries candidates + verifies) @@ -431,7 +431,7 @@ def _configure_capture(self) -> None: if (self._actual_width or 0) > 0 and (self._actual_height or 0) > 0: actual_res = (int(self._actual_width), int(self._actual_height)) - logger.info( + logger.debug( "Resolution requested=%s, actual=%s", f"{req_w}x{req_h}" if (req_w > 0 and req_h > 0) else "Auto", f"{actual_res[0]}x{actual_res[1]}" if actual_res else "unknown", diff --git a/dlclivegui/gui/camera_config/camera_config_dialog.py b/dlclivegui/gui/camera_config/camera_config_dialog.py index ac9ac88..2a6963e 100644 --- a/dlclivegui/gui/camera_config/camera_config_dialog.py +++ b/dlclivegui/gui/camera_config/camera_config_dialog.py @@ -1393,7 +1393,7 @@ def _execute_pending_restart(self, *, reason: str) -> None: if not cam: 
return - LOGGER.info("[Preview] executing restart reason=%s", reason) + LOGGER.debug("[Preview] executing restart reason=%s", reason) self._begin_preview_load(cam, reason="restart") def _cancel_loading(self) -> None: diff --git a/dlclivegui/gui/main_window.py b/dlclivegui/gui/main_window.py index 6aab34f..5522f9e 100644 --- a/dlclivegui/gui/main_window.py +++ b/dlclivegui/gui/main_window.py @@ -194,11 +194,6 @@ def __init__(self, config: ApplicationSettings | None = None): self._display_timer.timeout.connect(self._update_display_from_pending) self._display_timer.start() - # Show status message if myconfig.json was loaded - # FIXME @C-Achard deprecated behavior, remove later - if self._config_path and self._config_path.name == "myconfig.json": - self.statusBar().showMessage(f"Auto-loaded configuration from {self._config_path}", 5000) - # Validate cameras from loaded config (deferred to allow window to show first) # NOTE IMPORTANT (tests/CI): This is scheduled via a QTimer and may fire during pytest-qt teardown. 
QTimer.singleShot(100, self._validate_configured_cameras) @@ -208,7 +203,7 @@ def __init__(self, config: ApplicationSettings | None = None): # Mitigations for tests/CI: # - Disable this timer by monkeypatching _validate_configured_cameras in GUI tests # - OR monkeypatch/override _show_warning/_show_error to no-op in GUI tests (easiest) - # - OR use a cancellable QTimer attribute and stop() it in closeEven + # - OR use a cancellable QTimer attribute and stop() it in closeEvent def resizeEvent(self, event): super().resizeEvent(event) diff --git a/tests/services/test_dlc_processor.py b/tests/services/test_dlc_processor.py index 2ae5e3a..d90781b 100644 --- a/tests/services/test_dlc_processor.py +++ b/tests/services/test_dlc_processor.py @@ -63,9 +63,10 @@ def test_worker_processes_frames(qtbot, monkeypatch_dlclive, settings_model): proc.enqueue_frame(frame, timestamp=2.0 + i) qtbot.wait(5) # ms - # FIXME @C-Achard this still fails randomly + # NOTE @C-Achard this still fails randomly # the timeout has to be surprisingly large here # not sure if it's qtbot or threading scheduling delays + # Should be fixed now. qtbot.waitUntil(lambda: proc.get_stats().frames_processed >= 3, timeout=3000) finally: From 0daebba72ac78e9f336b42ce973462765d1f7f1a Mon Sep 17 00:00:00 2001 From: C-Achard Date: Fri, 20 Feb 2026 11:35:08 +0100 Subject: [PATCH 35/60] Infer model backend from selected model path Replace the hardcoded 'pytorch' model_type with a value inferred from the selected model file. main_window.py now reads the model_path into a local variable and uses DLCLiveProcessor.get_model_backend(model_path) when building DLCProcessorSettings. dlc_processor.py imports Engine from dlclive and adds a static get_model_backend method that returns Engine.from_model_path(model_path).value. Preserves existing config fields and error handling. 
--- dlclivegui/gui/main_window.py | 5 +++-- dlclivegui/services/dlc_processor.py | 9 ++++++++- 2 files changed, 11 insertions(+), 3 deletions(-) diff --git a/dlclivegui/gui/main_window.py b/dlclivegui/gui/main_window.py index 5522f9e..f777023 100644 --- a/dlclivegui/gui/main_window.py +++ b/dlclivegui/gui/main_window.py @@ -875,14 +875,15 @@ def _parse_json(self, value: str) -> dict: return json.loads(text) def _dlc_settings_from_ui(self) -> DLCProcessorSettings: + model_path = self.model_path_edit.text().strip() return DLCProcessorSettings( - model_path=self.model_path_edit.text().strip(), + model_path=model_path, model_directory=self._config.dlc.model_directory, # Preserve from config device=self._config.dlc.device, # Preserve from config dynamic=self._config.dlc.dynamic, # Preserve from config resize=self._config.dlc.resize, # Preserve from config precision=self._config.dlc.precision, # Preserve from config - model_type="pytorch", # FIXME @C-Achard hardcoded for now, we should allow tf models too + model_type=DLCLiveProcessor.get_model_backend(model_path), # additional_options=self._parse_json(self.additional_options_edit.toPlainText()), ) diff --git a/dlclivegui/services/dlc_processor.py b/dlclivegui/services/dlc_processor.py index 052c952..7d35c2e 100644 --- a/dlclivegui/services/dlc_processor.py +++ b/dlclivegui/services/dlc_processor.py @@ -26,7 +26,10 @@ ENABLE_PROFILING = True try: # pragma: no cover - optional dependency - from dlclive import DLCLive # type: ignore + from dlclive import ( + DLCLive, # type: ignore + Engine, # type: ignore + ) except Exception as e: # pragma: no cover - handled gracefully logger.error(f"dlclive package could not be imported: {e}") DLCLive = None # type: ignore[assignment] @@ -96,6 +99,10 @@ def __init__(self) -> None: self._gpu_inference_times: deque[float] = deque(maxlen=60) self._processor_overhead_times: deque[float] = deque(maxlen=60) + @staticmethod + def get_model_backend(model_path: str) -> str: + return 
Engine.from_model_path(model_path).value + def configure(self, settings: DLCProcessorSettings, processor: Any | None = None) -> None: self._settings = settings self._processor = processor From cfd67c729fc4603046ac165e31884dd998385273 Mon Sep 17 00:00:00 2001 From: Cyril Achard Date: Fri, 20 Feb 2026 16:13:47 +0100 Subject: [PATCH 36/60] Add temporary Engine enum and use in dlc_processor Introduce a temporary dlclivegui.temp package with an Engine enum to detect model engines. engine.py provides Engine.TENSORFLOW and Engine.PYTORCH plus helpers from_model_type and from_model_path (checks extensions/pose_cfg.yaml and existence). Add __init__.py and update dlclivegui/services/dlc_processor.py to import Engine from dlclivegui.temp (with a TODO to switch to the upstream dlclive package when available). --- dlclivegui/services/dlc_processor.py | 2 +- dlclivegui/temp/__init__.py | 0 dlclivegui/temp/engine.py | 33 ++++++++++++++++++++++++++++ 3 files changed, 34 insertions(+), 1 deletion(-) create mode 100644 dlclivegui/temp/__init__.py create mode 100644 dlclivegui/temp/engine.py diff --git a/dlclivegui/services/dlc_processor.py b/dlclivegui/services/dlc_processor.py index 7d35c2e..f0db46e 100644 --- a/dlclivegui/services/dlc_processor.py +++ b/dlclivegui/services/dlc_processor.py @@ -28,8 +28,8 @@ try: # pragma: no cover - optional dependency from dlclive import ( DLCLive, # type: ignore - Engine, # type: ignore ) + from dlclivegui.temp import Engine # type: ignore # TODO use main package one when released except Exception as e: # pragma: no cover - handled gracefully logger.error(f"dlclive package could not be imported: {e}") DLCLive = None # type: ignore[assignment] diff --git a/dlclivegui/temp/__init__.py b/dlclivegui/temp/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/dlclivegui/temp/engine.py b/dlclivegui/temp/engine.py new file mode 100644 index 0000000..eed0af1 --- /dev/null +++ b/dlclivegui/temp/engine.py @@ -0,0 +1,33 @@ +from enum import 
Enum +from pathlib import Path + +class Engine(Enum): + TENSORFLOW = "tensorflow" + PYTORCH = "pytorch" + + @classmethod + def from_model_type(cls, model_type: str) -> "Engine": + if model_type.lower() == "pytorch": + return cls.PYTORCH + elif model_type.lower() in ("tensorflow", "base", "tensorrt", "lite"): + return cls.TENSORFLOW + else: + raise ValueError(f"Unknown model type: {model_type}") + + @classmethod + def from_model_path(cls, model_path: str | Path) -> "Engine": + path = Path(model_path) + + if not path.exists(): + raise FileNotFoundError(f"Model path does not exist: {model_path}") + + if path.is_dir(): + has_cfg = (path / "pose_cfg.yaml").is_file() + has_pb = any(p.suffix == ".pb" for p in path.glob("*.pb")) + if has_cfg and has_pb: + return cls.TENSORFLOW + elif path.is_file(): + if path.suffix == ".pt": + return cls.PYTORCH + + raise ValueError(f"Could not determine engine from model path: {model_path}") \ No newline at end of file From b6fd33690add786daf5d1383478bf088f8c39d90 Mon Sep 17 00:00:00 2001 From: C-Achard Date: Fri, 20 Feb 2026 18:08:16 +0100 Subject: [PATCH 37/60] Use Engine enum and improve model detection Introduce a temporary Engine enum and expose it via dlclivegui.temp to standardize model engine types. Update DLCProcessorSettings to use the Engine enum for model_type. Improve model backend detection in the main window by calling DLCLiveProcessor.get_model_backend and raising a clear error if detection fails; tighten file dialog filters to focus on PyTorch models. Enhance engine detection to recognize .pth files. Minor import cleanup in dlc_processor and a clarifying test comment about timeouts to reduce flakiness in CI. 
--- dlclivegui/config.py | 4 +++- dlclivegui/gui/main_window.py | 12 ++++++++++-- dlclivegui/services/dlc_processor.py | 2 +- dlclivegui/temp/__init__.py | 3 +++ dlclivegui/temp/engine.py | 5 +++-- tests/services/test_dlc_processor.py | 8 ++++---- 6 files changed, 24 insertions(+), 10 deletions(-) diff --git a/dlclivegui/config.py b/dlclivegui/config.py index 645371c..5a32e49 100644 --- a/dlclivegui/config.py +++ b/dlclivegui/config.py @@ -7,6 +7,8 @@ from pydantic import BaseModel, Field, field_validator, model_validator +from dlclivegui.temp import Engine + Rotation = Literal[0, 90, 180, 270] TileLayout = Literal["auto", "2x2", "1x4", "4x1"] Precision = Literal["FP32", "FP16"] @@ -239,7 +241,7 @@ class DLCProcessorSettings(BaseModel): resize: float = Field(default=1.0, gt=0) precision: Precision = "FP32" additional_options: dict[str, Any] = Field(default_factory=dict) - model_type: Literal["pytorch"] = "pytorch" + model_type: Engine = "pytorch" single_animal: bool = True @field_validator("dynamic", mode="before") diff --git a/dlclivegui/gui/main_window.py b/dlclivegui/gui/main_window.py index f777023..10b5b8f 100644 --- a/dlclivegui/gui/main_window.py +++ b/dlclivegui/gui/main_window.py @@ -876,6 +876,14 @@ def _parse_json(self, value: str) -> dict: def _dlc_settings_from_ui(self) -> DLCProcessorSettings: model_path = self.model_path_edit.text().strip() + try: + DLCLiveProcessor.get_model_backend(model_path) + except Exception as e: + raise RuntimeError( + "Could not determine model backend from path." + "Please ensure the model file is valid and has an appropriate extension " + "(.pt, .pth for PyTorch or model directory for TensorFlow)." 
+ ) from e return DLCProcessorSettings( model_path=model_path, model_directory=self._config.dlc.model_directory, # Preserve from config @@ -969,9 +977,9 @@ def _action_browse_model(self) -> None: dlg.setFileMode(QFileDialog.FileMode.ExistingFile) dlg.setNameFilters( [ - "Model files (*.pt *.pth *.pb)", + "Model files (*.pt *.pth)", "PyTorch models (*.pt *.pth)", - "TensorFlow models (*.pb)", + # "TensorFlow models (*.pb)", "All files (*.*)", ] ) diff --git a/dlclivegui/services/dlc_processor.py b/dlclivegui/services/dlc_processor.py index f0db46e..67c8c6e 100644 --- a/dlclivegui/services/dlc_processor.py +++ b/dlclivegui/services/dlc_processor.py @@ -19,6 +19,7 @@ # from dlclivegui.config import DLCProcessorSettings from dlclivegui.processors.processor_utils import instantiate_from_scan +from dlclivegui.temp import Engine # type: ignore # TODO use main package enum when released logger = logging.getLogger(__name__) @@ -29,7 +30,6 @@ from dlclive import ( DLCLive, # type: ignore ) - from dlclivegui.temp import Engine # type: ignore # TODO use main package one when released except Exception as e: # pragma: no cover - handled gracefully logger.error(f"dlclive package could not be imported: {e}") DLCLive = None # type: ignore[assignment] diff --git a/dlclivegui/temp/__init__.py b/dlclivegui/temp/__init__.py index e69de29..320867e 100644 --- a/dlclivegui/temp/__init__.py +++ b/dlclivegui/temp/__init__.py @@ -0,0 +1,3 @@ +from engine import Engine # type: ignore + +__all__ = ["Engine"] diff --git a/dlclivegui/temp/engine.py b/dlclivegui/temp/engine.py index eed0af1..47dfefe 100644 --- a/dlclivegui/temp/engine.py +++ b/dlclivegui/temp/engine.py @@ -1,6 +1,7 @@ from enum import Enum from pathlib import Path + class Engine(Enum): TENSORFLOW = "tensorflow" PYTORCH = "pytorch" @@ -27,7 +28,7 @@ def from_model_path(cls, model_path: str | Path) -> "Engine": if has_cfg and has_pb: return cls.TENSORFLOW elif path.is_file(): - if path.suffix == ".pt": + if path.suffix in (".pt", 
".pth"): return cls.PYTORCH - raise ValueError(f"Could not determine engine from model path: {model_path}") \ No newline at end of file + raise ValueError(f"Could not determine engine from model path: {model_path}") diff --git a/tests/services/test_dlc_processor.py b/tests/services/test_dlc_processor.py index d90781b..3f5e0cb 100644 --- a/tests/services/test_dlc_processor.py +++ b/tests/services/test_dlc_processor.py @@ -63,10 +63,10 @@ def test_worker_processes_frames(qtbot, monkeypatch_dlclive, settings_model): proc.enqueue_frame(frame, timestamp=2.0 + i) qtbot.wait(5) # ms - # NOTE @C-Achard this still fails randomly - # the timeout has to be surprisingly large here - # not sure if it's qtbot or threading scheduling delays - # Should be fixed now. + # NOTE @C-Achard The timeout here is intentionally large to account for potential + # Qt event-loop and threading scheduling delays in CI environments. + # This was previously flaky with a smaller timeout; increasing it should + # keep the test stable. qtbot.waitUntil(lambda: proc.get_stats().frames_processed >= 3, timeout=3000) finally: From 58f6110bfcab9d24346254eded290d37a4ba2f94 Mon Sep 17 00:00:00 2001 From: Cyril Achard Date: Mon, 23 Feb 2026 08:45:21 +0100 Subject: [PATCH 38/60] Use Engine enum and validate model path Switch DLCProcessorSettings.model_type default from a raw string to the Engine enum (Engine.PYTORCH) and change DLCLiveProcessor.get_model_backend to return an Engine instead of a string for stronger typing. Update the GUI to validate empty model path input, reuse the resolved model backend (model_bknd) when building settings, improve the backend-detection error message, and allow selecting any file/directory in the model file dialog (adjusted name filter label for TensorFlow model directories). Also fix a relative import in dlclivegui.temp.__init__.py. Note: this changes the get_model_backend return type (callers expecting a string must use .value if needed). 
--- dlclivegui/config.py | 2 +- dlclivegui/gui/main_window.py | 12 +++++++----- dlclivegui/services/dlc_processor.py | 4 ++-- dlclivegui/temp/__init__.py | 2 +- 4 files changed, 11 insertions(+), 9 deletions(-) diff --git a/dlclivegui/config.py b/dlclivegui/config.py index 5a32e49..b244e44 100644 --- a/dlclivegui/config.py +++ b/dlclivegui/config.py @@ -241,7 +241,7 @@ class DLCProcessorSettings(BaseModel): resize: float = Field(default=1.0, gt=0) precision: Precision = "FP32" additional_options: dict[str, Any] = Field(default_factory=dict) - model_type: Engine = "pytorch" + model_type: Engine = Engine.PYTORCH single_animal: bool = True @field_validator("dynamic", mode="before") diff --git a/dlclivegui/gui/main_window.py b/dlclivegui/gui/main_window.py index 10b5b8f..2cafa77 100644 --- a/dlclivegui/gui/main_window.py +++ b/dlclivegui/gui/main_window.py @@ -876,11 +876,13 @@ def _parse_json(self, value: str) -> dict: def _dlc_settings_from_ui(self) -> DLCProcessorSettings: model_path = self.model_path_edit.text().strip() + if model_path == "": + raise ValueError("Model path cannot be empty. Please enter a valid path to a DLCLive model file.") try: - DLCLiveProcessor.get_model_backend(model_path) + model_bknd = DLCLiveProcessor.get_model_backend(model_path) except Exception as e: raise RuntimeError( - "Could not determine model backend from path." + "Could not determine model backend from path. " "Please ensure the model file is valid and has an appropriate extension " "(.pt, .pth for PyTorch or model directory for TensorFlow)." 
) from e @@ -891,7 +893,7 @@ def _dlc_settings_from_ui(self) -> DLCProcessorSettings: dynamic=self._config.dlc.dynamic, # Preserve from config resize=self._config.dlc.resize, # Preserve from config precision=self._config.dlc.precision, # Preserve from config - model_type=DLCLiveProcessor.get_model_backend(model_path), + model_type=model_bknd, # additional_options=self._parse_json(self.additional_options_edit.toPlainText()), ) @@ -974,13 +976,13 @@ def _action_browse_model(self) -> None: preselect = self._model_path_store.suggest_selected_file() dlg = QFileDialog(self, "Select DLCLive model file") - dlg.setFileMode(QFileDialog.FileMode.ExistingFile) + dlg.setFileMode(QFileDialog.FileMode.AnyFile) dlg.setNameFilters( [ "Model files (*.pt *.pth)", "PyTorch models (*.pt *.pth)", # "TensorFlow models (*.pb)", - "All files (*.*)", + "TensorFlow model directory (*.*)", ] ) dlg.setDirectory(start_dir) diff --git a/dlclivegui/services/dlc_processor.py b/dlclivegui/services/dlc_processor.py index 67c8c6e..41cbbbd 100644 --- a/dlclivegui/services/dlc_processor.py +++ b/dlclivegui/services/dlc_processor.py @@ -100,8 +100,8 @@ def __init__(self) -> None: self._processor_overhead_times: deque[float] = deque(maxlen=60) @staticmethod - def get_model_backend(model_path: str) -> str: - return Engine.from_model_path(model_path).value + def get_model_backend(model_path: str) -> Engine: + return Engine.from_model_path(model_path) def configure(self, settings: DLCProcessorSettings, processor: Any | None = None) -> None: self._settings = settings diff --git a/dlclivegui/temp/__init__.py b/dlclivegui/temp/__init__.py index 320867e..bdc8466 100644 --- a/dlclivegui/temp/__init__.py +++ b/dlclivegui/temp/__init__.py @@ -1,3 +1,3 @@ -from engine import Engine # type: ignore +from .engine import Engine # type: ignore __all__ = ["Engine"] From f495649a6d05d396ff1b9bdcd7e44a72b83f1992 Mon Sep 17 00:00:00 2001 From: Cyril Achard Date: Mon, 23 Feb 2026 09:37:41 +0100 Subject: [PATCH 39/60] Update 
GUI tests and adjust tox config Adapt tests to recent refactors and improve CI reliability: - tests/gui/test_ascii_art.py: use LOGO_ALPHA instead of ASCII_IMAGE_PATH, switch color arg from "always" to "auto", and relax the non-TTY assertion to check for the description substring. - tests/gui/test_main.py: make test_start_inference_emits_pose use tmp_path to create a dummy_model.pt file and set the model path to that file so the existence check passes. - tox.ini: set QT_OPENGL=software to reduce OpenGL/CI flakiness, and comment out the [testenv:lint] section (also remove lint from the py312 env mapping). These changes fix tests broken by the logo variable rename and improve test stability in CI environments. --- tests/gui/test_ascii_art.py | 10 +++++----- tests/gui/test_main.py | 6 ++++-- tox.ini | 19 ++++++++++--------- 3 files changed, 19 insertions(+), 16 deletions(-) diff --git a/tests/gui/test_ascii_art.py b/tests/gui/test_ascii_art.py index 50d702c..2e5800e 100644 --- a/tests/gui/test_ascii_art.py +++ b/tests/gui/test_ascii_art.py @@ -265,14 +265,14 @@ def test_print_ascii_writes_file(tmp_png_gray, force_tty, tmp_path): def test_build_help_description_tty(tmp_png_bgra_logo, monkeypatch, force_tty): - monkeypatch.setattr(ascii_mod, "ASCII_IMAGE_PATH", Path(tmp_png_bgra_logo)) - desc = ascii_mod.build_help_description(static_banner=None, color="always", min_width=60) + monkeypatch.setattr(ascii_mod, "LOGO_ALPHA", Path(tmp_png_bgra_logo)) + desc = ascii_mod.build_help_description(static_banner=None, color="auto", min_width=60) assert "DeepLabCut-Live GUI" in desc assert "\x1b[36m" in desc # cyan wrapper now present since TTY is mocked correctly def test_build_help_description_notty(tmp_png_bgra_logo, monkeypatch, force_notty): - monkeypatch.setattr(ascii_mod, "ASCII_IMAGE_PATH", Path(tmp_png_bgra_logo)) - desc = ascii_mod.build_help_description(static_banner=None, color="always", min_width=60) + monkeypatch.setattr(ascii_mod, "LOGO_ALPHA", 
Path(tmp_png_bgra_logo)) + desc = ascii_mod.build_help_description(static_banner=None, color="auto", min_width=60) # Not a TTY -> no banner, just the plain description - assert desc.strip() == "DeepLabCut-Live GUI — launch the graphical interface." + assert "DeepLabCut-Live GUI — launch the graphical interface." in desc diff --git a/tests/gui/test_main.py b/tests/gui/test_main.py index 83d4bcc..b9ed7da 100644 --- a/tests/gui/test_main.py +++ b/tests/gui/test_main.py @@ -42,7 +42,7 @@ def test_preview_renders_frames(qtbot, window, multi_camera_controller): @pytest.mark.gui @pytest.mark.functional -def test_start_inference_emits_pose(qtbot, window, multi_camera_controller, dlc_processor): +def test_start_inference_emits_pose(qtbot, window, multi_camera_controller, dlc_processor, tmp_path): """ Validate that: - Preview is running @@ -67,7 +67,9 @@ def test_start_inference_emits_pose(qtbot, window, multi_camera_controller, dlc_ timeout=6000, ) - w.model_path_edit.setText("dummy_model.pt") + model_weights = tmp_path / "dummy_model.pt" + model_weights.touch() # create an empty file to satisfy existence check + w.model_path_edit.setText(str(model_weights)) pose_count = [0] def _on_pose(result): diff --git a/tox.ini b/tox.ini index 3ac6e62..789c111 100644 --- a/tox.ini +++ b/tox.ini @@ -20,6 +20,7 @@ commands = setenv = PYTHONWARNINGS = default QT_QPA_PLATFORM = offscreen + QT_OPENGL = software # Can help avoid some Windows/OpenCV capture backend flakiness when tests touch video I/O: OPENCV_VIDEOIO_PRIORITY_MSMF = 0 @@ -31,14 +32,14 @@ passenv = WAYLAND_DISPLAY XDG_RUNTIME_DIR -[testenv:lint] -description = Ruff linting/format checks (matches pyproject.toml config) -skip_install = true -deps = - ruff -commands = - ruff check . - ruff format --check . +; [testenv:lint] +; description = Ruff linting/format checks (matches pyproject.toml config) +; skip_install = true +; deps = +; ruff +; commands = +; ruff check . +; ruff format --check . 
# Optional helper if you use tox-gh-actions to map GitHub's python-version to tox envs. # Requires: pip install tox-gh-actions @@ -46,4 +47,4 @@ commands = python = 3.10: py310 3.11: py311 - 3.12: py312, lint + 3.12: py312 ; , lint From f2c5fb609fcfe566b4f02b7a1948e920c48f4ce8 Mon Sep 17 00:00:00 2001 From: Cyril Achard Date: Mon, 23 Feb 2026 09:39:36 +0100 Subject: [PATCH 40/60] Remove GUI and multi-camera exports Stop importing and exporting GUI and multi-camera controller symbols at package level to avoid import-time side effects. Removed imports of CameraConfigDialog, DLCLiveMainWindow, MultiCameraController and MultiFrameData from dlclivegui.__init__ and removed those names from __all__. The top-level export list keeps "main" (callers should import GUI/controller types from their modules when needed). --- dlclivegui/__init__.py | 8 -------- 1 file changed, 8 deletions(-) diff --git a/dlclivegui/__init__.py b/dlclivegui/__init__.py index f302f5b..9d89e55 100644 --- a/dlclivegui/__init__.py +++ b/dlclivegui/__init__.py @@ -7,10 +7,6 @@ MultiCameraSettings, RecordingSettings, ) -from .gui.camera_config.camera_config_dialog import CameraConfigDialog -from .gui.main_window import DLCLiveMainWindow -from .main import main -from .services.multi_camera_controller import MultiCameraController, MultiFrameData __all__ = [ "ApplicationSettings", @@ -18,10 +14,6 @@ "DLCProcessorSettings", "MultiCameraSettings", "RecordingSettings", - "DLCLiveMainWindow", - "MultiCameraController", - "MultiFrameData", - "CameraConfigDialog", "main", ] __version__ = "2.0.0rc0" # PLACEHOLDER From d8da888b9f02c5992ec5da451945f06be1d9fb59 Mon Sep 17 00:00:00 2001 From: Cyril Achard Date: Mon, 23 Feb 2026 09:41:51 +0100 Subject: [PATCH 41/60] Update tox.ini --- tox.ini | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tox.ini b/tox.ini index 789c111..6a65ab1 100644 --- a/tox.ini +++ b/tox.ini @@ -47,4 +47,5 @@ passenv = python = 3.10: py310 3.11: py311 - 3.12: py312 ; , lint + 
3.12: py312 + ; , lint From 94a4d04cf93cf56105c4b2525679c29d653968a1 Mon Sep 17 00:00:00 2001 From: Cyril Achard Date: Mon, 23 Feb 2026 10:19:54 +0100 Subject: [PATCH 42/60] Add Qt/OpenGL deps to CI and restrict Codecov Install Qt/OpenGL runtime dependencies on Ubuntu CI runners (libegl1, libgl1, libopengl0, libxkbcommon-x11-0, libxcb-cursor0) so tests that require OpenGL/Qt can run in the workflow. Also restrict the Codecov upload step to push events on refs/heads/main to avoid uploading coverage for other event types or branches. --- .github/workflows/testing-ci.yml | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/.github/workflows/testing-ci.yml b/.github/workflows/testing-ci.yml index 1c2f91e..f5a82d1 100644 --- a/.github/workflows/testing-ci.yml +++ b/.github/workflows/testing-ci.yml @@ -38,6 +38,17 @@ jobs: python -m pip install -U pip wheel python -m pip install -U tox tox-gh-actions + - name: Install Qt/OpenGL runtime deps (Ubuntu) + if: startsWith(matrix.os, 'ubuntu') + run: | + sudo apt-get update + sudo apt-get install -y \ + libegl1 \ + libgl1 \ + libopengl0 \ + libxkbcommon-x11-0 \ + libxcb-cursor0 + - name: Run tests (exclude hardware) with coverage via tox run: | tox -q @@ -54,6 +65,7 @@ jobs: echo '```' >> "$GITHUB_STEP_SUMMARY" - name: Upload coverage to Codecov + if: github.event_name == 'push' && github.ref == 'refs/heads/main' uses: codecov/codecov-action@v5 with: files: ./coverage.xml From eb255dbf3eae47c4ae89203271f93cc21712014e Mon Sep 17 00:00:00 2001 From: Cyril Achard Date: Mon, 23 Feb 2026 11:00:32 +0100 Subject: [PATCH 43/60] Refactor camera scan state and loaders Introduce explicit scan state and tidy worker APIs for camera discovery. - Add CameraScanState enum and use it as single source of truth in CameraConfigDialog (_scan_state). - Replace ad-hoc worker checks with _set_scan_state, _finish_scan and _cleanup_scan_worker to manage UI overlays, progress/cancel controls and stability guarantees. 
- Add request_scan_cancel() to request interruption and handle canceled/result flows; DetectCamerasWorker now emits result (even empty) and canceled when interrupted. - Simplify QThread usage: remove custom finished signals, add typing annotations and clearer run() signatures in loaders; adjust worker signals (canceled) and small API refinements (request_cancel return types). - Update UI hookup (ui_blocks) to call request_scan_cancel instead of private handler. - Update unit and e2e tests to follow new scan lifecycle (wait for scan_started/scan_finished, helper _run_scan_and_wait, and scan state assertions). Files changed: camera_config_dialog.py, loaders.py, ui_blocks.py and related tests to match the new scan lifecycle and worker behaviour. --- .../gui/camera_config/camera_config_dialog.py | 202 +++++++++++------- dlclivegui/gui/camera_config/loaders.py | 74 ++++--- dlclivegui/gui/camera_config/ui_blocks.py | 2 +- .../gui/camera_config/test_cam_dialog_e2e.py | 76 ++++--- .../gui/camera_config/test_cam_dialog_unit.py | 9 +- 5 files changed, 220 insertions(+), 143 deletions(-) diff --git a/dlclivegui/gui/camera_config/camera_config_dialog.py b/dlclivegui/gui/camera_config/camera_config_dialog.py index 2a6963e..7fb2f99 100644 --- a/dlclivegui/gui/camera_config/camera_config_dialog.py +++ b/dlclivegui/gui/camera_config/camera_config_dialog.py @@ -19,7 +19,7 @@ from ...cameras.factory import CameraFactory, DetectedCamera, apply_detected_identity, camera_identity_key from ...config import CameraSettings, MultiCameraSettings -from .loaders import CameraLoadWorker, CameraProbeWorker, DetectCamerasWorker +from .loaders import CameraLoadWorker, CameraProbeWorker, CameraScanState, DetectCamerasWorker from .preview import PreviewSession, PreviewState, apply_crop, apply_rotation, resize_to_fit, to_display_pixmap from .ui_blocks import setup_camera_config_dialog_ui @@ -64,6 +64,7 @@ def __init__( # Camera detection worker self._scan_worker: DetectCamerasWorker | None = None + 
self._scan_state: CameraScanState = CameraScanState.IDLE # UI elements for eventFilter (assigned in _setup_ui) self._settings_scroll: QScrollArea | None = None @@ -171,7 +172,8 @@ def _on_close_cleanup(self) -> None: pass # Keep this short to reduce UI freeze sw.wait(300) - self._scan_worker = None + self._set_scan_state(CameraScanState.IDLE) + self._cleanup_scan_worker() # Cancel probe worker pw = getattr(self, "_probe_worker", None) @@ -260,7 +262,7 @@ def _connect_signals(self) -> None: self.cancel_btn.clicked.connect(self.reject) self.scan_started.connect(lambda _: setattr(self, "_dialog_active", True)) self.scan_finished.connect(lambda: setattr(self, "_dialog_active", False)) - self.scan_cancel_btn.clicked.connect(self._on_scan_cancel) + self.scan_cancel_btn.clicked.connect(self.request_scan_cancel) def _mark_dirty(*_args): self.apply_settings_btn.setEnabled(True) @@ -312,29 +314,6 @@ def _update_button_states(self) -> None: available_row = self.available_cameras_list.currentRow() self.add_camera_btn.setEnabled(available_row >= 0 and not scan_running) - def _sync_scan_ui(self) -> None: - """ - Sync *scan-related* UI controls based on scan state. - - Conservative policy during scan: - - Allow editing/previewing already configured cameras (Active list) - - Disallow structural changes (add/remove/reorder) and available-list actions - """ - scanning = self._is_scan_running() - - # Discovery controls - self.backend_combo.setEnabled(not scanning) - self.refresh_btn.setEnabled(not scanning) - - # Available camera list + add flow is blocked during scan - self.available_cameras_list.setEnabled(not scanning) - self.add_camera_btn.setEnabled(False if scanning else (self.available_cameras_list.currentRow() >= 0)) - - # Scan cancel button visibility is already managed in your scan start/finish, - # but keeping enabled state here makes it robust. 
- if hasattr(self, "scan_cancel_btn"): - self.scan_cancel_btn.setEnabled(scanning) - def _sync_preview_ui(self) -> None: """Update buttons/overlays based on preview state only.""" st = self._preview.state @@ -478,93 +457,158 @@ def _on_backend_changed(self, _index: int) -> None: self._refresh_available_cameras() def _is_scan_running(self) -> bool: - return bool(self._scan_worker and self._scan_worker.isRunning()) + return self._scan_state in (CameraScanState.RUNNING, CameraScanState.CANCELING) + + def _set_scan_state(self, state: CameraScanState, message: str | None = None) -> None: + """Single source of truth for scan-related UI controls.""" + self._scan_state = state + + scanning = state in (CameraScanState.RUNNING, CameraScanState.CANCELING) + + # Overlay message + if scanning: + self._show_scan_overlay( + message or ("Canceling discovery…" if state == CameraScanState.CANCELING else "Discovering cameras…") + ) + else: + self._hide_scan_overlay() + + # Progress + cancel controls + self.scan_progress.setVisible(scanning) + if scanning: + self.scan_progress.setRange(0, 0) # indeterminate + self.scan_cancel_btn.setVisible(scanning) + self.scan_cancel_btn.setEnabled(state == CameraScanState.RUNNING) # disabled while canceling + + # Disable discovery inputs while scanning + self.backend_combo.setEnabled(not scanning) + self.refresh_btn.setEnabled(not scanning) + + # Available list + add flow blocked while scanning (structure edits disallowed) + self.available_cameras_list.setEnabled(not scanning) + self.add_camera_btn.setEnabled(False if scanning else (self.available_cameras_list.currentRow() >= 0)) + + self._update_button_states() + + def _cleanup_scan_worker(self) -> None: + # worker is truly finished now + w = self._scan_worker + self._scan_worker = None + if w is not None: + w.deleteLater() + + def _finish_scan(self, reason: str) -> None: + """Mark scan UX complete (idempotent) and emit scan_finished queued.""" + if self._scan_state in (CameraScanState.DONE, 
CameraScanState.IDLE): + return + + # Transition scan UX to DONE (UI controls restored) + self._set_scan_state(CameraScanState.DONE) + + QTimer.singleShot(0, self.scan_finished.emit) + + LOGGER.debug("[Scan] finished reason=%s", reason) def _refresh_available_cameras(self) -> None: """Refresh the list of available cameras asynchronously.""" - backend = self.backend_combo.currentData() - if not backend: - backend = self.backend_combo.currentText().split()[0] + backend = self.backend_combo.currentData() or self.backend_combo.currentText().split()[0] - # If already scanning, ignore new requests to avoid races - if getattr(self, "_scan_worker", None) and self._scan_worker.isRunning(): + if self._is_scan_running(): self._show_scan_overlay("Already discovering cameras…") return - # Reset list UI and show progress + # Reset UI/list self.available_cameras_list.clear() self._detected_cameras = [] - msg = f"Discovering {backend} cameras…" - self._show_scan_overlay(msg) - self.scan_progress.setRange(0, 0) - self.scan_progress.setVisible(True) - self.scan_cancel_btn.setVisible(True) - self.available_cameras_list.setEnabled(False) - self.add_camera_btn.setEnabled(False) - self.refresh_btn.setEnabled(False) - self.backend_combo.setEnabled(False) - - self._sync_scan_ui() - self._update_button_states() + + self._set_scan_state(CameraScanState.RUNNING, message=f"Discovering {backend} cameras…") # Start worker - self._scan_worker = DetectCamerasWorker(backend, max_devices=10, parent=self) - self._scan_worker.progress.connect(self._on_scan_progress) - self._scan_worker.result.connect(self._on_scan_result) - self._scan_worker.error.connect(self._on_scan_error) - self._scan_worker.finished.connect(self._on_scan_finished) + w = DetectCamerasWorker(backend, max_devices=10, parent=self) + self._scan_worker = w + + w.progress.connect(self._on_scan_progress) + w.result.connect(self._on_scan_result) + w.error.connect(self._on_scan_error) + w.canceled.connect(self._on_scan_canceled) + + # 
Cleanup only + w.finished.connect(self._cleanup_scan_worker) + self.scan_started.emit(f"Scanning {backend} cameras…") - self._scan_worker.start() + w.start() def _on_scan_progress(self, msg: str) -> None: + if self._scan_state not in (CameraScanState.RUNNING, CameraScanState.CANCELING): + return self._show_scan_overlay(msg or "Discovering cameras…") def _on_scan_result(self, cams: list) -> None: + if self._scan_state not in (CameraScanState.RUNNING, CameraScanState.CANCELING): + return + + # Apply results to UI first (stability guarantee) self._detected_cameras = cams or [] - self.available_cameras_list.clear() # replace list contents + self.available_cameras_list.clear() if not self._detected_cameras: placeholder = QListWidgetItem("No cameras detected.") placeholder.setFlags(Qt.ItemIsEnabled) self.available_cameras_list.addItem(placeholder) - return - - for cam in self._detected_cameras: - item = QListWidgetItem(f"{cam.label} (index {cam.index})") - item.setData(Qt.ItemDataRole.UserRole, cam) - self.available_cameras_list.addItem(item) + else: + for cam in self._detected_cameras: + item = QListWidgetItem(f"{cam.label} (index {cam.index})") + item.setData(Qt.ItemDataRole.UserRole, cam) + self.available_cameras_list.addItem(item) + self.available_cameras_list.setCurrentRow(0) - self.available_cameras_list.setCurrentRow(0) + # Now UI is stable: finish scan UX and emit scan_finished queued + self._finish_scan("result") def _on_scan_error(self, msg: str) -> None: + if self._scan_state not in (CameraScanState.RUNNING, CameraScanState.CANCELING): + return + QMessageBox.warning(self, "Camera Scan", f"Failed to detect cameras:\n{msg}") - def _on_scan_finished(self) -> None: - self._hide_scan_overlay() - self.scan_progress.setVisible(False) - self._scan_worker = None + # Ensure UI is stable (list is stable even if empty) before finishing + if self.available_cameras_list.count() == 0: + placeholder = QListWidgetItem("Scan failed.") + placeholder.setFlags(Qt.ItemIsEnabled) + 
self.available_cameras_list.addItem(placeholder) - self.scan_cancel_btn.setVisible(False) - self.scan_cancel_btn.setEnabled(True) - self.available_cameras_list.setEnabled(True) - self.refresh_btn.setEnabled(True) - self.backend_combo.setEnabled(True) + self._finish_scan("error") - self._sync_scan_ui() - self._update_button_states() - self.scan_finished.emit() + def request_scan_cancel(self) -> None: + if not self._is_scan_running(): + return - def _on_scan_cancel(self) -> None: - """User requested to cancel discovery.""" - if self._scan_worker and self._scan_worker.isRunning(): + self._set_scan_state(CameraScanState.CANCELING, message="Canceling discovery…") + + w = self._scan_worker + if w is not None: try: - self._scan_worker.requestInterruption() + w.requestInterruption() except Exception: pass - # Keep the busy bar, update texts - self._show_scan_overlay("Canceling discovery…") - self.scan_progress.setVisible(True) # stay visible as indeterminate - self.scan_cancel_btn.setEnabled(False) + + # Guarantee UI stability before scan_finished: + if self.available_cameras_list.count() == 0: + placeholder = QListWidgetItem("Scan canceled.") + placeholder.setFlags(Qt.ItemIsEnabled) + self.available_cameras_list.addItem(placeholder) + + self._finish_scan("cancel") + + def _on_scan_canceled(self) -> None: + self._set_scan_state(CameraScanState.CANCELING, message="Finalizing cancellation…") + # If cancel is requested without clicking cancel (e.g., dialog closing), ensure UI finishes + if self._scan_state in (CameraScanState.RUNNING, CameraScanState.CANCELING): + if self.available_cameras_list.count() == 0: + placeholder = QListWidgetItem("Scan canceled.") + placeholder.setFlags(Qt.ItemIsEnabled) + self.available_cameras_list.addItem(placeholder) + self._finish_scan("canceled") def _on_available_camera_selected(self, row: int) -> None: if self._scan_worker and self._scan_worker.isRunning(): diff --git a/dlclivegui/gui/camera_config/loaders.py 
b/dlclivegui/gui/camera_config/loaders.py index 5cc7ed8..e77edf4 100644 --- a/dlclivegui/gui/camera_config/loaders.py +++ b/dlclivegui/gui/camera_config/loaders.py @@ -1,37 +1,57 @@ """Workers and state logic for loading cameras in the GUI.""" -# dlclivegui/gui/camera_loaders.py +# dlclivegui/gui/loaders.py +from __future__ import annotations + import copy import logging +from enum import Enum, auto +from typing import TYPE_CHECKING from PySide6.QtCore import QThread, Signal from PySide6.QtWidgets import QWidget -from ...cameras.base import CameraSettings from ...cameras.factory import CameraBackend, CameraFactory +from ...config import CameraSettings + +if TYPE_CHECKING: + pass # only for typing LOGGER = logging.getLogger(__name__) +class CameraScanState(Enum): + IDLE = auto() + RUNNING = auto() + CANCELING = auto() + DONE = auto() + + # ------------------------------- # Background worker to detect cameras # ------------------------------- class DetectCamerasWorker(QThread): - """Background worker to detect cameras for the selected backend.""" + """Background worker to detect cameras for the selected backend. 
- progress = Signal(str) # human-readable text - result = Signal(list) # list[DetectedCamera] + Signals: + - progress(str): human-readable status + - result(list): list of DetectedCamera (may be empty) + - error(str): error message (on exception) + - canceled(): emitted if interruption was requested during/after discovery + """ + + progress = Signal(str) + result = Signal(list) # list[DetectedCamera] at runtime error = Signal(str) - finished = Signal() + canceled = Signal() def __init__(self, backend: str, max_devices: int = 10, parent: QWidget | None = None): super().__init__(parent) self.backend = backend self.max_devices = max_devices - def run(self): + def run(self) -> None: try: - # Initial message self.progress.emit(f"Scanning {self.backend} cameras…") cams = CameraFactory.detect_cameras( @@ -40,11 +60,17 @@ def run(self): should_cancel=self.isInterruptionRequested, progress_cb=self.progress.emit, ) - self.result.emit(cams) + + # Always emit result (even if empty) so UI can stabilize deterministically. + self.result.emit(cams or []) + + # If canceled, emit canceled so UI can set ScanState.CANCELING/DONE if desired. + if self.isInterruptionRequested(): + self.canceled.emit() + except Exception as exc: self.error.emit(f"{type(exc).__name__}: {exc}") - finally: - self.finished.emit() + # No custom finished signal: QThread.finished is emitted automatically when run() returns. 
class CameraProbeWorker(QThread): @@ -53,7 +79,6 @@ class CameraProbeWorker(QThread): progress = Signal(str) success = Signal(object) # emits CameraSettings error = Signal(str) - finished = Signal() def __init__(self, cam: CameraSettings, parent: QWidget | None = None): super().__init__(parent) @@ -66,10 +91,10 @@ def __init__(self, cam: CameraSettings, parent: QWidget | None = None): if isinstance(ns, dict): ns.setdefault("fast_start", True) - def request_cancel(self): + def request_cancel(self) -> None: self._cancel = True - def run(self): + def run(self) -> None: try: self.progress.emit("Probing device defaults…") if self._cancel: @@ -77,8 +102,7 @@ def run(self): self.success.emit(self._cam) except Exception as exc: self.error.emit(f"{type(exc).__name__}: {exc}") - finally: - self.finished.emit() + # QThread.finished will fire automatically. # ------------------------------- @@ -87,27 +111,24 @@ def run(self): class CameraLoadWorker(QThread): """Open/configure a camera backend off the UI thread with progress and cancel support.""" - progress = Signal(str) # Human-readable status updates - success = Signal(object) # Emits the ready backend (CameraBackend) - error = Signal(str) # Emits error message - canceled = Signal() # Emits when canceled before success + progress = Signal(str) + success = Signal(object) # emits CameraSettings for GUI-thread open + error = Signal(str) + canceled = Signal() def __init__(self, cam: CameraSettings, parent: QWidget | None = None): super().__init__(parent) self._cam = copy.deepcopy(cam) - self._cancel = False self._backend: CameraBackend | None = None - # Do not use fast_start here as we want to actually open the camera to probe capabilities - # If you want a quick probe without full open, use CameraProbeWorker instead which sets fast_start=True # Ensure preview open never uses fast_start probe mode if isinstance(self._cam.properties, dict): ns = self._cam.properties.setdefault(self._cam.backend.lower(), {}) if isinstance(ns, 
dict): ns["fast_start"] = False - def request_cancel(self): + def request_cancel(self) -> None: self._cancel = True def _check_cancel(self) -> bool: @@ -116,15 +137,16 @@ def _check_cancel(self) -> bool: return True return False - def run(self): + def run(self) -> None: try: self.progress.emit("Creating backend…") if self._check_cancel(): self.canceled.emit() return - LOGGER.debug("Creating camera backend for %s:%d", self._cam.backend, self._cam.index) + LOGGER.debug("Preparing camera open for %s:%d", self._cam.backend, self._cam.index) self.progress.emit("Opening device…") + # Open only in GUI thread to avoid simultaneous opens self.success.emit(self._cam) diff --git a/dlclivegui/gui/camera_config/ui_blocks.py b/dlclivegui/gui/camera_config/ui_blocks.py index 28395c0..e09aa9b 100644 --- a/dlclivegui/gui/camera_config/ui_blocks.py +++ b/dlclivegui/gui/camera_config/ui_blocks.py @@ -196,7 +196,7 @@ def build_available_cameras_group(dlg: CameraConfigDialog) -> QGroupBox: # The original UI block connects cancel here; preserve that. 
# dlg must provide _on_scan_cancel if hasattr(dlg, "_on_scan_cancel"): - dlg.scan_cancel_btn.clicked.connect(dlg._on_scan_cancel) # type: ignore[attr-defined] + dlg.scan_cancel_btn.clicked.connect(dlg.request_scan_cancel) # type: ignore[attr-defined] available_layout.addWidget(dlg.scan_cancel_btn) diff --git a/tests/gui/camera_config/test_cam_dialog_e2e.py b/tests/gui/camera_config/test_cam_dialog_e2e.py index 160bf12..2b1a2a3 100644 --- a/tests/gui/camera_config/test_cam_dialog_e2e.py +++ b/tests/gui/camera_config/test_cam_dialog_e2e.py @@ -8,11 +8,11 @@ from PySide6.QtCore import Qt from PySide6.QtWidgets import QMessageBox -from dlclivegui.cameras import CameraFactory from dlclivegui.cameras.base import CameraBackend -from dlclivegui.cameras.factory import DetectedCamera +from dlclivegui.cameras.factory import CameraFactory, DetectedCamera from dlclivegui.config import CameraSettings, MultiCameraSettings -from dlclivegui.gui.camera_config.camera_config_dialog import CameraConfigDialog, CameraLoadWorker +from dlclivegui.gui.camera_config.camera_config_dialog import CameraConfigDialog +from dlclivegui.gui.camera_config.loaders import CameraLoadWorker from dlclivegui.gui.camera_config.preview import PreviewState # --------------------------------------------------------------------- @@ -20,6 +20,22 @@ # --------------------------------------------------------------------- +def _run_scan_and_wait(dialog: CameraConfigDialog, qtbot, timeout: int = 2000) -> None: + """ + Trigger a scan via UI and wait for the dialog's scan_finished, + which now means: UI is stable and available list is populated (or placeholder). 
+ """ + qtbot.waitUntil(lambda: not dialog._is_scan_running(), timeout=timeout) + qtbot.wait(50) + + # Wait for the scan started by *this click* to both start and finish + with qtbot.waitSignals([dialog.scan_started, dialog.scan_finished], timeout=timeout, order="strict"): + qtbot.mouseClick(dialog.refresh_btn, Qt.LeftButton) + + # Now the list should be stable + qtbot.waitUntil(lambda: dialog.available_cameras_list.count() > 0, timeout=timeout) + + def _select_backend_for_active_cam(dialog: CameraConfigDialog, cam_row: int = 0) -> str: """ Ensure backend combo is set to the backend of the active camera at cam_row. @@ -119,9 +135,10 @@ def dialog(qtbot, patch_detect_cameras): except Exception: d.close() - qtbot.waitUntil(lambda: getattr(d, "_loader", None) is None, timeout=2000) - qtbot.waitUntil(lambda: getattr(d, "_scan_worker", None) is None, timeout=2000) - qtbot.waitUntil(lambda: not getattr(d, "_preview_active", False), timeout=2000) + qtbot.waitUntil(lambda: d._preview.loader is None, timeout=2000) + qtbot.waitUntil(lambda: not d._is_scan_running(), timeout=2000) + qtbot.wait(50) + qtbot.waitUntil(lambda: d._preview.state == PreviewState.IDLE, timeout=2000) # --------------------------------------------------------------------- @@ -131,9 +148,7 @@ def dialog(qtbot, patch_detect_cameras): @pytest.mark.gui def test_e2e_async_camera_scan(dialog, qtbot): - qtbot.mouseClick(dialog.refresh_btn, Qt.LeftButton) - with qtbot.waitSignal(dialog.scan_finished, timeout=2000): - pass + _run_scan_and_wait(dialog, qtbot, timeout=2000) assert dialog.available_cameras_list.count() == 2 @@ -247,19 +262,17 @@ def read(self): @pytest.mark.gui def test_e2e_selection_change_auto_commits(dialog, qtbot): - """ - Guard contract: switching selection commits pending edits. - Use FPS (supported) rather than gain (OpenCV gain is intentionally disabled). 
- """ - # Ensure backend combo matches active cam (important for add/dup logic) _select_backend_for_active_cam(dialog, cam_row=0) - # Add second camera deterministically - dialog._on_scan_result([DetectedCamera(index=1, label="ExtraCam")]) - dialog.available_cameras_list.setCurrentRow(0) + # Discover cameras via UI + _run_scan_and_wait(dialog, qtbot, timeout=2000) + assert dialog.available_cameras_list.count() == 2 + + # Select the second detected camera to avoid duplicate (index 1) + dialog.available_cameras_list.setCurrentRow(1) qtbot.mouseClick(dialog.add_camera_btn, Qt.LeftButton) - assert len(dialog._working_settings.cameras) >= 2 + qtbot.waitUntil(lambda: len(dialog._working_settings.cameras) >= 2, timeout=1000) dialog.active_cameras_list.setCurrentRow(0) qtbot.waitUntil(lambda: dialog._current_edit_index == 0, timeout=1000) @@ -300,10 +313,6 @@ def slow_detect(backend, max_devices=10, should_cancel=None, progress_cb=None, * @pytest.mark.gui def test_duplicate_camera_prevented(dialog, qtbot, monkeypatch): - """ - Duplicate detection compares identity keys including backend. - Ensure backend combo is set to match existing active camera backend. 
- """ calls = {"n": 0} def _warn(parent, title, text, *args, **kwargs): @@ -312,14 +321,15 @@ def _warn(parent, title, text, *args, **kwargs): monkeypatch.setattr(QMessageBox, "warning", staticmethod(_warn)) - backend = _select_backend_for_active_cam(dialog, cam_row=0) - + _select_backend_for_active_cam(dialog, cam_row=0) initial_count = dialog.active_cameras_list.count() - # Same backend + same index -> duplicate - dialog._on_scan_result([DetectedCamera(index=0, label=f"{backend}-X")]) - dialog.available_cameras_list.setCurrentRow(0) + # Scan normally + _run_scan_and_wait(dialog, qtbot, timeout=2000) + assert dialog.available_cameras_list.count() == 2 + # Choose the entry that matches index 0 (duplicate) + dialog.available_cameras_list.setCurrentRow(0) qtbot.mouseClick(dialog.add_camera_btn, Qt.LeftButton) assert dialog.active_cameras_list.count() == initial_count @@ -328,9 +338,6 @@ def _warn(parent, title, text, *args, **kwargs): @pytest.mark.gui def test_max_cameras_prevented(qtbot, monkeypatch, patch_detect_cameras): - """ - Dialog enforces MAX_CAMERAS enabled cameras. 
- """ calls = {"n": 0} def _warn(parent, title, text, *args, **kwargs): @@ -354,14 +361,13 @@ def _warn(parent, title, text, *args, **kwargs): try: _select_backend_for_active_cam(d, cam_row=0) - initial_count = d.active_cameras_list.count() - qtbot.waitUntil(lambda: not d._is_scan_running(), timeout=1000) - d._on_scan_result([DetectedCamera(index=4, label="Extra")]) - d._on_scan_finished() - d.available_cameras_list.setCurrentRow(0) + _run_scan_and_wait(d, qtbot, timeout=2000) + assert d.available_cameras_list.count() == 2 + # Try to add any detected camera (should hit MAX_CAMERAS guard) + d.available_cameras_list.setCurrentRow(1) qtbot.mouseClick(d.add_camera_btn, Qt.LeftButton) assert d.active_cameras_list.count() == initial_count diff --git a/tests/gui/camera_config/test_cam_dialog_unit.py b/tests/gui/camera_config/test_cam_dialog_unit.py index fc73f75..2abe022 100644 --- a/tests/gui/camera_config/test_cam_dialog_unit.py +++ b/tests/gui/camera_config/test_cam_dialog_unit.py @@ -219,9 +219,14 @@ def test_add_camera_populates_working_settings(dialog_unit, qtbot): Add camera should append a new CameraSettings into _working_settings. We directly call _on_scan_result to populate available list deterministically. 
""" - dialog_unit._on_scan_result([DetectedCamera(index=2, label="ExtraCam2")]) - dialog_unit.available_cameras_list.setCurrentRow(0) + from dlclivegui.gui.camera_config.loaders import CameraScanState + + dialog_unit._set_scan_state(CameraScanState.RUNNING, message="Test scan running") + dialog_unit._on_scan_result([DetectedCamera(label="ExtraCam2", index=2)]) + with qtbot.waitSignal(dialog_unit.scan_finished, timeout=1000): + pass + dialog_unit.available_cameras_list.setCurrentRow(0) qtbot.mouseClick(dialog_unit.add_camera_btn, Qt.LeftButton) added = dialog_unit._working_settings.cameras[-1] From 48b274ce42b3d5711c122d58612be38796bfcc86 Mon Sep 17 00:00:00 2001 From: Cyril Achard Date: Mon, 23 Feb 2026 11:48:06 +0100 Subject: [PATCH 44/60] CI, packaging: coverage PRs, export main, tox deps Allow Codecov uploads for pushes and pull requests targeting main by loosening the workflow condition. Export the package's main entry (from .main import main) in dlclivegui.__init__ so the CLI/function is exposed. Add tox and tox-gh-actions to the test extras in pyproject.toml. Tidy tox.ini by adding a comment that linting is handled by pre-commit/format workflow and removing an optional tox-gh-actions helper section. 
--- .github/workflows/testing-ci.yml | 2 +- dlclivegui/__init__.py | 1 + pyproject.toml | 2 ++ tox.ini | 3 +-- 4 files changed, 5 insertions(+), 3 deletions(-) diff --git a/.github/workflows/testing-ci.yml b/.github/workflows/testing-ci.yml index f5a82d1..289c156 100644 --- a/.github/workflows/testing-ci.yml +++ b/.github/workflows/testing-ci.yml @@ -65,7 +65,7 @@ jobs: echo '```' >> "$GITHUB_STEP_SUMMARY" - name: Upload coverage to Codecov - if: github.event_name == 'push' && github.ref == 'refs/heads/main' + if: github.event_name == 'push' || (github.event_name == 'pull_request' && github.base_ref == 'main') uses: codecov/codecov-action@v5 with: files: ./coverage.xml diff --git a/dlclivegui/__init__.py b/dlclivegui/__init__.py index 9d89e55..a64da4b 100644 --- a/dlclivegui/__init__.py +++ b/dlclivegui/__init__.py @@ -7,6 +7,7 @@ MultiCameraSettings, RecordingSettings, ) +from .main import main __all__ = [ "ApplicationSettings", diff --git a/pyproject.toml b/pyproject.toml index 7fba79a..3cc05c6 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -60,6 +60,8 @@ test = [ "pytest-cov>=4", "pytest-mock>=3.10", "pytest-qt>=4.2", + "tox", + "tox-gh-actions", ] tf = [ "deeplabcut-live[tf]==1.1", diff --git a/tox.ini b/tox.ini index 6a65ab1..3344818 100644 --- a/tox.ini +++ b/tox.ini @@ -32,6 +32,7 @@ passenv = WAYLAND_DISPLAY XDG_RUNTIME_DIR +; Linting already covered by pre-commit hooks and format.yml workflow ; [testenv:lint] ; description = Ruff linting/format checks (matches pyproject.toml config) ; skip_install = true @@ -41,8 +42,6 @@ passenv = ; ruff check . ; ruff format --check . -# Optional helper if you use tox-gh-actions to map GitHub's python-version to tox envs. 
-# Requires: pip install tox-gh-actions [gh-actions] python = 3.10: py310 From 31e09d8852ee4f4aa70167055d257fc0b06ca365 Mon Sep 17 00:00:00 2001 From: Cyril Achard Date: Mon, 23 Feb 2026 11:52:30 +0100 Subject: [PATCH 45/60] Normalize model_type and fix model paths Coerce DLCProcessorSettings.model_type to a lowercase string (accepting Enum or string inputs) and validate allowed backends (pytorch, tensorflow). Update UI to handle TensorFlow .pb models by using the parent directory for DLCLive, restrict file dialog to existing files, add existence checks and backend detection when selecting a model. Improve ModelPathStore: robust path normalization, separate helpers for existing file/dir checks, smarter save/load/resolve logic, and better start-dir/suggest-file heuristics. Minor cleanup: remove duplicate import and clarify a TF model detection comment in engine. --- dlclivegui/config.py | 37 ++++- dlclivegui/gui/main_window.py | 24 +++- dlclivegui/services/dlc_processor.py | 2 - dlclivegui/temp/engine.py | 1 + dlclivegui/utils/settings_store.py | 208 ++++++++++++++++++--------- 5 files changed, 195 insertions(+), 77 deletions(-) diff --git a/dlclivegui/config.py b/dlclivegui/config.py index b244e44..a93b710 100644 --- a/dlclivegui/config.py +++ b/dlclivegui/config.py @@ -2,13 +2,12 @@ from __future__ import annotations import json +from enum import Enum from pathlib import Path from typing import Any, Literal from pydantic import BaseModel, Field, field_validator, model_validator -from dlclivegui.temp import Engine - Rotation = Literal[0, 90, 180, 270] TileLayout = Literal["auto", "2x2", "1x4", "4x1"] Precision = Literal["FP32", "FP16"] @@ -241,7 +240,7 @@ class DLCProcessorSettings(BaseModel): resize: float = Field(default=1.0, gt=0) precision: Precision = "FP32" additional_options: dict[str, Any] = Field(default_factory=dict) - model_type: Engine = Engine.PYTORCH + model_type: str = "pytorch" single_animal: bool = True @field_validator("dynamic", mode="before") @@ 
-249,6 +248,38 @@ class DLCProcessorSettings(BaseModel): def _coerce_dynamic(cls, v): return DynamicCropModel.from_tupleish(v) + @field_validator("model_type", mode="before") + @classmethod + def _coerce_model_type(cls, v): + """ + Accept: + - "pytorch"/"tensorflow"/etc as strings + - Enum instances (e.g. Engine.PYTORCH) and store their .value + Always return a lowercase string. + """ + if v is None or v == "": + return "pytorch" + + # If caller passed Engine enum or any Enum, use its value + if isinstance(v, Enum): + v = v.value + + # If caller passed something with a `.value` attribute (defensive) + if not isinstance(v, str) and hasattr(v, "value"): + v = v.value + + if not isinstance(v, str): + raise TypeError(f"model_type must be a string or Enum, got {type(v)!r}") + + v = v.strip().lower() + + # Optional: enforce allowed values + allowed = {"pytorch", "tensorflow"} + if v not in allowed: + raise ValueError(f"Unknown model type: {v!r}. Allowed: {sorted(allowed)}") + + return v + class BoundingBoxSettings(BaseModel): enabled: bool = False diff --git a/dlclivegui/gui/main_window.py b/dlclivegui/gui/main_window.py index 2cafa77..7824093 100644 --- a/dlclivegui/gui/main_window.py +++ b/dlclivegui/gui/main_window.py @@ -876,6 +876,10 @@ def _parse_json(self, value: str) -> dict: def _dlc_settings_from_ui(self) -> DLCProcessorSettings: model_path = self.model_path_edit.text().strip() + if Path(model_path).exists() and Path(model_path).suffix in (".pb"): + # IMPORTANT NOTE: DLClive expects a directory for TensorFlow models, + # so if user selects a .pb file, we should pass the parent directory to DLCLive + model_path = str(Path(model_path).parent) if model_path == "": raise ValueError("Model path cannot be empty. 
Please enter a valid path to a DLCLive model file.") try: @@ -976,13 +980,12 @@ def _action_browse_model(self) -> None: preselect = self._model_path_store.suggest_selected_file() dlg = QFileDialog(self, "Select DLCLive model file") - dlg.setFileMode(QFileDialog.FileMode.AnyFile) + dlg.setFileMode(QFileDialog.FileMode.ExistingFile) dlg.setNameFilters( [ "Model files (*.pt *.pth)", "PyTorch models (*.pt *.pth)", - # "TensorFlow models (*.pb)", - "TensorFlow model directory (*.*)", + "TensorFlow models (*.pb)", ] ) dlg.setDirectory(start_dir) @@ -995,7 +998,20 @@ def _action_browse_model(self) -> None: selected = dlg.selectedFiles() if not selected: return - file_path = selected[0] + file_path = Path(selected[0]).expanduser() + if not file_path.exists(): + QMessageBox.warning(self, "File not found", f"The selected file does not exist:\n{file_path}") + return + + try: + DLCLiveProcessor.get_model_backend(str(file_path)) + except FileNotFoundError as e: + QMessageBox.warning(self, "Model selection error", str(e)) + return + except ValueError as e: + QMessageBox.warning(self, "Model selection error", str(e)) + return + file_path = str(file_path) self.model_path_edit.setText(file_path) # Persist model path + directory diff --git a/dlclivegui/services/dlc_processor.py b/dlclivegui/services/dlc_processor.py index 41cbbbd..42b5868 100644 --- a/dlclivegui/services/dlc_processor.py +++ b/dlclivegui/services/dlc_processor.py @@ -16,8 +16,6 @@ from PySide6.QtCore import QObject, Signal from dlclivegui.config import DLCProcessorSettings - -# from dlclivegui.config import DLCProcessorSettings from dlclivegui.processors.processor_utils import instantiate_from_scan from dlclivegui.temp import Engine # type: ignore # TODO use main package enum when released diff --git a/dlclivegui/temp/engine.py b/dlclivegui/temp/engine.py index 47dfefe..5816fe5 100644 --- a/dlclivegui/temp/engine.py +++ b/dlclivegui/temp/engine.py @@ -24,6 +24,7 @@ def from_model_path(cls, model_path: str | Path) -> 
"Engine": if path.is_dir(): has_cfg = (path / "pose_cfg.yaml").is_file() + # has_cfg is DLClive specific and is considered a requirement for TF live models. has_pb = any(p.suffix == ".pb" for p in path.glob("*.pb")) if has_cfg and has_pb: return cls.TENSORFLOW diff --git a/dlclivegui/utils/settings_store.py b/dlclivegui/utils/settings_store.py index 51d9fa9..9ceb0bb 100644 --- a/dlclivegui/utils/settings_store.py +++ b/dlclivegui/utils/settings_store.py @@ -1,4 +1,6 @@ # dlclivegui/utils/settings_store.py +from __future__ import annotations + import logging from pathlib import Path @@ -70,124 +72,194 @@ class ModelPathStore: def __init__(self, settings: QSettings | None = None): self._settings = settings or QSettings("DeepLabCut", "DLCLiveGUI") - def _norm(self, p: str | None) -> str | None: + # ------------------------- + # Normalization helpers + # ------------------------- + def _as_path(self, p: str | None) -> Path | None: + """Best-effort conversion to Path (expand ~, interpret '.' as cwd).""" if not p: return None + s = str(p).strip() + if not s: + return None try: - return str(Path(p).expanduser().resolve()) + pp = Path(s).expanduser() + if s in (".", "./"): + pp = Path.cwd() + return pp except Exception: - logger.debug("Failed to normalize path: %s", p) + logger.debug("Failed to parse path: %s", p) + return None + + def _norm_existing_dir(self, p: str | None) -> str | None: + """Return an absolute, resolved existing directory path, else None.""" + pp = self._as_path(p) + if pp is None: + return None + try: + # If a file was given, use its parent directory + if pp.exists() and pp.is_file(): + pp = pp.parent + + if pp.exists() and pp.is_dir(): + return str(pp.resolve()) + except Exception: + logger.debug("Failed to normalize directory: %s", p) + return None + + def _norm_existing_path(self, p: str | None) -> str | None: + """Return an absolute, resolved existing path (file or dir), else None.""" + pp = self._as_path(p) + if pp is None: return None + try: + if 
pp.exists(): + return str(pp.resolve()) + except Exception: + logger.debug("Failed to normalize path: %s", p) + return None + # ------------------------- + # Load + # ------------------------- def load_last(self) -> str | None: + """Return last model path if it still exists and looks usable.""" val = self._settings.value("dlc/last_model_path") - path = self._norm(str(val)) if val else None + path = self._norm_existing_path(str(val)) if val else None if not path: return None + try: - return path if is_model_file(path) else None + pp = Path(path) + # Accept a valid model *file* + if pp.is_file() and is_model_file(str(pp)): + return str(pp) except Exception: - logger.debug("Last model path is not a valid model file: %s", path) - return None + logger.debug("Last model path not valid/usable: %s", path) + + return None def load_last_dir(self) -> str | None: + """Return last directory if it still exists and is a directory.""" val = self._settings.value("dlc/last_model_dir") - d = self._norm(str(val)) if val else None - if not d: - return None - try: - p = Path(d) - return str(p) if p.exists() and p.is_dir() else None - except Exception: - logger.debug("Last model dir is not a valid directory: %s", d) - return None + d = self._norm_existing_dir(str(val)) if val else None + return d + # ------------------------- + # Save + # ------------------------- def save_if_valid(self, path: str) -> None: - """Save last model *file* if it looks valid, and always save its directory.""" - path = self._norm(path) or "" - if not path: + """ + Save last model path if it looks valid/usable, and always save its directory. + - For files: always save parent directory. + - For directories: save directory itself if it looks like a TF model dir. 
+ """ + norm = self._norm_existing_path(path) + if not norm: return + try: - parent = str(Path(path).parent) - self._settings.setValue("dlc/last_model_dir", parent) + p = Path(norm) + + # Always persist a *directory* that is safe for QFileDialog.setDirectory(...) + if p.is_dir(): + model_dir = p + else: + model_dir = p.parent + + model_dir_norm = self._norm_existing_dir(str(model_dir)) + if model_dir_norm: + self._settings.setValue("dlc/last_model_dir", model_dir_norm) + + # Persist model path if it is a valid model file, or a TF model directory + if p.is_file() and is_model_file(str(p)): + self._settings.setValue("dlc/last_model_path", str(p)) + elif p.is_dir() and self._looks_like_tf_model_dir(p): + self._settings.setValue("dlc/last_model_path", str(p)) - if is_model_file(path): - self._settings.setValue("dlc/last_model_path", str(Path(path))) except Exception: - logger.debug("Failed to save last model path: %s", path) - pass + logger.debug("Failed to save model path: %s", path, exc_info=True) def save_last_dir(self, directory: str) -> None: - directory = self._norm(directory) or "" - if not directory: + d = self._norm_existing_dir(directory) + if not d: return try: - p = Path(directory) - if p.exists() and p.is_dir(): - self._settings.setValue("dlc/last_model_dir", str(p)) + self._settings.setValue("dlc/last_model_dir", d) except Exception: - pass + logger.debug("Failed to save last model dir: %s", d, exc_info=True) + # ------------------------- + # Resolve + # ------------------------- def resolve(self, config_path: str | None) -> str: - """Resolve the best model path to display in the UI.""" - config_path = self._norm(config_path) - if config_path: + """ + Resolve the best model path to display in the UI. 
+ Preference: + 1) config_path if valid/usable + 2) persisted last model path if valid/usable + 3) empty + """ + cfg = self._norm_existing_path(config_path) + if cfg: try: - if is_model_file(config_path): - return config_path + p = Path(cfg) + if p.is_file() and is_model_file(cfg): + return cfg + if p.is_dir() and self._looks_like_tf_model_dir(p): + return cfg except Exception: - logger.debug("Config path is not a valid model file: %s", config_path) - pass + logger.debug("Config path not usable: %s", cfg) persisted = self.load_last() if persisted: - try: - if is_model_file(persisted): - return persisted - except Exception: - pass + return persisted return "" def suggest_start_dir(self, fallback_dir: str | None = None) -> str: - """Pick the best directory to start the file dialog in.""" + """ + Pick the best directory to start file dialogs in. + Guarantees: returns an existing absolute directory (never '.'). + """ # 1) last dir last_dir = self.load_last_dir() if last_dir: return last_dir - # 2) directory of last valid model file - last_file = self.load_last() - if last_file: + # 2) directory of last valid model path + last = self.load_last() + if last: try: - parent = Path(last_file).parent - if parent.exists(): - return str(parent) + p = Path(last) + if p.is_file(): + parent = self._norm_existing_dir(str(p.parent)) + if parent: + return parent + elif p.is_dir(): + d = self._norm_existing_dir(str(p)) + if d: + return d except Exception: - logger.debug("Failed to get parent of last model file: %s", last_file) - pass + logger.debug("Failed to derive start dir from last model: %s", last) - # 3) fallback dir (config.model_directory) if valid - if fallback_dir: - try: - p = Path(fallback_dir).expanduser() - if p.exists() and p.is_dir(): - return str(p) - except Exception: - logger.debug("Fallback dir is not a valid directory: %s", fallback_dir) - pass + # 3) fallback dir (e.g. 
config.dlc.model_directory) + fb = self._norm_existing_dir(fallback_dir) + if fb: + return fb - # 4) last resort: home - return str(Path.home()) + # 4) last resort: cwd if exists else home + cwd = self._norm_existing_dir(str(Path.cwd())) + return cwd or str(Path.home()) def suggest_selected_file(self) -> str | None: - """Optional: return a file to preselect if it exists.""" - last_file = self.load_last() - if not last_file: + """Return a file to preselect if it exists (only files, not directories).""" + last = self.load_last() + if not last: return None try: - p = Path(last_file) + p = Path(last) return str(p) if p.exists() and p.is_file() else None except Exception: - logger.debug("Failed to check existence of last model file: %s", last_file) + logger.debug("Failed to check existence of last model: %s", last) return None From e517aedccf355a268fdaaaf6b05f62264078419d Mon Sep 17 00:00:00 2001 From: Cyril Achard Date: Mon, 23 Feb 2026 11:55:37 +0100 Subject: [PATCH 46/60] Enhance ModelPathStore path normalization tests Update tests in tests/utils/test_settings_store.py to exercise ModelPathStore normalization more robustly: use tmp_path to create real directories and files, assert _norm_existing_dir and _norm_existing_path return existing absolute paths (and that they are dir/file respectively), and handle None via _norm_existing_* helpers. Also adjust suggest_start_dir test to make cwd invalid by monkeypatching Path.cwd so the fallback to home is exercised. Removed the previous unreliable expanduser assertion. 
--- tests/utils/test_settings_store.py | 29 ++++++++++++++++++++++++----- 1 file changed, 24 insertions(+), 5 deletions(-) diff --git a/tests/utils/test_settings_store.py b/tests/utils/test_settings_store.py index f379b76..01ddc5f 100644 --- a/tests/utils/test_settings_store.py +++ b/tests/utils/test_settings_store.py @@ -98,15 +98,29 @@ def model_validate_json(raw: str): # ----------------------------- # ModelPathStore helpers # ----------------------------- -def test_model_path_store_norm_handles_none_and_invalid(monkeypatch): +def test_model_path_store_norm_handles_none_and_invalid(tmp_path: Path): s = InMemoryQSettings() mps = store.ModelPathStore(settings=s) - assert mps._norm(None) is None # type: ignore[arg-type] + # None should normalize to None + assert mps._norm_existing_path(None) is None # type: ignore[arg-type] + assert mps._norm_existing_dir(None) is None # type: ignore[arg-type] - # Force Path.expanduser() to raise by passing something weird? Hard to do reliably. - # Instead just assert normal path expands/returns str. 
- assert mps._norm("~/somewhere") is not None + # Existing dir should normalize to an absolute path + d = tmp_path / "models" + d.mkdir() + norm_dir = mps._norm_existing_dir(str(d)) + assert norm_dir is not None + assert Path(norm_dir).exists() + assert Path(norm_dir).is_dir() + + # Existing file should normalize as existing path + f = d / "net.pt" + f.write_text("x") + norm_file = mps._norm_existing_path(str(f)) + assert norm_file is not None + assert Path(norm_file).exists() + assert Path(norm_file).is_file() # ----------------------------- @@ -289,7 +303,12 @@ def test_model_path_store_suggest_start_dir_falls_back_to_home(tmp_path: Path, m fake_home = tmp_path / "home" fake_home.mkdir() + # Make cwd "invalid" so suggest_start_dir can't use it + fake_cwd = tmp_path / "does_not_exist" + assert not fake_cwd.exists() + monkeypatch.setattr(store.Path, "home", lambda: fake_home) + monkeypatch.setattr(store.Path, "cwd", lambda: fake_cwd) assert mps.suggest_start_dir(fallback_dir=None) == str(fake_home) From c7b11c9167e1d995347cec01bd1d6eec21cb93ec Mon Sep 17 00:00:00 2001 From: Cyril Achard Date: Mon, 23 Feb 2026 11:59:54 +0100 Subject: [PATCH 47/60] Add concurrency to testing CI workflow Introduce a concurrency block to the testing CI workflow so runs for the same PR are grouped and previous in-progress jobs are cancelled when a new update is pushed. The group is keyed by workflow and pull request number (ci-${{ github.workflow }}-pr-${{ github.event.pull_request.number }}) to avoid redundant CI runs and save resources. 
--- .github/workflows/testing-ci.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.github/workflows/testing-ci.yml b/.github/workflows/testing-ci.yml index 289c156..874d4cf 100644 --- a/.github/workflows/testing-ci.yml +++ b/.github/workflows/testing-ci.yml @@ -4,6 +4,10 @@ on: pull_request: types: [opened, synchronize, reopened] +concurrency: + group: ci-${{ github.workflow }}-pr-${{ github.event.pull_request.number }} + cancel-in-progress: true + jobs: unit: name: Unit + Smoke (no hardware) • ${{ matrix.os }} • py${{ matrix.python }} From 4865edb4302883c8b717a1297295bcdd4a3db6da Mon Sep 17 00:00:00 2001 From: Cyril Achard Date: Mon, 23 Feb 2026 15:02:07 +0100 Subject: [PATCH 48/60] Add Engine helpers for model path detection Introduce Engine helpers (is_pytorch_model_path, is_tensorflow_model_dir_path) and centralize model-file/TF-dir detection. Replace the old is_model_file usage with these helpers across settings_store and main_window, fixing a bug in suffix checking and ensuring .pb TensorFlow models are validated via their parent directory. Remove legacy is_model_file from utils. Also update camera UI to expect dlg.request_scan_cancel instead of _on_scan_cancel, refine scan-cancel test synchronization, and add/adjust unit tests to cover the new Engine detection logic. 
--- dlclivegui/gui/camera_config/ui_blocks.py | 5 +- dlclivegui/gui/main_window.py | 7 +- dlclivegui/temp/engine.py | 25 +++- dlclivegui/utils/settings_store.py | 14 +- dlclivegui/utils/utils.py | 9 -- .../gui/camera_config/test_cam_dialog_e2e.py | 6 +- tests/utils/test_settings_store.py | 14 +- tests/utils/test_utils.py | 137 +++++++++++++++--- 8 files changed, 164 insertions(+), 53 deletions(-) diff --git a/dlclivegui/gui/camera_config/ui_blocks.py b/dlclivegui/gui/camera_config/ui_blocks.py index e09aa9b..86e4f19 100644 --- a/dlclivegui/gui/camera_config/ui_blocks.py +++ b/dlclivegui/gui/camera_config/ui_blocks.py @@ -193,9 +193,8 @@ def build_available_cameras_group(dlg: CameraConfigDialog) -> QGroupBox: dlg.scan_cancel_btn.setIcon(dlg.style().standardIcon(QStyle.StandardPixmap.SP_BrowserStop)) dlg.scan_cancel_btn.setVisible(False) - # The original UI block connects cancel here; preserve that. - # dlg must provide _on_scan_cancel - if hasattr(dlg, "_on_scan_cancel"): + # dlg must provide request_scan_cancel() + if hasattr(dlg, "request_scan_cancel"): dlg.scan_cancel_btn.clicked.connect(dlg.request_scan_cancel) # type: ignore[attr-defined] available_layout.addWidget(dlg.scan_cancel_btn) diff --git a/dlclivegui/gui/main_window.py b/dlclivegui/gui/main_window.py index 7824093..a32e393 100644 --- a/dlclivegui/gui/main_window.py +++ b/dlclivegui/gui/main_window.py @@ -876,7 +876,7 @@ def _parse_json(self, value: str) -> dict: def _dlc_settings_from_ui(self) -> DLCProcessorSettings: model_path = self.model_path_edit.text().strip() - if Path(model_path).exists() and Path(model_path).suffix in (".pb"): + if Path(model_path).exists() and Path(model_path).suffix == ".pb": # IMPORTANT NOTE: DLClive expects a directory for TensorFlow models, # so if user selects a .pb file, we should pass the parent directory to DLCLive model_path = str(Path(model_path).parent) @@ -1004,7 +1004,10 @@ def _action_browse_model(self) -> None: return try: - 
DLCLiveProcessor.get_model_backend(str(file_path)) + if file_path.suffix == ".pb": + # For TensorFlow, DLCLive expects a directory, so we pass the parent directory for validation + model_check_path = file_path.parent + DLCLiveProcessor.get_model_backend(str(model_check_path)) except FileNotFoundError as e: QMessageBox.warning(self, "Model selection error", str(e)) return diff --git a/dlclivegui/temp/engine.py b/dlclivegui/temp/engine.py index 5816fe5..9532b33 100644 --- a/dlclivegui/temp/engine.py +++ b/dlclivegui/temp/engine.py @@ -2,10 +2,28 @@ from pathlib import Path +# TODO @C-Achard decide if this moves to utils, +# or if we update dlclive.Engine to have these methods and use that instead of a separate enum here. +# The latter would be more cohesive but also creates a dependency from utils to dlclive, +# pending release of dlclive class Engine(Enum): TENSORFLOW = "tensorflow" PYTORCH = "pytorch" + @staticmethod + def is_pytorch_model_path(model_path: str | Path) -> bool: + path = Path(model_path) + return path.is_file() and path.suffix.lower() in (".pt", ".pth") + + @staticmethod + def is_tensorflow_model_dir_path(model_path: str | Path) -> bool: + path = Path(model_path) + if not path.is_dir(): + return False + has_cfg = (path / "pose_cfg.yaml").is_file() + has_pb = any(p.suffix.lower() == ".pb" for p in path.glob("*.pb")) + return has_cfg and has_pb + @classmethod def from_model_type(cls, model_type: str) -> "Engine": if model_type.lower() == "pytorch": @@ -23,13 +41,10 @@ def from_model_path(cls, model_path: str | Path) -> "Engine": raise FileNotFoundError(f"Model path does not exist: {model_path}") if path.is_dir(): - has_cfg = (path / "pose_cfg.yaml").is_file() - # has_cfg is DLClive specific and is considered a requirement for TF live models. 
- has_pb = any(p.suffix == ".pb" for p in path.glob("*.pb")) - if has_cfg and has_pb: + if cls.is_tensorflow_model_dir_path(path): return cls.TENSORFLOW elif path.is_file(): - if path.suffix in (".pt", ".pth"): + if cls.is_pytorch_model_path(path): return cls.PYTORCH raise ValueError(f"Could not determine engine from model path: {model_path}") diff --git a/dlclivegui/utils/settings_store.py b/dlclivegui/utils/settings_store.py index 9ceb0bb..fcf36fd 100644 --- a/dlclivegui/utils/settings_store.py +++ b/dlclivegui/utils/settings_store.py @@ -7,7 +7,7 @@ from PySide6.QtCore import QSettings from ..config import ApplicationSettings -from .utils import is_model_file +from ..temp import Engine # type: ignore # TODO use main package enum when released logger = logging.getLogger(__name__) @@ -132,7 +132,7 @@ def load_last(self) -> str | None: try: pp = Path(path) # Accept a valid model *file* - if pp.is_file() and is_model_file(str(pp)): + if pp.is_file() and (Engine.is_pytorch_model_path(pp) or Engine.is_tensorflow_model_dir_path(pp.parent)): return str(pp) except Exception: logger.debug("Last model path not valid/usable: %s", path) @@ -172,10 +172,12 @@ def save_if_valid(self, path: str) -> None: self._settings.setValue("dlc/last_model_dir", model_dir_norm) # Persist model path if it is a valid model file, or a TF model directory - if p.is_file() and is_model_file(str(p)): + if Engine.is_pytorch_model_path(p): self._settings.setValue("dlc/last_model_path", str(p)) - elif p.is_dir() and self._looks_like_tf_model_dir(p): + elif p.parent.is_dir() and Engine.is_tensorflow_model_dir_path(p.parent): self._settings.setValue("dlc/last_model_path", str(p)) + # elif p.is_dir() and Engine.is_tensorflow_model_dir_path(p): + # self._settings.setValue("dlc/last_model_path", str(p)) except Exception: logger.debug("Failed to save model path: %s", path, exc_info=True) @@ -204,9 +206,9 @@ def resolve(self, config_path: str | None) -> str: if cfg: try: p = Path(cfg) - if p.is_file() and 
is_model_file(cfg): + if p.is_file() and Engine.is_pytorch_model_path(p): return cfg - if p.is_dir() and self._looks_like_tf_model_dir(p): + if p.is_dir() and Engine.is_tensorflow_model_dir_path(p): return cfg except Exception: logger.debug("Config path not usable: %s", cfg) diff --git a/dlclivegui/utils/utils.py b/dlclivegui/utils/utils.py index 3d3a4ff..6af003d 100644 --- a/dlclivegui/utils/utils.py +++ b/dlclivegui/utils/utils.py @@ -8,18 +8,9 @@ from datetime import datetime from pathlib import Path -SUPPORTED_MODELS = [".pt", ".pth", ".pb"] _INVALID_CHARS = re.compile(r"[^A-Za-z0-9._-]+") -def is_model_file(file_path: Path | str) -> bool: - if not isinstance(file_path, Path): - file_path = Path(file_path) - if not file_path.is_file(): - return False - return file_path.suffix.lower() in SUPPORTED_MODELS - - def sanitize_name(name: str, *, fallback: str = "session") -> str: """Make a user-provided string safe for filesystem paths.""" name = (name or "").strip() diff --git a/tests/gui/camera_config/test_cam_dialog_e2e.py b/tests/gui/camera_config/test_cam_dialog_e2e.py index 2b1a2a3..49867b8 100644 --- a/tests/gui/camera_config/test_cam_dialog_e2e.py +++ b/tests/gui/camera_config/test_cam_dialog_e2e.py @@ -304,11 +304,13 @@ def slow_detect(backend, max_devices=10, should_cancel=None, progress_cb=None, * qtbot.mouseClick(dialog.scan_cancel_btn, Qt.LeftButton) + # scan_finished = UI stable, not necessarily worker fully stopped / controls unlocked with qtbot.waitSignal(dialog.scan_finished, timeout=3000): pass - assert dialog.refresh_btn.isEnabled() - assert dialog.backend_combo.isEnabled() + # Wait until scan controls are unlocked (worker finished) + qtbot.waitUntil(lambda: dialog.refresh_btn.isEnabled(), timeout=3000) + qtbot.waitUntil(lambda: dialog.backend_combo.isEnabled(), timeout=3000) @pytest.mark.gui diff --git a/tests/utils/test_settings_store.py b/tests/utils/test_settings_store.py index 01ddc5f..7eba56a 100644 --- a/tests/utils/test_settings_store.py +++ 
b/tests/utils/test_settings_store.py @@ -235,19 +235,21 @@ def test_model_path_store_resolve_prefers_config_path_when_valid(tmp_path: Path) assert mps.resolve(str(model)) == str(model) -def test_model_path_store_resolve_falls_back_to_persisted(tmp_path: Path): +def test_model_path_store_resolve_falls_back_to_persisted_tf_dir(tmp_path: Path): settings = InMemoryQSettings() mps = store.ModelPathStore(settings=settings) - persisted = tmp_path / "persisted.pb" - persisted.write_text("x") - settings.setValue("dlc/last_model_path", str(persisted)) + tf_dir = tmp_path / "tf_model" + tf_dir.mkdir() + (tf_dir / "pose_cfg.yaml").write_text("cfg: 1\n") + (tf_dir / "graph.pb").write_text("pb") + + settings.setValue("dlc/last_model_path", str(tf_dir / "graph.pb")) - # invalid config path bad = tmp_path / "notamodel.onnx" bad.write_text("x") - assert mps.resolve(str(bad)) == str(persisted) + assert mps.resolve(str(bad)) == str(tf_dir / "graph.pb") def test_model_path_store_resolve_returns_empty_when_nothing_valid(tmp_path: Path): diff --git a/tests/utils/test_utils.py b/tests/utils/test_utils.py index 70dd628..dc3a491 100644 --- a/tests/utils/test_utils.py +++ b/tests/utils/test_utils.py @@ -1,41 +1,138 @@ +from __future__ import annotations + from pathlib import Path import pytest import dlclivegui.utils.utils as u +from dlclivegui.temp.engine import Engine # e.g. 
dlclivegui/utils/engine.py pytestmark = pytest.mark.unit +# NOTE @C-Achard: These tests are currently in test_utils.py for convenience, +# but we may want to use dlclive.Engine directly +# and possibly move these tests to dlclive's test suite # ----------------------------- -# is_model_file +# Engine.from_model_type # ----------------------------- -@pytest.mark.unit -def test_is_model_file_true_for_supported_extensions(tmp_path: Path): - for ext in [".pt", ".pth", ".pb"]: - p = tmp_path / f"model{ext}" - p.write_text("x") - assert u.is_model_file(p) is True - assert u.is_model_file(str(p)) is True # also accepts str +@pytest.mark.parametrize( + "inp, expected", + [ + ("pytorch", Engine.PYTORCH), + ("PYTORCH", Engine.PYTORCH), + ("tensorflow", Engine.TENSORFLOW), + ("TensorFlow", Engine.TENSORFLOW), + ("base", Engine.TENSORFLOW), + ("tensorrt", Engine.TENSORFLOW), + ("lite", Engine.TENSORFLOW), + ], +) +def test_engine_from_model_type(inp: str, expected: Engine): + assert Engine.from_model_type(inp) == expected + + +def test_engine_from_model_type_unknown(): + with pytest.raises(ValueError): + Engine.from_model_type("onnx") - # case-insensitive - p2 = tmp_path / "MODEL.PT" - p2.write_text("x") - assert u.is_model_file(p2) is True +# ----------------------------- +# Engine.is_pytorch_model_path +# ----------------------------- +@pytest.mark.parametrize("ext", [".pt", ".pth"]) +def test_engine_is_pytorch_model_path_true(tmp_path: Path, ext: str): + p = tmp_path / f"model{ext}" + p.write_text("x") + assert Engine.is_pytorch_model_path(p) is True + assert Engine.is_pytorch_model_path(str(p)) is True -@pytest.mark.unit -def test_is_model_file_false_for_missing_or_dir(tmp_path: Path): - missing = tmp_path / "missing.pt" - assert u.is_model_file(missing) is False +def test_engine_is_pytorch_model_path_false_for_missing(tmp_path: Path): + p = tmp_path / "missing.pt" + assert Engine.is_pytorch_model_path(p) is False + + +def 
test_engine_is_pytorch_model_path_false_for_dir(tmp_path: Path): d = tmp_path / "model.pt" d.mkdir() - assert u.is_model_file(d) is False + assert Engine.is_pytorch_model_path(d) is False + + +def test_engine_is_pytorch_model_path_case_insensitive(tmp_path: Path): + # only include if you applied the .lower() patch + p = tmp_path / "MODEL.PT" + p.write_text("x") + assert Engine.is_pytorch_model_path(p) is True + + +# ----------------------------- +# Engine.is_tensorflow_model_dir_path +# ----------------------------- +def _make_tf_dir(tmp_path: Path, *, with_cfg: bool = True, with_pb: bool = True, pb_name: str = "graph.pb") -> Path: + d = tmp_path / "tf_model" + d.mkdir() + if with_cfg: + (d / "pose_cfg.yaml").write_text("cfg: 1\n") + if with_pb: + (d / pb_name).write_text("pbdata") + return d + + +def test_engine_is_tensorflow_model_dir_path_true(tmp_path: Path): + d = _make_tf_dir(tmp_path, with_cfg=True, with_pb=True) + assert Engine.is_tensorflow_model_dir_path(d) is True + assert Engine.is_tensorflow_model_dir_path(str(d)) is True + + +def test_engine_is_tensorflow_model_dir_path_false_missing_cfg(tmp_path: Path): + d = _make_tf_dir(tmp_path, with_cfg=False, with_pb=True) + assert Engine.is_tensorflow_model_dir_path(d) is False + + +def test_engine_is_tensorflow_model_dir_path_false_missing_pb(tmp_path: Path): + d = _make_tf_dir(tmp_path, with_cfg=True, with_pb=False) + assert Engine.is_tensorflow_model_dir_path(d) is False + + +def test_engine_is_tensorflow_model_dir_path_case_insensitive_pb(tmp_path: Path): + # only include if you applied the .lower() patch for pb suffix + d = _make_tf_dir(tmp_path, with_cfg=True, with_pb=True, pb_name="GRAPH.PB") + assert Engine.is_tensorflow_model_dir_path(d) is True + + +# ----------------------------- +# Engine.from_model_path +# ----------------------------- +def test_engine_from_model_path_missing_raises(tmp_path: Path): + missing = tmp_path / "does_not_exist.pt" + with pytest.raises(FileNotFoundError): + 
Engine.from_model_path(missing) + + +def test_engine_from_model_path_pytorch_file(tmp_path: Path): + p = tmp_path / "net.pth" + p.write_text("x") + assert Engine.from_model_path(p) == Engine.PYTORCH + + +def test_engine_from_model_path_tensorflow_dir(tmp_path: Path): + d = _make_tf_dir(tmp_path, with_cfg=True, with_pb=True) + assert Engine.from_model_path(d) == Engine.TENSORFLOW + + +def test_engine_from_model_path_dir_not_tf_raises(tmp_path: Path): + d = tmp_path / "some_dir" + d.mkdir() + with pytest.raises(ValueError): + Engine.from_model_path(d) + - bad = tmp_path / "model.onnx" - bad.write_text("x") - assert u.is_model_file(bad) is False +def test_engine_from_model_path_file_not_pytorch_raises(tmp_path: Path): + p = tmp_path / "model.pb" + p.write_text("x") # PB file alone is not a TF dir + with pytest.raises(ValueError): + Engine.from_model_path(p) # ----------------------------- From 1f73c8935ac75120246ef4c508ca87bdd6d7f49c Mon Sep 17 00:00:00 2001 From: Cyril Achard Date: Mon, 23 Feb 2026 15:04:25 +0100 Subject: [PATCH 49/60] Ignore stale scan worker signals Add sender() checks and debug logs to scan-related handlers so signals from old/stale scan workers are ignored. Update _is_scan_running to also consider the worker's isRunning() state. Only call _finish_scan("cancel") when there is no active worker to avoid prematurely finishing cancellation. These changes reduce race conditions when restarting/canceling camera scans and ensure UI reflects the current worker's state. 
--- .../gui/camera_config/camera_config_dialog.py | 20 +++++++++++++++++-- 1 file changed, 18 insertions(+), 2 deletions(-) diff --git a/dlclivegui/gui/camera_config/camera_config_dialog.py b/dlclivegui/gui/camera_config/camera_config_dialog.py index 7fb2f99..5f2caff 100644 --- a/dlclivegui/gui/camera_config/camera_config_dialog.py +++ b/dlclivegui/gui/camera_config/camera_config_dialog.py @@ -457,7 +457,10 @@ def _on_backend_changed(self, _index: int) -> None: self._refresh_available_cameras() def _is_scan_running(self) -> bool: - return self._scan_state in (CameraScanState.RUNNING, CameraScanState.CANCELING) + if self._scan_state in (CameraScanState.RUNNING, CameraScanState.CANCELING): + return True + w = self._scan_worker + return bool(w and w.isRunning()) def _set_scan_state(self, state: CameraScanState, message: str | None = None) -> None: """Single source of truth for scan-related UI controls.""" @@ -539,11 +542,17 @@ def _refresh_available_cameras(self) -> None: w.start() def _on_scan_progress(self, msg: str) -> None: + if self.sender() is not self._scan_worker: + LOGGER.debug("[Scan] Ignoring progress from old worker: %s", msg) + return if self._scan_state not in (CameraScanState.RUNNING, CameraScanState.CANCELING): return self._show_scan_overlay(msg or "Discovering cameras…") def _on_scan_result(self, cams: list) -> None: + if self.sender() is not self._scan_worker: + LOGGER.debug("[Scan] Ignoring result from old worker: %d cameras", len(cams) if cams else 0) + return if self._scan_state not in (CameraScanState.RUNNING, CameraScanState.CANCELING): return @@ -566,6 +575,9 @@ def _on_scan_result(self, cams: list) -> None: self._finish_scan("result") def _on_scan_error(self, msg: str) -> None: + if self.sender() is not self._scan_worker: + LOGGER.debug("[Scan] Ignoring error from old worker: %s", msg) + return if self._scan_state not in (CameraScanState.RUNNING, CameraScanState.CANCELING): return @@ -598,9 +610,13 @@ def request_scan_cancel(self) -> None: 
placeholder.setFlags(Qt.ItemIsEnabled) self.available_cameras_list.addItem(placeholder) - self._finish_scan("cancel") + if w is None or not w.isRunning(): + self._finish_scan("cancel") def _on_scan_canceled(self) -> None: + if self.sender() is not self._scan_worker: + LOGGER.debug("[Scan] Ignoring canceled signal from old worker.") + return self._set_scan_state(CameraScanState.CANCELING, message="Finalizing cancellation…") # If cancel is requested without clicking cancel (e.g., dialog closing), ensure UI finishes if self._scan_state in (CameraScanState.RUNNING, CameraScanState.CANCELING): From 0b47bc4798669556d878ecd75f3a117a41d571d9 Mon Sep 17 00:00:00 2001 From: Cyril Achard Date: Mon, 23 Feb 2026 15:04:58 +0100 Subject: [PATCH 50/60] Update testing-ci.yml --- .github/workflows/testing-ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/testing-ci.yml b/.github/workflows/testing-ci.yml index 874d4cf..2fa4c5b 100644 --- a/.github/workflows/testing-ci.yml +++ b/.github/workflows/testing-ci.yml @@ -69,7 +69,7 @@ jobs: echo '```' >> "$GITHUB_STEP_SUMMARY" - name: Upload coverage to Codecov - if: github.event_name == 'push' || (github.event_name == 'pull_request' && github.base_ref == 'main') + if: github.event_name == 'pull_request' && (github.base_ref == 'main' || github.base_ref == 'master') uses: codecov/codecov-action@v5 with: files: ./coverage.xml From 1cf1945946006208362af63cc4129301f8750016 Mon Sep 17 00:00:00 2001 From: Cyril Achard Date: Mon, 23 Feb 2026 15:50:29 +0100 Subject: [PATCH 51/60] Update engine.py --- dlclivegui/temp/engine.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dlclivegui/temp/engine.py b/dlclivegui/temp/engine.py index 9532b33..a6bb225 100644 --- a/dlclivegui/temp/engine.py +++ b/dlclivegui/temp/engine.py @@ -21,7 +21,7 @@ def is_tensorflow_model_dir_path(model_path: str | Path) -> bool: if not path.is_dir(): return False has_cfg = (path / "pose_cfg.yaml").is_file() - has_pb = 
any(p.suffix.lower() == ".pb" for p in path.glob("*.pb")) + has_pb = any(p.is_file() and p.suffix.lower() == ".pb" for p in path.iterdir()) return has_cfg and has_pb @classmethod From 6952b68a7f05863c5d3e72a873f0762cbb277915 Mon Sep 17 00:00:00 2001 From: Cyril Achard Date: Mon, 23 Feb 2026 15:55:59 +0100 Subject: [PATCH 52/60] Update __init__.py --- dlclivegui/temp/__init__.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/dlclivegui/temp/__init__.py b/dlclivegui/temp/__init__.py index bdc8466..d27f406 100644 --- a/dlclivegui/temp/__init__.py +++ b/dlclivegui/temp/__init__.py @@ -1,3 +1,11 @@ +""" +This dlclivegui.temp package is a temporary location for code +that is needed but duplicated from dlclive or other packages, +and are not yet released in the main dlclive or other packages. +This is a strictly temporary location and should be removed +as soon as the code is released in the main dlclive or other packages. +""" + from .engine import Engine # type: ignore __all__ = ["Engine"] From 559c77585fabdee7c15652a63478203c747ec420 Mon Sep 17 00:00:00 2001 From: Cyril Achard Date: Mon, 23 Feb 2026 16:41:02 +0100 Subject: [PATCH 53/60] Import Engine from dlclivegui.temp in tests Update tests/utils/test_utils.py to import Engine from dlclivegui.temp instead of dlclivegui.temp.engine. Aligns the test import with the package re-export or module relocation so tests reference the correct top-level import. --- tests/utils/test_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/utils/test_utils.py b/tests/utils/test_utils.py index dc3a491..ebbc954 100644 --- a/tests/utils/test_utils.py +++ b/tests/utils/test_utils.py @@ -5,7 +5,7 @@ import pytest import dlclivegui.utils.utils as u -from dlclivegui.temp.engine import Engine # e.g. 
dlclivegui/utils/engine.py +from dlclivegui.temp import Engine pytestmark = pytest.mark.unit From 025a18e6f6e28069faf13df3878d5eac4ea65fd4 Mon Sep 17 00:00:00 2001 From: Cyril Achard Date: Tue, 24 Feb 2026 16:41:29 +0100 Subject: [PATCH 54/60] Pass file path for non-.pb model checks Ensure model_check_path is set for non-.pb selections before calling DLCLiveProcessor.get_model_backend. .pb files still use the parent directory (TensorFlow expectation); other file types now pass the file path itself to avoid using an undefined or incorrect path. --- dlclivegui/gui/main_window.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/dlclivegui/gui/main_window.py b/dlclivegui/gui/main_window.py index a32e393..380eda0 100644 --- a/dlclivegui/gui/main_window.py +++ b/dlclivegui/gui/main_window.py @@ -1007,6 +1007,8 @@ def _action_browse_model(self) -> None: if file_path.suffix == ".pb": # For TensorFlow, DLCLive expects a directory, so we pass the parent directory for validation model_check_path = file_path.parent + else: + model_check_path = file_path DLCLiveProcessor.get_model_backend(str(model_check_path)) except FileNotFoundError as e: QMessageBox.warning(self, "Model selection error", str(e)) From 735b9a0859a4b912220252990042119d65d78ff0 Mon Sep 17 00:00:00 2001 From: Cyril Achard Date: Tue, 24 Feb 2026 16:51:26 +0100 Subject: [PATCH 55/60] Add ModelType Literal and apply to model_type Introduce a ModelType Literal type ("pytorch" | "tensorflow") and change DLCProcessorSettings.model_type from a plain str to this constrained type, keeping the default as "pytorch". This ensures pydantic validation enforces allowed model backends. 
--- dlclivegui/config.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/dlclivegui/config.py b/dlclivegui/config.py index a93b710..6d9e1de 100644 --- a/dlclivegui/config.py +++ b/dlclivegui/config.py @@ -11,6 +11,7 @@ Rotation = Literal[0, 90, 180, 270] TileLayout = Literal["auto", "2x2", "1x4", "4x1"] Precision = Literal["FP32", "FP16"] +ModelType = Literal["pytorch", "tensorflow"] class CameraSettings(BaseModel): @@ -240,7 +241,7 @@ class DLCProcessorSettings(BaseModel): resize: float = Field(default=1.0, gt=0) precision: Precision = "FP32" additional_options: dict[str, Any] = Field(default_factory=dict) - model_type: str = "pytorch" + model_type: ModelType = "pytorch" single_animal: bool = True @field_validator("dynamic", mode="before") From af06d09736d9452f1d2e1f7bf4919405dae1d125 Mon Sep 17 00:00:00 2001 From: Cyril Achard Date: Tue, 24 Feb 2026 17:06:24 +0100 Subject: [PATCH 56/60] Move package version to pyproject.toml Set a static version (2.0.0rc1) in pyproject.toml and remove the dynamic setuptools configuration that read dlclivegui.__version__. Also remove the placeholder __version__ from dlclivegui/__init__.py so version metadata is centralized in pyproject.toml. 
--- dlclivegui/__init__.py | 1 - pyproject.toml | 5 +---- 2 files changed, 1 insertion(+), 5 deletions(-) diff --git a/dlclivegui/__init__.py b/dlclivegui/__init__.py index a64da4b..60e5b29 100644 --- a/dlclivegui/__init__.py +++ b/dlclivegui/__init__.py @@ -17,4 +17,3 @@ "RecordingSettings", "main", ] -__version__ = "2.0.0rc0" # PLACEHOLDER diff --git a/pyproject.toml b/pyproject.toml index 3cc05c6..bb84eac 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,6 +4,7 @@ requires = [ "setuptools>=68" ] [project] name = "deeplabcut-live-gui" +version = "2.0.0rc1" description = "PySide6-based GUI to run real time pose estimation experiments with DeepLabCut" readme = "README.md" keywords = [ "deep learning", "deeplabcut", "gui", "pose estimation", "real-time" ] @@ -21,7 +22,6 @@ classifiers = [ "Programming Language :: Python :: 3.12", "Topic :: Scientific/Engineering :: Artificial Intelligence", ] -dynamic = [ "version" ] # version is set in dlclivegui/__init__.py dependencies = [ "cv2-enumerate-cameras", "deeplabcut-live==1.1", @@ -74,9 +74,6 @@ Documentation = "https://github.com/DeepLabCut/DeepLabCut-live-GUI" # FIXME @C- Homepage = "https://github.com/DeepLabCut/DeepLabCut-live-GUI" Repository = "https://github.com/DeepLabCut/DeepLabCut-live-GUI" -[tool.setuptools.dynamic] -version = { attr = "dlclivegui.__version__" } - # [tool.setuptools] # include-package-data = true [tool.setuptools.package-data] From 873ad07b5f849cc5597a87066045c917e591c79b Mon Sep 17 00:00:00 2001 From: Cyril Achard Date: Wed, 25 Feb 2026 14:29:33 +0100 Subject: [PATCH 57/60] Add cti_files_source marker and fallback logic Introduce a cti_files_source marker (auto/user) and wiring to track whether CTI file lists were user-specified or auto-discovered. Treat legacy top-level properties.cti_file(s) as strict user overrides; treat properties.gentl.cti_file(s) as either an auto-cache (falls back to discovery if stale) or a user override depending on the marker. 
Persist the resolved source back into the namespace when resolving CTIs, and update harvester-selection/rebind logic to fall back to discovery when auto-cached CTIs are stale while still raising for strict user overrides. Also add a small import and internal field (_cti_files_source_used) to track the chosen source, plus logging when falling back from a stale auto-cache. --- dlclivegui/cameras/backends/gentl_backend.py | 146 ++++++++++++++----- 1 file changed, 106 insertions(+), 40 deletions(-) diff --git a/dlclivegui/cameras/backends/gentl_backend.py b/dlclivegui/cameras/backends/gentl_backend.py index ba08b6a..123a9eb 100644 --- a/dlclivegui/cameras/backends/gentl_backend.py +++ b/dlclivegui/cameras/backends/gentl_backend.py @@ -39,6 +39,11 @@ class GenTLCameraBackend(CameraBackend): r"C:\\Program Files\\The Imaging Source Europe GmbH\\TIS Camera SDK\\bin\\win64_x64\\*.cti", r"C:\\Program Files (x86)\\The Imaging Source Europe GmbH\\TIS Grabber\\bin\\win64_x64\\*.cti", ) + # Source marker stored in properties["gentl"]["cti_files_source"] + # auto : persisted by auto-discovery (env vars, patterns, etc.). Cache, may be stale, re-discover if missing. + # user : explicitly set by user via properties.gentl.cti_file(s). Cache, strict raise if missing. + _CTI_FILES_SOURCE_AUTO: ClassVar[str] = "auto" + _CTI_FILES_SOURCE_USER: ClassVar[str] = "user" def __init__(self, settings): super().__init__(settings) @@ -129,6 +134,8 @@ def __init__(self, settings): self._acquirer = None self._device_label: str | None = None + self._cti_files_source_used: str | None = None + @property def actual_resolution(self) -> tuple[int, int] | None: if self._actual_width and self._actual_height: @@ -191,54 +198,95 @@ def _resolve_cti_files_for_settings(self) -> list[str]: """ Resolve CTI files to load. - Policy: - - If the user explicitly provides ctis (cti_files / cti_file), use only those. - - Otherwise, discover all CTIs (env vars + configured patterns/dirs) and return all. 
- - Never raise just because multiple CTIs exist. - - Raise only when none are found. + Option B policy (source marker + fallback): + - User override (properties.gentl.cti_file/cti_files OR legacy properties.cti_file/cti_files): + * strict: must exist, otherwise raise + * source = "user" + - Auto-persisted cache (properties.gentl.cti_files_source == "auto"): + * try persisted ctis first + * if stale/missing, fall back to discovery + * source = "auto" + - Default: discovery (env + configured patterns/dirs) => source = "auto" + + Never raise just because multiple CTIs exist. + Raise only when none are found (after allowed fallback). """ props = self.settings.properties if isinstance(self.settings.properties, dict) else {} ns = props.get(self.OPTIONS_KEY, {}) if not isinstance(ns, dict): ns = {} + # Read source marker + source = ns.get("cti_files_source") + source = str(source).strip().lower() if source is not None else None + # Explicit CTIs (namespace first, then legacy top-level) ns_cti_files = ns.get("cti_files") ns_cti_file = ns.get("cti_file") legacy_cti_files = props.get("cti_files") legacy_cti_file = props.get("cti_file") - # 1) If user provided explicit list/file in namespace, use that only - if ns_cti_files or ns_cti_file: + # ------------------------------------------------------------ + # 1) Legacy explicit CTIs: always treat as user override (strict) + # ------------------------------------------------------------ + if legacy_cti_files or legacy_cti_file: + self._cti_files_source_used = self._CTI_FILES_SOURCE_USER + candidates, diag = cti_finder.discover_cti_files( - cti_file=str(ns_cti_file) if ns_cti_file else None, - cti_files=cti_finder.cti_files_as_list(ns_cti_files) if ns_cti_files else None, + cti_file=str(legacy_cti_file) if legacy_cti_file else None, + cti_files=cti_finder.cti_files_as_list(legacy_cti_files) if legacy_cti_files else None, include_env=False, must_exist=True, ) if not candidates: raise RuntimeError( - "No valid GenTL producer 
(.cti) found from properties.gentl.cti_file/cti_files.\n\n" + "No valid GenTL producer (.cti) found from properties.cti_file/cti_files.\n\n" f"Discovery details:\n{diag.summarize()}" ) return list(candidates) - # 2) If user provided explicit list/file in legacy top-level, use that only - if legacy_cti_files or legacy_cti_file: + # ------------------------------------------------------------------------ + # 2) Namespace explicit CTIs: behavior depends on cti_files_source marker + # - source=="auto": treat as cache, stale => fallback to discovery + # - otherwise: strict user override + # ------------------------------------------------------------------------ + if ns_cti_files or ns_cti_file: + is_auto_cache = source == self._CTI_FILES_SOURCE_AUTO + + # Default to "user" if the marker is missing/unknown. + self._cti_files_source_used = self._CTI_FILES_SOURCE_AUTO if is_auto_cache else self._CTI_FILES_SOURCE_USER + candidates, diag = cti_finder.discover_cti_files( - cti_file=str(legacy_cti_file) if legacy_cti_file else None, - cti_files=cti_finder.cti_files_as_list(legacy_cti_files) if legacy_cti_files else None, + cti_file=str(ns_cti_file) if ns_cti_file else None, + cti_files=cti_finder.cti_files_as_list(ns_cti_files) if ns_cti_files else None, include_env=False, must_exist=True, ) - if not candidates: + + if candidates: + return list(candidates) + + # If auto cache is stale, fall back to discovery + if is_auto_cache: + LOG.info( + "Auto-persisted GenTL CTIs appear stale/missing; falling back to discovery. 
" + "Persisted cti_file=%s cti_files=%s", + ns_cti_file, + ns_cti_files, + ) + # Fall through to discovery (below) + else: + # User override: strict failure raise RuntimeError( - "No valid GenTL producer (.cti) found from properties.cti_file/cti_files.\n\n" + "No valid GenTL producer (.cti) found from properties.gentl.cti_file/cti_files.\n\n" f"Discovery details:\n{diag.summarize()}" ) - return list(candidates) - # 3) Discovery path: find all CTIs from env vars + configured patterns/dirs + # ------------------------------------------------------------ + # 3) Discovery path: env vars + patterns/dirs (source = "auto") + # ------------------------------------------------------------ + self._cti_files_source_used = self._CTI_FILES_SOURCE_AUTO + search_paths = ns.get("cti_search_paths", props.get("cti_search_paths")) extra_dirs = ns.get("cti_dirs", props.get("cti_dirs")) @@ -261,7 +309,6 @@ def _resolve_cti_files_for_settings(self) -> list[str]: f"Discovery details:\n{diag.summarize()}" ) - # Default: try to load ALL discovered producers return list(candidates) @classmethod @@ -346,6 +393,9 @@ def open(self) -> None: # Resolve CTIs (may return many). This no longer raises just because there are multiple. cti_files = self._resolve_cti_files_for_settings() + ns["cti_files_source"] = ( + self._cti_files_source_used or ns.get("cti_files_source") or self._CTI_FILES_SOURCE_AUTO + ) self._harvester = Harvester() @@ -377,7 +427,8 @@ def open(self) -> None: "No GenTL producer (.cti) could be loaded.\n\n" f"Resolved CTIs: {cti_files}\n" f"Failures: {failed}\n" - "Fix: remove/repair incompatible producers or set properties.gentl.cti_file to a known working producer." + "Fix: remove/repair incompatible producers or " + "set properties.gentl.cti_file to a known working producer." ) # Update device list after loading producers @@ -755,7 +806,9 @@ def rebind_settings(cls, settings): correct current index (and serial_number if available). 
Strategy: - - If ctis were previously persisted (cti_files/cti_file), prefer those. + - If CTIs were persisted: + * if source == "auto" and they are stale -> fall back to discovery + * otherwise use them (best stability) - Otherwise, fall back to env-var + pattern discovery (best-effort). """ if Harvester is None: @@ -770,38 +823,52 @@ def rebind_settings(cls, settings): if not target_id: return settings + source = ns.get("cti_files_source") + source = str(source).strip().lower() if source is not None else None + is_auto_cache = source == cls._CTI_FILES_SOURCE_AUTO + harvester = None try: - # Prefer persisted CTIs for stability if present explicit_files = ns.get("cti_files") or props.get("cti_files") explicit_file = ns.get("cti_file") or props.get("cti_file") if explicit_files or explicit_file: - candidates, diag = cti_finder.discover_cti_files( + candidates, _diag = cti_finder.discover_cti_files( cti_file=explicit_file, cti_files=cti_finder.cti_files_as_list(explicit_files), include_env=False, must_exist=True, ) - if not candidates: - return settings - harvester = Harvester() - loaded: list[str] = [] - for cti in candidates: - try: - harvester.add_file(cti) - loaded.append(cti) - except Exception: - continue - - if not loaded: - cls._reset_select_harvester(harvester) + if not candidates and is_auto_cache: + # Auto cache stale -> fallback to discovery + harvester, _loaded, _diag2 = cls._build_harvester_for_discovery(strict_single=False) + if harvester is None: + return settings + elif not candidates: + # User override stale or unknown -> no rebind return settings - - harvester.update() + else: + harvester = Harvester() + loaded: list[str] = [] + for cti in candidates: + try: + harvester.add_file(cti) + loaded.append(cti) + except Exception: + continue + if not loaded: + cls._reset_select_harvester(harvester) + if is_auto_cache: + harvester, _loaded, _diag2 = cls._build_harvester_for_discovery(strict_single=False) + if harvester is None: + return settings + else: + 
return settings + else: + harvester.update() else: - harvester, loaded, diag = cls._build_harvester_for_discovery(strict_single=False) + harvester, _loaded, _diag = cls._build_harvester_for_discovery(strict_single=False) if harvester is None: return settings @@ -810,7 +877,6 @@ def rebind_settings(cls, settings): return settings target_id_str = str(target_id).strip() - match_index = None match_serial = None From 2510076bf70ce837d236ae302b7f252b4ee7e5c8 Mon Sep 17 00:00:00 2001 From: Cyril Achard Date: Wed, 25 Feb 2026 14:59:35 +0100 Subject: [PATCH 58/60] Improve CTI discovery safety and diagnostics Add preflight checks, pattern validation and safer globbing for GenTL (.cti) discovery and loading. Renamed default CTI pattern constant to _LEGACY_DEFAULT_CTI_PATTERNS (Windows-only comment) and imported Path. Introduced _cti_preflight to detect missing/locked/permissioned files before Harvester.add_file and applied it where CTIs are loaded (skipping and logging problematic entries). Harden gentl_discovery with: redact-able diagnostics.summarize, _validate_glob_pattern, bounded _glob_limited, static-prefix checks, allowed-roots option and new discover_cti_files params (allow_globs, root_globs_allowed, max_glob_hits_per_pattern) to limit expensive scans. Also adjusted Harvester.update failure handling to treat update errors as discovery failures (return empty loaded list) and improved logging messages for discovery/load failures. 
--- dlclivegui/cameras/backends/gentl_backend.py | 60 ++++++- .../cameras/backends/utils/gentl_discovery.py | 167 +++++++++++++++++- 2 files changed, 212 insertions(+), 15 deletions(-) diff --git a/dlclivegui/cameras/backends/gentl_backend.py b/dlclivegui/cameras/backends/gentl_backend.py index 123a9eb..4e5c456 100644 --- a/dlclivegui/cameras/backends/gentl_backend.py +++ b/dlclivegui/cameras/backends/gentl_backend.py @@ -5,6 +5,7 @@ import logging import time +from pathlib import Path from typing import ClassVar import cv2 @@ -33,7 +34,7 @@ class GenTLCameraBackend(CameraBackend): """Capture frames from GenTL-compatible devices via Harvesters.""" OPTIONS_KEY: ClassVar[str] = "gentl" - _DEFAULT_CTI_PATTERNS: tuple[str, ...] = ( + _LEGACY_DEFAULT_CTI_PATTERNS: tuple[str, ...] = ( # Windows-only, ignored on other platforms r"C:\\Program Files\\The Imaging Source Europe GmbH\\IC4 GenTL Driver for USB3Vision Devices *\\bin\\*.cti", r"C:\\Program Files\\The Imaging Source Europe GmbH\\TIS Grabber\\bin\\win64_x64\\*.cti", r"C:\\Program Files\\The Imaging Source Europe GmbH\\TIS Camera SDK\\bin\\win64_x64\\*.cti", @@ -194,11 +195,33 @@ def get_device_count(cls) -> int: except Exception: pass + @staticmethod + def _cti_preflight(path: str) -> tuple[bool, str | None]: + """ + Best-effort check right before calling Harvester.add_file(). + Still subject to race conditions (e.g. file deleted after this check), + but helps diagnose common issues like missing files or permission errors more gracefully and early. + Returns (ok, reason_if_not_ok). 
+ """ + p = Path(str(path)) + try: + if not p.exists(): + return False, "missing at load time" + if not p.is_file(): + return False, "not a file at load time" + # Optional: try opening for read to detect permission/locking issues early + with p.open("rb"): + pass + return True, None + except PermissionError: + return False, "permission denied at load time" + except OSError as e: + return False, f"os error at load time: {e}" + def _resolve_cti_files_for_settings(self) -> list[str]: """ Resolve CTI files to load. - Option B policy (source marker + fallback): - User override (properties.gentl.cti_file/cti_files OR legacy properties.cti_file/cti_files): * strict: must exist, otherwise raise * source = "user" @@ -208,6 +231,8 @@ def _resolve_cti_files_for_settings(self) -> list[str]: * source = "auto" - Default: discovery (env + configured patterns/dirs) => source = "auto" + NOTE : legacy properties.cti_file(s) always take strict precedence as user override if present, + even if source marker says "auto". Never raise just because multiple CTIs exist. Raise only when none are found (after allowed fallback). 
""" @@ -328,7 +353,7 @@ def _build_harvester_for_discovery( candidates, diag = cti_finder.discover_cti_files( include_env=True, - cti_search_paths=list(cls._DEFAULT_CTI_PATTERNS), + cti_search_paths=list(cls._LEGACY_DEFAULT_CTI_PATTERNS), must_exist=True, ) @@ -350,12 +375,18 @@ def _build_harvester_for_discovery( failures: list[tuple[str, str]] = [] for cti in cti_files: + ok, reason = cls._cti_preflight(cti) + if not ok: + failures.append((str(cti), reason or "Check failed")) + LOG.warning("Skipping CTI '%s' during discovery preflight: %s", cti, reason) + continue + try: harvester.add_file(cti) loaded.append(cti) except Exception as exc: - failures.append((cti, str(exc))) - LOG.warning("Failed to load CTI '%s': %s", cti, exc) + failures.append((str(cti), str(exc))) + LOG.warning("Failed to load CTI '%s' during discovery: %s", cti, exc) if not loaded: try: @@ -367,12 +398,19 @@ def _build_harvester_for_discovery( try: harvester.update() except Exception as exc: - LOG.warning("Harvester.update() failed during discovery: %s", exc) + LOG.error( + "Harvester.update() failed during discovery: %s " + "Device list not usable, treating as discovery failure." 
+ "CTIs loaded before failure : %s", + exc, + loaded, + ) try: harvester.reset() except Exception: pass - return None, loaded, diag + # Update failure + return None, [], diag return harvester, loaded, diag @@ -403,11 +441,17 @@ def open(self) -> None: failed: list[tuple[str, str]] = [] for cti in cti_files: + ok, reason = self._cti_preflight(cti) + if not ok: + failed.append((str(cti), reason or "preflight failed")) + LOG.warning("Skipping CTI '%s': %s", cti, reason) + continue + try: self._harvester.add_file(cti) loaded.append(cti) except Exception as exc: - failed.append((cti, str(exc))) + failed.append((str(cti), str(exc))) LOG.warning("Failed to load CTI '%s': %s", cti, exc) # Persist diagnostics for UI / debugging diff --git a/dlclivegui/cameras/backends/utils/gentl_discovery.py b/dlclivegui/cameras/backends/utils/gentl_discovery.py index a8c2006..48a6daa 100644 --- a/dlclivegui/cameras/backends/utils/gentl_discovery.py +++ b/dlclivegui/cameras/backends/utils/gentl_discovery.py @@ -29,15 +29,18 @@ class CTIDiscoveryDiagnostics: candidates: list[str] = field(default_factory=list) rejected: list[tuple[str, str]] = field(default_factory=list) # (path, reason) - def summarize(self) -> str: + def summarize(self, redact_env: bool = True) -> str: lines = [] if self.explicit_files: lines.append(f"Explicit CTI file(s): {self.explicit_files}") if self.glob_patterns: lines.append(f"CTI glob pattern(s): {self.glob_patterns}") if self.env_vars_used: - # Keep raw env var values in summary; you can redact if needed - lines.append(f"Env vars: {self.env_vars_used}") + if redact_env: + redacted_env = {k: ("" if v else "") for k, v in self.env_vars_used.items()} + lines.append(f"Env vars used: {redacted_env}") + else: + lines.append(f"Env vars used: {self.env_vars_used}") if self.env_paths_expanded: lines.append(f"Env-derived path entries: {self.env_paths_expanded}") if self.extra_dirs: @@ -66,7 +69,7 @@ def _normalize_path(p: str) -> str: """ pp = 
Path(os.path.expandvars(os.path.expanduser(p))) try: - # resolve(False) avoids raising if parts don't exist (py>=3.9) + # resolve(strict=False) will resolve as much as possible without raising if the path doesn't exist return str(pp.resolve(strict=False)) except Exception: return str(pp.absolute()) @@ -98,6 +101,136 @@ def _split_env_paths(raw: str) -> list[str]: return out +_GLOB_META_CHARS = set("*?[") + + +def _pattern_has_glob(s: str) -> bool: + return any(ch in s for ch in _GLOB_META_CHARS) + + +def _pattern_static_prefix(pattern: str) -> str: + """ + Return the substring up to the first glob metacharacter (* ? [). + This is used as a "base path" to constrain globbing. + """ + for i, ch in enumerate(pattern): + if ch in _GLOB_META_CHARS: + return pattern[:i] + return pattern + + +def _is_path_within(child: Path, parent: Path) -> bool: + """ + Cross-version safe "is_relative_to" implementation. + """ + try: + child.relative_to(parent) + return True + except Exception: + return False + + +def _validate_glob_pattern( + pattern: str, + *, + allowed_roots: Sequence[str] | None = None, + require_cti_suffix: bool = True, +) -> tuple[bool, str | None]: + """ + Validate user-supplied glob patterns to reduce filesystem probing risk. + + Rules (conservative but practical): + - Must expand (~ and env vars) into an absolute-ish location (prefix must exist as a path parent) + - Must not include '..' path traversal segments + - Must have a non-trivial static prefix (not empty / not root-only like '/' or 'C:\\') + - Optionally restrict to allowed roots (directories) + - Optionally require that the pattern looks like it targets .cti files + """ + if not pattern or not str(pattern).strip(): + return False, "empty glob pattern" + + expanded = os.path.expandvars(os.path.expanduser(pattern)).strip() + + # Basic traversal guard + parts = Path(expanded).parts + if any(p == ".." for p in parts): + return False, "glob pattern contains '..' 
traversal" + + if require_cti_suffix: + # Encourage patterns that clearly target CTIs, e.g. '*.cti' or 'foo*.cti' + lower = expanded.lower() + if ".cti" not in lower: + return False, "glob pattern does not target .cti files" + + # Compute static prefix up to first glob meta-char + prefix = _pattern_static_prefix(expanded).strip() + if not prefix: + return False, "glob pattern has no static base path" + + prefix_path = Path(prefix) + + # If prefix is a file-like thing, use its parent as base; otherwise use itself. + # Example: "C:\\dir\\*.cti" -> base = "C:\\dir" + base = prefix_path.parent if prefix_path.suffix else prefix_path + + # Prevent overly broad patterns like "/" or "C:\\" + try: + resolved_base = base.resolve(strict=False) + except Exception: + resolved_base = base + + # If base is a drive root or filesystem root, reject + # - POSIX: "/" -> parent == itself + # - Windows: "C:\\" -> parent often == itself + try: + if resolved_base == resolved_base.parent: + return False, "glob pattern base is filesystem root (too broad)" + except Exception: + # If we can't determine, err on conservative side + return False, "glob pattern base could not be validated" + + # Optional allowlist enforcement + if allowed_roots: + ok = False + for root in allowed_roots: + try: + r = Path(_normalize_path(root)) + except Exception: + r = Path(root) + try: + r_resolved = r.resolve(strict=False) + except Exception: + r_resolved = r + + try: + b_resolved = resolved_base.resolve(strict=False) + except Exception: + b_resolved = resolved_base + + if _is_path_within(b_resolved, r_resolved): + ok = True + break + if not ok: + return False, "glob pattern base is outside allowed roots" + + return True, None + + +def _glob_limited(pattern: str, *, max_hits: int = 200) -> list[str]: + """ + Iterate matches with an upper bound to prevent expensive scans. + Uses iglob to avoid materializing huge lists. 
+    """
+    out: list[str] = []
+    # Note: recursive globbing via "**" typically requires recursive=True.
+    # We intentionally keep recursive off here to reduce scanning.
+    for hit in glob.iglob(pattern, recursive=False):
+        out.append(hit)
+        if len(out) >= max_hits:
+            break
+    return out
+
+
 def discover_cti_files(
     *,
     cti_file: str | None = None,
@@ -109,6 +242,9 @@ def discover_cti_files(
     recursive_env_search: bool = False,
     recursive_extra_search: bool = False,
     must_exist: bool = True,
+    allow_globs: bool = True,
+    root_globs_allowed: Sequence[str] | None = None,
+    max_glob_hits_per_pattern: int = 200,
 ) -> tuple[list[str], CTIDiscoveryDiagnostics]:
     """
     Discover candidate GenTL producer (.cti) files from multiple sources.
@@ -117,7 +253,11 @@
         (candidates, diagnostics)
 
     Notes:
-      - If must_exist=True (recommended), only existing files are returned.
+      - If must_exist=True (recommended), only existing files are returned at discovery time.
+      - Best-effort checks; files may still be missing at load time (e.g. deleted after discovery).
+      - Callers should handle load-time errors gracefully regardless.
+      - Glob patterns can enumerate filesystem entries and are user-controlled.
+        Use allow_globs=False to disable globbing and treat patterns as literal paths.
       - Env vars are parsed as path lists; each entry may be a directory OR a .cti file. 
""" diag = CTIDiscoveryDiagnostics() @@ -165,9 +305,22 @@ def _add_candidate(path: str, reason_ctx: str) -> None: # Process glob patterns for pat in patterns: - # Normalize only for readability; glob needs pattern semantics, so we expanduser/vars but keep globbing expanded_pat = os.path.expandvars(os.path.expanduser(pat)) - for hit in glob.glob(expanded_pat): + + if not allow_globs: + rejected.append((_normalize_path(expanded_pat), "glob patterns disabled")) + continue + + ok, reason = _validate_glob_pattern( + expanded_pat, + allowed_roots=root_globs_allowed, + require_cti_suffix=True, + ) + if not ok: + rejected.append((_normalize_path(expanded_pat), f"glob pattern rejected: {reason}")) + continue + + for hit in _glob_limited(expanded_pat, max_hits=max_glob_hits_per_pattern): _add_candidate(hit, f"glob:{pat}") # Process env var entries From f8c0999d18116e4bd7943778c8c3921225e01761 Mon Sep 17 00:00:00 2001 From: Cyril Achard Date: Wed, 25 Feb 2026 15:05:30 +0100 Subject: [PATCH 59/60] Centralize env/~ expansion and add dedupe key Introduce a single _expand_user_and_env() helper to consistently expand environment variables and '~', and use it across path normalization and glob validation. Update _normalize_path() to rely on the helper and Path handling, switch file existence checks to use a Path object, and add _dedup_key() to normalize deduplication on case-insensitive filesystems (Windows). Replace scattered os.path.expandvars/os.path.expanduser calls with the new helper for more predictable cross-platform behavior. 
--- .../cameras/backends/utils/gentl_discovery.py | 38 ++++++++++++++++--- 1 file changed, 32 insertions(+), 6 deletions(-) diff --git a/dlclivegui/cameras/backends/utils/gentl_discovery.py b/dlclivegui/cameras/backends/utils/gentl_discovery.py index 48a6daa..ebce734 100644 --- a/dlclivegui/cameras/backends/utils/gentl_discovery.py +++ b/dlclivegui/cameras/backends/utils/gentl_discovery.py @@ -61,15 +61,35 @@ def cti_files_as_list(value) -> list[str]: return [s] if s else [] +def _expand_user_and_env(value: str) -> str: + """ + Expand environment variables and '~' in a string path/pattern. + pathlib does not expand env vars, so we use os.path.expandvars for that part. + """ + if value is None: + return "" + s = str(value).strip() + if not s: + return "" + # Expand env vars first (e.g., %VAR% / $VAR), then user home (~) + s = os.path.expandvars(s) + try: + s = str(Path(s).expanduser()) + except Exception: + # If expanduser fails for some reason, keep the env-expanded string + pass + return s + + def _normalize_path(p: str) -> str: """ Normalize a filesystem path in a cross-platform way: - expands ~ and environment variables - resolves to absolute where possible (without requiring existence) """ - pp = Path(os.path.expandvars(os.path.expanduser(p))) + expanded = _expand_user_and_env(p) + pp = Path(expanded) try: - # resolve(strict=False) will resolve as much as possible without raising if the path doesn't exist return str(pp.resolve(strict=False)) except Exception: return str(pp.absolute()) @@ -101,6 +121,11 @@ def _split_env_paths(raw: str) -> list[str]: return out +def _dedup_key(path_str: str) -> str: + # Windows filesystem is case-insensitive by default -> normalize key case + return path_str.casefold() if os.name == "nt" else path_str + + _GLOB_META_CHARS = set("*?[") @@ -149,7 +174,7 @@ def _validate_glob_pattern( if not pattern or not str(pattern).strip(): return False, "empty glob pattern" - expanded = os.path.expandvars(os.path.expanduser(pattern)).strip() + 
expanded = _expand_user_and_env(pattern).strip() # Basic traversal guard parts = Path(expanded).parts @@ -291,7 +316,8 @@ def discover_cti_files( def _add_candidate(path: str, reason_ctx: str) -> None: norm = _normalize_path(path) - if must_exist and not Path(norm).is_file(): + p = Path(norm) + if must_exist and not p.is_file(): rejected.append((norm, f"not a file ({reason_ctx})")) return if not norm.lower().endswith(".cti"): @@ -305,7 +331,7 @@ def _add_candidate(path: str, reason_ctx: str) -> None: # Process glob patterns for pat in patterns: - expanded_pat = os.path.expandvars(os.path.expanduser(pat)) + expanded_pat = _expand_user_and_env(pat) if not allow_globs: rejected.append((_normalize_path(expanded_pat), "glob patterns disabled")) @@ -350,7 +376,7 @@ def _add_candidate(path: str, reason_ctx: str) -> None: seen = set() unique: list[str] = [] for c in candidates: - key = c.lower() if os.name == "nt" else c # Windows case-insensitive + key = _dedup_key(c) if key in seen: continue seen.add(key) From 0d0ed396366c81b642296d0560784b0fe613ed79 Mon Sep 17 00:00:00 2001 From: Cyril Achard Date: Wed, 25 Feb 2026 15:28:02 +0100 Subject: [PATCH 60/60] Fix genTL log message spacing and tidy test Adjust Harvester.update() error log string concatenation in gentl_backend.py to ensure proper spaces between message parts. Also remove an extraneous comment line from the gentl backend test to clean up test output. 
--- dlclivegui/cameras/backends/gentl_backend.py | 6 +++--- tests/cameras/backends/test_gentl_backend.py | 1 - 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/dlclivegui/cameras/backends/gentl_backend.py b/dlclivegui/cameras/backends/gentl_backend.py index 4e5c456..eb28aea 100644 --- a/dlclivegui/cameras/backends/gentl_backend.py +++ b/dlclivegui/cameras/backends/gentl_backend.py @@ -399,9 +399,9 @@ def _build_harvester_for_discovery( harvester.update() except Exception as exc: LOG.error( - "Harvester.update() failed during discovery: %s " - "Device list not usable, treating as discovery failure." - "CTIs loaded before failure : %s", + "Harvester.update() failed during discovery: %s" + " Device list not usable, treating as discovery failure." + " CTIs loaded before failure : %s", exc, loaded, ) diff --git a/tests/cameras/backends/test_gentl_backend.py b/tests/cameras/backends/test_gentl_backend.py index 8d4094d..ecef1b7 100644 --- a/tests/cameras/backends/test_gentl_backend.py +++ b/tests/cameras/backends/test_gentl_backend.py @@ -708,7 +708,6 @@ def test_open_persists_cti_load_diagnostics_all_success(patch_gentl_sdk, gentl_s ns = settings.properties["gentl"] assert ns["cti_files"] == [str(c1), str(c2)] assert ns["cti_files_loaded"] == [str(c1), str(c2)] - # If dict: assert ns["cti_files_failed"] == [] be.close()