From 10172e69eb6489e9a41e7849ecdf71725678fa65 Mon Sep 17 00:00:00 2001 From: SkalskiP Date: Fri, 30 Jan 2026 14:53:23 +0100 Subject: [PATCH 1/3] Migrate to modern pyproject.toml with uv support - Replace setup.cfg/setup.py with PEP 621 compliant pyproject.toml - Organize dependencies into optional groups (plot, segmentation, jf, burst, dev, all) - Update installation instructions in README - Rename Readme.md to README.md - Remove legacy requirements.txt and minimum_requirements.txt --- Readme.md => README.md | 45 ++++++++++++++++++++++--------- minimum_requirements.txt | 2 -- pyproject.toml | 57 +++++++++++++++++++++++++++++++++++++--- requirements.txt | 10 ------- setup.cfg | 27 ------------------- setup.py | 3 --- 6 files changed, 85 insertions(+), 59 deletions(-) rename Readme.md => README.md (94%) delete mode 100644 minimum_requirements.txt delete mode 100644 requirements.txt delete mode 100644 setup.cfg delete mode 100644 setup.py diff --git a/Readme.md b/README.md similarity index 94% rename from Readme.md rename to README.md index 9273a6f7..c5d6c40f 100644 --- a/Readme.md +++ b/README.md @@ -118,19 +118,38 @@ To convert formats, check out the format specifications defined [here](docs). By default, we would recommend the MOTChallenge format, although any implemented format should work. Note that for many cases you will want to use the argument ```--DO_PREPROC False``` unless you want to run preprocessing to remove distractor objects. -## Requirements - Code tested on Python 3.7. - - - Minimum requirements: numpy, scipy - - For plotting: matplotlib - - For segmentation datasets (KITTI MOTS, MOTS-Challenge, DAVIS, YouTube-VIS): pycocotools - - For DAVIS dataset: Pillow - - For J & F metric: opencv_python, scikit_image - - For simples test-cases for metrics: pytest - -use ```pip3 -r install requirements.txt``` to install all possible requirements. 
- -use ```pip3 -r install minimum_requirments.txt``` to only install the minimum if you don't need the extra functionality as listed above. +## Installation + +Code tested on Python 3.7. + +### Using uv (recommended) + +```bash +# Install with minimum dependencies (numpy, scipy) +uv pip install trackeval + +# Install with all dependencies +uv pip install "trackeval[all]" + +# Install specific extras +uv pip install "trackeval[plot,segmentation]" +``` + +### Using pip + +```bash +pip install trackeval +pip install "trackeval[all]" +``` + +### Available extras + +- `plot` - matplotlib for plotting results +- `segmentation` - pycocotools, Pillow for segmentation datasets (KITTI MOTS, MOTS-Challenge, DAVIS, YouTube-VIS) +- `jf` - opencv-python, scikit-image for J & F metric +- `burst` - tabulate for BURST benchmark +- `dev` - pytest, tqdm for development and testing +- `all` - all optional dependencies ## Timing analysis diff --git a/minimum_requirements.txt b/minimum_requirements.txt deleted file mode 100644 index b63c322a..00000000 --- a/minimum_requirements.txt +++ /dev/null @@ -1,2 +0,0 @@ -scipy==1.4.1 -numpy==1.18.1 diff --git a/pyproject.toml b/pyproject.toml index 374b58cb..12bf4804 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,55 @@ [build-system] -requires = [ - "setuptools>=42", - "wheel" -] +requires = ["setuptools>=42", "wheel"] build-backend = "setuptools.build_meta" + +[project] +name = "trackeval" +version = "1.0.dev1" +description = "Code for evaluating object tracking" +readme = "README.md" +license = {text = "MIT"} +authors = [ + {name = "Jonathon Luiten, Arne Hoffhues", email = "jonoluiten@gmail.com"} +] +classifiers = [ + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3 :: Only", + "License :: OSI Approved :: MIT License", + "Operating System :: OS Independent", + "Topic :: Scientific/Engineering", +] +requires-python = ">=3.7" +dependencies = [ + "numpy==1.18.1", + "scipy==1.4.1", +] + 
+[project.optional-dependencies] +plot = [ + "matplotlib==3.2.1", +] +segmentation = [ + "pycocotools==2.0.2", + "Pillow==8.1.2", +] +jf = [ + "opencv-python==4.4.0.46", + "scikit-image==0.16.2", +] +burst = [ + "tabulate", +] +dev = [ + "pytest==6.0.1", + "tqdm==4.64.0", +] +all = [ + "trackeval[plot,segmentation,jf,burst,dev]", +] + +[project.urls] +Homepage = "https://github.com/JonathonLuiten/TrackEval" +"Bug Tracker" = "https://github.com/JonathonLuiten/TrackEval/issues" + +[tool.setuptools.packages.find] +include = ["trackeval*"] diff --git a/requirements.txt b/requirements.txt deleted file mode 100644 index 40d2a57c..00000000 --- a/requirements.txt +++ /dev/null @@ -1,10 +0,0 @@ -numpy==1.18.1 -scipy==1.4.1 -pycocotools==2.0.2 -matplotlib==3.2.1 -opencv_python==4.4.0.46 -scikit_image==0.16.2 -pytest==6.0.1 -Pillow==8.1.2 -tqdm==4.64.0 -tabulate diff --git a/setup.cfg b/setup.cfg deleted file mode 100644 index 914f3de2..00000000 --- a/setup.cfg +++ /dev/null @@ -1,27 +0,0 @@ -[metadata] -name = trackeval -version = 1.0.dev1 -author = Jonathon Luiten, Arne Hoffhues -author_email = jonoluiten@gmail.com -description = Code for evaluating object tracking -long_description = file: Readme.md -long_description_content_type = text/markdown -url = https://github.com/JonathonLuiten/TrackEval -project_urls = - Bug Tracker = https://github.com/JonathonLuiten/TrackEval/issues -classifiers = - Programming Language :: Python :: 3 - Programming Language :: Python :: 3 :: Only - License :: OSI Approved :: MIT License - Operating System :: OS Independent - Topic :: Scientific/Engineering -license_files = LICENSE - -[options] -install_requires = - numpy - scipy -packages = find: - -[options.packages.find] -include = trackeval* diff --git a/setup.py b/setup.py deleted file mode 100644 index 60684932..00000000 --- a/setup.py +++ /dev/null @@ -1,3 +0,0 @@ -from setuptools import setup - -setup() From 70a2b290cae9da5a8b6f5c724928e814ba31d307 Mon Sep 17 00:00:00 2001 From: SkalskiP 
Date: Fri, 30 Jan 2026 15:15:57 +0100 Subject: [PATCH 2/3] Upgrade to numpy 2.0 and Python 3.10+ - Replace deprecated numpy type aliases (np.float -> np.float64, np.int -> np.intp, np.bool -> np.bool_) - Update dependencies: numpy>=2.0.0, scipy>=1.14.0 - Update dev dependencies: pytest>=7.0.0, tqdm>=4.64.0 - Require Python 3.10+ (numpy 2.0 requirement) - Update .gitignore to exclude .venv and .egg-info --- .gitignore | 2 ++ pyproject.toml | 10 +++++----- trackeval/baselines/stp.py | 4 ++-- trackeval/datasets/bdd100k.py | 8 ++++---- trackeval/datasets/burst_helpers/burst_base.py | 10 +++++----- trackeval/datasets/burst_helpers/burst_ow_base.py | 10 +++++----- trackeval/datasets/davis.py | 6 +++--- trackeval/datasets/head_tracking_challenge.py | 8 ++++---- trackeval/datasets/kitti_2d_box.py | 14 +++++++------- trackeval/datasets/kitti_mots.py | 8 ++++---- trackeval/datasets/mot_challenge_2d_box.py | 8 ++++---- trackeval/datasets/mots_challenge.py | 8 ++++---- trackeval/datasets/person_path_22.py | 10 +++++----- trackeval/datasets/rob_mots.py | 10 +++++----- trackeval/datasets/tao.py | 10 +++++----- trackeval/datasets/tao_ow.py | 10 +++++----- trackeval/datasets/youtube_vis.py | 8 ++++---- trackeval/metrics/hota.py | 10 +++++----- trackeval/metrics/identity.py | 6 +++--- trackeval/metrics/j_and_f.py | 2 +- trackeval/metrics/track_map.py | 12 ++++++------ 21 files changed, 88 insertions(+), 86 deletions(-) diff --git a/.gitignore b/.gitignore index 9e6d0fe8..fff4a2b8 100644 --- a/.gitignore +++ b/.gitignore @@ -1,5 +1,7 @@ .idea .vscode +.venv/ +*.egg-info/ **.pyc **__pycache__ gt_data/* diff --git a/pyproject.toml b/pyproject.toml index 12bf4804..b7ec0400 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -18,10 +18,10 @@ classifiers = [ "Operating System :: OS Independent", "Topic :: Scientific/Engineering", ] -requires-python = ">=3.7" +requires-python = ">=3.10" dependencies = [ - "numpy==1.18.1", - "scipy==1.4.1", + "numpy>=2.0.0", + "scipy>=1.14.0", ] 
[project.optional-dependencies] @@ -40,8 +40,8 @@ burst = [ "tabulate", ] dev = [ - "pytest==6.0.1", - "tqdm==4.64.0", + "pytest>=7.0.0", + "tqdm>=4.64.0", ] all = [ "trackeval[plot,segmentation,jf,burst,dev]", diff --git a/trackeval/baselines/stp.py b/trackeval/baselines/stp.py index c1c9d1e0..86ca1d72 100644 --- a/trackeval/baselines/stp.py +++ b/trackeval/baselines/stp.py @@ -49,7 +49,7 @@ def track_sequence(seq_file): # Initialize container for holding previously tracked objects. prev = {'boxes': np.empty((0, 4)), - 'ids': np.array([], np.int), + 'ids': np.array([], np.intp), 'timesteps': np.array([])} # Run tracker for each timestep. @@ -80,7 +80,7 @@ def track_sequence(seq_file): match_cols = match_cols[actually_matched_mask] # Assign the prev track ID to the current dets if they were matched. - ids = np.nan * np.ones((len(boxes),), np.int) + ids = np.nan * np.ones((len(boxes),), np.intp) ids[match_cols] = prev['ids'][match_rows] # Create new track IDs for dets that were not matched to previous tracks. 
diff --git a/trackeval/datasets/bdd100k.py b/trackeval/datasets/bdd100k.py index cc4fd06e..bd71b84b 100644 --- a/trackeval/datasets/bdd100k.py +++ b/trackeval/datasets/bdd100k.py @@ -226,12 +226,12 @@ def get_preprocessed_seq_data(self, raw_data, cls): # Only extract relevant dets for this class for preproc and eval (cls) gt_class_mask = np.atleast_1d(raw_data['gt_classes'][t] == cls_id) - gt_class_mask = gt_class_mask.astype(np.bool) + gt_class_mask = gt_class_mask.astype(np.bool_) gt_ids = raw_data['gt_ids'][t][gt_class_mask] gt_dets = raw_data['gt_dets'][t][gt_class_mask] tracker_class_mask = np.atleast_1d(raw_data['tracker_classes'][t] == cls_id) - tracker_class_mask = tracker_class_mask.astype(np.bool) + tracker_class_mask = tracker_class_mask.astype(np.bool_) tracker_ids = raw_data['tracker_ids'][t][tracker_class_mask] tracker_dets = raw_data['tracker_dets'][t][tracker_class_mask] similarity_scores = raw_data['similarity_scores'][t][gt_class_mask, :][:, tracker_class_mask] @@ -276,14 +276,14 @@ def get_preprocessed_seq_data(self, raw_data, cls): gt_id_map[unique_gt_ids] = np.arange(len(unique_gt_ids)) for t in range(raw_data['num_timesteps']): if len(data['gt_ids'][t]) > 0: - data['gt_ids'][t] = gt_id_map[data['gt_ids'][t]].astype(np.int) + data['gt_ids'][t] = gt_id_map[data['gt_ids'][t]].astype(np.intp) if len(unique_tracker_ids) > 0: unique_tracker_ids = np.unique(unique_tracker_ids) tracker_id_map = np.nan * np.ones((np.max(unique_tracker_ids) + 1)) tracker_id_map[unique_tracker_ids] = np.arange(len(unique_tracker_ids)) for t in range(raw_data['num_timesteps']): if len(data['tracker_ids'][t]) > 0: - data['tracker_ids'][t] = tracker_id_map[data['tracker_ids'][t]].astype(np.int) + data['tracker_ids'][t] = tracker_id_map[data['tracker_ids'][t]].astype(np.intp) # Record overview statistics. 
data['num_tracker_dets'] = num_tracker_dets diff --git a/trackeval/datasets/burst_helpers/burst_base.py b/trackeval/datasets/burst_helpers/burst_base.py index 394eda4d..c015f4ec 100644 --- a/trackeval/datasets/burst_helpers/burst_base.py +++ b/trackeval/datasets/burst_helpers/burst_base.py @@ -322,12 +322,12 @@ def get_preprocessed_seq_data(self, raw_data, cls): # Only extract relevant dets for this class for preproc and eval (cls) gt_class_mask = np.atleast_1d(raw_data['gt_classes'][t] == cls_id) - gt_class_mask = gt_class_mask.astype(np.bool) + gt_class_mask = gt_class_mask.astype(np.bool_) gt_ids = raw_data['gt_ids'][t][gt_class_mask] gt_dets = raw_data['gt_dets'][t][gt_class_mask] tracker_class_mask = np.atleast_1d(raw_data['tracker_classes'][t] == cls_id) - tracker_class_mask = tracker_class_mask.astype(np.bool) + tracker_class_mask = tracker_class_mask.astype(np.bool_) tracker_ids = raw_data['tracker_ids'][t][tracker_class_mask] tracker_dets = raw_data['tracker_dets'][t][tracker_class_mask] tracker_confidences = raw_data['tracker_confidences'][t][tracker_class_mask] @@ -349,7 +349,7 @@ def get_preprocessed_seq_data(self, raw_data, cls): elif is_not_exhaustively_labeled: to_remove_tracker = unmatched_indices else: - to_remove_tracker = np.array([], dtype=np.int) + to_remove_tracker = np.array([], dtype=np.intp) # remove all unwanted unmatched tracker detections data['tracker_ids'][t] = np.delete(tracker_ids, to_remove_tracker, axis=0) @@ -377,14 +377,14 @@ def get_preprocessed_seq_data(self, raw_data, cls): gt_id_map[unique_gt_ids] = np.arange(len(unique_gt_ids)) for t in range(raw_data['num_timesteps']): if len(data['gt_ids'][t]) > 0: - data['gt_ids'][t] = gt_id_map[data['gt_ids'][t]].astype(np.int) + data['gt_ids'][t] = gt_id_map[data['gt_ids'][t]].astype(np.intp) if len(unique_tracker_ids) > 0: unique_tracker_ids = np.unique(unique_tracker_ids) tracker_id_map = np.nan * np.ones((np.max(unique_tracker_ids) + 1)) tracker_id_map[unique_tracker_ids] = 
np.arange(len(unique_tracker_ids)) for t in range(raw_data['num_timesteps']): if len(data['tracker_ids'][t]) > 0: - data['tracker_ids'][t] = tracker_id_map[data['tracker_ids'][t]].astype(np.int) + data['tracker_ids'][t] = tracker_id_map[data['tracker_ids'][t]].astype(np.intp) # Record overview statistics. data['num_tracker_dets'] = num_tracker_dets diff --git a/trackeval/datasets/burst_helpers/burst_ow_base.py b/trackeval/datasets/burst_helpers/burst_ow_base.py index bef14d2b..13e51b29 100644 --- a/trackeval/datasets/burst_helpers/burst_ow_base.py +++ b/trackeval/datasets/burst_helpers/burst_ow_base.py @@ -332,12 +332,12 @@ def get_preprocessed_seq_data(self, raw_data, cls): # Only extract relevant dets for this class for preproc and eval (cls) gt_class_mask = np.atleast_1d(raw_data['gt_classes'][t] == cls_id) - gt_class_mask = gt_class_mask.astype(np.bool) + gt_class_mask = gt_class_mask.astype(np.bool_) gt_ids = raw_data['gt_ids'][t][gt_class_mask] gt_dets = raw_data['gt_dets'][t][gt_class_mask] tracker_class_mask = np.atleast_1d(raw_data['tracker_classes'][t] == cls_id) - tracker_class_mask = tracker_class_mask.astype(np.bool) + tracker_class_mask = tracker_class_mask.astype(np.bool_) tracker_ids = raw_data['tracker_ids'][t][tracker_class_mask] tracker_dets = raw_data['tracker_dets'][t][tracker_class_mask] tracker_confidences = raw_data['tracker_confidences'][t][tracker_class_mask] @@ -358,7 +358,7 @@ def get_preprocessed_seq_data(self, raw_data, cls): elif is_not_exhaustively_labeled: to_remove_tracker = unmatched_indices else: - to_remove_tracker = np.array([], dtype=np.int) + to_remove_tracker = np.array([], dtype=np.intp) # remove all unwanted unmatched tracker detections data['tracker_ids'][t] = np.delete(tracker_ids, to_remove_tracker, axis=0) @@ -382,14 +382,14 @@ def get_preprocessed_seq_data(self, raw_data, cls): gt_id_map[unique_gt_ids] = np.arange(len(unique_gt_ids)) for t in range(raw_data['num_timesteps']): if len(data['gt_ids'][t]) > 0: - 
data['gt_ids'][t] = gt_id_map[data['gt_ids'][t]].astype(np.int) + data['gt_ids'][t] = gt_id_map[data['gt_ids'][t]].astype(np.intp) if len(unique_tracker_ids) > 0: unique_tracker_ids = np.unique(unique_tracker_ids) tracker_id_map = np.nan * np.ones((np.max(unique_tracker_ids) + 1)) tracker_id_map[unique_tracker_ids] = np.arange(len(unique_tracker_ids)) for t in range(raw_data['num_timesteps']): if len(data['tracker_ids'][t]) > 0: - data['tracker_ids'][t] = tracker_id_map[data['tracker_ids'][t]].astype(np.int) + data['tracker_ids'][t] = tracker_id_map[data['tracker_ids'][t]].astype(np.intp) # Record overview statistics. data['num_tracker_dets'] = num_tracker_dets diff --git a/trackeval/datasets/davis.py b/trackeval/datasets/davis.py index 9db25e93..13cc3682 100644 --- a/trackeval/datasets/davis.py +++ b/trackeval/datasets/davis.py @@ -240,7 +240,7 @@ def get_preprocessed_seq_data(self, raw_data, cls): rows, columns = np.where(void_mask_ious > 0) for r in rows: det = mask_utils.decode(raw_data['tracker_dets'][t][r]) - void = mask_utils.decode(void_mask).astype(np.bool) + void = mask_utils.decode(void_mask).astype(np.bool_) det[void] = 0 det = mask_utils.encode(np.array(det, order='F').astype(np.uint8)) raw_data['tracker_dets'][t][r] = det @@ -253,14 +253,14 @@ def get_preprocessed_seq_data(self, raw_data, cls): gt_id_map[unique_gt_ids] = np.arange(len(unique_gt_ids)) for t in range(raw_data['num_timesteps']): if len(data['gt_ids'][t]) > 0: - data['gt_ids'][t] = gt_id_map[data['gt_ids'][t]].astype(np.int) + data['gt_ids'][t] = gt_id_map[data['gt_ids'][t]].astype(np.intp) if len(unique_tracker_ids) > 0: unique_tracker_ids = np.unique(unique_tracker_ids) tracker_id_map = np.nan * np.ones((np.max(unique_tracker_ids) + 1)) tracker_id_map[unique_tracker_ids] = np.arange(len(unique_tracker_ids)) for t in range(raw_data['num_timesteps']): if len(data['tracker_ids'][t]) > 0: - data['tracker_ids'][t] = tracker_id_map[data['tracker_ids'][t]].astype(np.int) + 
data['tracker_ids'][t] = tracker_id_map[data['tracker_ids'][t]].astype(np.intp) # Record overview statistics. data['num_tracker_dets'] = num_tracker_dets diff --git a/trackeval/datasets/head_tracking_challenge.py b/trackeval/datasets/head_tracking_challenge.py index 469e9a3c..b33af6aa 100644 --- a/trackeval/datasets/head_tracking_challenge.py +++ b/trackeval/datasets/head_tracking_challenge.py @@ -227,7 +227,7 @@ def _load_raw_file(self, tracker, seq, is_gt): time_key = str(t+1) if time_key in read_data.keys(): try: - time_data = np.asarray(read_data[time_key], dtype=np.float) + time_data = np.asarray(read_data[time_key], dtype=np.float64) except ValueError: if is_gt: raise TrackEvalException( @@ -365,7 +365,7 @@ def get_preprocessed_seq_data(self, raw_data, cls): # Match tracker and gt dets (with hungarian algorithm) and remove tracker dets which match with gt dets # which are labeled as belonging to a distractor class. - to_remove_tracker = np.array([], np.int) + to_remove_tracker = np.array([], np.intp) if self.do_preproc and self.benchmark != 'MOT15' and gt_ids.shape[0] > 0 and tracker_ids.shape[0] > 0: # Check all classes are valid: @@ -432,14 +432,14 @@ def get_preprocessed_seq_data(self, raw_data, cls): gt_id_map[unique_gt_ids] = np.arange(len(unique_gt_ids)) for t in range(raw_data['num_timesteps']): if len(data['gt_ids'][t]) > 0: - data['gt_ids'][t] = gt_id_map[data['gt_ids'][t]].astype(np.int) + data['gt_ids'][t] = gt_id_map[data['gt_ids'][t]].astype(np.intp) if len(unique_tracker_ids) > 0: unique_tracker_ids = np.unique(unique_tracker_ids) tracker_id_map = np.nan * np.ones((np.max(unique_tracker_ids) + 1)) tracker_id_map[unique_tracker_ids] = np.arange(len(unique_tracker_ids)) for t in range(raw_data['num_timesteps']): if len(data['tracker_ids'][t]) > 0: - data['tracker_ids'][t] = tracker_id_map[data['tracker_ids'][t]].astype(np.int) + data['tracker_ids'][t] = tracker_id_map[data['tracker_ids'][t]].astype(np.intp) # Record overview statistics. 
data['num_tracker_dets'] = num_tracker_dets diff --git a/trackeval/datasets/kitti_2d_box.py b/trackeval/datasets/kitti_2d_box.py index c582c437..d06c9e99 100644 --- a/trackeval/datasets/kitti_2d_box.py +++ b/trackeval/datasets/kitti_2d_box.py @@ -190,7 +190,7 @@ def _load_raw_file(self, tracker, seq, is_gt): for t in range(num_timesteps): time_key = str(t) if time_key in read_data.keys(): - time_data = np.asarray(read_data[time_key], dtype=np.float) + time_data = np.asarray(read_data[time_key], dtype=np.float64) raw_data['dets'][t] = np.atleast_2d(time_data[:, 6:10]) raw_data['ids'][t] = np.atleast_1d(time_data[:, 1]).astype(int) raw_data['classes'][t] = np.atleast_1d(time_data[:, 2]).astype(int) @@ -215,7 +215,7 @@ def _load_raw_file(self, tracker, seq, is_gt): raw_data['tracker_confidences'][t] = np.empty(0) if is_gt: if time_key in ignore_data.keys(): - time_ignore = np.asarray(ignore_data[time_key], dtype=np.float) + time_ignore = np.asarray(ignore_data[time_key], dtype=np.float64) raw_data['gt_crowd_ignore_regions'][t] = np.atleast_2d(time_ignore[:, 6:10]) else: raw_data['gt_crowd_ignore_regions'][t] = np.empty((0, 4)) @@ -288,7 +288,7 @@ def get_preprocessed_seq_data(self, raw_data, cls): # Only extract relevant dets for this class for preproc and eval (cls + distractor classes) gt_class_mask = np.sum([raw_data['gt_classes'][t] == c for c in [cls_id] + distractor_classes], axis=0) - gt_class_mask = gt_class_mask.astype(np.bool) + gt_class_mask = gt_class_mask.astype(np.bool_) gt_ids = raw_data['gt_ids'][t][gt_class_mask] gt_dets = raw_data['gt_dets'][t][gt_class_mask] gt_classes = raw_data['gt_classes'][t][gt_class_mask] @@ -296,7 +296,7 @@ def get_preprocessed_seq_data(self, raw_data, cls): gt_truncation = raw_data['gt_extras'][t]['truncation'][gt_class_mask] tracker_class_mask = np.atleast_1d(raw_data['tracker_classes'][t] == cls_id) - tracker_class_mask = tracker_class_mask.astype(np.bool) + tracker_class_mask = tracker_class_mask.astype(np.bool_) 
tracker_ids = raw_data['tracker_ids'][t][tracker_class_mask] tracker_dets = raw_data['tracker_dets'][t][tracker_class_mask] tracker_confidences = raw_data['tracker_confidences'][t][tracker_class_mask] @@ -304,7 +304,7 @@ def get_preprocessed_seq_data(self, raw_data, cls): # Match tracker and gt dets (with hungarian algorithm) and remove tracker dets which match with gt dets # which are labeled as truncated, occluded, or belonging to a distractor class. - to_remove_matched = np.array([], np.int) + to_remove_matched = np.array([], np.intp) unmatched_indices = np.arange(tracker_ids.shape[0]) if gt_ids.shape[0] > 0 and tracker_ids.shape[0] > 0: matching_scores = similarity_scores.copy() @@ -362,14 +362,14 @@ def get_preprocessed_seq_data(self, raw_data, cls): gt_id_map[unique_gt_ids] = np.arange(len(unique_gt_ids)) for t in range(raw_data['num_timesteps']): if len(data['gt_ids'][t]) > 0: - data['gt_ids'][t] = gt_id_map[data['gt_ids'][t]].astype(np.int) + data['gt_ids'][t] = gt_id_map[data['gt_ids'][t]].astype(np.intp) if len(unique_tracker_ids) > 0: unique_tracker_ids = np.unique(unique_tracker_ids) tracker_id_map = np.nan * np.ones((np.max(unique_tracker_ids) + 1)) tracker_id_map[unique_tracker_ids] = np.arange(len(unique_tracker_ids)) for t in range(raw_data['num_timesteps']): if len(data['tracker_ids'][t]) > 0: - data['tracker_ids'][t] = tracker_id_map[data['tracker_ids'][t]].astype(np.int) + data['tracker_ids'][t] = tracker_id_map[data['tracker_ids'][t]].astype(np.intp) # Record overview statistics. 
data['num_tracker_dets'] = num_tracker_dets diff --git a/trackeval/datasets/kitti_mots.py b/trackeval/datasets/kitti_mots.py index 9e04d3c2..6f575494 100644 --- a/trackeval/datasets/kitti_mots.py +++ b/trackeval/datasets/kitti_mots.py @@ -311,12 +311,12 @@ def get_preprocessed_seq_data(self, raw_data, cls): # Only extract relevant dets for this class for preproc and eval (cls) gt_class_mask = np.atleast_1d(raw_data['gt_classes'][t] == cls_id) - gt_class_mask = gt_class_mask.astype(np.bool) + gt_class_mask = gt_class_mask.astype(np.bool_) gt_ids = raw_data['gt_ids'][t][gt_class_mask] gt_dets = [raw_data['gt_dets'][t][ind] for ind in range(len(gt_class_mask)) if gt_class_mask[ind]] tracker_class_mask = np.atleast_1d(raw_data['tracker_classes'][t] == cls_id) - tracker_class_mask = tracker_class_mask.astype(np.bool) + tracker_class_mask = tracker_class_mask.astype(np.bool_) tracker_ids = raw_data['tracker_ids'][t][tracker_class_mask] tracker_dets = [raw_data['tracker_dets'][t][ind] for ind in range(len(tracker_class_mask)) if tracker_class_mask[ind]] @@ -363,14 +363,14 @@ def get_preprocessed_seq_data(self, raw_data, cls): gt_id_map[unique_gt_ids] = np.arange(len(unique_gt_ids)) for t in range(raw_data['num_timesteps']): if len(data['gt_ids'][t]) > 0: - data['gt_ids'][t] = gt_id_map[data['gt_ids'][t]].astype(np.int) + data['gt_ids'][t] = gt_id_map[data['gt_ids'][t]].astype(np.intp) if len(unique_tracker_ids) > 0: unique_tracker_ids = np.unique(unique_tracker_ids) tracker_id_map = np.nan * np.ones((np.max(unique_tracker_ids) + 1)) tracker_id_map[unique_tracker_ids] = np.arange(len(unique_tracker_ids)) for t in range(raw_data['num_timesteps']): if len(data['tracker_ids'][t]) > 0: - data['tracker_ids'][t] = tracker_id_map[data['tracker_ids'][t]].astype(np.int) + data['tracker_ids'][t] = tracker_id_map[data['tracker_ids'][t]].astype(np.intp) # Record overview statistics. 
data['num_tracker_dets'] = num_tracker_dets diff --git a/trackeval/datasets/mot_challenge_2d_box.py b/trackeval/datasets/mot_challenge_2d_box.py index 68aac512..469df28b 100644 --- a/trackeval/datasets/mot_challenge_2d_box.py +++ b/trackeval/datasets/mot_challenge_2d_box.py @@ -225,7 +225,7 @@ def _load_raw_file(self, tracker, seq, is_gt): time_key = str(t+1) if time_key in read_data.keys(): try: - time_data = np.asarray(read_data[time_key], dtype=np.float) + time_data = np.asarray(read_data[time_key], dtype=np.float64) except ValueError: if is_gt: raise TrackEvalException( @@ -356,7 +356,7 @@ def get_preprocessed_seq_data(self, raw_data, cls): # Match tracker and gt dets (with hungarian algorithm) and remove tracker dets which match with gt dets # which are labeled as belonging to a distractor class. - to_remove_tracker = np.array([], np.int) + to_remove_tracker = np.array([], np.intp) if self.do_preproc and self.benchmark != 'MOT15' and gt_ids.shape[0] > 0 and tracker_ids.shape[0] > 0: # Check all classes are valid: @@ -410,14 +410,14 @@ def get_preprocessed_seq_data(self, raw_data, cls): gt_id_map[unique_gt_ids] = np.arange(len(unique_gt_ids)) for t in range(raw_data['num_timesteps']): if len(data['gt_ids'][t]) > 0: - data['gt_ids'][t] = gt_id_map[data['gt_ids'][t]].astype(np.int) + data['gt_ids'][t] = gt_id_map[data['gt_ids'][t]].astype(np.intp) if len(unique_tracker_ids) > 0: unique_tracker_ids = np.unique(unique_tracker_ids) tracker_id_map = np.nan * np.ones((np.max(unique_tracker_ids) + 1)) tracker_id_map[unique_tracker_ids] = np.arange(len(unique_tracker_ids)) for t in range(raw_data['num_timesteps']): if len(data['tracker_ids'][t]) > 0: - data['tracker_ids'][t] = tracker_id_map[data['tracker_ids'][t]].astype(np.int) + data['tracker_ids'][t] = tracker_id_map[data['tracker_ids'][t]].astype(np.intp) # Record overview statistics. 
data['num_tracker_dets'] = num_tracker_dets diff --git a/trackeval/datasets/mots_challenge.py b/trackeval/datasets/mots_challenge.py index 191b4384..ca92129e 100644 --- a/trackeval/datasets/mots_challenge.py +++ b/trackeval/datasets/mots_challenge.py @@ -332,12 +332,12 @@ def get_preprocessed_seq_data(self, raw_data, cls): # Only extract relevant dets for this class for preproc and eval (cls) gt_class_mask = np.atleast_1d(raw_data['gt_classes'][t] == cls_id) - gt_class_mask = gt_class_mask.astype(np.bool) + gt_class_mask = gt_class_mask.astype(np.bool_) gt_ids = raw_data['gt_ids'][t][gt_class_mask] gt_dets = [raw_data['gt_dets'][t][ind] for ind in range(len(gt_class_mask)) if gt_class_mask[ind]] tracker_class_mask = np.atleast_1d(raw_data['tracker_classes'][t] == cls_id) - tracker_class_mask = tracker_class_mask.astype(np.bool) + tracker_class_mask = tracker_class_mask.astype(np.bool_) tracker_ids = raw_data['tracker_ids'][t][tracker_class_mask] tracker_dets = [raw_data['tracker_dets'][t][ind] for ind in range(len(tracker_class_mask)) if tracker_class_mask[ind]] @@ -384,14 +384,14 @@ def get_preprocessed_seq_data(self, raw_data, cls): gt_id_map[unique_gt_ids] = np.arange(len(unique_gt_ids)) for t in range(raw_data['num_timesteps']): if len(data['gt_ids'][t]) > 0: - data['gt_ids'][t] = gt_id_map[data['gt_ids'][t]].astype(np.int) + data['gt_ids'][t] = gt_id_map[data['gt_ids'][t]].astype(np.intp) if len(unique_tracker_ids) > 0: unique_tracker_ids = np.unique(unique_tracker_ids) tracker_id_map = np.nan * np.ones((np.max(unique_tracker_ids) + 1)) tracker_id_map[unique_tracker_ids] = np.arange(len(unique_tracker_ids)) for t in range(raw_data['num_timesteps']): if len(data['tracker_ids'][t]) > 0: - data['tracker_ids'][t] = tracker_id_map[data['tracker_ids'][t]].astype(np.int) + data['tracker_ids'][t] = tracker_id_map[data['tracker_ids'][t]].astype(np.intp) # Record overview statistics. 
data['num_tracker_dets'] = num_tracker_dets diff --git a/trackeval/datasets/person_path_22.py b/trackeval/datasets/person_path_22.py index 177954a8..363d07b8 100644 --- a/trackeval/datasets/person_path_22.py +++ b/trackeval/datasets/person_path_22.py @@ -230,7 +230,7 @@ def _load_raw_file(self, tracker, seq, is_gt): time_key = str(t+1) if time_key in read_data.keys(): try: - time_data = np.asarray(read_data[time_key], dtype=np.float) + time_data = np.asarray(read_data[time_key], dtype=np.float64) except ValueError: if is_gt: raise TrackEvalException( @@ -276,7 +276,7 @@ def _load_raw_file(self, tracker, seq, is_gt): raw_data['tracker_confidences'][t] = np.empty(0) if is_gt: if time_key in ignore_data.keys(): - time_ignore = np.asarray(ignore_data[time_key], dtype=np.float) + time_ignore = np.asarray(ignore_data[time_key], dtype=np.float64) raw_data['gt_crowd_ignore_regions'][t] = np.atleast_2d(time_ignore[:, 2:6]) else: raw_data['gt_crowd_ignore_regions'][t] = np.empty((0, 4)) @@ -366,7 +366,7 @@ def get_preprocessed_seq_data(self, raw_data, cls): # Match tracker and gt dets (with hungarian algorithm) and remove tracker dets which match with gt dets # which are labeled as belonging to a distractor class. 
- to_remove_tracker = np.array([], np.int) + to_remove_tracker = np.array([], np.intp) if self.do_preproc and self.benchmark != 'MOT15' and (gt_ids.shape[0] > 0 or len(crowd_ignore_regions) > 0) and tracker_ids.shape[0] > 0: # Check all classes are valid: @@ -425,14 +425,14 @@ def get_preprocessed_seq_data(self, raw_data, cls): gt_id_map[unique_gt_ids] = np.arange(len(unique_gt_ids)) for t in range(raw_data['num_timesteps']): if len(data['gt_ids'][t]) > 0: - data['gt_ids'][t] = gt_id_map[data['gt_ids'][t]].astype(np.int) + data['gt_ids'][t] = gt_id_map[data['gt_ids'][t]].astype(np.intp) if len(unique_tracker_ids) > 0: unique_tracker_ids = np.unique(unique_tracker_ids) tracker_id_map = np.nan * np.ones((np.max(unique_tracker_ids) + 1)) tracker_id_map[unique_tracker_ids] = np.arange(len(unique_tracker_ids)) for t in range(raw_data['num_timesteps']): if len(data['tracker_ids'][t]) > 0: - data['tracker_ids'][t] = tracker_id_map[data['tracker_ids'][t]].astype(np.int) + data['tracker_ids'][t] = tracker_id_map[data['tracker_ids'][t]].astype(np.intp) # Record overview statistics. 
data['num_tracker_dets'] = num_tracker_dets diff --git a/trackeval/datasets/rob_mots.py b/trackeval/datasets/rob_mots.py index d6a6d1e9..f3da66ed 100644 --- a/trackeval/datasets/rob_mots.py +++ b/trackeval/datasets/rob_mots.py @@ -351,7 +351,7 @@ def get_preprocessed_seq_data(self, raw_data, cls): gt_class_mask = np.isin(raw_data['gt_classes'][t], waymo_vehicle_classes) else: gt_class_mask = raw_data['gt_classes'][t] == cls_id - gt_class_mask = gt_class_mask.astype(np.bool) + gt_class_mask = gt_class_mask.astype(np.bool_) gt_ids = raw_data['gt_ids'][t][gt_class_mask] if cls == 'all': ignore_regions_mask = raw_data['gt_classes'][t] >= 100 @@ -376,7 +376,7 @@ def get_preprocessed_seq_data(self, raw_data, cls): tracker_class_mask = np.ones_like(raw_data['tracker_classes'][t]) else: tracker_class_mask = np.atleast_1d(raw_data['tracker_classes'][t] == cls_id) - tracker_class_mask = tracker_class_mask.astype(np.bool) + tracker_class_mask = tracker_class_mask.astype(np.bool_) tracker_ids = raw_data['tracker_ids'][t][tracker_class_mask] tracker_dets = [raw_data['tracker_dets'][t][ind] for ind in range(len(tracker_class_mask)) if tracker_class_mask[ind]] @@ -443,7 +443,7 @@ def get_preprocessed_seq_data(self, raw_data, cls): to_remove_all = unmatched_indices[np.logical_or(is_ignore_class, is_not_evaled_class)] to_remove_tracker = np.concatenate([to_remove_tracker, to_remove_all], axis=0) else: - to_remove_tracker = np.array([], dtype=np.int) + to_remove_tracker = np.array([], dtype=np.intp) # remove all unwanted tracker detections data['tracker_ids'][t] = np.delete(tracker_ids, to_remove_tracker, axis=0) @@ -468,14 +468,14 @@ def get_preprocessed_seq_data(self, raw_data, cls): gt_id_map[unique_gt_ids] = np.arange(len(unique_gt_ids)) for t in range(raw_data['num_timesteps']): if len(data['gt_ids'][t]) > 0: - data['gt_ids'][t] = gt_id_map[data['gt_ids'][t]].astype(np.int) + data['gt_ids'][t] = gt_id_map[data['gt_ids'][t]].astype(np.intp) if len(unique_tracker_ids) > 0: 
unique_tracker_ids = np.unique(unique_tracker_ids) tracker_id_map = np.nan * np.ones((np.max(unique_tracker_ids) + 1)) tracker_id_map[unique_tracker_ids] = np.arange(len(unique_tracker_ids)) for t in range(raw_data['num_timesteps']): if len(data['tracker_ids'][t]) > 0: - data['tracker_ids'][t] = tracker_id_map[data['tracker_ids'][t]].astype(np.int) + data['tracker_ids'][t] = tracker_id_map[data['tracker_ids'][t]].astype(np.intp) # Record overview statistics. data['num_tracker_dets'] = num_tracker_dets diff --git a/trackeval/datasets/tao.py b/trackeval/datasets/tao.py index e8461676..4679e047 100644 --- a/trackeval/datasets/tao.py +++ b/trackeval/datasets/tao.py @@ -302,12 +302,12 @@ def get_preprocessed_seq_data(self, raw_data, cls): # Only extract relevant dets for this class for preproc and eval (cls) gt_class_mask = np.atleast_1d(raw_data['gt_classes'][t] == cls_id) - gt_class_mask = gt_class_mask.astype(np.bool) + gt_class_mask = gt_class_mask.astype(np.bool_) gt_ids = raw_data['gt_ids'][t][gt_class_mask] gt_dets = raw_data['gt_dets'][t][gt_class_mask] tracker_class_mask = np.atleast_1d(raw_data['tracker_classes'][t] == cls_id) - tracker_class_mask = tracker_class_mask.astype(np.bool) + tracker_class_mask = tracker_class_mask.astype(np.bool_) tracker_ids = raw_data['tracker_ids'][t][tracker_class_mask] tracker_dets = raw_data['tracker_dets'][t][tracker_class_mask] tracker_confidences = raw_data['tracker_confidences'][t][tracker_class_mask] @@ -328,7 +328,7 @@ def get_preprocessed_seq_data(self, raw_data, cls): elif is_not_exhaustively_labeled: to_remove_tracker = unmatched_indices else: - to_remove_tracker = np.array([], dtype=np.int) + to_remove_tracker = np.array([], dtype=np.intp) # remove all unwanted unmatched tracker detections data['tracker_ids'][t] = np.delete(tracker_ids, to_remove_tracker, axis=0) @@ -352,14 +352,14 @@ def get_preprocessed_seq_data(self, raw_data, cls): gt_id_map[unique_gt_ids] = np.arange(len(unique_gt_ids)) for t in 
range(raw_data['num_timesteps']): if len(data['gt_ids'][t]) > 0: - data['gt_ids'][t] = gt_id_map[data['gt_ids'][t]].astype(np.int) + data['gt_ids'][t] = gt_id_map[data['gt_ids'][t]].astype(np.intp) if len(unique_tracker_ids) > 0: unique_tracker_ids = np.unique(unique_tracker_ids) tracker_id_map = np.nan * np.ones((np.max(unique_tracker_ids) + 1)) tracker_id_map[unique_tracker_ids] = np.arange(len(unique_tracker_ids)) for t in range(raw_data['num_timesteps']): if len(data['tracker_ids'][t]) > 0: - data['tracker_ids'][t] = tracker_id_map[data['tracker_ids'][t]].astype(np.int) + data['tracker_ids'][t] = tracker_id_map[data['tracker_ids'][t]].astype(np.intp) # Record overview statistics. data['num_tracker_dets'] = num_tracker_dets diff --git a/trackeval/datasets/tao_ow.py b/trackeval/datasets/tao_ow.py index 40f80d78..0fb35bea 100644 --- a/trackeval/datasets/tao_ow.py +++ b/trackeval/datasets/tao_ow.py @@ -317,12 +317,12 @@ def get_preprocessed_seq_data(self, raw_data, cls): # Only extract relevant dets for this class for preproc and eval (cls) gt_class_mask = np.atleast_1d(raw_data['gt_classes'][t] == cls_id) - gt_class_mask = gt_class_mask.astype(np.bool) + gt_class_mask = gt_class_mask.astype(np.bool_) gt_ids = raw_data['gt_ids'][t][gt_class_mask] gt_dets = raw_data['gt_dets'][t][gt_class_mask] tracker_class_mask = np.atleast_1d(raw_data['tracker_classes'][t] == cls_id) - tracker_class_mask = tracker_class_mask.astype(np.bool) + tracker_class_mask = tracker_class_mask.astype(np.bool_) tracker_ids = raw_data['tracker_ids'][t][tracker_class_mask] tracker_dets = raw_data['tracker_dets'][t][tracker_class_mask] tracker_confidences = raw_data['tracker_confidences'][t][tracker_class_mask] @@ -343,7 +343,7 @@ def get_preprocessed_seq_data(self, raw_data, cls): elif is_not_exhaustively_labeled: to_remove_tracker = unmatched_indices else: - to_remove_tracker = np.array([], dtype=np.int) + to_remove_tracker = np.array([], dtype=np.intp) # remove all unwanted unmatched tracker 
detections data['tracker_ids'][t] = np.delete(tracker_ids, to_remove_tracker, axis=0) @@ -367,14 +367,14 @@ def get_preprocessed_seq_data(self, raw_data, cls): gt_id_map[unique_gt_ids] = np.arange(len(unique_gt_ids)) for t in range(raw_data['num_timesteps']): if len(data['gt_ids'][t]) > 0: - data['gt_ids'][t] = gt_id_map[data['gt_ids'][t]].astype(np.int) + data['gt_ids'][t] = gt_id_map[data['gt_ids'][t]].astype(np.intp) if len(unique_tracker_ids) > 0: unique_tracker_ids = np.unique(unique_tracker_ids) tracker_id_map = np.nan * np.ones((np.max(unique_tracker_ids) + 1)) tracker_id_map[unique_tracker_ids] = np.arange(len(unique_tracker_ids)) for t in range(raw_data['num_timesteps']): if len(data['tracker_ids'][t]) > 0: - data['tracker_ids'][t] = tracker_id_map[data['tracker_ids'][t]].astype(np.int) + data['tracker_ids'][t] = tracker_id_map[data['tracker_ids'][t]].astype(np.intp) # Record overview statistics. data['num_tracker_dets'] = num_tracker_dets diff --git a/trackeval/datasets/youtube_vis.py b/trackeval/datasets/youtube_vis.py index 6d5b54c9..4d1b4b36 100644 --- a/trackeval/datasets/youtube_vis.py +++ b/trackeval/datasets/youtube_vis.py @@ -241,12 +241,12 @@ def get_preprocessed_seq_data(self, raw_data, cls): # Only extract relevant dets for this class for eval (cls) gt_class_mask = np.atleast_1d(raw_data['gt_classes'][t] == cls_id) - gt_class_mask = gt_class_mask.astype(np.bool) + gt_class_mask = gt_class_mask.astype(np.bool_) gt_ids = raw_data['gt_ids'][t][gt_class_mask] gt_dets = [raw_data['gt_dets'][t][ind] for ind in range(len(gt_class_mask)) if gt_class_mask[ind]] tracker_class_mask = np.atleast_1d(raw_data['tracker_classes'][t] == cls_id) - tracker_class_mask = tracker_class_mask.astype(np.bool) + tracker_class_mask = tracker_class_mask.astype(np.bool_) tracker_ids = raw_data['tracker_ids'][t][tracker_class_mask] tracker_dets = [raw_data['tracker_dets'][t][ind] for ind in range(len(tracker_class_mask)) if tracker_class_mask[ind]] @@ -270,14 +270,14 @@ def 
get_preprocessed_seq_data(self, raw_data, cls): gt_id_map[unique_gt_ids] = np.arange(len(unique_gt_ids)) for t in range(raw_data['num_timesteps']): if len(data['gt_ids'][t]) > 0: - data['gt_ids'][t] = gt_id_map[data['gt_ids'][t]].astype(np.int) + data['gt_ids'][t] = gt_id_map[data['gt_ids'][t]].astype(np.intp) if len(unique_tracker_ids) > 0: unique_tracker_ids = np.unique(unique_tracker_ids) tracker_id_map = np.nan * np.ones((np.max(unique_tracker_ids) + 1)) tracker_id_map[unique_tracker_ids] = np.arange(len(unique_tracker_ids)) for t in range(raw_data['num_timesteps']): if len(data['tracker_ids'][t]) > 0: - data['tracker_ids'][t] = tracker_id_map[data['tracker_ids'][t]].astype(np.int) + data['tracker_ids'][t] = tracker_id_map[data['tracker_ids'][t]].astype(np.intp) # Ensure that ids are unique per timestep. self._check_unique_ids(data) diff --git a/trackeval/metrics/hota.py b/trackeval/metrics/hota.py index f551b766..ad8d42fe 100644 --- a/trackeval/metrics/hota.py +++ b/trackeval/metrics/hota.py @@ -28,19 +28,19 @@ def eval_sequence(self, data): # Initialise results res = {} for field in self.float_array_fields + self.integer_array_fields: - res[field] = np.zeros((len(self.array_labels)), dtype=np.float) + res[field] = np.zeros((len(self.array_labels)), dtype=np.float64) for field in self.float_fields: res[field] = 0 # Return result quickly if tracker or gt sequence is empty if data['num_tracker_dets'] == 0: - res['HOTA_FN'] = data['num_gt_dets'] * np.ones((len(self.array_labels)), dtype=np.float) - res['LocA'] = np.ones((len(self.array_labels)), dtype=np.float) + res['HOTA_FN'] = data['num_gt_dets'] * np.ones((len(self.array_labels)), dtype=np.float64) + res['LocA'] = np.ones((len(self.array_labels)), dtype=np.float64) res['LocA(0)'] = 1.0 return res if data['num_gt_dets'] == 0: - res['HOTA_FP'] = data['num_tracker_dets'] * np.ones((len(self.array_labels)), dtype=np.float) - res['LocA'] = np.ones((len(self.array_labels)), dtype=np.float) + res['HOTA_FP'] = 
data['num_tracker_dets'] * np.ones((len(self.array_labels)), dtype=np.float64) + res['LocA'] = np.ones((len(self.array_labels)), dtype=np.float64) res['LocA(0)'] = 1.0 return res diff --git a/trackeval/metrics/identity.py b/trackeval/metrics/identity.py index c8c6c809..43265d3d 100644 --- a/trackeval/metrics/identity.py +++ b/trackeval/metrics/identity.py @@ -80,9 +80,9 @@ def eval_sequence(self, data): match_rows, match_cols = linear_sum_assignment(fn_mat + fp_mat) # Accumulate basic statistics - res['IDFN'] = fn_mat[match_rows, match_cols].sum().astype(np.int) - res['IDFP'] = fp_mat[match_rows, match_cols].sum().astype(np.int) - res['IDTP'] = (gt_id_count.sum() - res['IDFN']).astype(np.int) + res['IDFN'] = fn_mat[match_rows, match_cols].sum().astype(np.intp) + res['IDFP'] = fp_mat[match_rows, match_cols].sum().astype(np.intp) + res['IDTP'] = (gt_id_count.sum() - res['IDFN']).astype(np.intp) # Calculate final ID scores res = self._compute_final_fields(res) diff --git a/trackeval/metrics/j_and_f.py b/trackeval/metrics/j_and_f.py index 1b18f046..1548847c 100644 --- a/trackeval/metrics/j_and_f.py +++ b/trackeval/metrics/j_and_f.py @@ -160,7 +160,7 @@ def _seg2bmap(seg, width=None, height=None): January 2003 """ - seg = seg.astype(np.bool) + seg = seg.astype(np.bool_) seg[seg > 0] = 1 assert np.atleast_3d(seg).shape[2] == 1 diff --git a/trackeval/metrics/track_map.py b/trackeval/metrics/track_map.py index 039f8908..c1af01c8 100644 --- a/trackeval/metrics/track_map.py +++ b/trackeval/metrics/track_map.py @@ -220,8 +220,8 @@ def combine_sequences(self, all_res): tps = np.logical_and(dt_m != -1, np.logical_not(dt_ig)) fps = np.logical_and(dt_m == -1, np.logical_not(dt_ig)) - tp_sum = np.cumsum(tps, axis=1).astype(dtype=np.float) - fp_sum = np.cumsum(fps, axis=1).astype(dtype=np.float) + tp_sum = np.cumsum(tps, axis=1).astype(dtype=np.float64) + fp_sum = np.cumsum(fps, axis=1).astype(dtype=np.float64) for iou_thr_idx, (tp, fp) in enumerate(zip(tp_sum, fp_sum)): tp = 
np.array(tp) @@ -259,8 +259,8 @@ def combine_sequences(self, all_res): # compute the precision and recall averages for the respective alpha thresholds and ignore masks for lbl in self.lbls: - res['AP_' + lbl] = np.zeros((len(self.array_labels)), dtype=np.float) - res['AR_' + lbl] = np.zeros((len(self.array_labels)), dtype=np.float) + res['AP_' + lbl] = np.zeros((len(self.array_labels)), dtype=np.float64) + res['AR_' + lbl] = np.zeros((len(self.array_labels)), dtype=np.float64) for a_id, alpha in enumerate(self.array_labels): for lbl_idx, lbl in enumerate(self.lbls): @@ -280,7 +280,7 @@ def combine_classes_class_averaged(self, all_res, ignore_empty_classes=True): """ res = {} for field in self.fields: - res[field] = np.zeros((len(self.array_labels)), dtype=np.float) + res[field] = np.zeros((len(self.array_labels)), dtype=np.float64) field_stacked = np.array([res[field] for res in all_res.values()]) for a_id, alpha in enumerate(self.array_labels): @@ -297,7 +297,7 @@ def combine_classes_det_averaged(self, all_res): res = {} for field in self.fields: - res[field] = np.zeros((len(self.array_labels)), dtype=np.float) + res[field] = np.zeros((len(self.array_labels)), dtype=np.float64) field_stacked = np.array([res[field] for res in all_res.values()]) for a_id, alpha in enumerate(self.array_labels): From a42b72e48d438ccf7ff4229d80f830ffc07cfb20 Mon Sep 17 00:00:00 2001 From: SkalskiP Date: Fri, 30 Jan 2026 15:46:59 +0100 Subject: [PATCH 3/3] Fix seqmap_file indexing in MotChallenge2DBox Extract first element from seqmap_file when it's returned as a list. 
--- trackeval/datasets/mot_challenge_2d_box.py | 1 + 1 file changed, 1 insertion(+) diff --git a/trackeval/datasets/mot_challenge_2d_box.py b/trackeval/datasets/mot_challenge_2d_box.py index 469df28b..9fea853c 100644 --- a/trackeval/datasets/mot_challenge_2d_box.py +++ b/trackeval/datasets/mot_challenge_2d_box.py @@ -153,6 +153,7 @@ def _get_seq_info(self): seqmap_file = os.path.join(self.config['GT_FOLDER'], 'seqmaps', self.gt_set + '.txt') else: seqmap_file = os.path.join(self.config["SEQMAP_FOLDER"], self.gt_set + '.txt') + seqmap_file = seqmap_file[0] if isinstance(seqmap_file, list) else seqmap_file if not os.path.isfile(seqmap_file): print('no seqmap found: ' + seqmap_file) raise TrackEvalException('no seqmap found: ' + os.path.basename(seqmap_file))