Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
@@ -1,5 +1,7 @@
.idea
.vscode
.venv/
*.egg-info/
**.pyc
**__pycache__
gt_data/*
Expand Down
45 changes: 32 additions & 13 deletions Readme.md → README.md
Original file line number Diff line number Diff line change
Expand Up @@ -118,19 +118,38 @@ To convert formats, check out the format specifications defined [here](docs).

By default, we would recommend the MOTChallenge format, although any implemented format should work. Note that for many cases you will want to use the argument ```--DO_PREPROC False``` unless you want to run preprocessing to remove distractor objects.

## Requirements
Code tested on Python 3.7.

- Minimum requirements: numpy, scipy
- For plotting: matplotlib
- For segmentation datasets (KITTI MOTS, MOTS-Challenge, DAVIS, YouTube-VIS): pycocotools
- For DAVIS dataset: Pillow
- For J & F metric: opencv_python, scikit_image
- For simple test cases for metrics: pytest

use ```pip3 install -r requirements.txt``` to install all possible requirements.

use ```pip3 install -r minimum_requirements.txt``` to only install the minimum if you don't need the extra functionality as listed above.
## Installation

Code requires Python 3.10 or newer (see `requires-python` in pyproject.toml).

### Using uv (recommended)

```bash
# Install with minimum dependencies (numpy, scipy)
uv pip install trackeval

# Install with all dependencies
uv pip install "trackeval[all]"

# Install specific extras
uv pip install "trackeval[plot,segmentation]"
```

### Using pip

```bash
pip install trackeval
pip install "trackeval[all]"
```

### Available extras

- `plot` - matplotlib for plotting results
- `segmentation` - pycocotools, Pillow for segmentation datasets (KITTI MOTS, MOTS-Challenge, DAVIS, YouTube-VIS)
- `jf` - opencv-python, scikit-image for J & F metric
- `burst` - tabulate for BURST benchmark
- `dev` - pytest, tqdm for development and testing
- `all` - all optional dependencies

## Timing analysis

Expand Down
2 changes: 0 additions & 2 deletions minimum_requirements.txt

This file was deleted.

57 changes: 53 additions & 4 deletions pyproject.toml
Original file line number Diff line number Diff line change
@@ -1,6 +1,55 @@
[build-system]
requires = [
"setuptools>=42",
"wheel"
]
requires = ["setuptools>=42", "wheel"]
build-backend = "setuptools.build_meta"

[project]
name = "trackeval"
version = "1.0.dev1"
description = "Code for evaluating object tracking"
readme = "README.md"
license = {text = "MIT"}
authors = [
{name = "Jonathon Luiten, Arne Hoffhues", email = "jonoluiten@gmail.com"}
]
classifiers = [
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3 :: Only",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Topic :: Scientific/Engineering",
]
requires-python = ">=3.10"
dependencies = [
"numpy>=2.0.0",
"scipy>=1.14.0",
]

[project.optional-dependencies]
plot = [
"matplotlib==3.2.1",
]
segmentation = [
"pycocotools==2.0.2",
"Pillow==8.1.2",
]
jf = [
"opencv-python==4.4.0.46",
"scikit-image==0.16.2",
]
burst = [
"tabulate",
]
dev = [
"pytest>=7.0.0",
"tqdm>=4.64.0",
]
all = [
"trackeval[plot,segmentation,jf,burst,dev]",
]

[project.urls]
Homepage = "https://github.com/JonathonLuiten/TrackEval"
"Bug Tracker" = "https://github.com/JonathonLuiten/TrackEval/issues"

[tool.setuptools.packages.find]
include = ["trackeval*"]
10 changes: 0 additions & 10 deletions requirements.txt

This file was deleted.

27 changes: 0 additions & 27 deletions setup.cfg

This file was deleted.

3 changes: 0 additions & 3 deletions setup.py

This file was deleted.

4 changes: 2 additions & 2 deletions trackeval/baselines/stp.py
Original file line number Diff line number Diff line change
Expand Up @@ -49,7 +49,7 @@ def track_sequence(seq_file):

# Initialize container for holding previously tracked objects.
prev = {'boxes': np.empty((0, 4)),
'ids': np.array([], np.int),
'ids': np.array([], np.intp),
'timesteps': np.array([])}

# Run tracker for each timestep.
Expand Down Expand Up @@ -80,7 +80,7 @@ def track_sequence(seq_file):
match_cols = match_cols[actually_matched_mask]

# Assign the prev track ID to the current dets if they were matched.
ids = np.nan * np.ones((len(boxes),), np.int)
ids = np.nan * np.ones((len(boxes),), np.intp)
ids[match_cols] = prev['ids'][match_rows]

# Create new track IDs for dets that were not matched to previous tracks.
Expand Down
8 changes: 4 additions & 4 deletions trackeval/datasets/bdd100k.py
Original file line number Diff line number Diff line change
Expand Up @@ -226,12 +226,12 @@ def get_preprocessed_seq_data(self, raw_data, cls):

# Only extract relevant dets for this class for preproc and eval (cls)
gt_class_mask = np.atleast_1d(raw_data['gt_classes'][t] == cls_id)
gt_class_mask = gt_class_mask.astype(np.bool)
gt_class_mask = gt_class_mask.astype(np.bool_)
gt_ids = raw_data['gt_ids'][t][gt_class_mask]
gt_dets = raw_data['gt_dets'][t][gt_class_mask]

tracker_class_mask = np.atleast_1d(raw_data['tracker_classes'][t] == cls_id)
tracker_class_mask = tracker_class_mask.astype(np.bool)
tracker_class_mask = tracker_class_mask.astype(np.bool_)
tracker_ids = raw_data['tracker_ids'][t][tracker_class_mask]
tracker_dets = raw_data['tracker_dets'][t][tracker_class_mask]
similarity_scores = raw_data['similarity_scores'][t][gt_class_mask, :][:, tracker_class_mask]
Expand Down Expand Up @@ -276,14 +276,14 @@ def get_preprocessed_seq_data(self, raw_data, cls):
gt_id_map[unique_gt_ids] = np.arange(len(unique_gt_ids))
for t in range(raw_data['num_timesteps']):
if len(data['gt_ids'][t]) > 0:
data['gt_ids'][t] = gt_id_map[data['gt_ids'][t]].astype(np.int)
data['gt_ids'][t] = gt_id_map[data['gt_ids'][t]].astype(np.intp)
if len(unique_tracker_ids) > 0:
unique_tracker_ids = np.unique(unique_tracker_ids)
tracker_id_map = np.nan * np.ones((np.max(unique_tracker_ids) + 1))
tracker_id_map[unique_tracker_ids] = np.arange(len(unique_tracker_ids))
for t in range(raw_data['num_timesteps']):
if len(data['tracker_ids'][t]) > 0:
data['tracker_ids'][t] = tracker_id_map[data['tracker_ids'][t]].astype(np.int)
data['tracker_ids'][t] = tracker_id_map[data['tracker_ids'][t]].astype(np.intp)

# Record overview statistics.
data['num_tracker_dets'] = num_tracker_dets
Expand Down
10 changes: 5 additions & 5 deletions trackeval/datasets/burst_helpers/burst_base.py
Original file line number Diff line number Diff line change
Expand Up @@ -322,12 +322,12 @@ def get_preprocessed_seq_data(self, raw_data, cls):

# Only extract relevant dets for this class for preproc and eval (cls)
gt_class_mask = np.atleast_1d(raw_data['gt_classes'][t] == cls_id)
gt_class_mask = gt_class_mask.astype(np.bool)
gt_class_mask = gt_class_mask.astype(np.bool_)
gt_ids = raw_data['gt_ids'][t][gt_class_mask]
gt_dets = raw_data['gt_dets'][t][gt_class_mask]

tracker_class_mask = np.atleast_1d(raw_data['tracker_classes'][t] == cls_id)
tracker_class_mask = tracker_class_mask.astype(np.bool)
tracker_class_mask = tracker_class_mask.astype(np.bool_)
tracker_ids = raw_data['tracker_ids'][t][tracker_class_mask]
tracker_dets = raw_data['tracker_dets'][t][tracker_class_mask]
tracker_confidences = raw_data['tracker_confidences'][t][tracker_class_mask]
Expand All @@ -349,7 +349,7 @@ def get_preprocessed_seq_data(self, raw_data, cls):
elif is_not_exhaustively_labeled:
to_remove_tracker = unmatched_indices
else:
to_remove_tracker = np.array([], dtype=np.int)
to_remove_tracker = np.array([], dtype=np.intp)

# remove all unwanted unmatched tracker detections
data['tracker_ids'][t] = np.delete(tracker_ids, to_remove_tracker, axis=0)
Expand Down Expand Up @@ -377,14 +377,14 @@ def get_preprocessed_seq_data(self, raw_data, cls):
gt_id_map[unique_gt_ids] = np.arange(len(unique_gt_ids))
for t in range(raw_data['num_timesteps']):
if len(data['gt_ids'][t]) > 0:
data['gt_ids'][t] = gt_id_map[data['gt_ids'][t]].astype(np.int)
data['gt_ids'][t] = gt_id_map[data['gt_ids'][t]].astype(np.intp)
if len(unique_tracker_ids) > 0:
unique_tracker_ids = np.unique(unique_tracker_ids)
tracker_id_map = np.nan * np.ones((np.max(unique_tracker_ids) + 1))
tracker_id_map[unique_tracker_ids] = np.arange(len(unique_tracker_ids))
for t in range(raw_data['num_timesteps']):
if len(data['tracker_ids'][t]) > 0:
data['tracker_ids'][t] = tracker_id_map[data['tracker_ids'][t]].astype(np.int)
data['tracker_ids'][t] = tracker_id_map[data['tracker_ids'][t]].astype(np.intp)

# Record overview statistics.
data['num_tracker_dets'] = num_tracker_dets
Expand Down
10 changes: 5 additions & 5 deletions trackeval/datasets/burst_helpers/burst_ow_base.py
Original file line number Diff line number Diff line change
Expand Up @@ -332,12 +332,12 @@ def get_preprocessed_seq_data(self, raw_data, cls):

# Only extract relevant dets for this class for preproc and eval (cls)
gt_class_mask = np.atleast_1d(raw_data['gt_classes'][t] == cls_id)
gt_class_mask = gt_class_mask.astype(np.bool)
gt_class_mask = gt_class_mask.astype(np.bool_)
gt_ids = raw_data['gt_ids'][t][gt_class_mask]
gt_dets = raw_data['gt_dets'][t][gt_class_mask]

tracker_class_mask = np.atleast_1d(raw_data['tracker_classes'][t] == cls_id)
tracker_class_mask = tracker_class_mask.astype(np.bool)
tracker_class_mask = tracker_class_mask.astype(np.bool_)
tracker_ids = raw_data['tracker_ids'][t][tracker_class_mask]
tracker_dets = raw_data['tracker_dets'][t][tracker_class_mask]
tracker_confidences = raw_data['tracker_confidences'][t][tracker_class_mask]
Expand All @@ -358,7 +358,7 @@ def get_preprocessed_seq_data(self, raw_data, cls):
elif is_not_exhaustively_labeled:
to_remove_tracker = unmatched_indices
else:
to_remove_tracker = np.array([], dtype=np.int)
to_remove_tracker = np.array([], dtype=np.intp)

# remove all unwanted unmatched tracker detections
data['tracker_ids'][t] = np.delete(tracker_ids, to_remove_tracker, axis=0)
Expand All @@ -382,14 +382,14 @@ def get_preprocessed_seq_data(self, raw_data, cls):
gt_id_map[unique_gt_ids] = np.arange(len(unique_gt_ids))
for t in range(raw_data['num_timesteps']):
if len(data['gt_ids'][t]) > 0:
data['gt_ids'][t] = gt_id_map[data['gt_ids'][t]].astype(np.int)
data['gt_ids'][t] = gt_id_map[data['gt_ids'][t]].astype(np.intp)
if len(unique_tracker_ids) > 0:
unique_tracker_ids = np.unique(unique_tracker_ids)
tracker_id_map = np.nan * np.ones((np.max(unique_tracker_ids) + 1))
tracker_id_map[unique_tracker_ids] = np.arange(len(unique_tracker_ids))
for t in range(raw_data['num_timesteps']):
if len(data['tracker_ids'][t]) > 0:
data['tracker_ids'][t] = tracker_id_map[data['tracker_ids'][t]].astype(np.int)
data['tracker_ids'][t] = tracker_id_map[data['tracker_ids'][t]].astype(np.intp)

# Record overview statistics.
data['num_tracker_dets'] = num_tracker_dets
Expand Down
6 changes: 3 additions & 3 deletions trackeval/datasets/davis.py
Original file line number Diff line number Diff line change
Expand Up @@ -240,7 +240,7 @@ def get_preprocessed_seq_data(self, raw_data, cls):
rows, columns = np.where(void_mask_ious > 0)
for r in rows:
det = mask_utils.decode(raw_data['tracker_dets'][t][r])
void = mask_utils.decode(void_mask).astype(np.bool)
void = mask_utils.decode(void_mask).astype(np.bool_)
det[void] = 0
det = mask_utils.encode(np.array(det, order='F').astype(np.uint8))
raw_data['tracker_dets'][t][r] = det
Expand All @@ -253,14 +253,14 @@ def get_preprocessed_seq_data(self, raw_data, cls):
gt_id_map[unique_gt_ids] = np.arange(len(unique_gt_ids))
for t in range(raw_data['num_timesteps']):
if len(data['gt_ids'][t]) > 0:
data['gt_ids'][t] = gt_id_map[data['gt_ids'][t]].astype(np.int)
data['gt_ids'][t] = gt_id_map[data['gt_ids'][t]].astype(np.intp)
if len(unique_tracker_ids) > 0:
unique_tracker_ids = np.unique(unique_tracker_ids)
tracker_id_map = np.nan * np.ones((np.max(unique_tracker_ids) + 1))
tracker_id_map[unique_tracker_ids] = np.arange(len(unique_tracker_ids))
for t in range(raw_data['num_timesteps']):
if len(data['tracker_ids'][t]) > 0:
data['tracker_ids'][t] = tracker_id_map[data['tracker_ids'][t]].astype(np.int)
data['tracker_ids'][t] = tracker_id_map[data['tracker_ids'][t]].astype(np.intp)

# Record overview statistics.
data['num_tracker_dets'] = num_tracker_dets
Expand Down
8 changes: 4 additions & 4 deletions trackeval/datasets/head_tracking_challenge.py
Original file line number Diff line number Diff line change
Expand Up @@ -227,7 +227,7 @@ def _load_raw_file(self, tracker, seq, is_gt):
time_key = str(t+1)
if time_key in read_data.keys():
try:
time_data = np.asarray(read_data[time_key], dtype=np.float)
time_data = np.asarray(read_data[time_key], dtype=np.float64)
except ValueError:
if is_gt:
raise TrackEvalException(
Expand Down Expand Up @@ -365,7 +365,7 @@ def get_preprocessed_seq_data(self, raw_data, cls):

# Match tracker and gt dets (with hungarian algorithm) and remove tracker dets which match with gt dets
# which are labeled as belonging to a distractor class.
to_remove_tracker = np.array([], np.int)
to_remove_tracker = np.array([], np.intp)
if self.do_preproc and self.benchmark != 'MOT15' and gt_ids.shape[0] > 0 and tracker_ids.shape[0] > 0:

# Check all classes are valid:
Expand Down Expand Up @@ -432,14 +432,14 @@ def get_preprocessed_seq_data(self, raw_data, cls):
gt_id_map[unique_gt_ids] = np.arange(len(unique_gt_ids))
for t in range(raw_data['num_timesteps']):
if len(data['gt_ids'][t]) > 0:
data['gt_ids'][t] = gt_id_map[data['gt_ids'][t]].astype(np.int)
data['gt_ids'][t] = gt_id_map[data['gt_ids'][t]].astype(np.intp)
if len(unique_tracker_ids) > 0:
unique_tracker_ids = np.unique(unique_tracker_ids)
tracker_id_map = np.nan * np.ones((np.max(unique_tracker_ids) + 1))
tracker_id_map[unique_tracker_ids] = np.arange(len(unique_tracker_ids))
for t in range(raw_data['num_timesteps']):
if len(data['tracker_ids'][t]) > 0:
data['tracker_ids'][t] = tracker_id_map[data['tracker_ids'][t]].astype(np.int)
data['tracker_ids'][t] = tracker_id_map[data['tracker_ids'][t]].astype(np.intp)

# Record overview statistics.
data['num_tracker_dets'] = num_tracker_dets
Expand Down
Loading