-
Notifications
You must be signed in to change notification settings - Fork 828
Expand file tree
/
Copy pathmise.toml
More file actions
117 lines (99 loc) · 3.55 KB
/
mise.toml
File metadata and controls
117 lines (99 loc) · 3.55 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
# Tool versions pinned for this repository, installed/managed by mise.
# Prefixed keys ("go:", "npm:", "cargo:", "pipx:", "github:") select the mise
# backend used to install the tool; bare names resolve via the mise registry.
[tools]
"go:github.com/gohugoio/hugo" = "v0.160.1"
"go:github.com/grafana/oats" = "0.6.1"
java = "temurin-25.0.3+9.0.LTS"
lychee = "0.23.0"
node = "24.15.0"
"npm:renovate" = "43.129.1"
protoc = "34.1"
# Linters
actionlint = "1.7.12"
"cargo:xmloxide" = "0.4.1"
editorconfig-checker = "v3.6.1"
"github:grafana/flint" = "0.20.3"
"github:google/google-java-format" = "1.35.0"
"npm:@biomejs/biome" = "2.4.12"
"npm:markdownlint-cli2" = "0.22.0"
"npm:prettier" = "3.8.3"
"pipx:codespell" = "2.4.2"
"pipx:ruff" = "0.15.11"
shellcheck = "v0.11.0"
shfmt = "3.13.1"
# Environment variables exported to every mise task.
[env]
# Directory flint reads its configuration from.
FLINT_CONFIG_DIR = ".github/config"
# LGTM_VERSION is consumed by the acceptance-test task (`oats -lgtm-version`).
# The renovate marker below must stay directly above the key it updates.
# renovate: datasource=github-releases depName=grafana/docker-otel-lgtm
LGTM_VERSION = "0.25.0"
# CI entry point: full clean install with the proto flags enabled.
# Inline env table is equivalent to the dotted env.* key form.
[tasks.ci]
description = "CI Build"
run = "./mvnw clean install"
env = { REQUIRE_PROTO_UP_TO_DATE = "true", PROTO_GENERATION = "true" }
# Remove build outputs across all Maven modules.
[tasks.clean]
description = "clean all modules"
run = "./mvnw clean"
# Fast local compile: skips tests, coverage, checkstyle, and warnings.
[tasks.compile]
description = "bare compile, ignoring formatting and linters"
run = "./mvnw install -DskipTests -Dcoverage.skip=true -Dcheckstyle.skip=true -Dwarnings=-nowarn"
# Same skip flags as `compile`, but with PROTO_GENERATION=true so the
# protobuf sources are regenerated during the build.
[tasks.generate]
description = "regenerate protobuf sources"
run = "./mvnw clean install -DskipTests -Dcoverage.skip=true -Dcheckstyle.skip=true -Dwarnings=-nowarn"
env.PROTO_GENERATION = "true"
# Unit tests only; coverage, checkstyle, and warnings are skipped for speed.
[tasks.test]
description = "run unit tests, ignoring formatting and linters"
run = "./mvnw test -Dcoverage.skip=true -Dcheckstyle.skip=true -Dwarnings=-nowarn"
# Full verification, including integration-test phases bound to `verify`.
[tasks.test-all]
description = "run all tests"
run = "./mvnw verify"
# Build and install all modules without running tests or coverage.
[tasks.build]
description = "build all modules without tests"
run = "./mvnw install -DskipTests -Dcoverage.skip=true"
# Run all lints. `lint` is a bare-compatible TOML key, so quoting the header
# ([tasks."lint"]) was unnecessary; the unquoted form is semantically identical.
[tasks.lint]
description = "Run all lints"
# Forward extra CLI arguments straight through to the command.
raw_args = true
depends = ["lint:bom"]
run = "flint run"
# Apply automatic fixes for lint findings (quotes required: `:` is not a
# bare-key character in TOML).
[tasks."lint:fix"]
description = "Auto-fix lint issues"
run = "flint run --fix"
# Run OATs acceptance tests against the examples, using the LGTM stack
# version pinned in [env] (LGTM_VERSION is shell-expanded by mise at run time).
[tasks.acceptance-test]
description = "Run OATs acceptance tests"
# Array form for consistency with the other tasks' depends lists
# (mise accepts both a string and an array here).
depends = ["build"]
run = "oats -lgtm-version $LGTM_VERSION -timeout 5m examples/"
# Generate aggregated Javadoc and move it into the Hugo docs tree
# (./docs/static/api), replacing any previous output.
[tasks.javadoc]
description = "Generate Javadoc"
run = [
"./mvnw -B clean compile javadoc:javadoc javadoc:aggregate -P 'javadoc,!default'",
"rm -rf ./docs/static/api",
"mv ./target/reports/apidocs ./docs/static/api && echo && echo 'ls ./docs/static/api' && ls ./docs/static/api"
]
# Local docs preview: Hugo dev server with drafts (-D), run from ./docs.
[tasks.gh-pages-dev]
description = "Build GitHub pages for dev"
run = "hugo server -D"
dir = "docs"
# Production docs build: requires the Javadoc output and the release version
# to be set first (see depends).
[tasks.build-gh-pages]
description = "Build GitHub pages"
depends = ["javadoc", "set-release-version-github-pages"]
# For maximum backward compatibility with Hugo modules
env = { HUGO_ENVIRONMENT = "production", HUGO_ENV = "production" }
dir = "docs"
# NOTE(review): ${BASE_URL} is expanded by the shell at run time, not by TOML;
# it is not defined in this file — presumably supplied by the CI environment.
run = [
"hugo --gc --minify --baseURL ${BASE_URL}/",
"echo 'ls ./public/api' && ls ./public/api"
]
# Quick smoke benchmark: 1 fork, 1 warmup, 3 measurement iterations.
[tasks."benchmark:quick"]
description = "Run benchmarks with reduced iterations (quick smoke test, ~10 min)"
run = "python3 ./.mise/tasks/update_benchmarks.py --jmh-args '-f 1 -wi 1 -i 3'"
# Full CI benchmark run: 3 forks, 3 warmup, 5 measurement iterations.
[tasks."benchmark:ci"]
# Fixed: the original description had an unbalanced parenthesis.
description = "Run benchmarks with CI configuration (3 forks, 3 warmup, 5 measurement iterations; ~60 min total)"
run = "python3 ./.mise/tasks/update_benchmarks.py --jmh-args '-f 3 -wi 3 -i 5'"
# Build the benchmarks module, then run the JMH jar directly so results are
# written as JSON (benchmark-results.json). JMH_ARGS can override the default
# CI iteration settings; the script body is a verbatim multi-line shell string.
[tasks."benchmark:ci-json"]
description = "Run benchmarks with CI configuration and JSON output (for workflow/testing)"
run = """
./mvnw -pl benchmarks -am -DskipTests clean package
JMH_ARGS="${JMH_ARGS:--f 3 -wi 3 -i 5}"
echo "Running benchmarks with args: $JMH_ARGS"
java -jar ./benchmarks/target/benchmarks.jar -rf json -rff benchmark-results.json $JMH_ARGS
"""
# Summarize a previously produced benchmark-results.json (see benchmark:ci-json).
[tasks."benchmark:generate-summary"]
description = "Generate summary from existing benchmark-results.json"
run = "python3 ./.mise/tasks/generate_benchmark_summary.py"