-
Notifications
You must be signed in to change notification settings - Fork 0
101 lines (90 loc) · 3.33 KB
/
benchmark.yml
File metadata and controls
101 lines (90 loc) · 3.33 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
# Continuous benchmark workflow: runs the benchmark suite on every push to
# dev (and on demand via workflow_dispatch) and reports the result as a
# commit status.
name: Benchmark

on:
  push:
    branches: [ dev ]
  workflow_dispatch:
    inputs:
      duration:
        description: Benchmark duration per preset (seconds)
        required: false
        default: '30'

# At most one benchmark run per ref; a newer push cancels the older run.
concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

# Pin the default branch name for any `git init` performed by tooling during
# the run, independent of the runner's global git configuration.
env:
  GIT_CONFIG_COUNT: 1
  GIT_CONFIG_KEY_0: init.defaultBranch
  GIT_CONFIG_VALUE_0: main
jobs:
  benchmark:
    permissions:
      contents: read   # for actions/checkout
      statuses: write  # for createCommitStatus in the final step
    runs-on: blacksmith-2vcpu-ubuntu-2404
    timeout-minutes: 90
    steps:
      - uses: actions/checkout@v4

      - name: Setup Nix
        uses: ./.github/actions/setup-nix

      # Launch a headless Wayland compositor so the benchmark can present
      # without a physical display. The backgrounded weston process keeps
      # running on the runner for the remainder of the job.
      - name: Start headless Wayland compositor
        run: |
          mkdir -p /tmp/runtime-runner
          chmod 700 /tmp/runtime-runner
          export XDG_RUNTIME_DIR=/tmp/runtime-runner
          nix develop --command weston --socket=headless --backend=headless-backend.so --width=1280 --height=720 &
          {
            echo "WAYLAND_DISPLAY=headless"
            echo "XDG_RUNTIME_DIR=/tmp/runtime-runner"
          } >> "$GITHUB_ENV"
          # Wait (bounded) for the compositor socket to appear rather than
          # sleeping a fixed 5 seconds, which races on a slow runner.
          for _ in $(seq 1 30); do
            [ -S "$XDG_RUNTIME_DIR/headless" ] && break
            sleep 1
          done

      - name: Run benchmark suite
        id: run_benchmark
        env:
          ZIG_GLOBAL_CACHE_DIR: ${{ github.workspace }}/.zig-cache-global
          XDG_RUNTIME_DIR: /tmp/runtime-runner
          WAYLAND_DISPLAY: headless
          ZIGCRAFT_SAFE_MODE: '1'
          # The `inputs` context is empty on push events, so this falls back
          # to 30 seconds there and honors the dispatch input otherwise.
          BENCHMARK_DURATION: ${{ inputs.duration || '30' }}
        run: |
          # Point Vulkan at the software (lavapipe) driver so no GPU is needed.
          LVP_PATH=$(nix build --no-link --print-out-paths nixpkgs#mesa.drivers)/share/vulkan/icd.d/lvp_icd.x86_64.json
          LAYER_PATH=$(nix build --no-link --print-out-paths nixpkgs#vulkan-validation-layers)/share/vulkan/explicit_layer.d
          export VK_ICD_FILENAMES="$LVP_PATH"
          export VK_LAYER_PATH="$LAYER_PATH"
          nix develop --command bash scripts/run_benchmark.sh --duration "$BENCHMARK_DURATION" --presets low,medium,high --output-dir benchmark-results

      - name: Compare against baseline
        id: compare
        run: |
          for preset in low medium high; do
            bash scripts/compare_benchmarks.sh docs/benchmarks/baseline.json "benchmark-results/${preset}.json" --preset "$preset"
          done

      - name: Upload benchmark artifacts
        if: always()
        # NOTE: @v7 does not exist for upload-artifact; v4 is the current
        # major release — the previous pin would fail to resolve.
        uses: actions/upload-artifact@v4
        with:
          name: benchmark-results
          path: benchmark-results
          retention-days: 30

      # Publish a pass/fail commit status so the result is visible on the
      # commit even though this workflow is not a required check.
      - name: Publish commit status
        if: always()
        # NOTE: @v9 does not exist for github-script; v7 is the current
        # major release — the previous pin would fail to resolve.
        uses: actions/github-script@v7
        env:
          BENCHMARK_RUN_OUTCOME: ${{ steps.run_benchmark.outcome }}
          BENCHMARK_COMPARE_OUTCOME: ${{ steps.compare.outcome }}
        with:
          script: |
            const runOutcome = process.env.BENCHMARK_RUN_OUTCOME;
            const compareOutcome = process.env.BENCHMARK_COMPARE_OUTCOME;
            const failed = runOutcome === 'failure' || compareOutcome === 'failure';
            const description = failed
              ? 'Benchmark regression or runtime failure'
              : compareOutcome === 'skipped'
                ? 'Benchmark completed (baseline placeholder)'
                : 'Benchmark completed';
            await github.rest.repos.createCommitStatus({
              owner: context.repo.owner,
              repo: context.repo.repo,
              sha: context.sha,
              state: failed ? 'failure' : 'success',
              context: 'performance/benchmark',
              description,
            });