Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
35 changes: 33 additions & 2 deletions .github/workflows/perf-tests.yml
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,7 @@ jobs:
perf-tests:
name: Component Performance Tests
runs-on: windows-latest
timeout-minutes: 30
timeout-minutes: 60

permissions:
contents: read
Expand All @@ -49,9 +49,31 @@ jobs:
- name: Install dependencies
run: yarn install --frozen-lockfile

- name: Install Windows SDK 10.0.22621
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Installing the Windows SDK in the workflow is a major time/flakiness risk (download/installer failures). Can we either rely on the preinstalled SDK on windows-latest, cache this, or add stronger logging (installed SDK versions) + clearer error output when install fails?

shell: pwsh
run: |
$installerUrl = "https://download.microsoft.com/download/3/b/d/3bd97f81-3f5b-4922-b86d-dc5145cd6bfe/windowssdk/winsdksetup.exe"
$installerPath = "$env:TEMP\winsdksetup.exe"
Invoke-WebRequest -Uri $installerUrl -OutFile $installerPath
Start-Process -FilePath $installerPath -ArgumentList '/quiet', '/norestart', '/features', '+' -Wait -NoNewWindow
$sdkPath = "${env:ProgramFiles(x86)}\Windows Kits\10\Include\10.0.22621.0"
if (!(Test-Path $sdkPath)) {
echo "::error::Failed to install Windows SDK 10.0.22621"
exit 1
}
- name: Build perf-testing package
run: yarn workspace @react-native-windows/perf-testing build

- name: Enable Developer Mode
shell: pwsh
run: reg add "HKLM\SOFTWARE\Microsoft\Windows\CurrentVersion\AppModelUnlock" /t REG_DWORD /f /v AllowDevelopmentWithoutDevLicense /d 1
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Enabling Developer Mode via HKLM registry is a sensitive step. Can we add explicit error handling/output if this fails, and a short comment on why it’s needed for the native perf setup?


# ── Build & Deploy RNTesterApp-Fabric (for native perf tests) ──
- name: Build and deploy RNTesterApp-Fabric
working-directory: packages/e2e-test-app-fabric
run: yarn windows --release --no-launch --logging

# ── Run Tests ──────────────────────────────────────────
- name: Run perf tests
id: perf-run
Expand All @@ -61,7 +83,14 @@ jobs:
RN_TARGET_PLATFORM: windows
run: yarn perf:ci
continue-on-error: true # Don't fail here — let comparison decide
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

continue-on-error: true is fine if compare/report is the gate, but we should ensure the gate fails with a clear message when native perf results are missing. Can we explicitly validate .native-perf-results/results.json exists/non-empty before compare/report succeeds?


- name: Run native perf tests
id: native-perf-run
working-directory: packages/e2e-test-app-fabric
env:
CI: 'true'
RN_TARGET_PLATFORM: windows
run: yarn perf:native:ci
continue-on-error: true
# ── Compare & Report ───────────────────────────────────
- name: Compare against baselines
id: compare
Expand All @@ -80,7 +109,9 @@ jobs:
name: perf-results
path: |
packages/e2e-test-app-fabric/.perf-results/
packages/e2e-test-app-fabric/.native-perf-results/
packages/e2e-test-app-fabric/test/__perf__/**/__perf_snapshots__/
packages/e2e-test-app-fabric/test/__native_perf__/**/__perf_snapshots__/
retention-days: 30

# ── Status Gate ────────────────────────────────────────
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,7 @@
{
"type": "prerelease",
"comment": "add native perf benchmarking infrastructure for Fabric components",
"packageName": "@react-native-windows/automation",
"email": "74712637+iamAbhi-916@users.noreply.github.com",
"dependentChangeType": "patch"
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,7 @@
{
"type": "prerelease",
"comment": "add native perf benchmarking infrastructure for Fabric components",
"packageName": "@react-native-windows/perf-testing",
"email": "74712637+iamAbhi-916@users.noreply.github.com",
"dependentChangeType": "patch"
}
Original file line number Diff line number Diff line change
Expand Up @@ -209,16 +209,24 @@ export default class AutomationEnvironment extends NodeEnvironment {
// Set up the "Desktop" or Root session
const rootBrowser = await webdriverio.remote(this.rootWebDriverOptions);

// Get the list of windows
const allWindows = await rootBrowser.$$('//Window');

// Find our target window
// Poll for the app window with timeout (cold starts can be slow)
const windowTimeout = 300000; // 5 minutes
const pollInterval = 2000;
const deadline = Date.now() + windowTimeout;
let appWindow: webdriverio.Element | undefined;
for (const window of allWindows) {
if ((await window.getAttribute('Name')) === appName) {
appWindow = window;

while (Date.now() < deadline) {
const allWindows = await rootBrowser.$$('//Window');
for (const window of allWindows) {
if ((await window.getAttribute('Name')) === appName) {
appWindow = window;
break;
}
}
if (appWindow) {
break;
}
await new Promise(resolve => setTimeout(resolve, pollInterval));
}

if (!appWindow) {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -56,4 +56,14 @@ export const ThresholdPresets: Readonly<
maxCV: 0.6,
mode: 'track',
},

native: {
maxDurationIncrease: 15,
maxDuration: Infinity,
minAbsoluteDelta: 5,
maxRenderCount: 1,
minRuns: 10,
maxCV: 0.5,
mode: 'gate',
},
};
Original file line number Diff line number Diff line change
@@ -0,0 +1,265 @@
/**
* Copyright (c) Microsoft Corporation.
* Licensed under the MIT License.
* @format
*/

'use strict';

const React = require('react');
const {
View,
Text,
TextInput,
Image,
ScrollView,
FlatList,
SectionList,
Switch,
ActivityIndicator,
Button,
Modal,
Pressable,
TouchableHighlight,
TouchableOpacity,
StyleSheet,
} = require('react-native');

const {useState, useRef, useCallback, useEffect} = React;

// Benchmark run phases. The runner cycles idle -> (clearing -> mounting)* -> done;
// 'clearing' unmounts the target, 'mounting' remounts it while timing the mount.
const PHASE_IDLE = 'idle';
const PHASE_CLEARING = 'clearing';
const PHASE_MOUNTING = 'mounting';
const PHASE_DONE = 'done';

const COMPONENT_REGISTRY = {
View: () => <View style={styles.target} />,
Text: () => <Text style={styles.target}>Benchmark Text</Text>,
TextInput: () => (
<TextInput style={styles.targetInput} placeholder="Benchmark" />
),
Image: () => (
<Image
style={styles.targetImage}
source={{uri: 'https://reactnative.dev/img/tiny_logo.png'}}
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Using a network image can introduce perf noise / failures in CI. Consider switching to a bundled/local asset so the “Image native mount” test isn’t affected by network variability.

/>
),
ScrollView: () => (
<ScrollView style={styles.target}>
{Array.from({length: 20}, (_, i) => (
<View key={i} style={styles.scrollItem} />
))}
</ScrollView>
),
FlatList: () => (
<FlatList
style={styles.target}
data={Array.from({length: 50}, (_, i) => ({key: String(i)}))}
renderItem={({item}) => <Text>{item.key}</Text>}
/>
),
SectionList: () => (
<SectionList
style={styles.target}
sections={[
{title: 'A', data: ['A1', 'A2', 'A3']},
{title: 'B', data: ['B1', 'B2', 'B3']},
]}
renderItem={({item}) => <Text>{item}</Text>}
renderSectionHeader={({section}) => <Text>{section.title}</Text>}
/>
),
Switch: () => <Switch value={false} />,
ActivityIndicator: () => <ActivityIndicator size="large" />,
Button: () => <Button title="Benchmark" onPress={() => {}} />,
Modal: () => (
<Modal visible={false} transparent>
<View style={styles.target} />
</Modal>
),
Pressable: () => (
<Pressable style={styles.target}>
<Text>Press</Text>
</Pressable>
),
TouchableHighlight: () => (
<TouchableHighlight style={styles.target} onPress={() => {}}>
<Text>Highlight</Text>
</TouchableHighlight>
),
TouchableOpacity: () => (
<TouchableOpacity style={styles.target} onPress={() => {}}>
<Text>Opacity</Text>
</TouchableOpacity>
),
};

/**
 * Interactive benchmark harness driven by the perf test automation.
 *
 * Repeatedly unmounts and remounts one component from COMPONENT_REGISTRY and
 * times each mount with performance.mark/measure. Progress is exposed through
 * testID'd elements so WebDriver can drive it: 'perf-component-input',
 * 'perf-runs-input', 'perf-run-btn', 'perf-status' (current phase string),
 * and 'perf-results' (JSON of all run durations once phase reaches 'done').
 *
 * Phase machine per run:
 *   CLEARING  — hide the target, wait one animation frame so the unmount commits
 *   MOUNTING  — place a start mark, show the target
 *   (target mounted) — wait one animation frame, then finishRun() records the
 *                      duration and either loops back to CLEARING or finishes.
 * NOTE(review): the rAF after mount measures time to the next frame callback;
 * whether that spans the full native commit/paint is platform-dependent — confirm.
 */
function BenchmarkRunner() {
  // Controls (editable via the two TextInputs).
  const [componentName, setComponentName] = useState('View');
  const [runsInput, setRunsInput] = useState('15');
  // Current phase of the run loop; rendered into 'perf-status' for polling.
  const [phase, setPhase] = useState(PHASE_IDLE);
  // Whether the benchmark target is currently mounted.
  const [showTarget, setShowTarget] = useState(false);
  // Serialized results, published only when all runs complete.
  const [resultsJson, setResultsJson] = useState('');

  // Refs (not state) so mutating them mid-run never triggers extra renders
  // that would pollute the measurement.
  const durationsRef = useRef([]);      // per-run durations (ms)
  const runIndexRef = useRef(0);        // index of the run in progress
  const totalRunsRef = useRef(15);      // total runs requested for this session
  const markNameRef = useRef('');       // name of the outstanding start mark

  // Close out the current run: place the end mark, record the measured
  // duration, clean up marks/measures, then either start the next run or
  // publish results and stop.
  const finishRun = useCallback(() => {
    const markEnd = `perf-end-${runIndexRef.current}`;
    performance.mark(markEnd);
    try {
      const measure = performance.measure(
        `perf-run-${runIndexRef.current}`,
        markNameRef.current,
        markEnd,
      );
      durationsRef.current.push(measure.duration);
    } catch (_) {} // a failed measure (e.g. missing start mark) drops that run
    // Clear entries so repeated sessions don't accumulate in the perf buffer.
    performance.clearMarks(markNameRef.current);
    performance.clearMarks(markEnd);
    performance.clearMeasures(`perf-run-${runIndexRef.current}`);

    runIndexRef.current++;
    if (runIndexRef.current < totalRunsRef.current) {
      setPhase(PHASE_CLEARING); // loop: unmount again for the next run
    } else {
      setShowTarget(false);
      // runs reports durations actually recorded, which can be fewer than
      // requested if any measure call threw above.
      setResultsJson(
        JSON.stringify({
          componentName,
          runs: durationsRef.current.length,
          durations: durationsRef.current,
        }),
      );
      setPhase(PHASE_DONE);
    }
  }, [componentName]);

  // CLEARING: unmount the target, then wait one frame so the unmount is
  // committed before the timed remount begins.
  useEffect(() => {
    if (phase === PHASE_CLEARING) {
      setShowTarget(false);
      requestAnimationFrame(() => {
        setPhase(PHASE_MOUNTING);
      });
    }
  }, [phase]);

  // MOUNTING: start the clock (mark) immediately before mounting the target.
  useEffect(() => {
    if (phase === PHASE_MOUNTING) {
      const markStart = `perf-start-${runIndexRef.current}`;
      markNameRef.current = markStart;
      performance.mark(markStart);
      setShowTarget(true);
    }
  }, [phase]);

  // Once the target has rendered (phase still MOUNTING, showTarget true),
  // wait one frame and stop the clock.
  useEffect(() => {
    if (phase === PHASE_MOUNTING && showTarget) {
      requestAnimationFrame(() => {
        finishRun();
      });
    }
  }, [phase, showTarget, finishRun]);

  // Kick off a fresh session from the Run button; resets all per-session refs.
  const handleRun = useCallback(() => {
    const runs = parseInt(runsInput, 10) || 15; // fall back to 15 on bad input
    totalRunsRef.current = runs;
    runIndexRef.current = 0;
    durationsRef.current = [];
    setResultsJson('');
    setPhase(PHASE_CLEARING);
  }, [runsInput]);

  // Unknown component names yield undefined here; the target area renders empty.
  const ComponentFactory = COMPONENT_REGISTRY[componentName];

  return (
    <View style={styles.container}>
      <View style={styles.controls}>
        <TextInput
          testID="perf-component-input"
          style={styles.input}
          value={componentName}
          onChangeText={setComponentName}
          placeholder="Component name"
        />
        <TextInput
          testID="perf-runs-input"
          style={styles.input}
          value={runsInput}
          onChangeText={setRunsInput}
          keyboardType="numeric"
          placeholder="Runs"
        />
        <Pressable
          testID="perf-run-btn"
          style={styles.button}
          onPress={handleRun}
          disabled={phase !== PHASE_IDLE && phase !== PHASE_DONE}>
          <Text style={styles.buttonText}>Run Benchmark</Text>
        </Pressable>
      </View>

      <Text testID="perf-status" style={styles.status}>
        {phase}
      </Text>

      <View style={styles.targetContainer}>
        {showTarget && ComponentFactory ? <ComponentFactory /> : null}
      </View>

      <Text testID="perf-results" style={styles.results}>
        {resultsJson}
      </Text>
    </View>
  );
}

// Styles for the benchmark harness. The `target*` entries size the component
// under test; the rest lay out the controls, status line, and results readout.
const styles = StyleSheet.create({
  // Screen layout.
  container: {
    flex: 1,
    padding: 8,
  },
  controls: {
    flexDirection: 'row',
    gap: 8,
    marginBottom: 8,
  },
  input: {
    borderWidth: 1,
    borderColor: '#ccc',
    padding: 6,
    minWidth: 100,
    fontSize: 14,
  },
  button: {
    backgroundColor: '#0078D4',
    paddingHorizontal: 16,
    paddingVertical: 8,
    borderRadius: 4,
    justifyContent: 'center',
  },
  buttonText: {
    color: 'white',
    fontWeight: 'bold',
  },
  status: {
    fontSize: 12,
    color: '#666',
    marginBottom: 4,
  },
  targetContainer: {
    minHeight: 100,
    borderWidth: 1,
    borderColor: '#eee',
    marginBottom: 8,
  },
  // Fixed-size benchmark targets (stable per-run work).
  target: {
    width: 80,
    height: 80,
    backgroundColor: '#f0f0f0',
  },
  targetInput: {
    width: 200,
    height: 40,
    borderWidth: 1,
    borderColor: '#999',
  },
  targetImage: {
    width: 80,
    height: 80,
  },
  scrollItem: {
    height: 20,
    backgroundColor: '#ddd',
    marginBottom: 2,
  },
  results: {
    fontSize: 10,
    fontFamily: 'monospace',
    color: '#333',
  },
});

exports.displayName = 'NativePerfBenchmarkExample';
exports.framework = 'React';
exports.category = 'Basic';
exports.title = 'Native Perf Benchmark';
exports.description =
'Measures native rendering pipeline via performance.mark/measure.';

exports.examples = [
{
title: 'Benchmark Runner',
render: function () {
return <BenchmarkRunner />;
},
},
];
Loading
Loading