diff --git a/README.md b/README.md
index 614f99e8..8fc42bba 100644
--- a/README.md
+++ b/README.md
@@ -23,6 +23,7 @@ Full documentation: **[docs.page/vypdev/copilot](https://docs.page/vypdev/copilo
| [OpenCode (AI)](https://docs.page/vypdev/copilot/opencode-integration) | Progress, Bugbot, think, AI PR description |
| [Testing OpenCode locally](https://docs.page/vypdev/copilot/testing-opencode-plan-locally) | Run check-progress, detect-potential-problems, recommend-steps via CLI |
| [Single actions](https://docs.page/vypdev/copilot/single-actions) | On-demand: check progress, think, create release/tag, deployed |
+| [Deploy label and merge flow](https://docs.page/vypdev/copilot/single-actions/deploy-label-and-merge) | Deploy/deployed labels, post-deploy merges, waiting for checks per PR |
| [Issues](https://docs.page/vypdev/copilot/issues) | Issue configuration and types (feature, bugfix, hotfix, release, docs, chore) |
| [Pull requests](https://docs.page/vypdev/copilot/pull-requests) | PR configuration and AI description |
| [Troubleshooting](https://docs.page/vypdev/copilot/troubleshooting) | Common issues and solutions |
diff --git a/build/cli/index.js b/build/cli/index.js
index 734e447e..b7f4365c 100755
--- a/build/cli/index.js
+++ b/build/cli/index.js
@@ -46693,7 +46693,11 @@ const boxen_1 = __importDefault(__nccwpck_require__(4506));
const queue_utils_1 = __nccwpck_require__(9800);
async function mainRun(execution) {
const results = [];
+ (0, logger_1.logInfo)('GitHub Action: starting main run.');
+ (0, logger_1.logDebugInfo)(`Event: ${execution.eventName}, actor: ${execution.actor}, repo: ${execution.owner}/${execution.repo}, debug: ${execution.debug}`);
await execution.setup();
+ (0, logger_1.clearAccumulatedLogs)();
+ (0, logger_1.logDebugInfo)(`Setup done. Issue number: ${execution.issueNumber}, isSingleAction: ${execution.isSingleAction}, isIssue: ${execution.isIssue}, isPullRequest: ${execution.isPullRequest}, isPush: ${execution.isPush}`);
if (!execution.welcome) {
/**
* Wait for previous runs to finish
@@ -46705,19 +46709,21 @@ async function mainRun(execution) {
}
if (execution.runnedByToken) {
if (execution.isSingleAction && execution.singleAction.validSingleAction) {
- (0, logger_1.logInfo)(`User from token (${execution.tokenUser}) matches actor. Executing single action.`);
+ (0, logger_1.logInfo)(`User from token (${execution.tokenUser}) matches actor. Executing single action: ${execution.singleAction.currentSingleAction}.`);
results.push(...await new single_action_use_case_1.SingleActionUseCase().invoke(execution));
+ (0, logger_1.logInfo)(`Single action finished. Results: ${results.length}.`);
return results;
}
- (0, logger_1.logInfo)(`User from token (${execution.tokenUser}) matches actor. Ignoring.`);
+ (0, logger_1.logInfo)(`User from token (${execution.tokenUser}) matches actor. Ignoring (not a valid single action).`);
return results;
}
if (execution.issueNumber === -1) {
if (execution.isSingleAction && execution.singleAction.isSingleActionWithoutIssue) {
+ (0, logger_1.logInfo)('No issue number; running single action without issue.');
results.push(...await new single_action_use_case_1.SingleActionUseCase().invoke(execution));
}
else {
- (0, logger_1.logInfo)(`Issue number not found. Skipping.`);
+ (0, logger_1.logInfo)('Issue number not found. Skipping.');
}
return results;
}
@@ -46734,34 +46740,45 @@ async function mainRun(execution) {
}
try {
if (execution.isSingleAction) {
+ (0, logger_1.logInfo)(`Running SingleActionUseCase (action: ${execution.singleAction.currentSingleAction}).`);
results.push(...await new single_action_use_case_1.SingleActionUseCase().invoke(execution));
}
else if (execution.isIssue) {
if (execution.issue.isIssueComment) {
+ (0, logger_1.logInfo)(`Running IssueCommentUseCase for issue #${execution.issue.number}.`);
results.push(...await new issue_comment_use_case_1.IssueCommentUseCase().invoke(execution));
}
else {
+ (0, logger_1.logInfo)(`Running IssueUseCase for issue #${execution.issueNumber}.`);
results.push(...await new issue_use_case_1.IssueUseCase().invoke(execution));
}
}
else if (execution.isPullRequest) {
if (execution.pullRequest.isPullRequestReviewComment) {
+ (0, logger_1.logInfo)(`Running PullRequestReviewCommentUseCase for PR #${execution.pullRequest.number}.`);
results.push(...await new pull_request_review_comment_use_case_1.PullRequestReviewCommentUseCase().invoke(execution));
}
else {
+ (0, logger_1.logInfo)(`Running PullRequestUseCase for PR #${execution.pullRequest.number}.`);
results.push(...await new pull_request_use_case_1.PullRequestUseCase().invoke(execution));
}
}
else if (execution.isPush) {
+ (0, logger_1.logDebugInfo)(`Push event. Branch: ${execution.commit?.branch ?? 'unknown'}, commits: ${execution.commit?.commits?.length ?? 0}, issue number: ${execution.issueNumber}.`);
+ (0, logger_1.logInfo)('Running CommitUseCase.');
results.push(...await new commit_use_case_1.CommitUseCase().invoke(execution));
}
else {
+ (0, logger_1.logError)(`Action not handled. Event: ${execution.eventName}.`);
core.setFailed(`Action not handled.`);
}
+ const totalSteps = results.reduce((acc, r) => acc + (r.steps?.length ?? 0), 0);
+ (0, logger_1.logInfo)(`Main run finished. Results: ${results.length}, total steps: ${totalSteps}.`);
return results;
}
catch (error) {
const msg = error instanceof Error ? error.message : String(error);
+ (0, logger_1.logError)(`Main run failed: ${msg}`, error instanceof Error ? { stack: error.stack } : undefined);
core.setFailed(msg);
return [];
}
@@ -49253,11 +49270,6 @@ function createTimeoutSignal(ms) {
function ensureNoTrailingSlash(url) {
return url.replace(/\/+$/, '') || url;
}
-function truncate(s, maxLen) {
- return s.length <= maxLen ? s : s.slice(0, maxLen) + '...';
-}
-const OPENCODE_PROMPT_LOG_PREVIEW_LEN = 500;
-const OPENCODE_PROMPT_LOG_FULL_LEN = 3000;
function getValidatedOpenCodeConfig(ai) {
const serverUrl = ai.getOpencodeServerUrl();
const model = ai.getOpencodeModel();
@@ -49342,16 +49354,12 @@ function parseJsonFromAgentText(text) {
}
catch (e) {
const msg = e instanceof Error ? e.message : String(e);
- (0, logger_1.logDebugInfo)(`OpenCode agent response (expectJson): failed to parse extracted JSON. Full text length=${trimmed.length} firstChars=${JSON.stringify(trimmed.slice(0, 200))}`);
+ (0, logger_1.logDebugInfo)(`OpenCode agent response (expectJson): failed to parse extracted JSON. Full text length=${trimmed.length}. Full text:\n${trimmed}`);
throw new Error(`Agent response is not valid JSON: ${msg}`);
}
}
- const previewLen = 500;
- const msg = trimmed.length > previewLen ? `${trimmed.slice(0, previewLen)}...` : trimmed;
- const fullTruncated = trimmed.length > 3000 ? `${trimmed.slice(0, 3000)}... [total ${trimmed.length} chars]` : trimmed;
- (0, logger_1.logDebugInfo)(`OpenCode agent response (expectJson): no JSON object found. length=${trimmed.length} preview=${JSON.stringify(msg)}`);
- (0, logger_1.logDebugInfo)(`OpenCode agent response (expectJson) full text for debugging:\n${fullTruncated}`);
- throw new Error(`Agent response is not valid JSON: no JSON object found. Response starts with: ${msg.slice(0, 150)}`);
+ (0, logger_1.logDebugInfo)(`OpenCode agent response (expectJson): no JSON object found. length=${trimmed.length}. Full text:\n${trimmed}`);
+ throw new Error(`Agent response is not valid JSON: no JSON object found. Response length: ${trimmed.length} chars.`);
}
}
}
@@ -49367,14 +49375,10 @@ function extractPartsByType(parts, type, joinWith) {
.join(joinWith)
.trim();
}
-const OPENCODE_RESPONSE_LOG_MAX_LEN = 80000;
-/** Parse response as JSON; on empty or invalid body throw a clear error with context. */
+/** Parse response as JSON; on empty or invalid body throw a clear error with context. Logs full body (no truncation). */
async function parseJsonResponse(res, context) {
const raw = await res.text();
- const truncated = raw.length > OPENCODE_RESPONSE_LOG_MAX_LEN
- ? `${raw.slice(0, OPENCODE_RESPONSE_LOG_MAX_LEN)}... [truncated, total ${raw.length} chars]`
- : raw;
- (0, logger_1.logDebugInfo)(`OpenCode response [${context}] status=${res.status} bodyLength=${raw.length}: ${truncated}`);
+ (0, logger_1.logDebugInfo)(`OpenCode response [${context}] status=${res.status} bodyLength=${raw.length}. Full body:\n${raw}`);
if (!raw || !raw.trim()) {
throw new Error(`${context}: empty response body (status ${res.status}). The server may have returned nothing or closed the connection early.`);
}
@@ -49382,8 +49386,7 @@ async function parseJsonResponse(res, context) {
return JSON.parse(raw);
}
catch (parseError) {
- const snippet = raw.length > 200 ? `${raw.slice(0, 200)}...` : raw;
- const err = new Error(`${context}: invalid JSON (status ${res.status}). Body snippet: ${snippet}`);
+ const err = new Error(`${context}: invalid JSON (status ${res.status}). Body length: ${raw.length} chars. See debug log for full body.`);
if (parseError instanceof Error && 'cause' in err)
err.cause = parseError;
throw err;
@@ -49397,25 +49400,27 @@ function extractTextFromParts(parts) {
function extractReasoningFromParts(parts) {
return extractPartsByType(parts, 'reasoning', '\n\n');
}
-/** Max length of per-part text preview in debug log (to avoid huge log lines). */
-const OPENCODE_PART_PREVIEW_LEN = 80;
/**
- * Build a short summary of OpenCode message parts for debug logs (types, text lengths, and short preview).
+ * Log OpenCode message parts: summary line and full text of each part (no truncation).
*/
-function summarizePartsForLog(parts, context) {
+function logPartsForDebug(parts, context) {
if (!Array.isArray(parts) || parts.length === 0) {
- return `${context}: 0 parts`;
+ (0, logger_1.logDebugInfo)(`${context}: 0 parts`);
+ return;
}
- const items = parts.map((p, i) => {
+ const summary = parts.map((p, i) => {
+ const type = p?.type ?? '(missing type)';
+ const len = typeof p?.text === 'string' ? p.text.length : 0;
+ return `[${i}] type=${type} length=${len}`;
+ }).join(' | ');
+ (0, logger_1.logDebugInfo)(`${context}: ${parts.length} part(s) — ${summary}`);
+ parts.forEach((p, i) => {
const type = p?.type ?? '(missing type)';
const text = typeof p?.text === 'string' ? p.text : '';
- const len = text.length;
- const preview = len > OPENCODE_PART_PREVIEW_LEN
- ? `${text.slice(0, OPENCODE_PART_PREVIEW_LEN).replace(/\n/g, ' ')}...`
- : text.replace(/\n/g, ' ');
- return `[${i}] type=${type} length=${len}${preview ? ` preview=${JSON.stringify(preview)}` : ''}`;
+ if (text) {
+ (0, logger_1.logDebugInfo)(`OpenCode part [${i}] type=${type} full text:\n${text}`);
+ }
});
- return `${context}: ${parts.length} part(s) — ${items.join(' | ')}`;
}
/** Default OpenCode agent for analysis/planning (read-only, no file edits). */
exports.OPENCODE_AGENT_PLAN = 'plan';
@@ -49468,8 +49473,8 @@ exports.LANGUAGE_CHECK_RESPONSE_SCHEMA = {
*/
async function opencodeMessageWithAgentRaw(baseUrl, options) {
(0, logger_1.logInfo)(`OpenCode request [agent ${options.agent}] model=${options.providerID}/${options.modelID} promptLength=${options.promptText.length}`);
- (0, logger_1.logInfo)(`OpenCode sending prompt (preview): ${truncate(options.promptText, OPENCODE_PROMPT_LOG_PREVIEW_LEN)}`);
- (0, logger_1.logDebugInfo)(`OpenCode prompt (full): ${truncate(options.promptText, OPENCODE_PROMPT_LOG_FULL_LEN)}`);
+ (0, logger_1.logInfo)(`OpenCode sending prompt (full):\n${options.promptText}`);
+ (0, logger_1.logDebugInfo)(`OpenCode prompt (full, no truncation):\n${options.promptText}`);
(0, logger_1.logDebugInfo)(`OpenCode message body: agent=${options.agent}, model=${options.providerID}/${options.modelID}, parts[0].text length=${options.promptText.length}`);
const base = ensureNoTrailingSlash(baseUrl);
const signal = createTimeoutSignal(constants_1.OPENCODE_REQUEST_TIMEOUT_MS);
@@ -49511,7 +49516,7 @@ async function opencodeMessageWithAgentRaw(baseUrl, options) {
const messageData = await parseJsonResponse(messageRes, `OpenCode agent "${options.agent}" message`);
const parts = messageData?.parts ?? messageData?.data?.parts ?? [];
const partsArray = Array.isArray(parts) ? parts : [];
- (0, logger_1.logDebugInfo)(summarizePartsForLog(partsArray, `OpenCode agent "${options.agent}" message parts`));
+ logPartsForDebug(partsArray, `OpenCode agent "${options.agent}" message parts`);
const text = extractTextFromParts(partsArray);
(0, logger_1.logInfo)(`OpenCode response [agent ${options.agent}] responseLength=${text.length} sessionId=${sessionId}`);
return { text, parts: partsArray, sessionId };
@@ -49580,9 +49585,8 @@ class AiRepository {
throw new Error('Empty response text');
const reasoning = options.includeReasoning ? extractReasoningFromParts(parts) : '';
if (options.expectJson && options.schema) {
- const maxLogLen = 5000000;
- const toLog = text.length > maxLogLen ? `${text.slice(0, maxLogLen)}\n... [truncated, total ${text.length} chars]` : text;
- (0, logger_1.logInfo)(`OpenCode agent response (full text, expectJson=true) length=${text.length}:\n${toLog}`);
+ (0, logger_1.logInfo)(`OpenCode agent response (expectJson=true) length=${text.length}`);
+ (0, logger_1.logDebugInfo)(`OpenCode agent response (full text, no truncation) length=${text.length}:\n${text}`);
const parsed = parseJsonFromAgentText(text);
if (options.includeReasoning && reasoning) {
return { ...parsed, reasoning };
@@ -50334,6 +50338,7 @@ Object.defineProperty(exports, "__esModule", ({ value: true }));
exports.IssueRepository = exports.PROGRESS_LABEL_PATTERN = void 0;
const core = __importStar(__nccwpck_require__(2186));
const github = __importStar(__nccwpck_require__(5438));
+const comment_watermark_1 = __nccwpck_require__(4467);
const logger_1 = __nccwpck_require__(8836);
const milestone_1 = __nccwpck_require__(2298);
/** Matches labels that are progress percentages (e.g. "0%", "85%"). Used for setProgressLabel and syncing. */
@@ -50690,23 +50695,27 @@ class IssueRepository {
});
return pullRequest.data.head.ref;
};
- this.addComment = async (owner, repository, issueNumber, comment, token) => {
+ this.addComment = async (owner, repository, issueNumber, comment, token, options) => {
+ const watermark = (0, comment_watermark_1.getCommentWatermark)(options?.commitSha ? { commitSha: options.commitSha, owner, repo: repository } : undefined);
+ const body = `${comment}\n\n${watermark}`;
const octokit = github.getOctokit(token);
await octokit.rest.issues.createComment({
owner: owner,
repo: repository,
issue_number: issueNumber,
- body: comment,
+ body,
});
(0, logger_1.logDebugInfo)(`Comment added to Issue ${issueNumber}.`);
};
- this.updateComment = async (owner, repository, issueNumber, commentId, comment, token) => {
+ this.updateComment = async (owner, repository, issueNumber, commentId, comment, token, options) => {
+ const watermark = (0, comment_watermark_1.getCommentWatermark)(options?.commitSha ? { commitSha: options.commitSha, owner, repo: repository } : undefined);
+ const body = `${comment}\n\n${watermark}`;
const octokit = github.getOctokit(token);
await octokit.rest.issues.updateComment({
owner: owner,
repo: repository,
comment_id: commentId,
- body: comment,
+ body,
});
(0, logger_1.logDebugInfo)(`Comment ${commentId} updated in Issue ${issueNumber}.`);
};
@@ -51275,8 +51284,14 @@ const github = __importStar(__nccwpck_require__(5438));
const logger_1 = __nccwpck_require__(8836);
const result_1 = __nccwpck_require__(7305);
/**
- * Repository for merging branches (via PR or direct merge).
- * Isolated to allow unit tests with mocked Octokit.
+ * Repository for merging branches: creates a PR, waits for that PR's check runs (or status checks),
+ * then merges the PR; on failure, falls back to a direct Git merge.
+ *
+ * Check runs are filtered by PR (pull_requests) so we only wait for the current PR's checks,
+ * not those of another PR sharing the same head (e.g. release→master vs release→develop).
+ * If the PR has no check runs after a short wait, we proceed to merge (branch may have no required checks).
+ *
+ * @see docs/single-actions/deploy-label-and-merge.mdx for the deploy flow and check-wait behaviour.
*/
class MergeRepository {
constructor() {
@@ -51318,10 +51333,13 @@ This PR merges **${head}** into **${base}**.
'\n\nThis PR was automatically created by [`copilot`](https://github.com/vypdev/copilot).',
});
const iteration = 10;
+ /** Give workflows a short window to register check runs for this PR; after this, we allow merge with no check runs (e.g. branch has no required checks). */
+ const maxWaitForPrChecksAttempts = 3;
if (timeout > iteration) {
// Wait for checks to complete - can use regular token for reading checks
let checksCompleted = false;
let attempts = 0;
+ let waitForPrChecksAttempts = 0;
const maxAttempts = timeout > iteration ? Math.floor(timeout / iteration) : iteration;
while (!checksCompleted && attempts < maxAttempts) {
const { data: checkRuns } = await octokit.rest.checks.listForRef({
@@ -51329,6 +51347,11 @@ This PR merges **${head}** into **${base}**.
repo: repository,
ref: head,
});
+ // Only consider check runs that are for this PR. When the same branch is used in
+ // multiple PRs (e.g. release→master and release→develop), listForRef returns runs
+ // for all PRs; we must wait for runs tied to the current PR or we may see completed
+ // runs from the other PR and merge before this PR's checks have run.
+ const runsForThisPr = checkRuns.check_runs.filter(run => run.pull_requests?.some(pr => pr.number === pullRequest.number));
// Get commit status checks for the PR head commit
const { data: commitStatus } = await octokit.rest.repos.getCombinedStatusForRef({
owner: owner,
@@ -51336,15 +51359,15 @@ This PR merges **${head}** into **${base}**.
ref: head,
});
(0, logger_1.logDebugInfo)(`Combined status state: ${commitStatus.state}`);
- (0, logger_1.logDebugInfo)(`Number of check runs: ${checkRuns.check_runs.length}`);
- // If there are check runs, prioritize those over status checks
- if (checkRuns.check_runs.length > 0) {
- const pendingCheckRuns = checkRuns.check_runs.filter(check => check.status !== 'completed');
+ (0, logger_1.logDebugInfo)(`Number of check runs for this PR: ${runsForThisPr.length} (total on ref: ${checkRuns.check_runs.length})`);
+ // If there are check runs for this PR, wait for them to complete
+ if (runsForThisPr.length > 0) {
+ const pendingCheckRuns = runsForThisPr.filter(check => check.status !== 'completed');
if (pendingCheckRuns.length === 0) {
checksCompleted = true;
(0, logger_1.logDebugInfo)('All check runs have completed.');
// Verify if all checks passed
- const failedChecks = checkRuns.check_runs.filter(check => check.conclusion === 'failure');
+ const failedChecks = runsForThisPr.filter(check => check.conclusion === 'failure');
if (failedChecks.length > 0) {
throw new Error(`Checks failed: ${failedChecks.map(check => check.name).join(', ')}`);
}
@@ -51359,6 +51382,37 @@ This PR merges **${head}** into **${base}**.
continue;
}
}
+ else if (checkRuns.check_runs.length > 0 && runsForThisPr.length === 0) {
+ // There are runs on the ref but none for this PR. Either workflows for this PR
+ // haven't registered yet, or this PR/base has no required checks.
+ waitForPrChecksAttempts++;
+ if (waitForPrChecksAttempts >= maxWaitForPrChecksAttempts) {
+ // Give up waiting for PR-specific check runs; fall back to status checks
+ // before proceeding to merge (PR may have required status checks).
+ const pendingChecksFallback = commitStatus.statuses.filter(status => {
+ (0, logger_1.logDebugInfo)(`Status check (fallback): ${status.context} (State: ${status.state})`);
+ return status.state === 'pending';
+ });
+ if (pendingChecksFallback.length === 0) {
+ checksCompleted = true;
+ (0, logger_1.logDebugInfo)(`No check runs for this PR after ${maxWaitForPrChecksAttempts} polls; no pending status checks; proceeding to merge.`);
+ }
+ else {
+ (0, logger_1.logDebugInfo)(`No check runs for this PR after ${maxWaitForPrChecksAttempts} polls; falling back to status checks. Waiting for ${pendingChecksFallback.length} status checks to complete.`);
+ pendingChecksFallback.forEach(check => {
+ (0, logger_1.logDebugInfo)(` - ${check.context} (State: ${check.state})`);
+ });
+ await new Promise(resolve => setTimeout(resolve, iteration * 1000));
+ attempts++;
+ }
+ }
+ else {
+ (0, logger_1.logDebugInfo)('Check runs exist on ref but none for this PR yet; waiting for workflows to register.');
+ await new Promise(resolve => setTimeout(resolve, iteration * 1000));
+ attempts++;
+ }
+ continue;
+ }
else {
// Fall back to status checks if no check runs exist
const pendingChecks = commitStatus.statuses.filter(status => {
@@ -53402,11 +53456,12 @@ const TEMPLATE = `You are in the repository workspace. Your task is to produce a
- **Breaking Changes:** list any, or "None".
- **Notes for Reviewers / Additional Context:** fill only if useful; otherwise a short placeholder or omit.
5. Do not output a single compact paragraph. Output the full filled template so the PR description is well-structured and easy to scan. Preserve the template's formatting (headings with # and ##, horizontal rules). Use checkboxes \`- [ ]\` / \`- [x]\` only where they add value; you may simplify or drop a section if it does not apply.
+6. **Output format:** Return only the filled template content. Do not add any preamble, meta-commentary, or framing phrases (e.g. "Based on my analysis...", "After reviewing the diff...", "Here is the description..."). Start directly with the first heading of the template (e.g. # Summary). Do not wrap the output in code blocks.
**Issue description:**
{{issueDescription}}
-Output only the filled template content (the PR description body), starting with the first heading of the template (e.g. # Summary). Do not wrap it in code blocks or add extra commentary.`;
+Output only the filled template content (the PR description body), starting with the first heading. No preamble, no commentary.`;
function getUpdatePullRequestDescriptionPrompt(params) {
return (0, fill_1.fillTemplate)(TEMPLATE, {
projectContextInstruction: params.projectContextInstruction,
@@ -53585,12 +53640,23 @@ class CheckProgressUseCase {
baseBranch: developmentBranch,
currentBranch: branch,
});
+ (0, logger_1.logDebugInfo)(`CheckProgress: prompt length=${prompt.length}, issue description length=${issueDescription.length}.`);
(0, logger_1.logInfo)('🤖 Analyzing progress using OpenCode Plan agent...');
const attemptResult = await this.fetchProgressAttempt(param.ai, prompt);
const progress = attemptResult.progress;
const summary = attemptResult.summary;
const reasoning = attemptResult.reasoning;
const remaining = attemptResult.remaining;
+ (0, logger_1.logDebugInfo)(`CheckProgress: raw progress=${progress}, summary length=${summary.length}, reasoning length=${reasoning.length}, remaining length=${remaining?.length ?? 0}. Full summary:\n${summary}`);
+ if (reasoning) {
+ (0, logger_1.logDebugInfo)(`CheckProgress: full reasoning:\n${reasoning}`);
+ }
+ if (remaining) {
+ (0, logger_1.logDebugInfo)(`CheckProgress: full remaining:\n${remaining}`);
+ }
+ if (progress < 0 || progress > 100) {
+ (0, logger_1.logWarn)(`CheckProgress: unexpected progress value ${progress} (expected 0-100). Clamping for display.`);
+ }
if (progress > 0) {
(0, logger_1.logInfo)(`✅ Progress detection completed: ${progress}%`);
}
@@ -53784,6 +53850,7 @@ class CreateReleaseUseCase {
}));
}
else {
+ (0, logger_1.logWarn)(`CreateRelease: createRelease returned no URL for version ${param.singleAction.version}.`);
result.push(new result_1.Result({
id: this.taskId,
success: false,
@@ -53869,6 +53936,7 @@ class CreateTagUseCase {
}));
}
else {
+ (0, logger_1.logWarn)(`CreateTag: createTag returned no SHA for version ${param.singleAction.version}.`);
result.push(new result_1.Result({
id: this.taskId,
success: false,
@@ -53911,6 +53979,16 @@ const branch_repository_1 = __nccwpck_require__(7701);
const issue_repository_1 = __nccwpck_require__(57);
const logger_1 = __nccwpck_require__(8836);
const task_emoji_1 = __nccwpck_require__(9785);
+/**
+ * Single action run after a successful deployment (triggered with the "deployed" action and an issue number).
+ *
+ * Requires the issue to have the "deploy" label and not already have the "deployed" label. Then:
+ * 1. Replaces the "deploy" label with "deployed".
+ * 2. If a release or hotfix branch is configured: merges it into default and develop (each via PR, waiting for that PR's checks).
+ * 3. Closes the issue only when all merges succeed.
+ *
+ * @see docs/single-actions/deploy-label-and-merge.mdx for the full flow and how merge/check waiting works.
+ */
class DeployedActionUseCase {
constructor() {
this.taskId = 'DeployedActionUseCase';
@@ -54335,11 +54413,13 @@ class RecommendStepsUseCase {
issueNumber: String(issueNumber),
issueDescription,
});
+ (0, logger_1.logDebugInfo)(`RecommendSteps: prompt length=${prompt.length}, issue description length=${issueDescription.length}.`);
(0, logger_1.logInfo)(`🤖 Recommending steps using OpenCode Plan agent...`);
const response = await this.aiRepository.askAgent(param.ai, ai_repository_1.OPENCODE_AGENT_PLAN, prompt);
const steps = typeof response === 'string'
? response
: (response && String(response.steps)) || 'No response.';
+ (0, logger_1.logDebugInfo)(`RecommendSteps: OpenCode response received. Steps length=${steps.length}. Full steps:\n${steps}`);
results.push(new result_1.Result({
id: this.taskId,
success: true,
@@ -54892,9 +54972,10 @@ class SingleActionUseCase {
const results = [];
try {
if (!param.singleAction.validSingleAction) {
- (0, logger_1.logDebugInfo)(`Not a valid single action: ${param.singleAction.currentSingleAction}`);
+ (0, logger_1.logWarn)(`Single action invoked but not a valid single action: ${param.singleAction.currentSingleAction}. Skipping.`);
return results;
}
+ (0, logger_1.logDebugInfo)(`SingleAction: dispatching to handler for action: ${param.singleAction.currentSingleAction}.`);
if (param.singleAction.isDeployedAction) {
results.push(...await new deployed_action_use_case_1.DeployedActionUseCase().invoke(param));
}
@@ -55314,8 +55395,10 @@ class BugbotAutofixUseCase {
}
const verifyCommands = execution.ai.getBugbotFixVerifyCommands?.() ?? [];
const prompt = (0, build_bugbot_fix_prompt_1.buildBugbotFixPrompt)(execution, context, idsToFix, userComment, verifyCommands);
+ (0, logger_1.logDebugInfo)(`BugbotAutofix: prompt length=${prompt.length}, target finding ids=${idsToFix.length}, verifyCommands=${verifyCommands.length}.`);
(0, logger_1.logInfo)("Running OpenCode build agent to fix selected findings (changes applied in workspace).");
const response = await this.aiRepository.copilotMessage(execution.ai, prompt);
+ (0, logger_1.logDebugInfo)(`BugbotAutofix: OpenCode build agent response length=${response?.text?.length ?? 0}. Full response:\n${response?.text ?? '(none)'}`);
if (!response?.text) {
(0, logger_1.logError)("Bugbot autofix: no response from OpenCode build agent.");
results.push(new result_1.Result({
@@ -55662,6 +55745,7 @@ class DetectBugbotFixIntentUseCase {
parentCommentBody = parentBody ?? undefined;
}
const prompt = (0, build_bugbot_fix_intent_prompt_1.buildBugbotFixIntentPrompt)(commentBody, unresolvedFindings, parentCommentBody);
+ (0, logger_1.logDebugInfo)(`DetectBugbotFixIntent: prompt length=${prompt.length}, unresolved findings=${unresolvedFindings.length}. Calling OpenCode Plan agent.`);
const response = await this.aiRepository.askAgent(param.ai, ai_repository_1.OPENCODE_AGENT_PLAN, prompt, {
expectJson: true,
schema: schema_1.BUGBOT_FIX_INTENT_RESPONSE_SCHEMA,
@@ -55686,6 +55770,7 @@ class DetectBugbotFixIntentUseCase {
: [];
const validIds = new Set(unresolvedIds);
const filteredIds = targetFindingIds.filter((id) => validIds.has(id));
+ (0, logger_1.logDebugInfo)(`DetectBugbotFixIntent: OpenCode payload is_fix_request=${isFixRequest}, is_do_request=${isDoRequest}, target_finding_ids=${JSON.stringify(targetFindingIds)}, filteredIds=${JSON.stringify(filteredIds)}.`);
results.push(new result_1.Result({
id: this.taskId,
success: true,
@@ -55821,6 +55906,7 @@ const issue_repository_1 = __nccwpck_require__(57);
const pull_request_repository_1 = __nccwpck_require__(634);
const build_bugbot_fix_prompt_1 = __nccwpck_require__(1822);
const marker_1 = __nccwpck_require__(2401);
+const logger_1 = __nccwpck_require__(8836);
/** Builds the text block sent to OpenCode for task 2 (decide which previous findings are now resolved). */
function buildPreviousFindingsBlock(previousFindings) {
if (previousFindings.length === 0)
@@ -55851,6 +55937,7 @@ async function loadBugbotContext(param, options) {
const owner = param.owner;
const repo = param.repo;
if (!headBranch) {
+ (0, logger_1.logDebugInfo)('LoadBugbotContext: no head branch (branchOverride or commit.branch); returning empty context.');
return {
existingByFindingId: {},
issueComments: [],
@@ -55916,6 +56003,7 @@ async function loadBugbotContext(param, options) {
}
const previousFindingsBlock = buildPreviousFindingsBlock(previousFindingsForPrompt);
const unresolvedFindingsWithBody = previousFindingsForPrompt.map((p) => ({ id: p.id, fullBody: p.fullBody }));
+ (0, logger_1.logDebugInfo)(`LoadBugbotContext: issue #${issueNumber}, branch ${headBranch}, open PRs=${openPrNumbers.length}, existing findings=${Object.keys(existingByFindingId).length}, unresolved with body=${unresolvedFindingsWithBody.length}.`);
// PR context is only for publishing: we need file list and diff lines so GitHub review comments attach to valid (path, line).
let prContext = null;
if (openPrNumbers.length > 0) {
@@ -56217,12 +56305,13 @@ Object.defineProperty(exports, "__esModule", ({ value: true }));
exports.publishFindings = publishFindings;
const issue_repository_1 = __nccwpck_require__(57);
const pull_request_repository_1 = __nccwpck_require__(634);
+const comment_watermark_1 = __nccwpck_require__(4467);
const logger_1 = __nccwpck_require__(8836);
const marker_1 = __nccwpck_require__(2401);
const path_validation_1 = __nccwpck_require__(1999);
/** Creates or updates issue comments for each finding; creates PR review comments only when finding.file is in prFiles. */
async function publishFindings(param) {
- const { execution, context, findings, overflowCount = 0, overflowTitles = [] } = param;
+ const { execution, context, findings, commitSha, overflowCount = 0, overflowTitles = [] } = param;
const { existingByFindingId, openPrNumbers, prContext } = context;
const issueNumber = execution.issueNumber;
const token = execution.tokens.token;
@@ -56230,18 +56319,22 @@ async function publishFindings(param) {
const repo = execution.repo;
const issueRepository = new issue_repository_1.IssueRepository();
const pullRequestRepository = new pull_request_repository_1.PullRequestRepository();
+ const bugbotWatermark = commitSha && owner && repo
+ ? (0, comment_watermark_1.getCommentWatermark)({ commitSha, owner, repo })
+ : (0, comment_watermark_1.getCommentWatermark)();
const prFiles = prContext?.prFiles ?? [];
const pathToFirstDiffLine = prContext?.pathToFirstDiffLine ?? {};
const prCommentsToCreate = [];
for (const finding of findings) {
const existing = existingByFindingId[finding.id];
const commentBody = (0, marker_1.buildCommentBody)(finding, false);
+ const bodyWithWatermark = `${commentBody}\n\n${bugbotWatermark}`;
if (existing?.issueCommentId != null) {
- await issueRepository.updateComment(owner, repo, issueNumber, existing.issueCommentId, commentBody, token);
+ await issueRepository.updateComment(owner, repo, issueNumber, existing.issueCommentId, commentBody, token, commitSha ? { commitSha } : undefined);
(0, logger_1.logDebugInfo)(`Updated bugbot comment for finding ${finding.id} on issue.`);
}
else {
- await issueRepository.addComment(owner, repo, issueNumber, commentBody, token);
+ await issueRepository.addComment(owner, repo, issueNumber, commentBody, token, commitSha ? { commitSha } : undefined);
(0, logger_1.logDebugInfo)(`Added bugbot comment for finding ${finding.id} on issue.`);
}
// PR review comment: only if this finding's file is in the PR changed files (so GitHub can attach the comment).
@@ -56250,10 +56343,10 @@ async function publishFindings(param) {
if (path) {
const line = finding.line ?? pathToFirstDiffLine[path] ?? 1;
if (existing?.prCommentId != null && existing.prNumber === openPrNumbers[0]) {
- await pullRequestRepository.updatePullRequestReviewComment(owner, repo, existing.prCommentId, commentBody, token);
+ await pullRequestRepository.updatePullRequestReviewComment(owner, repo, existing.prCommentId, bodyWithWatermark, token);
}
else {
- prCommentsToCreate.push({ path, line, body: commentBody });
+ prCommentsToCreate.push({ path, line, body: bodyWithWatermark });
}
}
else if (finding.file != null && String(finding.file).trim() !== "") {
@@ -56271,7 +56364,7 @@ async function publishFindings(param) {
const overflowBody = `## More findings (comment limit)
There are **${overflowCount}** more finding(s) that were not published as individual comments. Review locally or in the full diff to see the list.${titlesList}`;
- await issueRepository.addComment(owner, repo, issueNumber, overflowBody, token);
+ await issueRepository.addComment(owner, repo, issueNumber, overflowBody, token, commitSha ? { commitSha } : undefined);
(0, logger_1.logDebugInfo)(`Added overflow comment: ${overflowCount} additional finding(s) not published individually.`);
}
}
@@ -56511,7 +56604,7 @@ class CheckChangesIssueSizeUseCase {
}
}
catch (error) {
- (0, logger_1.logError)(error);
+ (0, logger_1.logError)(`CheckChangesIssueSize: failed for issue #${param.issueNumber}.`, error instanceof Error ? { stack: error.stack } : undefined);
result.push(new result_1.Result({
id: this.taskId,
success: false,
@@ -56531,12 +56624,46 @@ exports.CheckChangesIssueSizeUseCase = CheckChangesIssueSizeUseCase;
/***/ }),
/***/ 7395:
-/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => {
+/***/ (function(__unused_webpack_module, exports, __nccwpck_require__) {
"use strict";
+var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
+ if (k2 === undefined) k2 = k;
+ var desc = Object.getOwnPropertyDescriptor(m, k);
+ if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
+ desc = { enumerable: true, get: function() { return m[k]; } };
+ }
+ Object.defineProperty(o, k2, desc);
+}) : (function(o, m, k, k2) {
+ if (k2 === undefined) k2 = k;
+ o[k2] = m[k];
+}));
+var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
+ Object.defineProperty(o, "default", { enumerable: true, value: v });
+}) : function(o, v) {
+ o["default"] = v;
+});
+var __importStar = (this && this.__importStar) || (function () {
+ var ownKeys = function(o) {
+ ownKeys = Object.getOwnPropertyNames || function (o) {
+ var ar = [];
+ for (var k in o) if (Object.prototype.hasOwnProperty.call(o, k)) ar[ar.length] = k;
+ return ar;
+ };
+ return ownKeys(o);
+ };
+ return function (mod) {
+ if (mod && mod.__esModule) return mod;
+ var result = {};
+ if (mod != null) for (var k = ownKeys(mod), i = 0; i < k.length; i++) if (k[i] !== "default") __createBinding(result, mod, k[i]);
+ __setModuleDefault(result, mod);
+ return result;
+ };
+})();
Object.defineProperty(exports, "__esModule", ({ value: true }));
exports.DetectPotentialProblemsUseCase = void 0;
+const github = __importStar(__nccwpck_require__(5438));
const result_1 = __nccwpck_require__(7305);
const ai_repository_1 = __nccwpck_require__(8307);
const constants_1 = __nccwpck_require__(8593);
@@ -56567,11 +56694,13 @@ class DetectPotentialProblemsUseCase {
return results;
}
if (param.issueNumber === -1) {
- (0, logger_1.logDebugInfo)('No issue number for this branch; skipping.');
+ (0, logger_1.logDebugInfo)('No issue number for this branch; skipping potential problems detection.');
return results;
}
+ (0, logger_1.logDebugInfo)(`DetectPotentialProblems: loading context for issue #${param.issueNumber}.`);
const context = await (0, load_bugbot_context_use_case_1.loadBugbotContext)(param);
const prompt = (0, build_bugbot_prompt_1.buildBugbotPrompt)(param, context);
+ (0, logger_1.logDebugInfo)(`DetectPotentialProblems: prompt length=${prompt.length}. Calling OpenCode Plan agent.`);
(0, logger_1.logInfo)('Detecting potential problems via OpenCode (agent computes changes and checks resolved)...');
const response = await this.aiRepository.askAgent(param.ai, ai_repository_1.OPENCODE_AGENT_PLAN, prompt, {
expectJson: true,
@@ -56579,7 +56708,7 @@ class DetectPotentialProblemsUseCase {
schemaName: 'bugbot_findings',
});
if (response == null || typeof response !== 'object') {
- (0, logger_1.logDebugInfo)('No response from OpenCode.');
+ (0, logger_1.logDebugInfo)('DetectPotentialProblems: No response from OpenCode.');
return results;
}
const payload = response;
@@ -56587,6 +56716,7 @@ class DetectPotentialProblemsUseCase {
const resolvedFindingIdsRaw = Array.isArray(payload.resolved_finding_ids) ? payload.resolved_finding_ids : [];
const resolvedFindingIds = new Set(resolvedFindingIdsRaw);
const normalizedResolvedIds = new Set(resolvedFindingIdsRaw.map(marker_1.sanitizeFindingIdForMarker));
+ (0, logger_1.logDebugInfo)(`DetectPotentialProblems: OpenCode returned findings=${findings.length}, resolved_finding_ids=${resolvedFindingIdsRaw.length}. Resolved ids: ${JSON.stringify([...resolvedFindingIds])}.`);
const ignorePatterns = param.ai?.getAiIgnoreFiles?.() ?? [];
const minSeverity = (0, severity_1.normalizeMinSeverity)(param.ai?.getBugbotMinSeverity?.());
findings = findings.filter((f) => f.file == null || String(f.file).trim() === '' || (0, path_validation_1.isSafeFindingFilePath)(f.file));
@@ -56595,8 +56725,9 @@ class DetectPotentialProblemsUseCase {
findings = (0, deduplicate_findings_1.deduplicateFindings)(findings);
const maxComments = param.ai?.getBugbotCommentLimit?.() ?? constants_1.BUGBOT_MAX_COMMENTS;
const { toPublish, overflowCount, overflowTitles } = (0, limit_comments_1.applyCommentLimit)(findings, maxComments);
+ (0, logger_1.logDebugInfo)(`DetectPotentialProblems: after filters and limit — toPublish=${toPublish.length}, overflow=${overflowCount}, minSeverity applied, ignore patterns applied.`);
if (toPublish.length === 0 && resolvedFindingIds.size === 0) {
- (0, logger_1.logDebugInfo)('OpenCode returned no new findings (after filters) and no resolved ids.');
+ (0, logger_1.logDebugInfo)('DetectPotentialProblems: OpenCode returned no new findings (after filters) and no resolved ids.');
results.push(new result_1.Result({
id: this.taskId,
success: true,
@@ -56615,6 +56746,7 @@ class DetectPotentialProblemsUseCase {
execution: param,
context,
findings: toPublish,
+ commitSha: github.context.sha,
overflowCount: overflowCount > 0 ? overflowCount : undefined,
overflowTitles: overflowCount > 0 ? overflowTitles : undefined,
});
@@ -56766,7 +56898,7 @@ ${this.separator}
await this.issueRepository.addComment(param.owner, param.repo, param.issueNumber, commentBody, param.tokens.token);
}
catch (error) {
- (0, logger_1.logError)(error);
+ (0, logger_1.logError)(`NotifyNewCommitOnIssue: failed to notify issue #${param.issueNumber}.`, error instanceof Error ? { stack: error.stack } : undefined);
result.push(new result_1.Result({
id: this.taskId,
success: false,
@@ -56835,8 +56967,10 @@ class DoUserRequestUseCase {
issueNumber: String(execution.issueNumber),
userComment: (0, sanitize_user_comment_for_prompt_1.sanitizeUserCommentForPrompt)(userComment),
});
+ (0, logger_1.logDebugInfo)(`DoUserRequest: prompt length=${prompt.length}, user comment length=${commentTrimmed.length}.`);
(0, logger_1.logInfo)("Running OpenCode build agent to perform user request (changes applied in workspace).");
const response = await this.aiRepository.copilotMessage(execution.ai, prompt);
+ (0, logger_1.logDebugInfo)(`DoUserRequest: OpenCode build agent response length=${response?.text?.length ?? 0}. Full response:\n${response?.text ?? '(none)'}`);
if (!response?.text) {
(0, logger_1.logError)("DoUserRequest: no response from OpenCode build agent.");
results.push(new result_1.Result({
@@ -56917,6 +57051,7 @@ class CheckPermissionsUseCase {
}));
}
else {
+ (0, logger_1.logWarn)(`CheckPermissions: @${param.issue.creator} not authorized to create [${param.labels.currentIssueLabels.join(',')}] issues.`);
result.push(new result_1.Result({
id: this.taskId,
success: false,
@@ -56935,7 +57070,7 @@ class CheckPermissionsUseCase {
}
}
catch (error) {
- (0, logger_1.logError)(error);
+ (0, logger_1.logError)(`CheckPermissions: failed to get project members or check creator.`, error instanceof Error ? { stack: error.stack } : undefined);
result.push(new result_1.Result({
id: this.taskId,
success: false,
@@ -57294,6 +57429,7 @@ class GetReleaseVersionUseCase {
}
const description = await this.issueRepository.getDescription(param.owner, param.repo, number, param.tokens.token);
if (description === undefined) {
+ (0, logger_1.logDebugInfo)(`GetReleaseVersion: no description for issue/PR ${number}.`);
result.push(new result_1.Result({
id: this.taskId,
success: false,
@@ -57304,6 +57440,7 @@ class GetReleaseVersionUseCase {
}
const releaseVersion = (0, content_utils_1.extractVersion)('Release Version', description);
if (releaseVersion === undefined) {
+ (0, logger_1.logDebugInfo)(`GetReleaseVersion: no "Release Version" found in description (issue/PR ${number}).`);
result.push(new result_1.Result({
id: this.taskId,
success: false,
@@ -57321,12 +57458,12 @@ class GetReleaseVersionUseCase {
}));
}
catch (error) {
- (0, logger_1.logError)(error);
+ (0, logger_1.logError)(`GetReleaseVersion: failed to get version for issue/PR.`, error instanceof Error ? { stack: error.stack } : undefined);
result.push(new result_1.Result({
id: this.taskId,
success: false,
executed: true,
- steps: [`Tried to check action permissions.`],
+ steps: [`Tried to get the release version but there was a problem.`],
error: error,
}));
}
@@ -57359,6 +57496,7 @@ class ThinkUseCase {
}
async invoke(param) {
const results = [];
+ (0, logger_1.logInfo)('Think: processing comment (AI Q&A).');
try {
const commentBody = param.issue.isIssueComment
? (param.issue.commentBody ?? '')
@@ -57421,11 +57559,13 @@ class ThinkUseCase {
const contextBlock = issueDescription
? `\n\nContext (issue #${issueNumberForContext} description):\n${issueDescription}\n\n`
: '\n\n';
+ (0, logger_1.logDebugInfo)(`Think: question length=${question.length}, issue context length=${issueDescription.length}. Full question:\n${question}`);
const prompt = (0, prompts_1.getThinkPrompt)({
projectContextInstruction: opencode_project_context_instruction_1.OPENCODE_PROJECT_CONTEXT_INSTRUCTION,
contextBlock,
question,
});
+ (0, logger_1.logDebugInfo)(`Think: calling OpenCode Plan agent (prompt length=${prompt.length}).`);
const response = await this.aiRepository.askAgent(param.ai, ai_repository_1.OPENCODE_AGENT_PLAN, prompt, {
expectJson: true,
schema: ai_repository_1.THINK_RESPONSE_SCHEMA,
@@ -57436,6 +57576,7 @@ class ThinkUseCase {
typeof response.answer === 'string'
? response.answer.trim()
: '';
+ (0, logger_1.logDebugInfo)(`Think: OpenCode response received. Answer length=${answer.length}. Full answer:\n${answer}`);
if (!answer) {
(0, logger_1.logError)('OpenCode returned no answer for Think.');
results.push(new result_1.Result({
@@ -57629,6 +57770,7 @@ class AnswerIssueHelpUseCase {
}
async invoke(param) {
const results = [];
+ (0, logger_1.logInfo)('AnswerIssueHelp: checking if initial help reply is needed (AI).');
try {
if (!param.issue.opened) {
results.push(new result_1.Result({
@@ -57679,6 +57821,7 @@ class AnswerIssueHelpUseCase {
description,
projectContextInstruction: opencode_project_context_instruction_1.OPENCODE_PROJECT_CONTEXT_INSTRUCTION,
});
+ (0, logger_1.logDebugInfo)(`AnswerIssueHelp: prompt length=${prompt.length}, issue description length=${description.length}. Calling OpenCode Plan agent.`);
const response = await this.aiRepository.askAgent(param.ai, ai_repository_1.OPENCODE_AGENT_PLAN, prompt, {
expectJson: true,
schema: ai_repository_1.THINK_RESPONSE_SCHEMA,
@@ -57689,6 +57832,7 @@ class AnswerIssueHelpUseCase {
typeof response.answer === 'string'
? response.answer.trim()
: '';
+ (0, logger_1.logDebugInfo)(`AnswerIssueHelp: OpenCode response. Answer length=${answer.length}. Full answer:\n${answer}`);
if (!answer) {
(0, logger_1.logError)('OpenCode returned no answer for initial help.');
results.push(new result_1.Result({
@@ -58044,6 +58188,7 @@ class CloseIssueAfterMergingUseCase {
try {
const closed = await this.issueRepository.closeIssue(param.owner, param.repo, param.issueNumber, param.tokens.token);
if (closed) {
+ (0, logger_1.logInfo)(`Issue #${param.issueNumber} closed after merging PR #${param.pullRequest.number}.`);
await this.issueRepository.addComment(param.owner, param.repo, param.issueNumber, `This issue was closed after merging #${param.pullRequest.number}.`, param.tokens.token);
result.push(new result_1.Result({
id: this.taskId,
@@ -58055,6 +58200,7 @@ class CloseIssueAfterMergingUseCase {
}));
}
else {
+ (0, logger_1.logDebugInfo)(`Issue #${param.issueNumber} was already closed or close failed after merge.`);
result.push(new result_1.Result({
id: this.taskId,
success: true,
@@ -58063,6 +58209,7 @@ class CloseIssueAfterMergingUseCase {
}
}
catch (error) {
+ (0, logger_1.logError)(`CloseIssueAfterMerging: failed to close issue #${param.issueNumber}.`, error instanceof Error ? { stack: error.stack } : undefined);
result.push(new result_1.Result({
id: this.taskId,
success: false,
@@ -58103,6 +58250,7 @@ class CloseNotAllowedIssueUseCase {
try {
const closed = await this.issueRepository.closeIssue(param.owner, param.repo, param.issueNumber, param.tokens.token);
if (closed) {
+ (0, logger_1.logInfo)(`Issue #${param.issueNumber} closed (author not allowed). Adding comment.`);
await this.issueRepository.addComment(param.owner, param.repo, param.issueNumber, `This issue has been closed because the author is not a member of the project. The user may be banned if the fact is repeated.`, param.tokens.token);
result.push(new result_1.Result({
id: this.taskId,
@@ -58114,6 +58262,7 @@ class CloseNotAllowedIssueUseCase {
}));
}
else {
+ (0, logger_1.logDebugInfo)(`Issue #${param.issueNumber} was already closed or close failed.`);
result.push(new result_1.Result({
id: this.taskId,
success: true,
@@ -58122,6 +58271,7 @@ class CloseNotAllowedIssueUseCase {
}
}
catch (error) {
+ (0, logger_1.logError)(`CloseNotAllowedIssue: failed to close issue #${param.issueNumber}.`, error instanceof Error ? { stack: error.stack } : undefined);
result.push(new result_1.Result({
id: this.taskId,
success: false,
@@ -58354,8 +58504,13 @@ class LinkIssueProjectUseCase {
(0, logger_1.logInfo)(`${(0, task_emoji_1.getTaskEmoji)(this.taskId)} Executing ${this.taskId}.`);
const result = [];
const columnName = param.project.getProjectColumnIssueCreated();
+ const projects = param.project.getProjects();
+ if (projects.length === 0) {
+ (0, logger_1.logDebugInfo)('LinkIssueProject: no projects configured; skipping.');
+ return result;
+ }
try {
- for (const project of param.project.getProjects()) {
+ for (const project of projects) {
const issueId = await this.issueRepository.getId(param.owner, param.repo, param.issue.number, param.tokens.token);
let actionDone = await this.projectRepository.linkContentId(project, issueId, param.tokens.token);
if (actionDone) {
@@ -58375,6 +58530,7 @@ class LinkIssueProjectUseCase {
}));
}
else {
+ (0, logger_1.logWarn)(`LinkIssueProject: linked issue to project "${project?.title}" but move to column "${columnName}" failed.`);
result.push(new result_1.Result({
id: this.taskId,
success: true,
@@ -58383,6 +58539,9 @@ class LinkIssueProjectUseCase {
}));
}
}
+ else {
+ (0, logger_1.logDebugInfo)(`LinkIssueProject: issue already linked to project "${project?.title}" or link failed.`);
+ }
}
return result;
}
@@ -58585,6 +58744,7 @@ class PrepareBranchesUseCase {
}
}
else {
+ (0, logger_1.logWarn)('PrepareBranches: hotfix requested but no tag or base version found.');
result.push(new result_1.Result({
id: this.taskId,
success: false,
@@ -58668,6 +58828,7 @@ class PrepareBranchesUseCase {
}
}
else {
+ (0, logger_1.logWarn)('PrepareBranches: release requested but no release version found.');
result.push(new result_1.Result({
id: this.taskId,
success: false,
@@ -58747,7 +58908,7 @@ class PrepareBranchesUseCase {
return result;
}
catch (error) {
- (0, logger_1.logError)(error);
+ (0, logger_1.logError)(`PrepareBranches: error preparing branches for issue #${param.issueNumber}.`, error instanceof Error ? { stack: error.stack } : undefined);
result.push(new result_1.Result({
id: this.taskId,
success: false,
@@ -58800,8 +58961,10 @@ class RemoveIssueBranchesUseCase {
if (!matchingBranch)
continue;
branchName = matchingBranch;
+ (0, logger_1.logDebugInfo)(`RemoveIssueBranches: attempting to remove branch ${branchName}.`);
const removed = await this.branchRepository.removeBranch(param.owner, param.repo, branchName, param.tokens.token);
if (removed) {
+ (0, logger_1.logDebugInfo)(`RemoveIssueBranches: removed branch ${branchName}.`);
results.push(new result_1.Result({
id: this.taskId,
success: true,
@@ -58821,16 +58984,19 @@ class RemoveIssueBranchesUseCase {
}));
}
}
+ else {
+ (0, logger_1.logWarn)(`RemoveIssueBranches: failed to remove branch ${branchName}.`);
+ }
}
}
catch (error) {
- (0, logger_1.logError)(error);
+ (0, logger_1.logError)(`RemoveIssueBranches: error removing branches for issue #${param.issueNumber}.`, error instanceof Error ? { stack: error.stack } : undefined);
results.push(new result_1.Result({
id: this.taskId,
success: false,
executed: true,
steps: [
- `Tried to update issue's title, but there was a problem.`,
+ `Tried to remove issue branches, but there was a problem.`,
],
error: error,
}));
@@ -59073,6 +59239,7 @@ If you'd like this comment to be translated again, please delete the entire comm
}
const locale = param.locale.issue;
let prompt = (0, prompts_1.getCheckCommentLanguagePrompt)({ locale, commentBody });
+ (0, logger_1.logDebugInfo)(`CheckIssueCommentLanguage: locale=${locale}, comment length=${commentBody.length}. Calling OpenCode for language check.`);
const checkResponse = await this.aiRepository.askAgent(param.ai, ai_repository_1.OPENCODE_AGENT_PLAN, prompt, {
expectJson: true,
schema: ai_repository_1.LANGUAGE_CHECK_RESPONSE_SCHEMA,
@@ -59083,6 +59250,7 @@ If you'd like this comment to be translated again, please delete the entire comm
typeof checkResponse.status === 'string'
? checkResponse.status
: '';
+ (0, logger_1.logDebugInfo)(`CheckIssueCommentLanguage: language check status=${status}.`);
if (status === 'done') {
results.push(new result_1.Result({
id: this.taskId,
@@ -59092,6 +59260,7 @@ If you'd like this comment to be translated again, please delete the entire comm
return results;
}
prompt = (0, prompts_1.getTranslateCommentPrompt)({ locale, commentBody });
+ (0, logger_1.logDebugInfo)(`CheckIssueCommentLanguage: translating comment (prompt length=${prompt.length}).`);
const translationResponse = await this.aiRepository.askAgent(param.ai, ai_repository_1.OPENCODE_AGENT_PLAN, prompt, {
expectJson: true,
schema: ai_repository_1.TRANSLATION_RESPONSE_SCHEMA,
@@ -59102,6 +59271,7 @@ If you'd like this comment to be translated again, please delete the entire comm
typeof translationResponse.translatedText === 'string'
? translationResponse.translatedText.trim()
: '';
+ (0, logger_1.logDebugInfo)(`CheckIssueCommentLanguage: translation received. translatedText length=${translatedText.length}. Full translated text:\n${translatedText}`);
if (!translatedText) {
const reason = translationResponse != null &&
typeof translationResponse === 'object' &&
@@ -59370,8 +59540,13 @@ class LinkPullRequestProjectUseCase {
(0, logger_1.logInfo)(`${(0, task_emoji_1.getTaskEmoji)(this.taskId)} Executing ${this.taskId}.`);
const result = [];
const columnName = param.project.getProjectColumnPullRequestCreated();
+ const projects = param.project.getProjects();
+ if (projects.length === 0) {
+ (0, logger_1.logDebugInfo)('LinkPullRequestProject: no projects configured; skipping.');
+ return result;
+ }
try {
- for (const project of param.project.getProjects()) {
+ for (const project of projects) {
let actionDone = await this.projectRepository.linkContentId(project, param.pullRequest.id, param.tokens.token);
if (actionDone) {
/**
@@ -59390,6 +59565,7 @@ class LinkPullRequestProjectUseCase {
}));
}
else {
+ (0, logger_1.logWarn)(`LinkPullRequestProject: linked PR to project "${project?.title}" but move to column "${columnName}" failed.`);
result.push(new result_1.Result({
id: this.taskId,
success: false,
@@ -59400,6 +59576,9 @@ class LinkPullRequestProjectUseCase {
}));
}
}
+ else {
+ (0, logger_1.logDebugInfo)(`LinkPullRequestProject: PR already linked to project "${project?.title}" or link failed.`);
+ }
}
return result;
}
@@ -59531,7 +59710,7 @@ class UpdatePullRequestDescriptionUseCase {
this.projectRepository = new project_repository_1.ProjectRepository();
}
async invoke(param) {
- (0, logger_1.logDebugInfo)(`${(0, task_emoji_1.getTaskEmoji)(this.taskId)} Executing ${this.taskId}.`);
+ (0, logger_1.logInfo)(`${(0, task_emoji_1.getTaskEmoji)(this.taskId)} Executing ${this.taskId} (AI PR description).`);
const result = [];
try {
const prNumber = param.pullRequest.number;
@@ -59582,10 +59761,12 @@ class UpdatePullRequestDescriptionUseCase {
issueNumber: String(param.issueNumber),
issueDescription,
});
+ (0, logger_1.logDebugInfo)(`UpdatePullRequestDescription: prompt length=${prompt.length}, issue description length=${issueDescription.length}. Calling OpenCode Plan agent.`);
const agentResponse = await this.aiRepository.askAgent(param.ai, ai_repository_1.OPENCODE_AGENT_PLAN, prompt);
const prBody = typeof agentResponse === 'string'
? agentResponse
: (agentResponse && String(agentResponse.description)) || '';
+ (0, logger_1.logDebugInfo)(`UpdatePullRequestDescription: OpenCode response received. Description length=${prBody.length}. Full description:\n${prBody}`);
if (!prBody.trim()) {
result.push(new result_1.Result({
id: this.taskId,
@@ -59656,6 +59837,7 @@ If you'd like this comment to be translated again, please delete the entire comm
}
const locale = param.locale.pullRequest;
let prompt = (0, prompts_1.getCheckCommentLanguagePrompt)({ locale, commentBody });
+ (0, logger_1.logDebugInfo)(`CheckPullRequestCommentLanguage: locale=${locale}, comment length=${commentBody.length}. Calling OpenCode for language check.`);
const checkResponse = await this.aiRepository.askAgent(param.ai, ai_repository_1.OPENCODE_AGENT_PLAN, prompt, {
expectJson: true,
schema: ai_repository_1.LANGUAGE_CHECK_RESPONSE_SCHEMA,
@@ -59666,6 +59848,7 @@ If you'd like this comment to be translated again, please delete the entire comm
typeof checkResponse.status === 'string'
? checkResponse.status
: '';
+ (0, logger_1.logDebugInfo)(`CheckPullRequestCommentLanguage: language check status=${status}.`);
if (status === 'done') {
results.push(new result_1.Result({
id: this.taskId,
@@ -59675,6 +59858,7 @@ If you'd like this comment to be translated again, please delete the entire comm
return results;
}
prompt = (0, prompts_1.getTranslateCommentPrompt)({ locale, commentBody });
+ (0, logger_1.logDebugInfo)(`CheckPullRequestCommentLanguage: translating comment (prompt length=${prompt.length}).`);
const translationResponse = await this.aiRepository.askAgent(param.ai, ai_repository_1.OPENCODE_AGENT_PLAN, prompt, {
expectJson: true,
schema: ai_repository_1.TRANSLATION_RESPONSE_SCHEMA,
@@ -59685,6 +59869,7 @@ If you'd like this comment to be translated again, please delete the entire comm
typeof translationResponse.translatedText === 'string'
? translationResponse.translatedText.trim()
: '';
+ (0, logger_1.logDebugInfo)(`CheckPullRequestCommentLanguage: translation received. translatedText length=${translatedText.length}. Full translated text:\n${translatedText}`);
if (!translatedText) {
const reason = translationResponse != null &&
typeof translationResponse === 'object' &&
@@ -59710,6 +59895,34 @@ ${this.translatedKey}
exports.CheckPullRequestCommentLanguageUseCase = CheckPullRequestCommentLanguageUseCase;
+/***/ }),
+
+/***/ 4467:
+/***/ ((__unused_webpack_module, exports) => {
+
+"use strict";
+
+/**
+ * Watermark appended to comments (issues and PRs) to attribute Copilot.
+ * Bugbot comments include commit link and note about auto-update on new commits.
+ */
+Object.defineProperty(exports, "__esModule", ({ value: true }));
+exports.COPILOT_MARKETPLACE_URL = void 0;
+exports.getCommentWatermark = getCommentWatermark;
+exports.COPILOT_MARKETPLACE_URL = 'https://github.com/marketplace/actions/copilot-github-with-super-powers';
+const DEFAULT_WATERMARK = `Made with ❤️ by [vypdev/copilot](${exports.COPILOT_MARKETPLACE_URL})`;
+function commitUrl(owner, repo, sha) {
+ return `https://github.com/${encodeURIComponent(owner)}/${encodeURIComponent(repo)}/commit/${sha}`;
+}
+function getCommentWatermark(options) {
+ if (options?.commitSha && options?.owner && options?.repo) {
+ const url = commitUrl(options.owner, options.repo, options.commitSha);
+ return `Written by [vypdev/copilot](${exports.COPILOT_MARKETPLACE_URL}) for commit [${options.commitSha}](${url}). This will update automatically on new commits.`;
+ }
+ return DEFAULT_WATERMARK;
+}
+
+
/***/ }),
/***/ 8593:
@@ -60226,6 +60439,9 @@ exports.getRandomElement = getRandomElement;
"use strict";
Object.defineProperty(exports, "__esModule", ({ value: true }));
+exports.getAccumulatedLogEntries = getAccumulatedLogEntries;
+exports.getAccumulatedLogsAsText = getAccumulatedLogsAsText;
+exports.clearAccumulatedLogs = clearAccumulatedLogs;
exports.setGlobalLoggerDebug = setGlobalLoggerDebug;
exports.setStructuredLogging = setStructuredLogging;
exports.logInfo = logInfo;
@@ -60238,6 +60454,29 @@ exports.logDebugError = logDebugError;
let loggerDebug = false;
let loggerRemote = false;
let structuredLogging = false;
+const accumulatedLogEntries = [];
+/** Removes markdown code fences from message so log output does not break when visualized (e.g. GitHub Actions). */
+function sanitizeLogMessage(message) {
+ return message.replace(/```/g, '');
+}
+function pushLogEntry(entry) {
+ accumulatedLogEntries.push(entry);
+}
+function getAccumulatedLogEntries() {
+ return [...accumulatedLogEntries];
+}
+function getAccumulatedLogsAsText() {
+ return accumulatedLogEntries
+ .map((e) => {
+ const prefix = `[${e.level.toUpperCase()}]`;
+ const meta = e.metadata?.stack ? `\n${String(e.metadata.stack)}` : '';
+ return `${prefix} ${e.message}${meta}`;
+ })
+ .join('\n');
+}
+function clearAccumulatedLogs() {
+ accumulatedLogEntries.length = 0;
+}
function setGlobalLoggerDebug(debug, isRemote = false) {
loggerDebug = debug;
loggerRemote = isRemote;
@@ -60248,33 +60487,39 @@ function setStructuredLogging(enabled) {
function formatStructuredLog(entry) {
return JSON.stringify(entry);
}
-function logInfo(message, previousWasSingleLine = false, metadata) {
+function logInfo(message, previousWasSingleLine = false, metadata, skipAccumulation) {
+ const sanitized = sanitizeLogMessage(message);
+ if (!skipAccumulation) {
+ pushLogEntry({ level: 'info', message: sanitized, timestamp: Date.now(), metadata });
+ }
if (previousWasSingleLine && !loggerRemote) {
console.log();
}
if (structuredLogging) {
console.log(formatStructuredLog({
level: 'info',
- message,
+ message: sanitized,
timestamp: Date.now(),
metadata
}));
}
else {
- console.log(message);
+ console.log(sanitized);
}
}
function logWarn(message, metadata) {
+ const sanitized = sanitizeLogMessage(message);
+ pushLogEntry({ level: 'warn', message: sanitized, timestamp: Date.now(), metadata });
if (structuredLogging) {
console.warn(formatStructuredLog({
level: 'warn',
- message,
+ message: sanitized,
timestamp: Date.now(),
metadata
}));
}
else {
- console.warn(message);
+ console.warn(sanitized);
}
}
function logWarning(message) {
@@ -60282,33 +60527,38 @@ function logWarning(message) {
}
function logError(message, metadata) {
const errorMessage = message instanceof Error ? message.message : String(message);
+ const sanitized = sanitizeLogMessage(errorMessage);
+ const metaWithStack = {
+ ...metadata,
+ stack: message instanceof Error ? message.stack : metadata?.stack
+ };
+ pushLogEntry({ level: 'error', message: sanitized, timestamp: Date.now(), metadata: metaWithStack });
if (structuredLogging) {
console.error(formatStructuredLog({
level: 'error',
- message: errorMessage,
+ message: sanitized,
timestamp: Date.now(),
- metadata: {
- ...metadata,
- stack: message instanceof Error ? message.stack : undefined
- }
+ metadata: metaWithStack
}));
}
else {
- console.error(errorMessage);
+ console.error(sanitized);
}
}
function logDebugInfo(message, previousWasSingleLine = false, metadata) {
if (loggerDebug) {
+ const sanitized = sanitizeLogMessage(message);
+ pushLogEntry({ level: 'debug', message: sanitized, timestamp: Date.now(), metadata });
if (structuredLogging) {
console.log(formatStructuredLog({
level: 'debug',
- message,
+ message: sanitized,
timestamp: Date.now(),
metadata
}));
}
else {
- logInfo(message, previousWasSingleLine);
+ logInfo(sanitized, previousWasSingleLine, undefined, true);
}
}
}
diff --git a/build/cli/src/data/repository/issue_repository.d.ts b/build/cli/src/data/repository/issue_repository.d.ts
index dbd004e0..f69e3668 100644
--- a/build/cli/src/data/repository/issue_repository.d.ts
+++ b/build/cli/src/data/repository/issue_repository.d.ts
@@ -37,8 +37,12 @@ export declare class IssueRepository {
isIssue: (owner: string, repository: string, issueNumber: number, token: string) => Promise;
isPullRequest: (owner: string, repository: string, issueNumber: number, token: string) => Promise;
getHeadBranch: (owner: string, repository: string, issueNumber: number, token: string) => Promise;
- addComment: (owner: string, repository: string, issueNumber: number, comment: string, token: string) => Promise;
- updateComment: (owner: string, repository: string, issueNumber: number, commentId: number, comment: string, token: string) => Promise;
+ addComment: (owner: string, repository: string, issueNumber: number, comment: string, token: string, options?: {
+ commitSha?: string;
+ }) => Promise;
+ updateComment: (owner: string, repository: string, issueNumber: number, commentId: number, comment: string, token: string, options?: {
+ commitSha?: string;
+ }) => Promise;
/**
* Lists all comments on an issue (for bugbot: find existing findings by marker).
* Uses pagination to fetch every comment (default API returns only 30 per page).
diff --git a/build/cli/src/data/repository/merge_repository.d.ts b/build/cli/src/data/repository/merge_repository.d.ts
index a152b014..d13d541a 100644
--- a/build/cli/src/data/repository/merge_repository.d.ts
+++ b/build/cli/src/data/repository/merge_repository.d.ts
@@ -1,7 +1,13 @@
import { Result } from '../model/result';
/**
- * Repository for merging branches (via PR or direct merge).
- * Isolated to allow unit tests with mocked Octokit.
+ * Repository for merging branches: creates a PR, waits for that PR's check runs (or status checks),
+ * then merges the PR; on failure, falls back to a direct Git merge.
+ *
+ * Check runs are filtered by PR (pull_requests) so we only wait for the current PR's checks,
+ * not those of another PR sharing the same head (e.g. release→main vs release→develop).
+ * If the PR has no check runs after a short wait, we proceed to merge (branch may have no required checks).
+ *
+ * @see docs/single-actions/deploy-label-and-merge.mdx for the deploy flow and check-wait behaviour.
*/
export declare class MergeRepository {
mergeBranch: (owner: string, repository: string, head: string, base: string, timeout: number, token: string) => Promise;
diff --git a/build/cli/src/usecase/actions/deployed_action_use_case.d.ts b/build/cli/src/usecase/actions/deployed_action_use_case.d.ts
index bb20b27c..80f821f5 100644
--- a/build/cli/src/usecase/actions/deployed_action_use_case.d.ts
+++ b/build/cli/src/usecase/actions/deployed_action_use_case.d.ts
@@ -1,6 +1,16 @@
import { Execution } from "../../data/model/execution";
import { Result } from "../../data/model/result";
import { ParamUseCase } from "../base/param_usecase";
+/**
+ * Single action run after a successful deployment (triggered with the "deployed" action and an issue number).
+ *
+ * Requires the issue to have the "deploy" label and not already have the "deployed" label. Then:
+ * 1. Replaces the "deploy" label with "deployed".
+ * 2. If a release or hotfix branch is configured: merges it into default and develop (each via PR, waiting for that PR's checks).
+ * 3. Closes the issue only when all merges succeed.
+ *
+ * @see docs/single-actions/deploy-label-and-merge.mdx for the full flow and how merge/check waiting works.
+ */
export declare class DeployedActionUseCase implements ParamUseCase {
taskId: string;
private issueRepository;
diff --git a/build/cli/src/usecase/steps/commit/bugbot/publish_findings_use_case.d.ts b/build/cli/src/usecase/steps/commit/bugbot/publish_findings_use_case.d.ts
index 22a093cc..712e16b8 100644
--- a/build/cli/src/usecase/steps/commit/bugbot/publish_findings_use_case.d.ts
+++ b/build/cli/src/usecase/steps/commit/bugbot/publish_findings_use_case.d.ts
@@ -12,6 +12,8 @@ export interface PublishFindingsParam {
execution: Execution;
context: BugbotContext;
findings: BugbotFinding[];
+ /** Commit SHA for bugbot watermark (commit link). When set, comment uses "for commit ..." watermark. */
+ commitSha?: string;
/** When findings were limited by max comments, add one summary comment with this overflow info. */
overflowCount?: number;
overflowTitles?: string[];
diff --git a/build/cli/src/utils/comment_watermark.d.ts b/build/cli/src/utils/comment_watermark.d.ts
new file mode 100644
index 00000000..858f7dc1
--- /dev/null
+++ b/build/cli/src/utils/comment_watermark.d.ts
@@ -0,0 +1,11 @@
+/**
+ * Watermark appended to comments (issues and PRs) to attribute Copilot.
+ * Bugbot comments include commit link and note about auto-update on new commits.
+ */
+export declare const COPILOT_MARKETPLACE_URL = "https://github.com/marketplace/actions/copilot-github-with-super-powers";
+export interface BugbotWatermarkOptions {
+ commitSha: string;
+ owner: string;
+ repo: string;
+}
+export declare function getCommentWatermark(options?: BugbotWatermarkOptions): string;
diff --git a/build/cli/src/utils/logger.d.ts b/build/cli/src/utils/logger.d.ts
index cb1bcc34..14c6d4d8 100644
--- a/build/cli/src/utils/logger.d.ts
+++ b/build/cli/src/utils/logger.d.ts
@@ -4,9 +4,12 @@ export interface LogEntry {
timestamp: number;
metadata?: Record;
}
+export declare function getAccumulatedLogEntries(): LogEntry[];
+export declare function getAccumulatedLogsAsText(): string;
+export declare function clearAccumulatedLogs(): void;
export declare function setGlobalLoggerDebug(debug: boolean, isRemote?: boolean): void;
export declare function setStructuredLogging(enabled: boolean): void;
-export declare function logInfo(message: string, previousWasSingleLine?: boolean, metadata?: Record): void;
+export declare function logInfo(message: string, previousWasSingleLine?: boolean, metadata?: Record<string, unknown>, skipAccumulation?: boolean): void;
export declare function logWarn(message: string, metadata?: Record): void;
export declare function logWarning(message: string): void;
export declare function logError(message: unknown, metadata?: Record): void;
diff --git a/build/github_action/index.js b/build/github_action/index.js
index f2d088a0..1a9776e6 100644
--- a/build/github_action/index.js
+++ b/build/github_action/index.js
@@ -42211,7 +42211,11 @@ const boxen_1 = __importDefault(__nccwpck_require__(4506));
const queue_utils_1 = __nccwpck_require__(9800);
async function mainRun(execution) {
const results = [];
+ (0, logger_1.logInfo)('GitHub Action: starting main run.');
+ (0, logger_1.logDebugInfo)(`Event: ${execution.eventName}, actor: ${execution.actor}, repo: ${execution.owner}/${execution.repo}, debug: ${execution.debug}`);
await execution.setup();
+ (0, logger_1.clearAccumulatedLogs)();
+ (0, logger_1.logDebugInfo)(`Setup done. Issue number: ${execution.issueNumber}, isSingleAction: ${execution.isSingleAction}, isIssue: ${execution.isIssue}, isPullRequest: ${execution.isPullRequest}, isPush: ${execution.isPush}`);
if (!execution.welcome) {
/**
* Wait for previous runs to finish
@@ -42223,19 +42227,21 @@ async function mainRun(execution) {
}
if (execution.runnedByToken) {
if (execution.isSingleAction && execution.singleAction.validSingleAction) {
- (0, logger_1.logInfo)(`User from token (${execution.tokenUser}) matches actor. Executing single action.`);
+ (0, logger_1.logInfo)(`User from token (${execution.tokenUser}) matches actor. Executing single action: ${execution.singleAction.currentSingleAction}.`);
results.push(...await new single_action_use_case_1.SingleActionUseCase().invoke(execution));
+ (0, logger_1.logInfo)(`Single action finished. Results: ${results.length}.`);
return results;
}
- (0, logger_1.logInfo)(`User from token (${execution.tokenUser}) matches actor. Ignoring.`);
+ (0, logger_1.logInfo)(`User from token (${execution.tokenUser}) matches actor. Ignoring (not a valid single action).`);
return results;
}
if (execution.issueNumber === -1) {
if (execution.isSingleAction && execution.singleAction.isSingleActionWithoutIssue) {
+ (0, logger_1.logInfo)('No issue number; running single action without issue.');
results.push(...await new single_action_use_case_1.SingleActionUseCase().invoke(execution));
}
else {
- (0, logger_1.logInfo)(`Issue number not found. Skipping.`);
+ (0, logger_1.logInfo)('Issue number not found. Skipping.');
}
return results;
}
@@ -42252,34 +42258,45 @@ async function mainRun(execution) {
}
try {
if (execution.isSingleAction) {
+ (0, logger_1.logInfo)(`Running SingleActionUseCase (action: ${execution.singleAction.currentSingleAction}).`);
results.push(...await new single_action_use_case_1.SingleActionUseCase().invoke(execution));
}
else if (execution.isIssue) {
if (execution.issue.isIssueComment) {
+ (0, logger_1.logInfo)(`Running IssueCommentUseCase for issue #${execution.issue.number}.`);
results.push(...await new issue_comment_use_case_1.IssueCommentUseCase().invoke(execution));
}
else {
+ (0, logger_1.logInfo)(`Running IssueUseCase for issue #${execution.issueNumber}.`);
results.push(...await new issue_use_case_1.IssueUseCase().invoke(execution));
}
}
else if (execution.isPullRequest) {
if (execution.pullRequest.isPullRequestReviewComment) {
+ (0, logger_1.logInfo)(`Running PullRequestReviewCommentUseCase for PR #${execution.pullRequest.number}.`);
results.push(...await new pull_request_review_comment_use_case_1.PullRequestReviewCommentUseCase().invoke(execution));
}
else {
+ (0, logger_1.logInfo)(`Running PullRequestUseCase for PR #${execution.pullRequest.number}.`);
results.push(...await new pull_request_use_case_1.PullRequestUseCase().invoke(execution));
}
}
else if (execution.isPush) {
+ (0, logger_1.logDebugInfo)(`Push event. Branch: ${execution.commit?.branch ?? 'unknown'}, commits: ${execution.commit?.commits?.length ?? 0}, issue number: ${execution.issueNumber}.`);
+ (0, logger_1.logInfo)('Running CommitUseCase.');
results.push(...await new commit_use_case_1.CommitUseCase().invoke(execution));
}
else {
+ (0, logger_1.logError)(`Action not handled. Event: ${execution.eventName}.`);
core.setFailed(`Action not handled.`);
}
+ const totalSteps = results.reduce((acc, r) => acc + (r.steps?.length ?? 0), 0);
+ (0, logger_1.logInfo)(`Main run finished. Results: ${results.length}, total steps: ${totalSteps}.`);
return results;
}
catch (error) {
const msg = error instanceof Error ? error.message : String(error);
+ (0, logger_1.logError)(`Main run failed: ${msg}`, error instanceof Error ? { stack: error.stack } : undefined);
core.setFailed(msg);
return [];
}
@@ -42356,10 +42373,14 @@ const opencode_server_1 = __nccwpck_require__(1942);
const common_action_1 = __nccwpck_require__(3752);
async function runGitHubAction() {
const projectRepository = new project_repository_1.ProjectRepository();
+ (0, logger_1.logInfo)('GitHub Action: runGitHubAction started.');
/**
* Debug
*/
const debug = getInput(constants_1.INPUT_KEYS.DEBUG) == 'true';
+ if (debug) {
+ (0, logger_1.logInfo)('Debug mode is enabled. Full logs will be included in the report.');
+ }
/**
* Single action
*/
@@ -42380,8 +42401,13 @@ async function runGitHubAction() {
const opencodeStartServer = getInput(constants_1.INPUT_KEYS.OPENCODE_START_SERVER) === 'true';
let managedOpencodeServer;
if (opencodeStartServer) {
+ (0, logger_1.logInfo)('Starting managed OpenCode server...');
managedOpencodeServer = await (0, opencode_server_1.startOpencodeServer)({ cwd: process.cwd() });
opencodeServerUrl = managedOpencodeServer.url;
+ (0, logger_1.logInfo)(`OpenCode server started at ${opencodeServerUrl}.`);
+ }
+ else {
+ (0, logger_1.logDebugInfo)(`Using OpenCode server URL: ${opencodeServerUrl}, model: ${opencodeModel}.`);
}
try {
const aiPullRequestDescription = getInput(constants_1.INPUT_KEYS.AI_PULL_REQUEST_DESCRIPTION) === 'true';
@@ -42718,6 +42744,7 @@ async function runGitHubAction() {
const pullRequestDesiredReviewersCount = parseInt(getInput(constants_1.INPUT_KEYS.PULL_REQUEST_DESIRED_REVIEWERS_COUNT)) ?? 0;
const pullRequestMergeTimeout = parseInt(getInput(constants_1.INPUT_KEYS.PULL_REQUEST_MERGE_TIMEOUT)) ?? 0;
const execution = new execution_1.Execution(debug, new single_action_1.SingleAction(singleAction, singleActionIssue, singleActionVersion, singleActionTitle, singleActionChangelog), commitPrefixBuilder, new issue_1.Issue(branchManagementAlways, reopenIssueOnPush, issueDesiredAssigneesCount), new pull_request_1.PullRequest(pullRequestDesiredAssigneesCount, pullRequestDesiredReviewersCount, pullRequestMergeTimeout), new emoji_1.Emoji(titleEmoji, branchManagementEmoji), new images_1.Images(imagesOnIssue, imagesOnPullRequest, imagesOnCommit, imagesIssueAutomatic, imagesIssueFeature, imagesIssueBugfix, imagesIssueDocs, imagesIssueChore, imagesIssueRelease, imagesIssueHotfix, imagesPullRequestAutomatic, imagesPullRequestFeature, imagesPullRequestBugfix, imagesPullRequestRelease, imagesPullRequestHotfix, imagesPullRequestDocs, imagesPullRequestChore, imagesCommitAutomatic, imagesCommitFeature, imagesCommitBugfix, imagesCommitRelease, imagesCommitHotfix, imagesCommitDocs, imagesCommitChore), new tokens_1.Tokens(token), new ai_1.Ai(opencodeServerUrl, opencodeModel, aiPullRequestDescription, aiMembersOnly, aiIgnoreFiles, aiIncludeReasoning, bugbotSeverity, bugbotCommentLimit, bugbotFixVerifyCommands), new labels_1.Labels(branchManagementLauncherLabel, bugLabel, bugfixLabel, hotfixLabel, enhancementLabel, featureLabel, releaseLabel, questionLabel, helpLabel, deployLabel, deployedLabel, docsLabel, documentationLabel, choreLabel, maintenanceLabel, priorityHighLabel, priorityMediumLabel, priorityLowLabel, priorityNoneLabel, sizeXxlLabel, sizeXlLabel, sizeLLabel, sizeMLabel, sizeSLabel, sizeXsLabel), new issue_types_1.IssueTypes(issueTypeTask, issueTypeTaskDescription, issueTypeTaskColor, issueTypeBug, issueTypeBugDescription, issueTypeBugColor, issueTypeFeature, issueTypeFeatureDescription, issueTypeFeatureColor, issueTypeDocumentation, issueTypeDocumentationDescription, issueTypeDocumentationColor, issueTypeMaintenance, issueTypeMaintenanceDescription, issueTypeMaintenanceColor, 
issueTypeHotfix, issueTypeHotfixDescription, issueTypeHotfixColor, issueTypeRelease, issueTypeReleaseDescription, issueTypeReleaseColor, issueTypeQuestion, issueTypeQuestionDescription, issueTypeQuestionColor, issueTypeHelp, issueTypeHelpDescription, issueTypeHelpColor), new locale_1.Locale(issueLocale, pullRequestLocale), new size_thresholds_1.SizeThresholds(new size_threshold_1.SizeThreshold(sizeXxlThresholdLines, sizeXxlThresholdFiles, sizeXxlThresholdCommits), new size_threshold_1.SizeThreshold(sizeXlThresholdLines, sizeXlThresholdFiles, sizeXlThresholdCommits), new size_threshold_1.SizeThreshold(sizeLThresholdLines, sizeLThresholdFiles, sizeLThresholdCommits), new size_threshold_1.SizeThreshold(sizeMThresholdLines, sizeMThresholdFiles, sizeMThresholdCommits), new size_threshold_1.SizeThreshold(sizeSThresholdLines, sizeSThresholdFiles, sizeSThresholdCommits), new size_threshold_1.SizeThreshold(sizeXsThresholdLines, sizeXsThresholdFiles, sizeXsThresholdCommits)), new branches_1.Branches(mainBranch, developmentBranch, featureTree, bugfixTree, hotfixTree, releaseTree, docsTree, choreTree), new release_1.Release(), new hotfix_1.Hotfix(), new workflows_1.Workflows(releaseWorkflow, hotfixWorkflow), new projects_1.Projects(projects, projectColumnIssueCreated, projectColumnPullRequestCreated, projectColumnIssueInProgress, projectColumnPullRequestInProgress), undefined, undefined);
+ (0, logger_1.logDebugInfo)(`Execution built. Event will be resolved in mainRun. Single action: ${execution.singleAction.currentSingleAction ?? 'none'}, AI PR description: ${execution.ai.getAiPullRequestDescription()}, bugbot min severity: ${execution.ai.getBugbotMinSeverity()}.`);
const results = await (0, common_action_1.mainRun)(execution);
await finishWithResults(execution, results);
}
@@ -42730,6 +42757,9 @@ async function runGitHubAction() {
}
}
async function finishWithResults(execution, results) {
+ const stepCount = results.reduce((acc, r) => acc + (r.steps?.length ?? 0), 0);
+ const errorCount = results.reduce((acc, r) => acc + (r.errors?.length ?? 0), 0);
+ (0, logger_1.logInfo)(`Publishing result: ${results.length} result(s), ${stepCount} step(s), ${errorCount} error(s).`);
execution.currentConfiguration.results = results;
await new publish_resume_use_case_1.PublishResultUseCase().invoke(execution);
await new store_configuration_use_case_1.StoreConfigurationUseCase().invoke(execution);
@@ -44321,11 +44351,6 @@ function createTimeoutSignal(ms) {
function ensureNoTrailingSlash(url) {
return url.replace(/\/+$/, '') || url;
}
-function truncate(s, maxLen) {
- return s.length <= maxLen ? s : s.slice(0, maxLen) + '...';
-}
-const OPENCODE_PROMPT_LOG_PREVIEW_LEN = 500;
-const OPENCODE_PROMPT_LOG_FULL_LEN = 3000;
function getValidatedOpenCodeConfig(ai) {
const serverUrl = ai.getOpencodeServerUrl();
const model = ai.getOpencodeModel();
@@ -44410,16 +44435,12 @@ function parseJsonFromAgentText(text) {
}
catch (e) {
const msg = e instanceof Error ? e.message : String(e);
- (0, logger_1.logDebugInfo)(`OpenCode agent response (expectJson): failed to parse extracted JSON. Full text length=${trimmed.length} firstChars=${JSON.stringify(trimmed.slice(0, 200))}`);
+ (0, logger_1.logDebugInfo)(`OpenCode agent response (expectJson): failed to parse extracted JSON. Full text length=${trimmed.length}. Full text:\n${trimmed}`);
throw new Error(`Agent response is not valid JSON: ${msg}`);
}
}
- const previewLen = 500;
- const msg = trimmed.length > previewLen ? `${trimmed.slice(0, previewLen)}...` : trimmed;
- const fullTruncated = trimmed.length > 3000 ? `${trimmed.slice(0, 3000)}... [total ${trimmed.length} chars]` : trimmed;
- (0, logger_1.logDebugInfo)(`OpenCode agent response (expectJson): no JSON object found. length=${trimmed.length} preview=${JSON.stringify(msg)}`);
- (0, logger_1.logDebugInfo)(`OpenCode agent response (expectJson) full text for debugging:\n${fullTruncated}`);
- throw new Error(`Agent response is not valid JSON: no JSON object found. Response starts with: ${msg.slice(0, 150)}`);
+ (0, logger_1.logDebugInfo)(`OpenCode agent response (expectJson): no JSON object found. length=${trimmed.length}. Full text:\n${trimmed}`);
+ throw new Error(`Agent response is not valid JSON: no JSON object found. Response length: ${trimmed.length} chars.`);
}
}
}
@@ -44435,14 +44456,10 @@ function extractPartsByType(parts, type, joinWith) {
.join(joinWith)
.trim();
}
-const OPENCODE_RESPONSE_LOG_MAX_LEN = 80000;
-/** Parse response as JSON; on empty or invalid body throw a clear error with context. */
+/** Parse response as JSON; on empty or invalid body throw a clear error with context. Logs full body (no truncation). */
async function parseJsonResponse(res, context) {
const raw = await res.text();
- const truncated = raw.length > OPENCODE_RESPONSE_LOG_MAX_LEN
- ? `${raw.slice(0, OPENCODE_RESPONSE_LOG_MAX_LEN)}... [truncated, total ${raw.length} chars]`
- : raw;
- (0, logger_1.logDebugInfo)(`OpenCode response [${context}] status=${res.status} bodyLength=${raw.length}: ${truncated}`);
+ (0, logger_1.logDebugInfo)(`OpenCode response [${context}] status=${res.status} bodyLength=${raw.length}. Full body:\n${raw}`);
if (!raw || !raw.trim()) {
throw new Error(`${context}: empty response body (status ${res.status}). The server may have returned nothing or closed the connection early.`);
}
@@ -44450,8 +44467,7 @@ async function parseJsonResponse(res, context) {
return JSON.parse(raw);
}
catch (parseError) {
- const snippet = raw.length > 200 ? `${raw.slice(0, 200)}...` : raw;
- const err = new Error(`${context}: invalid JSON (status ${res.status}). Body snippet: ${snippet}`);
+ const err = new Error(`${context}: invalid JSON (status ${res.status}). Body length: ${raw.length} chars. See debug log for full body.`);
if (parseError instanceof Error && 'cause' in err)
err.cause = parseError;
throw err;
@@ -44465,25 +44481,27 @@ function extractTextFromParts(parts) {
function extractReasoningFromParts(parts) {
return extractPartsByType(parts, 'reasoning', '\n\n');
}
-/** Max length of per-part text preview in debug log (to avoid huge log lines). */
-const OPENCODE_PART_PREVIEW_LEN = 80;
/**
- * Build a short summary of OpenCode message parts for debug logs (types, text lengths, and short preview).
+ * Log OpenCode message parts: summary line and full text of each part (no truncation).
*/
-function summarizePartsForLog(parts, context) {
+function logPartsForDebug(parts, context) {
if (!Array.isArray(parts) || parts.length === 0) {
- return `${context}: 0 parts`;
+ (0, logger_1.logDebugInfo)(`${context}: 0 parts`);
+ return;
}
- const items = parts.map((p, i) => {
+ const summary = parts.map((p, i) => {
+ const type = p?.type ?? '(missing type)';
+ const len = typeof p?.text === 'string' ? p.text.length : 0;
+ return `[${i}] type=${type} length=${len}`;
+ }).join(' | ');
+ (0, logger_1.logDebugInfo)(`${context}: ${parts.length} part(s) — ${summary}`);
+ parts.forEach((p, i) => {
const type = p?.type ?? '(missing type)';
const text = typeof p?.text === 'string' ? p.text : '';
- const len = text.length;
- const preview = len > OPENCODE_PART_PREVIEW_LEN
- ? `${text.slice(0, OPENCODE_PART_PREVIEW_LEN).replace(/\n/g, ' ')}...`
- : text.replace(/\n/g, ' ');
- return `[${i}] type=${type} length=${len}${preview ? ` preview=${JSON.stringify(preview)}` : ''}`;
+ if (text) {
+ (0, logger_1.logDebugInfo)(`OpenCode part [${i}] type=${type} full text:\n${text}`);
+ }
});
- return `${context}: ${parts.length} part(s) — ${items.join(' | ')}`;
}
/** Default OpenCode agent for analysis/planning (read-only, no file edits). */
exports.OPENCODE_AGENT_PLAN = 'plan';
@@ -44536,8 +44554,8 @@ exports.LANGUAGE_CHECK_RESPONSE_SCHEMA = {
*/
async function opencodeMessageWithAgentRaw(baseUrl, options) {
(0, logger_1.logInfo)(`OpenCode request [agent ${options.agent}] model=${options.providerID}/${options.modelID} promptLength=${options.promptText.length}`);
- (0, logger_1.logInfo)(`OpenCode sending prompt (preview): ${truncate(options.promptText, OPENCODE_PROMPT_LOG_PREVIEW_LEN)}`);
- (0, logger_1.logDebugInfo)(`OpenCode prompt (full): ${truncate(options.promptText, OPENCODE_PROMPT_LOG_FULL_LEN)}`);
+ (0, logger_1.logInfo)(`OpenCode sending prompt (full):\n${options.promptText}`);
+ (0, logger_1.logDebugInfo)(`OpenCode prompt (full, no truncation):\n${options.promptText}`);
(0, logger_1.logDebugInfo)(`OpenCode message body: agent=${options.agent}, model=${options.providerID}/${options.modelID}, parts[0].text length=${options.promptText.length}`);
const base = ensureNoTrailingSlash(baseUrl);
const signal = createTimeoutSignal(constants_1.OPENCODE_REQUEST_TIMEOUT_MS);
@@ -44579,7 +44597,7 @@ async function opencodeMessageWithAgentRaw(baseUrl, options) {
const messageData = await parseJsonResponse(messageRes, `OpenCode agent "${options.agent}" message`);
const parts = messageData?.parts ?? messageData?.data?.parts ?? [];
const partsArray = Array.isArray(parts) ? parts : [];
- (0, logger_1.logDebugInfo)(summarizePartsForLog(partsArray, `OpenCode agent "${options.agent}" message parts`));
+ logPartsForDebug(partsArray, `OpenCode agent "${options.agent}" message parts`);
const text = extractTextFromParts(partsArray);
(0, logger_1.logInfo)(`OpenCode response [agent ${options.agent}] responseLength=${text.length} sessionId=${sessionId}`);
return { text, parts: partsArray, sessionId };
@@ -44648,9 +44666,8 @@ class AiRepository {
throw new Error('Empty response text');
const reasoning = options.includeReasoning ? extractReasoningFromParts(parts) : '';
if (options.expectJson && options.schema) {
- const maxLogLen = 5000000;
- const toLog = text.length > maxLogLen ? `${text.slice(0, maxLogLen)}\n... [truncated, total ${text.length} chars]` : text;
- (0, logger_1.logInfo)(`OpenCode agent response (full text, expectJson=true) length=${text.length}:\n${toLog}`);
+ (0, logger_1.logInfo)(`OpenCode agent response (expectJson=true) length=${text.length}`);
+ (0, logger_1.logDebugInfo)(`OpenCode agent response (full text, no truncation) length=${text.length}:\n${text}`);
const parsed = parseJsonFromAgentText(text);
if (options.includeReasoning && reasoning) {
return { ...parsed, reasoning };
@@ -45402,6 +45419,7 @@ Object.defineProperty(exports, "__esModule", ({ value: true }));
exports.IssueRepository = exports.PROGRESS_LABEL_PATTERN = void 0;
const core = __importStar(__nccwpck_require__(2186));
const github = __importStar(__nccwpck_require__(5438));
+const comment_watermark_1 = __nccwpck_require__(4467);
const logger_1 = __nccwpck_require__(8836);
const milestone_1 = __nccwpck_require__(2298);
/** Matches labels that are progress percentages (e.g. "0%", "85%"). Used for setProgressLabel and syncing. */
@@ -45758,23 +45776,27 @@ class IssueRepository {
});
return pullRequest.data.head.ref;
};
- this.addComment = async (owner, repository, issueNumber, comment, token) => {
+ this.addComment = async (owner, repository, issueNumber, comment, token, options) => {
+ const watermark = (0, comment_watermark_1.getCommentWatermark)(options?.commitSha ? { commitSha: options.commitSha, owner, repo: repository } : undefined);
+ const body = `${comment}\n\n${watermark}`;
const octokit = github.getOctokit(token);
await octokit.rest.issues.createComment({
owner: owner,
repo: repository,
issue_number: issueNumber,
- body: comment,
+ body,
});
(0, logger_1.logDebugInfo)(`Comment added to Issue ${issueNumber}.`);
};
- this.updateComment = async (owner, repository, issueNumber, commentId, comment, token) => {
+ this.updateComment = async (owner, repository, issueNumber, commentId, comment, token, options) => {
+ const watermark = (0, comment_watermark_1.getCommentWatermark)(options?.commitSha ? { commitSha: options.commitSha, owner, repo: repository } : undefined);
+ const body = `${comment}\n\n${watermark}`;
const octokit = github.getOctokit(token);
await octokit.rest.issues.updateComment({
owner: owner,
repo: repository,
comment_id: commentId,
- body: comment,
+ body,
});
(0, logger_1.logDebugInfo)(`Comment ${commentId} updated in Issue ${issueNumber}.`);
};
@@ -46343,8 +46365,14 @@ const github = __importStar(__nccwpck_require__(5438));
const logger_1 = __nccwpck_require__(8836);
const result_1 = __nccwpck_require__(7305);
/**
- * Repository for merging branches (via PR or direct merge).
- * Isolated to allow unit tests with mocked Octokit.
+ * Repository for merging branches: creates a PR, waits for that PR's check runs (or status checks),
+ * then merges the PR; on failure, falls back to a direct Git merge.
+ *
+ * Check runs are filtered by PR (pull_requests) so we only wait for the current PR's checks,
+ * not those of another PR sharing the same head (e.g. release→main vs release→develop).
+ * If the PR has no check runs after a short wait, we proceed to merge (branch may have no required checks).
+ *
+ * @see docs/single-actions/deploy-label-and-merge.mdx for the deploy flow and check-wait behaviour.
*/
class MergeRepository {
constructor() {
@@ -46386,10 +46414,13 @@ This PR merges **${head}** into **${base}**.
'\n\nThis PR was automatically created by [`copilot`](https://github.com/vypdev/copilot).',
});
const iteration = 10;
+ /** Give workflows a short window to register check runs for this PR; after this, we allow merge with no check runs (e.g. branch has no required checks). */
+ const maxWaitForPrChecksAttempts = 3;
if (timeout > iteration) {
// Wait for checks to complete - can use regular token for reading checks
let checksCompleted = false;
let attempts = 0;
+ let waitForPrChecksAttempts = 0;
const maxAttempts = timeout > iteration ? Math.floor(timeout / iteration) : iteration;
while (!checksCompleted && attempts < maxAttempts) {
const { data: checkRuns } = await octokit.rest.checks.listForRef({
@@ -46397,6 +46428,11 @@ This PR merges **${head}** into **${base}**.
repo: repository,
ref: head,
});
+ // Only consider check runs that are for this PR. When the same branch is used in
+ // multiple PRs (e.g. release→master and release→develop), listForRef returns runs
+ // for all PRs; we must wait for runs tied to the current PR or we may see completed
+ // runs from the other PR and merge before this PR's checks have run.
+ const runsForThisPr = checkRuns.check_runs.filter(run => run.pull_requests?.some(pr => pr.number === pullRequest.number));
// Get commit status checks for the PR head commit
const { data: commitStatus } = await octokit.rest.repos.getCombinedStatusForRef({
owner: owner,
@@ -46404,15 +46440,15 @@ This PR merges **${head}** into **${base}**.
ref: head,
});
(0, logger_1.logDebugInfo)(`Combined status state: ${commitStatus.state}`);
- (0, logger_1.logDebugInfo)(`Number of check runs: ${checkRuns.check_runs.length}`);
- // If there are check runs, prioritize those over status checks
- if (checkRuns.check_runs.length > 0) {
- const pendingCheckRuns = checkRuns.check_runs.filter(check => check.status !== 'completed');
+ (0, logger_1.logDebugInfo)(`Number of check runs for this PR: ${runsForThisPr.length} (total on ref: ${checkRuns.check_runs.length})`);
+ // If there are check runs for this PR, wait for them to complete
+ if (runsForThisPr.length > 0) {
+ const pendingCheckRuns = runsForThisPr.filter(check => check.status !== 'completed');
if (pendingCheckRuns.length === 0) {
checksCompleted = true;
(0, logger_1.logDebugInfo)('All check runs have completed.');
// Verify if all checks passed
- const failedChecks = checkRuns.check_runs.filter(check => check.conclusion === 'failure');
+ const failedChecks = runsForThisPr.filter(check => check.conclusion === 'failure');
if (failedChecks.length > 0) {
throw new Error(`Checks failed: ${failedChecks.map(check => check.name).join(', ')}`);
}
@@ -46427,6 +46463,37 @@ This PR merges **${head}** into **${base}**.
continue;
}
}
+ else if (checkRuns.check_runs.length > 0 && runsForThisPr.length === 0) {
+ // There are runs on the ref but none for this PR. Either workflows for this PR
+ // haven't registered yet, or this PR/base has no required checks.
+ waitForPrChecksAttempts++;
+ if (waitForPrChecksAttempts >= maxWaitForPrChecksAttempts) {
+ // Give up waiting for PR-specific check runs; fall back to status checks
+ // before proceeding to merge (PR may have required status checks).
+ const pendingChecksFallback = commitStatus.statuses.filter(status => {
+ (0, logger_1.logDebugInfo)(`Status check (fallback): ${status.context} (State: ${status.state})`);
+ return status.state === 'pending';
+ });
+ if (pendingChecksFallback.length === 0) {
+ checksCompleted = true;
+ (0, logger_1.logDebugInfo)(`No check runs for this PR after ${maxWaitForPrChecksAttempts} polls; no pending status checks; proceeding to merge.`);
+ }
+ else {
+ (0, logger_1.logDebugInfo)(`No check runs for this PR after ${maxWaitForPrChecksAttempts} polls; falling back to status checks. Waiting for ${pendingChecksFallback.length} status checks to complete.`);
+ pendingChecksFallback.forEach(check => {
+ (0, logger_1.logDebugInfo)(` - ${check.context} (State: ${check.state})`);
+ });
+ await new Promise(resolve => setTimeout(resolve, iteration * 1000));
+ attempts++;
+ }
+ }
+ else {
+ (0, logger_1.logDebugInfo)('Check runs exist on ref but none for this PR yet; waiting for workflows to register.');
+ await new Promise(resolve => setTimeout(resolve, iteration * 1000));
+ attempts++;
+ }
+ continue;
+ }
else {
// Fall back to status checks if no check runs exist
const pendingChecks = commitStatus.statuses.filter(status => {
@@ -48470,11 +48537,12 @@ const TEMPLATE = `You are in the repository workspace. Your task is to produce a
- **Breaking Changes:** list any, or "None".
- **Notes for Reviewers / Additional Context:** fill only if useful; otherwise a short placeholder or omit.
5. Do not output a single compact paragraph. Output the full filled template so the PR description is well-structured and easy to scan. Preserve the template's formatting (headings with # and ##, horizontal rules). Use checkboxes \`- [ ]\` / \`- [x]\` only where they add value; you may simplify or drop a section if it does not apply.
+6. **Output format:** Return only the filled template content. Do not add any preamble, meta-commentary, or framing phrases (e.g. "Based on my analysis...", "After reviewing the diff...", "Here is the description..."). Start directly with the first heading of the template (e.g. # Summary). Do not wrap the output in code blocks.
**Issue description:**
{{issueDescription}}
-Output only the filled template content (the PR description body), starting with the first heading of the template (e.g. # Summary). Do not wrap it in code blocks or add extra commentary.`;
+Output only the filled template content (the PR description body), starting with the first heading. No preamble, no commentary.`;
function getUpdatePullRequestDescriptionPrompt(params) {
return (0, fill_1.fillTemplate)(TEMPLATE, {
projectContextInstruction: params.projectContextInstruction,
@@ -48653,12 +48721,23 @@ class CheckProgressUseCase {
baseBranch: developmentBranch,
currentBranch: branch,
});
+ (0, logger_1.logDebugInfo)(`CheckProgress: prompt length=${prompt.length}, issue description length=${issueDescription.length}.`);
(0, logger_1.logInfo)('🤖 Analyzing progress using OpenCode Plan agent...');
const attemptResult = await this.fetchProgressAttempt(param.ai, prompt);
const progress = attemptResult.progress;
const summary = attemptResult.summary;
const reasoning = attemptResult.reasoning;
const remaining = attemptResult.remaining;
+ (0, logger_1.logDebugInfo)(`CheckProgress: raw progress=${progress}, summary length=${summary.length}, reasoning length=${reasoning.length}, remaining length=${remaining?.length ?? 0}. Full summary:\n${summary}`);
+ if (reasoning) {
+ (0, logger_1.logDebugInfo)(`CheckProgress: full reasoning:\n${reasoning}`);
+ }
+ if (remaining) {
+ (0, logger_1.logDebugInfo)(`CheckProgress: full remaining:\n${remaining}`);
+ }
+ if (progress < 0 || progress > 100) {
+ (0, logger_1.logWarn)(`CheckProgress: unexpected progress value ${progress} (expected 0-100). Clamping for display.`);
+ }
if (progress > 0) {
(0, logger_1.logInfo)(`✅ Progress detection completed: ${progress}%`);
}
@@ -48852,6 +48931,7 @@ class CreateReleaseUseCase {
}));
}
else {
+ (0, logger_1.logWarn)(`CreateRelease: createRelease returned no URL for version ${param.singleAction.version}.`);
result.push(new result_1.Result({
id: this.taskId,
success: false,
@@ -48937,6 +49017,7 @@ class CreateTagUseCase {
}));
}
else {
+ (0, logger_1.logWarn)(`CreateTag: createTag returned no SHA for version ${param.singleAction.version}.`);
result.push(new result_1.Result({
id: this.taskId,
success: false,
@@ -48979,6 +49060,16 @@ const branch_repository_1 = __nccwpck_require__(7701);
const issue_repository_1 = __nccwpck_require__(57);
const logger_1 = __nccwpck_require__(8836);
const task_emoji_1 = __nccwpck_require__(9785);
+/**
+ * Single action run after a successful deployment (triggered with the "deployed" action and an issue number).
+ *
+ * Requires the issue to have the "deploy" label and not already have the "deployed" label. Then:
+ * 1. Replaces the "deploy" label with "deployed".
+ * 2. If a release or hotfix branch is configured: merges it into default and develop (each via PR, waiting for that PR's checks).
+ * 3. Closes the issue only when all merges succeed.
+ *
+ * @see docs/single-actions/deploy-label-and-merge.mdx for the full flow and how merge/check waiting works.
+ */
class DeployedActionUseCase {
constructor() {
this.taskId = 'DeployedActionUseCase';
@@ -49403,11 +49494,13 @@ class RecommendStepsUseCase {
issueNumber: String(issueNumber),
issueDescription,
});
+ (0, logger_1.logDebugInfo)(`RecommendSteps: prompt length=${prompt.length}, issue description length=${issueDescription.length}.`);
(0, logger_1.logInfo)(`🤖 Recommending steps using OpenCode Plan agent...`);
const response = await this.aiRepository.askAgent(param.ai, ai_repository_1.OPENCODE_AGENT_PLAN, prompt);
const steps = typeof response === 'string'
? response
: (response && String(response.steps)) || 'No response.';
+ (0, logger_1.logDebugInfo)(`RecommendSteps: OpenCode response received. Steps length=${steps.length}. Full steps:\n${steps}`);
results.push(new result_1.Result({
id: this.taskId,
success: true,
@@ -49960,9 +50053,10 @@ class SingleActionUseCase {
const results = [];
try {
if (!param.singleAction.validSingleAction) {
- (0, logger_1.logDebugInfo)(`Not a valid single action: ${param.singleAction.currentSingleAction}`);
+ (0, logger_1.logWarn)(`Single action invoked but not a valid single action: ${param.singleAction.currentSingleAction}. Skipping.`);
return results;
}
+ (0, logger_1.logDebugInfo)(`SingleAction: dispatching to handler for action: ${param.singleAction.currentSingleAction}.`);
if (param.singleAction.isDeployedAction) {
results.push(...await new deployed_action_use_case_1.DeployedActionUseCase().invoke(param));
}
@@ -50382,8 +50476,10 @@ class BugbotAutofixUseCase {
}
const verifyCommands = execution.ai.getBugbotFixVerifyCommands?.() ?? [];
const prompt = (0, build_bugbot_fix_prompt_1.buildBugbotFixPrompt)(execution, context, idsToFix, userComment, verifyCommands);
+ (0, logger_1.logDebugInfo)(`BugbotAutofix: prompt length=${prompt.length}, target finding ids=${idsToFix.length}, verifyCommands=${verifyCommands.length}.`);
(0, logger_1.logInfo)("Running OpenCode build agent to fix selected findings (changes applied in workspace).");
const response = await this.aiRepository.copilotMessage(execution.ai, prompt);
+ (0, logger_1.logDebugInfo)(`BugbotAutofix: OpenCode build agent response length=${response?.text?.length ?? 0}. Full response:\n${response?.text ?? '(none)'}`);
if (!response?.text) {
(0, logger_1.logError)("Bugbot autofix: no response from OpenCode build agent.");
results.push(new result_1.Result({
@@ -50730,6 +50826,7 @@ class DetectBugbotFixIntentUseCase {
parentCommentBody = parentBody ?? undefined;
}
const prompt = (0, build_bugbot_fix_intent_prompt_1.buildBugbotFixIntentPrompt)(commentBody, unresolvedFindings, parentCommentBody);
+ (0, logger_1.logDebugInfo)(`DetectBugbotFixIntent: prompt length=${prompt.length}, unresolved findings=${unresolvedFindings.length}. Calling OpenCode Plan agent.`);
const response = await this.aiRepository.askAgent(param.ai, ai_repository_1.OPENCODE_AGENT_PLAN, prompt, {
expectJson: true,
schema: schema_1.BUGBOT_FIX_INTENT_RESPONSE_SCHEMA,
@@ -50754,6 +50851,7 @@ class DetectBugbotFixIntentUseCase {
: [];
const validIds = new Set(unresolvedIds);
const filteredIds = targetFindingIds.filter((id) => validIds.has(id));
+ (0, logger_1.logDebugInfo)(`DetectBugbotFixIntent: OpenCode payload is_fix_request=${isFixRequest}, is_do_request=${isDoRequest}, target_finding_ids=${JSON.stringify(targetFindingIds)}, filteredIds=${JSON.stringify(filteredIds)}.`);
results.push(new result_1.Result({
id: this.taskId,
success: true,
@@ -50889,6 +50987,7 @@ const issue_repository_1 = __nccwpck_require__(57);
const pull_request_repository_1 = __nccwpck_require__(634);
const build_bugbot_fix_prompt_1 = __nccwpck_require__(1822);
const marker_1 = __nccwpck_require__(2401);
+const logger_1 = __nccwpck_require__(8836);
/** Builds the text block sent to OpenCode for task 2 (decide which previous findings are now resolved). */
function buildPreviousFindingsBlock(previousFindings) {
if (previousFindings.length === 0)
@@ -50919,6 +51018,7 @@ async function loadBugbotContext(param, options) {
const owner = param.owner;
const repo = param.repo;
if (!headBranch) {
+ (0, logger_1.logDebugInfo)('LoadBugbotContext: no head branch (branchOverride or commit.branch); returning empty context.');
return {
existingByFindingId: {},
issueComments: [],
@@ -50984,6 +51084,7 @@ async function loadBugbotContext(param, options) {
}
const previousFindingsBlock = buildPreviousFindingsBlock(previousFindingsForPrompt);
const unresolvedFindingsWithBody = previousFindingsForPrompt.map((p) => ({ id: p.id, fullBody: p.fullBody }));
+ (0, logger_1.logDebugInfo)(`LoadBugbotContext: issue #${issueNumber}, branch ${headBranch}, open PRs=${openPrNumbers.length}, existing findings=${Object.keys(existingByFindingId).length}, unresolved with body=${unresolvedFindingsWithBody.length}.`);
// PR context is only for publishing: we need file list and diff lines so GitHub review comments attach to valid (path, line).
let prContext = null;
if (openPrNumbers.length > 0) {
@@ -51285,12 +51386,13 @@ Object.defineProperty(exports, "__esModule", ({ value: true }));
exports.publishFindings = publishFindings;
const issue_repository_1 = __nccwpck_require__(57);
const pull_request_repository_1 = __nccwpck_require__(634);
+const comment_watermark_1 = __nccwpck_require__(4467);
const logger_1 = __nccwpck_require__(8836);
const marker_1 = __nccwpck_require__(2401);
const path_validation_1 = __nccwpck_require__(1999);
/** Creates or updates issue comments for each finding; creates PR review comments only when finding.file is in prFiles. */
async function publishFindings(param) {
- const { execution, context, findings, overflowCount = 0, overflowTitles = [] } = param;
+ const { execution, context, findings, commitSha, overflowCount = 0, overflowTitles = [] } = param;
const { existingByFindingId, openPrNumbers, prContext } = context;
const issueNumber = execution.issueNumber;
const token = execution.tokens.token;
@@ -51298,18 +51400,22 @@ async function publishFindings(param) {
const repo = execution.repo;
const issueRepository = new issue_repository_1.IssueRepository();
const pullRequestRepository = new pull_request_repository_1.PullRequestRepository();
+ const bugbotWatermark = commitSha && owner && repo
+ ? (0, comment_watermark_1.getCommentWatermark)({ commitSha, owner, repo })
+ : (0, comment_watermark_1.getCommentWatermark)();
const prFiles = prContext?.prFiles ?? [];
const pathToFirstDiffLine = prContext?.pathToFirstDiffLine ?? {};
const prCommentsToCreate = [];
for (const finding of findings) {
const existing = existingByFindingId[finding.id];
const commentBody = (0, marker_1.buildCommentBody)(finding, false);
+ const bodyWithWatermark = `${commentBody}\n\n${bugbotWatermark}`;
if (existing?.issueCommentId != null) {
- await issueRepository.updateComment(owner, repo, issueNumber, existing.issueCommentId, commentBody, token);
+ await issueRepository.updateComment(owner, repo, issueNumber, existing.issueCommentId, commentBody, token, commitSha ? { commitSha } : undefined);
(0, logger_1.logDebugInfo)(`Updated bugbot comment for finding ${finding.id} on issue.`);
}
else {
- await issueRepository.addComment(owner, repo, issueNumber, commentBody, token);
+ await issueRepository.addComment(owner, repo, issueNumber, commentBody, token, commitSha ? { commitSha } : undefined);
(0, logger_1.logDebugInfo)(`Added bugbot comment for finding ${finding.id} on issue.`);
}
// PR review comment: only if this finding's file is in the PR changed files (so GitHub can attach the comment).
@@ -51318,10 +51424,10 @@ async function publishFindings(param) {
if (path) {
const line = finding.line ?? pathToFirstDiffLine[path] ?? 1;
if (existing?.prCommentId != null && existing.prNumber === openPrNumbers[0]) {
- await pullRequestRepository.updatePullRequestReviewComment(owner, repo, existing.prCommentId, commentBody, token);
+ await pullRequestRepository.updatePullRequestReviewComment(owner, repo, existing.prCommentId, bodyWithWatermark, token);
}
else {
- prCommentsToCreate.push({ path, line, body: commentBody });
+ prCommentsToCreate.push({ path, line, body: bodyWithWatermark });
}
}
else if (finding.file != null && String(finding.file).trim() !== "") {
@@ -51339,7 +51445,7 @@ async function publishFindings(param) {
const overflowBody = `## More findings (comment limit)
There are **${overflowCount}** more finding(s) that were not published as individual comments. Review locally or in the full diff to see the list.${titlesList}`;
- await issueRepository.addComment(owner, repo, issueNumber, overflowBody, token);
+ await issueRepository.addComment(owner, repo, issueNumber, overflowBody, token, commitSha ? { commitSha } : undefined);
(0, logger_1.logDebugInfo)(`Added overflow comment: ${overflowCount} additional finding(s) not published individually.`);
}
}
@@ -51579,7 +51685,7 @@ class CheckChangesIssueSizeUseCase {
}
}
catch (error) {
- (0, logger_1.logError)(error);
+ (0, logger_1.logError)(`CheckChangesIssueSize: failed for issue #${param.issueNumber}.`, error instanceof Error ? { stack: error.stack } : undefined);
result.push(new result_1.Result({
id: this.taskId,
success: false,
@@ -51599,12 +51705,46 @@ exports.CheckChangesIssueSizeUseCase = CheckChangesIssueSizeUseCase;
/***/ }),
/***/ 7395:
-/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => {
+/***/ (function(__unused_webpack_module, exports, __nccwpck_require__) {
"use strict";
+var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
+ if (k2 === undefined) k2 = k;
+ var desc = Object.getOwnPropertyDescriptor(m, k);
+ if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
+ desc = { enumerable: true, get: function() { return m[k]; } };
+ }
+ Object.defineProperty(o, k2, desc);
+}) : (function(o, m, k, k2) {
+ if (k2 === undefined) k2 = k;
+ o[k2] = m[k];
+}));
+var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
+ Object.defineProperty(o, "default", { enumerable: true, value: v });
+}) : function(o, v) {
+ o["default"] = v;
+});
+var __importStar = (this && this.__importStar) || (function () {
+ var ownKeys = function(o) {
+ ownKeys = Object.getOwnPropertyNames || function (o) {
+ var ar = [];
+ for (var k in o) if (Object.prototype.hasOwnProperty.call(o, k)) ar[ar.length] = k;
+ return ar;
+ };
+ return ownKeys(o);
+ };
+ return function (mod) {
+ if (mod && mod.__esModule) return mod;
+ var result = {};
+ if (mod != null) for (var k = ownKeys(mod), i = 0; i < k.length; i++) if (k[i] !== "default") __createBinding(result, mod, k[i]);
+ __setModuleDefault(result, mod);
+ return result;
+ };
+})();
Object.defineProperty(exports, "__esModule", ({ value: true }));
exports.DetectPotentialProblemsUseCase = void 0;
+const github = __importStar(__nccwpck_require__(5438));
const result_1 = __nccwpck_require__(7305);
const ai_repository_1 = __nccwpck_require__(8307);
const constants_1 = __nccwpck_require__(8593);
@@ -51635,11 +51775,13 @@ class DetectPotentialProblemsUseCase {
return results;
}
if (param.issueNumber === -1) {
- (0, logger_1.logDebugInfo)('No issue number for this branch; skipping.');
+ (0, logger_1.logDebugInfo)('No issue number for this branch; skipping potential problems detection.');
return results;
}
+ (0, logger_1.logDebugInfo)(`DetectPotentialProblems: loading context for issue #${param.issueNumber}.`);
const context = await (0, load_bugbot_context_use_case_1.loadBugbotContext)(param);
const prompt = (0, build_bugbot_prompt_1.buildBugbotPrompt)(param, context);
+ (0, logger_1.logDebugInfo)(`DetectPotentialProblems: prompt length=${prompt.length}. Calling OpenCode Plan agent.`);
(0, logger_1.logInfo)('Detecting potential problems via OpenCode (agent computes changes and checks resolved)...');
const response = await this.aiRepository.askAgent(param.ai, ai_repository_1.OPENCODE_AGENT_PLAN, prompt, {
expectJson: true,
@@ -51647,7 +51789,7 @@ class DetectPotentialProblemsUseCase {
schemaName: 'bugbot_findings',
});
if (response == null || typeof response !== 'object') {
- (0, logger_1.logDebugInfo)('No response from OpenCode.');
+ (0, logger_1.logDebugInfo)('DetectPotentialProblems: No response from OpenCode.');
return results;
}
const payload = response;
@@ -51655,6 +51797,7 @@ class DetectPotentialProblemsUseCase {
const resolvedFindingIdsRaw = Array.isArray(payload.resolved_finding_ids) ? payload.resolved_finding_ids : [];
const resolvedFindingIds = new Set(resolvedFindingIdsRaw);
const normalizedResolvedIds = new Set(resolvedFindingIdsRaw.map(marker_1.sanitizeFindingIdForMarker));
+ (0, logger_1.logDebugInfo)(`DetectPotentialProblems: OpenCode returned findings=${findings.length}, resolved_finding_ids=${resolvedFindingIdsRaw.length}. Resolved ids: ${JSON.stringify([...resolvedFindingIds])}.`);
const ignorePatterns = param.ai?.getAiIgnoreFiles?.() ?? [];
const minSeverity = (0, severity_1.normalizeMinSeverity)(param.ai?.getBugbotMinSeverity?.());
findings = findings.filter((f) => f.file == null || String(f.file).trim() === '' || (0, path_validation_1.isSafeFindingFilePath)(f.file));
@@ -51663,8 +51806,9 @@ class DetectPotentialProblemsUseCase {
findings = (0, deduplicate_findings_1.deduplicateFindings)(findings);
const maxComments = param.ai?.getBugbotCommentLimit?.() ?? constants_1.BUGBOT_MAX_COMMENTS;
const { toPublish, overflowCount, overflowTitles } = (0, limit_comments_1.applyCommentLimit)(findings, maxComments);
+ (0, logger_1.logDebugInfo)(`DetectPotentialProblems: after filters and limit — toPublish=${toPublish.length}, overflow=${overflowCount}, minSeverity applied, ignore patterns applied.`);
if (toPublish.length === 0 && resolvedFindingIds.size === 0) {
- (0, logger_1.logDebugInfo)('OpenCode returned no new findings (after filters) and no resolved ids.');
+ (0, logger_1.logDebugInfo)('DetectPotentialProblems: OpenCode returned no new findings (after filters) and no resolved ids.');
results.push(new result_1.Result({
id: this.taskId,
success: true,
@@ -51683,6 +51827,7 @@ class DetectPotentialProblemsUseCase {
execution: param,
context,
findings: toPublish,
+ commitSha: github.context.sha,
overflowCount: overflowCount > 0 ? overflowCount : undefined,
overflowTitles: overflowCount > 0 ? overflowTitles : undefined,
});
@@ -51834,7 +51979,7 @@ ${this.separator}
await this.issueRepository.addComment(param.owner, param.repo, param.issueNumber, commentBody, param.tokens.token);
}
catch (error) {
- (0, logger_1.logError)(error);
+ (0, logger_1.logError)(`NotifyNewCommitOnIssue: failed to notify issue #${param.issueNumber}.`, error instanceof Error ? { stack: error.stack } : undefined);
result.push(new result_1.Result({
id: this.taskId,
success: false,
@@ -51903,8 +52048,10 @@ class DoUserRequestUseCase {
issueNumber: String(execution.issueNumber),
userComment: (0, sanitize_user_comment_for_prompt_1.sanitizeUserCommentForPrompt)(userComment),
});
+ (0, logger_1.logDebugInfo)(`DoUserRequest: prompt length=${prompt.length}, user comment length=${commentTrimmed.length}.`);
(0, logger_1.logInfo)("Running OpenCode build agent to perform user request (changes applied in workspace).");
const response = await this.aiRepository.copilotMessage(execution.ai, prompt);
+ (0, logger_1.logDebugInfo)(`DoUserRequest: OpenCode build agent response length=${response?.text?.length ?? 0}. Full response:\n${response?.text ?? '(none)'}`);
if (!response?.text) {
(0, logger_1.logError)("DoUserRequest: no response from OpenCode build agent.");
results.push(new result_1.Result({
@@ -51985,6 +52132,7 @@ class CheckPermissionsUseCase {
}));
}
else {
+ (0, logger_1.logWarn)(`CheckPermissions: @${param.issue.creator} not authorized to create [${param.labels.currentIssueLabels.join(',')}] issues.`);
result.push(new result_1.Result({
id: this.taskId,
success: false,
@@ -52003,7 +52151,7 @@ class CheckPermissionsUseCase {
}
}
catch (error) {
- (0, logger_1.logError)(error);
+ (0, logger_1.logError)(`CheckPermissions: failed to get project members or check creator.`, error instanceof Error ? { stack: error.stack } : undefined);
result.push(new result_1.Result({
id: this.taskId,
success: false,
@@ -52362,6 +52510,7 @@ class GetReleaseVersionUseCase {
}
const description = await this.issueRepository.getDescription(param.owner, param.repo, number, param.tokens.token);
if (description === undefined) {
+ (0, logger_1.logDebugInfo)(`GetReleaseVersion: no description for issue/PR ${number}.`);
result.push(new result_1.Result({
id: this.taskId,
success: false,
@@ -52372,6 +52521,7 @@ class GetReleaseVersionUseCase {
}
const releaseVersion = (0, content_utils_1.extractVersion)('Release Version', description);
if (releaseVersion === undefined) {
+ (0, logger_1.logDebugInfo)(`GetReleaseVersion: no "Release Version" found in description (issue/PR ${number}).`);
result.push(new result_1.Result({
id: this.taskId,
success: false,
@@ -52389,12 +52539,12 @@ class GetReleaseVersionUseCase {
}));
}
catch (error) {
- (0, logger_1.logError)(error);
+ (0, logger_1.logError)(`GetReleaseVersion: failed to get version for issue/PR.`, error instanceof Error ? { stack: error.stack } : undefined);
result.push(new result_1.Result({
id: this.taskId,
success: false,
executed: true,
- steps: [`Tried to check action permissions.`],
+ steps: [`Tried to get the release version but there was a problem.`],
error: error,
}));
}
@@ -52547,6 +52697,22 @@ ${errors}
Check your project configuration, if everything is okay consider [opening an issue](https://github.com/vypdev/copilot/issues/new/choose).
`;
}
+ let debugLogSection = '';
+ if (param.debug) {
+ const logsText = (0, logger_1.getAccumulatedLogsAsText)();
+ if (logsText.length > 0) {
+ debugLogSection = `
+
+
+Debug log
+
+\`\`\`
+${logsText}
+\`\`\`
+
+`;
+ }
+ }
const commentBody = `# ${title}
${content}
${errors.length > 0 ? errors : ''}
@@ -52554,7 +52720,7 @@ ${errors.length > 0 ? errors : ''}
${stupidGif}
${footer}
-
+${debugLogSection}
🚀 Happy coding!
`;
if (content.length === 0) {
@@ -52616,7 +52782,7 @@ class StoreConfigurationUseCase {
await this.handler.update(param);
}
catch (error) {
- (0, logger_1.logError)(error);
+ (0, logger_1.logError)(`StoreConfiguration: failed to update configuration.`, error instanceof Error ? { stack: error.stack } : undefined);
}
}
}
@@ -52646,6 +52812,7 @@ class ThinkUseCase {
}
async invoke(param) {
const results = [];
+ (0, logger_1.logInfo)('Think: processing comment (AI Q&A).');
try {
const commentBody = param.issue.isIssueComment
? (param.issue.commentBody ?? '')
@@ -52708,11 +52875,13 @@ class ThinkUseCase {
const contextBlock = issueDescription
? `\n\nContext (issue #${issueNumberForContext} description):\n${issueDescription}\n\n`
: '\n\n';
+ (0, logger_1.logDebugInfo)(`Think: question length=${question.length}, issue context length=${issueDescription.length}. Full question:\n${question}`);
const prompt = (0, prompts_1.getThinkPrompt)({
projectContextInstruction: opencode_project_context_instruction_1.OPENCODE_PROJECT_CONTEXT_INSTRUCTION,
contextBlock,
question,
});
+ (0, logger_1.logDebugInfo)(`Think: calling OpenCode Plan agent (prompt length=${prompt.length}).`);
const response = await this.aiRepository.askAgent(param.ai, ai_repository_1.OPENCODE_AGENT_PLAN, prompt, {
expectJson: true,
schema: ai_repository_1.THINK_RESPONSE_SCHEMA,
@@ -52723,6 +52892,7 @@ class ThinkUseCase {
typeof response.answer === 'string'
? response.answer.trim()
: '';
+ (0, logger_1.logDebugInfo)(`Think: OpenCode response received. Answer length=${answer.length}. Full answer:\n${answer}`);
if (!answer) {
(0, logger_1.logError)('OpenCode returned no answer for Think.');
results.push(new result_1.Result({
@@ -52916,6 +53086,7 @@ class AnswerIssueHelpUseCase {
}
async invoke(param) {
const results = [];
+ (0, logger_1.logInfo)('AnswerIssueHelp: checking if initial help reply is needed (AI).');
try {
if (!param.issue.opened) {
results.push(new result_1.Result({
@@ -52966,6 +53137,7 @@ class AnswerIssueHelpUseCase {
description,
projectContextInstruction: opencode_project_context_instruction_1.OPENCODE_PROJECT_CONTEXT_INSTRUCTION,
});
+ (0, logger_1.logDebugInfo)(`AnswerIssueHelp: prompt length=${prompt.length}, issue description length=${description.length}. Calling OpenCode Plan agent.`);
const response = await this.aiRepository.askAgent(param.ai, ai_repository_1.OPENCODE_AGENT_PLAN, prompt, {
expectJson: true,
schema: ai_repository_1.THINK_RESPONSE_SCHEMA,
@@ -52976,6 +53148,7 @@ class AnswerIssueHelpUseCase {
typeof response.answer === 'string'
? response.answer.trim()
: '';
+ (0, logger_1.logDebugInfo)(`AnswerIssueHelp: OpenCode response. Answer length=${answer.length}. Full answer:\n${answer}`);
if (!answer) {
(0, logger_1.logError)('OpenCode returned no answer for initial help.');
results.push(new result_1.Result({
@@ -53331,6 +53504,7 @@ class CloseIssueAfterMergingUseCase {
try {
const closed = await this.issueRepository.closeIssue(param.owner, param.repo, param.issueNumber, param.tokens.token);
if (closed) {
+ (0, logger_1.logInfo)(`Issue #${param.issueNumber} closed after merging PR #${param.pullRequest.number}.`);
await this.issueRepository.addComment(param.owner, param.repo, param.issueNumber, `This issue was closed after merging #${param.pullRequest.number}.`, param.tokens.token);
result.push(new result_1.Result({
id: this.taskId,
@@ -53342,6 +53516,7 @@ class CloseIssueAfterMergingUseCase {
}));
}
else {
+ (0, logger_1.logDebugInfo)(`Issue #${param.issueNumber} was already closed or close failed after merge.`);
result.push(new result_1.Result({
id: this.taskId,
success: true,
@@ -53350,6 +53525,7 @@ class CloseIssueAfterMergingUseCase {
}
}
catch (error) {
+ (0, logger_1.logError)(`CloseIssueAfterMerging: failed to close issue #${param.issueNumber}.`, error instanceof Error ? { stack: error.stack } : undefined);
result.push(new result_1.Result({
id: this.taskId,
success: false,
@@ -53390,6 +53566,7 @@ class CloseNotAllowedIssueUseCase {
try {
const closed = await this.issueRepository.closeIssue(param.owner, param.repo, param.issueNumber, param.tokens.token);
if (closed) {
+ (0, logger_1.logInfo)(`Issue #${param.issueNumber} closed (author not allowed). Adding comment.`);
await this.issueRepository.addComment(param.owner, param.repo, param.issueNumber, `This issue has been closed because the author is not a member of the project. The user may be banned if the fact is repeated.`, param.tokens.token);
result.push(new result_1.Result({
id: this.taskId,
@@ -53401,6 +53578,7 @@ class CloseNotAllowedIssueUseCase {
}));
}
else {
+ (0, logger_1.logDebugInfo)(`Issue #${param.issueNumber} was already closed or close failed.`);
result.push(new result_1.Result({
id: this.taskId,
success: true,
@@ -53409,6 +53587,7 @@ class CloseNotAllowedIssueUseCase {
}
}
catch (error) {
+ (0, logger_1.logError)(`CloseNotAllowedIssue: failed to close issue #${param.issueNumber}.`, error instanceof Error ? { stack: error.stack } : undefined);
result.push(new result_1.Result({
id: this.taskId,
success: false,
@@ -53641,8 +53820,13 @@ class LinkIssueProjectUseCase {
(0, logger_1.logInfo)(`${(0, task_emoji_1.getTaskEmoji)(this.taskId)} Executing ${this.taskId}.`);
const result = [];
const columnName = param.project.getProjectColumnIssueCreated();
+ const projects = param.project.getProjects();
+ if (projects.length === 0) {
+ (0, logger_1.logDebugInfo)('LinkIssueProject: no projects configured; skipping.');
+ return result;
+ }
try {
- for (const project of param.project.getProjects()) {
+ for (const project of projects) {
const issueId = await this.issueRepository.getId(param.owner, param.repo, param.issue.number, param.tokens.token);
let actionDone = await this.projectRepository.linkContentId(project, issueId, param.tokens.token);
if (actionDone) {
@@ -53662,6 +53846,7 @@ class LinkIssueProjectUseCase {
}));
}
else {
+ (0, logger_1.logWarn)(`LinkIssueProject: linked issue to project "${project?.title}" but move to column "${columnName}" failed.`);
result.push(new result_1.Result({
id: this.taskId,
success: true,
@@ -53670,6 +53855,9 @@ class LinkIssueProjectUseCase {
}));
}
}
+ else {
+ (0, logger_1.logDebugInfo)(`LinkIssueProject: issue already linked to project "${project?.title}" or link failed.`);
+ }
}
return result;
}
@@ -53872,6 +54060,7 @@ class PrepareBranchesUseCase {
}
}
else {
+ (0, logger_1.logWarn)('PrepareBranches: hotfix requested but no tag or base version found.');
result.push(new result_1.Result({
id: this.taskId,
success: false,
@@ -53955,6 +54144,7 @@ class PrepareBranchesUseCase {
}
}
else {
+ (0, logger_1.logWarn)('PrepareBranches: release requested but no release version found.');
result.push(new result_1.Result({
id: this.taskId,
success: false,
@@ -54034,7 +54224,7 @@ class PrepareBranchesUseCase {
return result;
}
catch (error) {
- (0, logger_1.logError)(error);
+ (0, logger_1.logError)(`PrepareBranches: error preparing branches for issue #${param.issueNumber}.`, error instanceof Error ? { stack: error.stack } : undefined);
result.push(new result_1.Result({
id: this.taskId,
success: false,
@@ -54087,8 +54277,10 @@ class RemoveIssueBranchesUseCase {
if (!matchingBranch)
continue;
branchName = matchingBranch;
+ (0, logger_1.logDebugInfo)(`RemoveIssueBranches: attempting to remove branch ${branchName}.`);
const removed = await this.branchRepository.removeBranch(param.owner, param.repo, branchName, param.tokens.token);
if (removed) {
+ (0, logger_1.logDebugInfo)(`RemoveIssueBranches: removed branch ${branchName}.`);
results.push(new result_1.Result({
id: this.taskId,
success: true,
@@ -54108,16 +54300,19 @@ class RemoveIssueBranchesUseCase {
}));
}
}
+ else {
+ (0, logger_1.logWarn)(`RemoveIssueBranches: failed to remove branch ${branchName}.`);
+ }
}
}
catch (error) {
- (0, logger_1.logError)(error);
+ (0, logger_1.logError)(`RemoveIssueBranches: error removing branches for issue #${param.issueNumber}.`, error instanceof Error ? { stack: error.stack } : undefined);
results.push(new result_1.Result({
id: this.taskId,
success: false,
executed: true,
steps: [
- `Tried to update issue's title, but there was a problem.`,
+ `Tried to remove issue branches, but there was a problem.`,
],
error: error,
}));
@@ -54360,6 +54555,7 @@ If you'd like this comment to be translated again, please delete the entire comm
}
const locale = param.locale.issue;
let prompt = (0, prompts_1.getCheckCommentLanguagePrompt)({ locale, commentBody });
+ (0, logger_1.logDebugInfo)(`CheckIssueCommentLanguage: locale=${locale}, comment length=${commentBody.length}. Calling OpenCode for language check.`);
const checkResponse = await this.aiRepository.askAgent(param.ai, ai_repository_1.OPENCODE_AGENT_PLAN, prompt, {
expectJson: true,
schema: ai_repository_1.LANGUAGE_CHECK_RESPONSE_SCHEMA,
@@ -54370,6 +54566,7 @@ If you'd like this comment to be translated again, please delete the entire comm
typeof checkResponse.status === 'string'
? checkResponse.status
: '';
+ (0, logger_1.logDebugInfo)(`CheckIssueCommentLanguage: language check status=${status}.`);
if (status === 'done') {
results.push(new result_1.Result({
id: this.taskId,
@@ -54379,6 +54576,7 @@ If you'd like this comment to be translated again, please delete the entire comm
return results;
}
prompt = (0, prompts_1.getTranslateCommentPrompt)({ locale, commentBody });
+ (0, logger_1.logDebugInfo)(`CheckIssueCommentLanguage: translating comment (prompt length=${prompt.length}).`);
const translationResponse = await this.aiRepository.askAgent(param.ai, ai_repository_1.OPENCODE_AGENT_PLAN, prompt, {
expectJson: true,
schema: ai_repository_1.TRANSLATION_RESPONSE_SCHEMA,
@@ -54389,6 +54587,7 @@ If you'd like this comment to be translated again, please delete the entire comm
typeof translationResponse.translatedText === 'string'
? translationResponse.translatedText.trim()
: '';
+ (0, logger_1.logDebugInfo)(`CheckIssueCommentLanguage: translation received. translatedText length=${translatedText.length}. Full translated text:\n${translatedText}`);
if (!translatedText) {
const reason = translationResponse != null &&
typeof translationResponse === 'object' &&
@@ -54657,8 +54856,13 @@ class LinkPullRequestProjectUseCase {
(0, logger_1.logInfo)(`${(0, task_emoji_1.getTaskEmoji)(this.taskId)} Executing ${this.taskId}.`);
const result = [];
const columnName = param.project.getProjectColumnPullRequestCreated();
+ const projects = param.project.getProjects();
+ if (projects.length === 0) {
+ (0, logger_1.logDebugInfo)('LinkPullRequestProject: no projects configured; skipping.');
+ return result;
+ }
try {
- for (const project of param.project.getProjects()) {
+ for (const project of projects) {
let actionDone = await this.projectRepository.linkContentId(project, param.pullRequest.id, param.tokens.token);
if (actionDone) {
/**
@@ -54677,6 +54881,7 @@ class LinkPullRequestProjectUseCase {
}));
}
else {
+ (0, logger_1.logWarn)(`LinkPullRequestProject: linked PR to project "${project?.title}" but move to column "${columnName}" failed.`);
result.push(new result_1.Result({
id: this.taskId,
success: false,
@@ -54687,6 +54892,9 @@ class LinkPullRequestProjectUseCase {
}));
}
}
+ else {
+ (0, logger_1.logDebugInfo)(`LinkPullRequestProject: PR already linked to project "${project?.title}" or link failed.`);
+ }
}
return result;
}
@@ -54818,7 +55026,7 @@ class UpdatePullRequestDescriptionUseCase {
this.projectRepository = new project_repository_1.ProjectRepository();
}
async invoke(param) {
- (0, logger_1.logDebugInfo)(`${(0, task_emoji_1.getTaskEmoji)(this.taskId)} Executing ${this.taskId}.`);
+ (0, logger_1.logInfo)(`${(0, task_emoji_1.getTaskEmoji)(this.taskId)} Executing ${this.taskId} (AI PR description).`);
const result = [];
try {
const prNumber = param.pullRequest.number;
@@ -54869,10 +55077,12 @@ class UpdatePullRequestDescriptionUseCase {
issueNumber: String(param.issueNumber),
issueDescription,
});
+ (0, logger_1.logDebugInfo)(`UpdatePullRequestDescription: prompt length=${prompt.length}, issue description length=${issueDescription.length}. Calling OpenCode Plan agent.`);
const agentResponse = await this.aiRepository.askAgent(param.ai, ai_repository_1.OPENCODE_AGENT_PLAN, prompt);
const prBody = typeof agentResponse === 'string'
? agentResponse
: (agentResponse && String(agentResponse.description)) || '';
+ (0, logger_1.logDebugInfo)(`UpdatePullRequestDescription: OpenCode response received. Description length=${prBody.length}. Full description:\n${prBody}`);
if (!prBody.trim()) {
result.push(new result_1.Result({
id: this.taskId,
@@ -54943,6 +55153,7 @@ If you'd like this comment to be translated again, please delete the entire comm
}
const locale = param.locale.pullRequest;
let prompt = (0, prompts_1.getCheckCommentLanguagePrompt)({ locale, commentBody });
+ (0, logger_1.logDebugInfo)(`CheckPullRequestCommentLanguage: locale=${locale}, comment length=${commentBody.length}. Calling OpenCode for language check.`);
const checkResponse = await this.aiRepository.askAgent(param.ai, ai_repository_1.OPENCODE_AGENT_PLAN, prompt, {
expectJson: true,
schema: ai_repository_1.LANGUAGE_CHECK_RESPONSE_SCHEMA,
@@ -54953,6 +55164,7 @@ If you'd like this comment to be translated again, please delete the entire comm
typeof checkResponse.status === 'string'
? checkResponse.status
: '';
+ (0, logger_1.logDebugInfo)(`CheckPullRequestCommentLanguage: language check status=${status}.`);
if (status === 'done') {
results.push(new result_1.Result({
id: this.taskId,
@@ -54962,6 +55174,7 @@ If you'd like this comment to be translated again, please delete the entire comm
return results;
}
prompt = (0, prompts_1.getTranslateCommentPrompt)({ locale, commentBody });
+ (0, logger_1.logDebugInfo)(`CheckPullRequestCommentLanguage: translating comment (prompt length=${prompt.length}).`);
const translationResponse = await this.aiRepository.askAgent(param.ai, ai_repository_1.OPENCODE_AGENT_PLAN, prompt, {
expectJson: true,
schema: ai_repository_1.TRANSLATION_RESPONSE_SCHEMA,
@@ -54972,6 +55185,7 @@ If you'd like this comment to be translated again, please delete the entire comm
typeof translationResponse.translatedText === 'string'
? translationResponse.translatedText.trim()
: '';
+ (0, logger_1.logDebugInfo)(`CheckPullRequestCommentLanguage: translation received. translatedText length=${translatedText.length}. Full translated text:\n${translatedText}`);
if (!translatedText) {
const reason = translationResponse != null &&
typeof translationResponse === 'object' &&
@@ -54997,6 +55211,34 @@ ${this.translatedKey}
exports.CheckPullRequestCommentLanguageUseCase = CheckPullRequestCommentLanguageUseCase;
+/***/ }),
+
+/***/ 4467:
+/***/ ((__unused_webpack_module, exports) => {
+
+"use strict";
+
+/**
+ * Watermark appended to comments (issues and PRs) to attribute Copilot.
+ * Bugbot comments include commit link and note about auto-update on new commits.
+ */
+Object.defineProperty(exports, "__esModule", ({ value: true }));
+exports.COPILOT_MARKETPLACE_URL = void 0;
+exports.getCommentWatermark = getCommentWatermark;
+exports.COPILOT_MARKETPLACE_URL = 'https://github.com/marketplace/actions/copilot-github-with-super-powers';
+const DEFAULT_WATERMARK = `Made with ❤️ by [vypdev/copilot](${exports.COPILOT_MARKETPLACE_URL})`;
+function commitUrl(owner, repo, sha) {
+ return `https://github.com/${encodeURIComponent(owner)}/${encodeURIComponent(repo)}/commit/${sha}`;
+}
+function getCommentWatermark(options) {
+ if (options?.commitSha && options?.owner && options?.repo) {
+ const url = commitUrl(options.owner, options.repo, options.commitSha);
+ return `Written by [vypdev/copilot](${exports.COPILOT_MARKETPLACE_URL}) for commit [${options.commitSha}](${url}). This will update automatically on new commits.`;
+ }
+ return DEFAULT_WATERMARK;
+}
+
+
/***/ }),
/***/ 8593:
@@ -55513,6 +55755,9 @@ exports.getRandomElement = getRandomElement;
"use strict";
Object.defineProperty(exports, "__esModule", ({ value: true }));
+exports.getAccumulatedLogEntries = getAccumulatedLogEntries;
+exports.getAccumulatedLogsAsText = getAccumulatedLogsAsText;
+exports.clearAccumulatedLogs = clearAccumulatedLogs;
exports.setGlobalLoggerDebug = setGlobalLoggerDebug;
exports.setStructuredLogging = setStructuredLogging;
exports.logInfo = logInfo;
@@ -55525,6 +55770,29 @@ exports.logDebugError = logDebugError;
let loggerDebug = false;
let loggerRemote = false;
let structuredLogging = false;
+const accumulatedLogEntries = [];
+/** Removes markdown code fences from message so log output does not break when visualized (e.g. GitHub Actions). */
+function sanitizeLogMessage(message) {
+ return message.replace(/```/g, '');
+}
+function pushLogEntry(entry) {
+ accumulatedLogEntries.push(entry);
+}
+function getAccumulatedLogEntries() {
+ return [...accumulatedLogEntries];
+}
+function getAccumulatedLogsAsText() {
+ return accumulatedLogEntries
+ .map((e) => {
+ const prefix = `[${e.level.toUpperCase()}]`;
+ const meta = e.metadata?.stack ? `\n${String(e.metadata.stack)}` : '';
+ return `${prefix} ${e.message}${meta}`;
+ })
+ .join('\n');
+}
+function clearAccumulatedLogs() {
+ accumulatedLogEntries.length = 0;
+}
function setGlobalLoggerDebug(debug, isRemote = false) {
loggerDebug = debug;
loggerRemote = isRemote;
@@ -55535,33 +55803,39 @@ function setStructuredLogging(enabled) {
function formatStructuredLog(entry) {
return JSON.stringify(entry);
}
-function logInfo(message, previousWasSingleLine = false, metadata) {
+function logInfo(message, previousWasSingleLine = false, metadata, skipAccumulation) {
+ const sanitized = sanitizeLogMessage(message);
+ if (!skipAccumulation) {
+ pushLogEntry({ level: 'info', message: sanitized, timestamp: Date.now(), metadata });
+ }
if (previousWasSingleLine && !loggerRemote) {
console.log();
}
if (structuredLogging) {
console.log(formatStructuredLog({
level: 'info',
- message,
+ message: sanitized,
timestamp: Date.now(),
metadata
}));
}
else {
- console.log(message);
+ console.log(sanitized);
}
}
function logWarn(message, metadata) {
+ const sanitized = sanitizeLogMessage(message);
+ pushLogEntry({ level: 'warn', message: sanitized, timestamp: Date.now(), metadata });
if (structuredLogging) {
console.warn(formatStructuredLog({
level: 'warn',
- message,
+ message: sanitized,
timestamp: Date.now(),
metadata
}));
}
else {
- console.warn(message);
+ console.warn(sanitized);
}
}
function logWarning(message) {
@@ -55569,33 +55843,38 @@ function logWarning(message) {
}
function logError(message, metadata) {
const errorMessage = message instanceof Error ? message.message : String(message);
+ const sanitized = sanitizeLogMessage(errorMessage);
+ const metaWithStack = {
+ ...metadata,
+ stack: message instanceof Error ? message.stack : undefined
+ };
+ pushLogEntry({ level: 'error', message: sanitized, timestamp: Date.now(), metadata: metaWithStack });
if (structuredLogging) {
console.error(formatStructuredLog({
level: 'error',
- message: errorMessage,
+ message: sanitized,
timestamp: Date.now(),
- metadata: {
- ...metadata,
- stack: message instanceof Error ? message.stack : undefined
- }
+ metadata: metaWithStack
}));
}
else {
- console.error(errorMessage);
+ console.error(sanitized);
}
}
function logDebugInfo(message, previousWasSingleLine = false, metadata) {
if (loggerDebug) {
+ const sanitized = sanitizeLogMessage(message);
+ pushLogEntry({ level: 'debug', message: sanitized, timestamp: Date.now(), metadata });
if (structuredLogging) {
console.log(formatStructuredLog({
level: 'debug',
- message,
+ message: sanitized,
timestamp: Date.now(),
metadata
}));
}
else {
- logInfo(message, previousWasSingleLine);
+ logInfo(sanitized, previousWasSingleLine, undefined, true);
}
}
}
diff --git a/build/github_action/src/data/repository/issue_repository.d.ts b/build/github_action/src/data/repository/issue_repository.d.ts
index dbd004e0..f69e3668 100644
--- a/build/github_action/src/data/repository/issue_repository.d.ts
+++ b/build/github_action/src/data/repository/issue_repository.d.ts
@@ -37,8 +37,12 @@ export declare class IssueRepository {
isIssue: (owner: string, repository: string, issueNumber: number, token: string) => Promise<boolean>;
isPullRequest: (owner: string, repository: string, issueNumber: number, token: string) => Promise<boolean>;
getHeadBranch: (owner: string, repository: string, issueNumber: number, token: string) => Promise;
- addComment: (owner: string, repository: string, issueNumber: number, comment: string, token: string) => Promise;
- updateComment: (owner: string, repository: string, issueNumber: number, commentId: number, comment: string, token: string) => Promise;
+ addComment: (owner: string, repository: string, issueNumber: number, comment: string, token: string, options?: {
+ commitSha?: string;
+ }) => Promise;
+ updateComment: (owner: string, repository: string, issueNumber: number, commentId: number, comment: string, token: string, options?: {
+ commitSha?: string;
+ }) => Promise;
/**
* Lists all comments on an issue (for bugbot: find existing findings by marker).
* Uses pagination to fetch every comment (default API returns only 30 per page).
diff --git a/build/github_action/src/data/repository/merge_repository.d.ts b/build/github_action/src/data/repository/merge_repository.d.ts
index a152b014..d13d541a 100644
--- a/build/github_action/src/data/repository/merge_repository.d.ts
+++ b/build/github_action/src/data/repository/merge_repository.d.ts
@@ -1,7 +1,13 @@
import { Result } from '../model/result';
/**
- * Repository for merging branches (via PR or direct merge).
- * Isolated to allow unit tests with mocked Octokit.
+ * Repository for merging branches: creates a PR, waits for that PR's check runs (or status checks),
+ * then merges the PR; on failure, falls back to a direct Git merge.
+ *
+ * Check runs are filtered by PR (pull_requests) so we only wait for the current PR's checks,
+ * not those of another PR sharing the same head (e.g. release→main vs release→develop).
+ * If the PR has no check runs after a short wait, we proceed to merge (branch may have no required checks).
+ *
+ * @see docs/single-actions/deploy-label-and-merge.mdx for the deploy flow and check-wait behaviour.
*/
export declare class MergeRepository {
mergeBranch: (owner: string, repository: string, head: string, base: string, timeout: number, token: string) => Promise;
diff --git a/build/github_action/src/usecase/actions/deployed_action_use_case.d.ts b/build/github_action/src/usecase/actions/deployed_action_use_case.d.ts
index bb20b27c..80f821f5 100644
--- a/build/github_action/src/usecase/actions/deployed_action_use_case.d.ts
+++ b/build/github_action/src/usecase/actions/deployed_action_use_case.d.ts
@@ -1,6 +1,16 @@
import { Execution } from "../../data/model/execution";
import { Result } from "../../data/model/result";
import { ParamUseCase } from "../base/param_usecase";
+/**
+ * Single action run after a successful deployment (triggered with the "deployed" action and an issue number).
+ *
+ * Requires the issue to have the "deploy" label and not already have the "deployed" label. Then:
+ * 1. Replaces the "deploy" label with "deployed".
+ * 2. If a release or hotfix branch is configured: merges it into default and develop (each via PR, waiting for that PR's checks).
+ * 3. Closes the issue only when all merges succeed.
+ *
+ * @see docs/single-actions/deploy-label-and-merge.mdx for the full flow and how merge/check waiting works.
+ */
export declare class DeployedActionUseCase implements ParamUseCase<Execution, Result[]> {
taskId: string;
private issueRepository;
diff --git a/build/github_action/src/usecase/steps/commit/bugbot/publish_findings_use_case.d.ts b/build/github_action/src/usecase/steps/commit/bugbot/publish_findings_use_case.d.ts
index 22a093cc..712e16b8 100644
--- a/build/github_action/src/usecase/steps/commit/bugbot/publish_findings_use_case.d.ts
+++ b/build/github_action/src/usecase/steps/commit/bugbot/publish_findings_use_case.d.ts
@@ -12,6 +12,8 @@ export interface PublishFindingsParam {
execution: Execution;
context: BugbotContext;
findings: BugbotFinding[];
+ /** Commit SHA for bugbot watermark (commit link). When set, comment uses "for commit ..." watermark. */
+ commitSha?: string;
/** When findings were limited by max comments, add one summary comment with this overflow info. */
overflowCount?: number;
overflowTitles?: string[];
diff --git a/build/github_action/src/utils/comment_watermark.d.ts b/build/github_action/src/utils/comment_watermark.d.ts
new file mode 100644
index 00000000..858f7dc1
--- /dev/null
+++ b/build/github_action/src/utils/comment_watermark.d.ts
@@ -0,0 +1,11 @@
+/**
+ * Watermark appended to comments (issues and PRs) to attribute Copilot.
+ * Bugbot comments include commit link and note about auto-update on new commits.
+ */
+export declare const COPILOT_MARKETPLACE_URL = "https://github.com/marketplace/actions/copilot-github-with-super-powers";
+export interface BugbotWatermarkOptions {
+ commitSha: string;
+ owner: string;
+ repo: string;
+}
+export declare function getCommentWatermark(options?: BugbotWatermarkOptions): string;
diff --git a/build/github_action/src/utils/logger.d.ts b/build/github_action/src/utils/logger.d.ts
index cb1bcc34..14c6d4d8 100644
--- a/build/github_action/src/utils/logger.d.ts
+++ b/build/github_action/src/utils/logger.d.ts
@@ -4,9 +4,12 @@ export interface LogEntry {
timestamp: number;
metadata?: Record<string, unknown>;
}
+export declare function getAccumulatedLogEntries(): LogEntry[];
+export declare function getAccumulatedLogsAsText(): string;
+export declare function clearAccumulatedLogs(): void;
export declare function setGlobalLoggerDebug(debug: boolean, isRemote?: boolean): void;
export declare function setStructuredLogging(enabled: boolean): void;
-export declare function logInfo(message: string, previousWasSingleLine?: boolean, metadata?: Record<string, unknown>): void;
+export declare function logInfo(message: string, previousWasSingleLine?: boolean, metadata?: Record<string, unknown>, skipAccumulation?: boolean): void;
export declare function logWarn(message: string, metadata?: Record<string, unknown>): void;
export declare function logWarning(message: string): void;
export declare function logError(message: unknown, metadata?: Record<string, unknown>): void;
diff --git a/docs/single-actions/deploy-label-and-merge.mdx b/docs/single-actions/deploy-label-and-merge.mdx
new file mode 100644
index 00000000..9cba6484
--- /dev/null
+++ b/docs/single-actions/deploy-label-and-merge.mdx
@@ -0,0 +1,69 @@
+---
+title: Deploy label and merge flow
+description: How the deploy and deployed labels work and how post-deploy merges and check runs are handled.
+---
+
+# Deploy label and merge flow
+
+This page describes how the **deploy** label is used and what happens when you run the **deployed** single action after a successful deployment. It also explains how branch merges and check runs are handled so that both release→default and release→develop (or hotfix equivalents) complete correctly.
+
+## The deploy label
+
+- **`deploy`** — Applied to the issue when the release (or hotfix) is ready to be deployed. Your CI/CD or team adds this label when deployment has been triggered or is about to be.
+- **`deployed`** — Set by Copilot when the **deployed** single action runs and the issue had the **deploy** label. It indicates "deployment is done and post-deploy steps have been run."
+
+The **deployed** single action is intended to be run **after a successful deployment** (e.g. from a pipeline step or a manual trigger). It requires:
+
+- An issue number (single action is run with `single-action-issue: <issue-number>`).
+- The issue must have the **deploy** label and must **not** already have the **deployed** label.
+
+## What the deployed action does
+
+1. **Label update**
+ Replaces the **deploy** label with **deployed** on the issue.
+
+2. **Branch merges** (if a release or hotfix branch is configured):
+ - **Release:** merges the release branch into the default branch (e.g. `main`), then into the development branch (e.g. `develop`).
+ - **Hotfix:** merges the hotfix branch into the default branch, then merges the default branch into the development branch.
+
+3. **Issue closure**
+ If **all** merge operations succeed, the issue is closed. If any merge fails, the issue is left open and the action reports that one or more merge operations failed.
+
+Each merge is done via a **pull request**: create PR, wait for checks (see below), then merge the PR. If the PR workflow fails (e.g. checks fail or timeout), the code falls back to a **direct** Git merge when possible.
+
+## Waiting for checks before merging
+
+For each PR we create (e.g. release→main, release→develop), we wait until the PR is ready to merge before calling the merge API.
+
+### Per-PR check runs
+
+- GitHub's Checks API returns check runs **by ref** (branch), not by PR. The same branch can be the head of **multiple PRs** (e.g. release→main and release→develop).
+- We **only consider check runs that belong to the current PR**, using the `pull_requests` array on each check run (filter by `pull_request.number === this PR`). We do **not** use check runs from another PR (e.g. the first merge) to decide that the second PR is ready.
+- We wait until all check runs for **this** PR are completed and none have failed. Only then do we merge.
+
+### When the PR has no check runs
+
+- If there are **no** check runs on the ref, we use **commit status checks** (legacy) and merge when none are pending.
+- If there **are** check runs on the ref but **none** for this PR (e.g. they are from another PR with the same head, or this base branch has no required checks), we wait a short number of polls (~30 seconds). If no check runs appear for this PR in that window, we assume this PR does not require check runs and **proceed to merge**. If the branch actually requires checks, GitHub will reject the merge and we fall back to the direct merge path (which may also fail under branch protection).
+
+### Timeout
+
+- A configurable **merge timeout** (input `merge-timeout`, default 600 seconds) limits how long we wait for checks **per merge**. If we hit the timeout, we throw and the code attempts a direct merge as fallback.
+
+## Summary table
+
+| Scenario | Behaviour |
+|----------|-----------|
+| PR has check runs for this PR | Wait until all are completed and none failed, then merge. |
+| PR has no check runs on ref | Use status checks; if none pending, merge. |
+| Check runs on ref but none for this PR | Wait a few polls (~30 s); if still none, proceed to merge (branch may have no required checks). |
+| Timeout waiting for checks | Attempt direct merge. |
+| PR merge fails, direct merge fails | Return failure; issue is not closed. |
+
+## Related code and tests
+
+- **Use case:** `src/usecase/actions/deployed_action_use_case.ts` — deploy label handling, merge order, issue close.
+- **Merge and checks:** `src/data/repository/merge_repository.ts` — PR creation, per-PR check filtering, status checks fallback, direct merge fallback.
+- **Tests:**
+ - `src/usecase/actions/__tests__/deployed_action_use_case.test.ts` — deploy label validation, release/hotfix merge order, close on success, no close on merge failure.
+ - `src/data/repository/__tests__/merge_repository.test.ts` — check runs per PR, no check runs, timeout, direct merge fallback.
diff --git a/docs/single-actions/index.mdx b/docs/single-actions/index.mdx
index 160b7ee2..c8557812 100644
--- a/docs/single-actions/index.mdx
+++ b/docs/single-actions/index.mdx
@@ -14,6 +14,9 @@ When you set the **`single-action`** input (and any required targets such as `si
single-action, single-action-issue, single-action-version, and other inputs.
+
+ Deploy/deployed labels, post-deploy merges (release/hotfix → default and develop), and how we wait for checks per PR.
+
Run from a GitHub Actions workflow or from the giik CLI.
@@ -38,5 +41,6 @@ When you set the **`single-action`** input (and any required targets such as `si
## Next steps
- **[Available actions](/single-actions/available-actions)** — Complete list with inputs and descriptions.
+- **[Deploy label and merge flow](/single-actions/deploy-label-and-merge)** — Deploy/deployed labels and post-deploy merges.
- **[Workflow & CLI](/single-actions/workflow-and-cli)** — How to run from a workflow and from the CLI.
- **[Examples](/single-actions/examples)** — YAML and CLI examples.
diff --git a/src/actions/__tests__/common_action.test.ts b/src/actions/__tests__/common_action.test.ts
index 78a9af1d..47fab29d 100644
--- a/src/actions/__tests__/common_action.test.ts
+++ b/src/actions/__tests__/common_action.test.ts
@@ -21,6 +21,8 @@ jest.mock('@actions/core', () => ({
jest.mock('../../utils/logger', () => ({
logInfo: jest.fn(),
logError: jest.fn(),
+ logDebugInfo: jest.fn(),
+ clearAccumulatedLogs: jest.fn(),
}));
jest.mock('../../utils/queue_utils', () => ({
@@ -66,6 +68,7 @@ jest.mock('../../usecase/commit_use_case', () => ({
}));
const core = require('@actions/core');
+const logger = require('../../utils/logger');
const { waitForPreviousRuns } = require('../../utils/queue_utils');
function mockExecution(overrides: Record<string, unknown> = {}): Execution {
@@ -103,11 +106,12 @@ describe('mainRun', () => {
mockCommitInvoke.mockResolvedValue([]);
});
- it('calls execution.setup()', async () => {
+ it('calls execution.setup() and clearAccumulatedLogs()', async () => {
const setupMock = jest.fn().mockResolvedValue(undefined);
const execution = mockExecution({ setup: setupMock });
await mainRun(execution);
expect(setupMock).toHaveBeenCalledTimes(1);
+ expect(logger.clearAccumulatedLogs).toHaveBeenCalledTimes(1);
});
it('waits for previous runs when welcome is false', async () => {
diff --git a/src/actions/__tests__/github_action.test.ts b/src/actions/__tests__/github_action.test.ts
index 9e8ecc42..045a8e54 100644
--- a/src/actions/__tests__/github_action.test.ts
+++ b/src/actions/__tests__/github_action.test.ts
@@ -15,6 +15,7 @@ jest.mock('@actions/core', () => ({
jest.mock('../../utils/logger', () => ({
logInfo: jest.fn(),
logError: jest.fn(),
+ logDebugInfo: jest.fn(),
}));
jest.mock('../../utils/opencode_server', () => ({
diff --git a/src/actions/common_action.ts b/src/actions/common_action.ts
index b6608dde..6c157b45 100644
--- a/src/actions/common_action.ts
+++ b/src/actions/common_action.ts
@@ -7,16 +7,22 @@ import { IssueUseCase } from '../usecase/issue_use_case';
import { PullRequestReviewCommentUseCase } from '../usecase/pull_request_review_comment_use_case';
import { PullRequestUseCase } from '../usecase/pull_request_use_case';
import { SingleActionUseCase } from '../usecase/single_action_use_case';
-import { logError, logInfo } from '../utils/logger';
+import { clearAccumulatedLogs, logDebugInfo, logError, logInfo } from '../utils/logger';
import { TITLE } from '../utils/constants';
import chalk from 'chalk';
import boxen from 'boxen';
import { waitForPreviousRuns } from '../utils/queue_utils';
export async function mainRun(execution: Execution): Promise<Result[]> {
- const results: Result[] = []
-
+ const results: Result[] = [];
+
+ logInfo('GitHub Action: starting main run.');
+ logDebugInfo(`Event: ${execution.eventName}, actor: ${execution.actor}, repo: ${execution.owner}/${execution.repo}, debug: ${execution.debug}`);
+
await execution.setup();
+ clearAccumulatedLogs();
+
+ logDebugInfo(`Setup done. Issue number: ${execution.issueNumber}, isSingleAction: ${execution.isSingleAction}, isIssue: ${execution.isIssue}, isPullRequest: ${execution.isPullRequest}, isPush: ${execution.isPush}`);
if (!execution.welcome) {
/**
@@ -30,20 +36,21 @@ export async function mainRun(execution: Execution): Promise {
if (execution.runnedByToken) {
if (execution.isSingleAction && execution.singleAction.validSingleAction) {
- logInfo(`User from token (${execution.tokenUser}) matches actor. Executing single action.`);
+ logInfo(`User from token (${execution.tokenUser}) matches actor. Executing single action: ${execution.singleAction.currentSingleAction}.`);
results.push(...await new SingleActionUseCase().invoke(execution));
+ logInfo(`Single action finished. Results: ${results.length}.`);
return results;
}
- logInfo(`User from token (${execution.tokenUser}) matches actor. Ignoring.`);
+ logInfo(`User from token (${execution.tokenUser}) matches actor. Ignoring (not a valid single action).`);
return results;
}
-
if (execution.issueNumber === -1) {
if (execution.isSingleAction && execution.singleAction.isSingleActionWithoutIssue) {
+ logInfo('No issue number; running single action without issue.');
results.push(...await new SingleActionUseCase().invoke(execution));
} else {
- logInfo(`Issue number not found. Skipping.`);
+ logInfo('Issue number not found. Skipping.');
}
return results;
}
@@ -67,28 +74,39 @@ export async function mainRun(execution: Execution): Promise {
try {
if (execution.isSingleAction) {
+ logInfo(`Running SingleActionUseCase (action: ${execution.singleAction.currentSingleAction}).`);
results.push(...await new SingleActionUseCase().invoke(execution));
} else if (execution.isIssue) {
if (execution.issue.isIssueComment) {
+ logInfo(`Running IssueCommentUseCase for issue #${execution.issue.number}.`);
results.push(...await new IssueCommentUseCase().invoke(execution));
} else {
+ logInfo(`Running IssueUseCase for issue #${execution.issueNumber}.`);
results.push(...await new IssueUseCase().invoke(execution));
}
} else if (execution.isPullRequest) {
if (execution.pullRequest.isPullRequestReviewComment) {
+ logInfo(`Running PullRequestReviewCommentUseCase for PR #${execution.pullRequest.number}.`);
results.push(...await new PullRequestReviewCommentUseCase().invoke(execution));
} else {
+ logInfo(`Running PullRequestUseCase for PR #${execution.pullRequest.number}.`);
results.push(...await new PullRequestUseCase().invoke(execution));
}
} else if (execution.isPush) {
+ logDebugInfo(`Push event. Branch: ${execution.commit?.branch ?? 'unknown'}, commits: ${execution.commit?.commits?.length ?? 0}, issue number: ${execution.issueNumber}.`);
+ logInfo('Running CommitUseCase.');
results.push(...await new CommitUseCase().invoke(execution));
} else {
+ logError(`Action not handled. Event: ${execution.eventName}.`);
core.setFailed(`Action not handled.`);
}
+ const totalSteps = results.reduce((acc, r) => acc + (r.steps?.length ?? 0), 0);
+ logInfo(`Main run finished. Results: ${results.length}, total steps: ${totalSteps}.`);
return results;
} catch (error: unknown) {
const msg = error instanceof Error ? error.message : String(error);
+ logError(`Main run failed: ${msg}`, error instanceof Error ? { stack: (error as Error).stack } : undefined);
core.setFailed(msg);
return [];
}
diff --git a/src/actions/github_action.ts b/src/actions/github_action.ts
index 2584fca7..f4bf7815 100644
--- a/src/actions/github_action.ts
+++ b/src/actions/github_action.ts
@@ -23,17 +23,22 @@ import { ProjectRepository } from '../data/repository/project_repository';
import { PublishResultUseCase } from '../usecase/steps/common/publish_resume_use_case';
import { StoreConfigurationUseCase } from '../usecase/steps/common/store_configuration_use_case';
import { BUGBOT_MAX_COMMENTS, BUGBOT_MIN_SEVERITY, DEFAULT_IMAGE_CONFIG, INPUT_KEYS, OPENCODE_DEFAULT_MODEL } from '../utils/constants';
-import { logError, logInfo } from '../utils/logger';
+import { logDebugInfo, logError, logInfo } from '../utils/logger';
import { startOpencodeServer, type ManagedOpencodeServer } from '../utils/opencode_server';
import { mainRun } from './common_action';
export async function runGitHubAction(): Promise<void> {
const projectRepository = new ProjectRepository();
+ logInfo('GitHub Action: runGitHubAction started.');
+
/**
* Debug
*/
- const debug = getInput(INPUT_KEYS.DEBUG) == 'true'
+ const debug = getInput(INPUT_KEYS.DEBUG) == 'true';
+ if (debug) {
+ logInfo('Debug mode is enabled. Full logs will be included in the report.');
+ }
/**
* Single action
@@ -58,8 +63,12 @@ export async function runGitHubAction(): Promise {
let managedOpencodeServer: ManagedOpencodeServer | undefined;
if (opencodeStartServer) {
+ logInfo('Starting managed OpenCode server...');
managedOpencodeServer = await startOpencodeServer({ cwd: process.cwd() });
opencodeServerUrl = managedOpencodeServer.url;
+ logInfo(`OpenCode server started at ${opencodeServerUrl}.`);
+ } else {
+ logDebugInfo(`Using OpenCode server URL: ${opencodeServerUrl}, model: ${opencodeModel}.`);
}
try {
@@ -642,6 +651,8 @@ export async function runGitHubAction(): Promise {
undefined,
)
+ logDebugInfo(`Execution built. Event will be resolved in mainRun. Single action: ${execution.singleAction.currentSingleAction ?? 'none'}, AI PR description: ${execution.ai.getAiPullRequestDescription()}, bugbot min severity: ${execution.ai.getBugbotMinSeverity()}.`);
+
const results: Result[] = await mainRun(execution);
await finishWithResults(execution, results);
@@ -655,9 +666,13 @@ export async function runGitHubAction(): Promise {
}
async function finishWithResults(execution: Execution, results: Result[]): Promise<void> {
+ const stepCount = results.reduce((acc, r) => acc + (r.steps?.length ?? 0), 0);
+ const errorCount = results.reduce((acc, r) => acc + (r.errors?.length ?? 0), 0);
+ logInfo(`Publishing result: ${results.length} result(s), ${stepCount} step(s), ${errorCount} error(s).`);
+
execution.currentConfiguration.results = results;
- await new PublishResultUseCase().invoke(execution)
- await new StoreConfigurationUseCase().invoke(execution)
+ await new PublishResultUseCase().invoke(execution);
+ await new StoreConfigurationUseCase().invoke(execution);
logInfo('Configuration stored. Finishing.');
/**
diff --git a/src/data/model/__tests__/ai.test.ts b/src/data/model/__tests__/ai.test.ts
new file mode 100644
index 00000000..ca782def
--- /dev/null
+++ b/src/data/model/__tests__/ai.test.ts
@@ -0,0 +1,185 @@
+import { OPENCODE_DEFAULT_MODEL } from '../../../utils/constants';
+import { Ai } from '../ai';
+
+describe('Ai', () => {
+ const defaultArgs: [
+ string,
+ string,
+ boolean,
+ boolean,
+ string[],
+ boolean,
+ string,
+ number,
+ ] = [
+ 'https://opencode.example',
+ 'opencode/kimi-k2.5-free',
+ true,
+ false,
+ ['*.min.js'],
+ true,
+ 'warning',
+ 10,
+ ];
+
+ function createAi(
+ overrides: Partial<{
+ opencodeServerUrl: string;
+ opencodeModel: string;
+ aiPullRequestDescription: boolean;
+ aiMembersOnly: boolean;
+ aiIgnoreFiles: string[];
+ aiIncludeReasoning: boolean;
+ bugbotMinSeverity: string;
+ bugbotCommentLimit: number;
+ bugbotFixVerifyCommands: string[];
+ }> = {},
+ ): Ai {
+ return new Ai(
+ overrides.opencodeServerUrl ?? defaultArgs[0],
+ overrides.opencodeModel ?? defaultArgs[1],
+ overrides.aiPullRequestDescription ?? defaultArgs[2],
+ overrides.aiMembersOnly ?? defaultArgs[3],
+ overrides.aiIgnoreFiles ?? defaultArgs[4],
+ overrides.aiIncludeReasoning ?? defaultArgs[5],
+ overrides.bugbotMinSeverity ?? defaultArgs[6],
+ overrides.bugbotCommentLimit ?? defaultArgs[7],
+ overrides.bugbotFixVerifyCommands,
+ );
+ }
+
+ describe('constructor and getters', () => {
+ it('stores and returns all constructor values', () => {
+ const ai = new Ai(
+ 'https://server',
+ 'anthropic/claude-3',
+ true,
+ true,
+ ['a', 'b'],
+ false,
+ 'error',
+ 5,
+ ['npm run test'],
+ );
+
+ expect(ai.getOpencodeServerUrl()).toBe('https://server');
+ expect(ai.getOpencodeModel()).toBe('anthropic/claude-3');
+ expect(ai.getAiPullRequestDescription()).toBe(true);
+ expect(ai.getAiMembersOnly()).toBe(true);
+ expect(ai.getAiIgnoreFiles()).toEqual(['a', 'b']);
+ expect(ai.getAiIncludeReasoning()).toBe(false);
+ expect(ai.getBugbotMinSeverity()).toBe('error');
+ expect(ai.getBugbotCommentLimit()).toBe(5);
+ expect(ai.getBugbotFixVerifyCommands()).toEqual(['npm run test']);
+ });
+
+ it('defaults bugbotFixVerifyCommands to empty array when omitted', () => {
+ const ai = new Ai(
+ defaultArgs[0],
+ defaultArgs[1],
+ defaultArgs[2],
+ defaultArgs[3],
+ defaultArgs[4],
+ defaultArgs[5],
+ defaultArgs[6],
+ defaultArgs[7],
+ );
+
+ expect(ai.getBugbotFixVerifyCommands()).toEqual([]);
+ });
+ });
+
+ describe('getOpencodeModelParts', () => {
+ it('returns provider and model when opencodeModel is "provider/model"', () => {
+ const ai = createAi({ opencodeModel: 'anthropic/claude-3-opus' });
+
+ expect(ai.getOpencodeModelParts()).toEqual({
+ providerID: 'anthropic',
+ modelID: 'claude-3-opus',
+ });
+ });
+
+ it('trims whitespace from provider and model', () => {
+ const ai = createAi({ opencodeModel: ' openai / gpt-4 ' });
+
+ expect(ai.getOpencodeModelParts()).toEqual({
+ providerID: 'openai',
+ modelID: 'gpt-4',
+ });
+ });
+
+ it('uses OPENCODE_DEFAULT_MODEL when opencodeModel is empty string', () => {
+ const ai = createAi({ opencodeModel: '' });
+
+ expect(ai.getOpencodeModelParts()).toEqual({
+ providerID: 'opencode',
+ modelID: 'kimi-k2.5-free',
+ });
+ });
+
+ it('uses OPENCODE_DEFAULT_MODEL when opencodeModel is whitespace-only', () => {
+ const ai = createAi({ opencodeModel: ' ' });
+
+ expect(ai.getOpencodeModelParts()).toEqual({
+ providerID: 'opencode',
+ modelID: 'kimi-k2.5-free',
+ });
+ });
+
+ it('when no slash returns providerID "opencode" and modelID as effective', () => {
+ const ai = createAi({ opencodeModel: 'single-model-id' });
+
+ expect(ai.getOpencodeModelParts()).toEqual({
+ providerID: 'opencode',
+ modelID: 'single-model-id',
+ });
+ });
+
+ it('when slash at start (slash <= 0) uses opencode and effective as modelID', () => {
+ const ai = createAi({ opencodeModel: '/only-model' });
+
+ expect(ai.getOpencodeModelParts()).toEqual({
+ providerID: 'opencode',
+ modelID: '/only-model',
+ });
+ });
+
+ it('when model is empty after trim uses default modelID from OPENCODE_DEFAULT_MODEL', () => {
+ const ai = createAi({ opencodeModel: 'provider/ ' });
+
+ expect(ai.getOpencodeModelParts()).toEqual({
+ providerID: 'provider',
+ modelID: 'kimi-k2.5-free',
+ });
+ });
+
+ it('uses OPENCODE_DEFAULT_MODEL when opencodeModel is not set (falsy)', () => {
+ const ai = createAi({ opencodeModel: '' });
+ const parts = ai.getOpencodeModelParts();
+
+ expect(parts.providerID).toBe('opencode');
+ expect(parts.modelID).toBe(OPENCODE_DEFAULT_MODEL.split('/')[1]);
+ });
+
+ it('uses "opencode" when provider part is empty after trim', () => {
+ const ai = createAi({ opencodeModel: 'x' });
+ const originalTrim = String.prototype.trim;
+ let trimCallCount = 0;
+ jest.spyOn(String.prototype, 'trim').mockImplementation(function (this: string) {
+ trimCallCount++;
+ if (trimCallCount === 1) {
+ return ' /model';
+ }
+ if (this === ' ') {
+ return '';
+ }
+ return originalTrim.call(this);
+ });
+
+ const parts = ai.getOpencodeModelParts();
+
+ expect(parts).toEqual({ providerID: 'opencode', modelID: 'model' });
+ (String.prototype.trim as jest.Mock).mockRestore();
+ });
+ });
+});
diff --git a/src/data/repository/__tests__/issue_repository.test.ts b/src/data/repository/__tests__/issue_repository.test.ts
index ce105459..b591fefd 100644
--- a/src/data/repository/__tests__/issue_repository.test.ts
+++ b/src/data/repository/__tests__/issue_repository.test.ts
@@ -367,15 +367,24 @@ describe('IssueRepository', () => {
});
describe('addComment', () => {
- it('calls issues.createComment with owner, repo, issue_number, body', async () => {
+ it('calls issues.createComment with owner, repo, issue_number, body including default watermark', async () => {
mockRest.issues.createComment.mockResolvedValue(undefined);
await repo.addComment('owner', 'repo', 10, 'Hello comment', 'token');
- expect(mockRest.issues.createComment).toHaveBeenCalledWith({
- owner: 'owner',
- repo: 'repo',
- issue_number: 10,
- body: 'Hello comment',
- });
+ const call = mockRest.issues.createComment.mock.calls[0][0];
+ expect(call).toMatchObject({ owner: 'owner', repo: 'repo', issue_number: 10 });
+ expect(call.body).toContain('Hello comment');
+ expect(call.body).toContain('Made with');
+ expect(call.body).toContain('vypdev/copilot');
+ });
+
+ it('appends bugbot watermark when commitSha option is provided', async () => {
+ mockRest.issues.createComment.mockResolvedValue(undefined);
+ await repo.addComment('owner', 'repo', 10, 'Bugbot finding', 'token', { commitSha: 'abc123' });
+ const call = mockRest.issues.createComment.mock.calls[0][0];
+ expect(call.body).toContain('Bugbot finding');
+ expect(call.body).toContain('Written by');
+ expect(call.body).toContain('abc123');
+ expect(call.body).toContain('github.com/owner/repo/commit/abc123');
});
});
@@ -588,15 +597,22 @@ describe('IssueRepository', () => {
});
describe('updateComment', () => {
- it('calls issues.updateComment with comment_id and body', async () => {
+ it('calls issues.updateComment with comment_id and body including default watermark', async () => {
mockRest.issues.updateComment.mockResolvedValue(undefined);
await repo.updateComment('o', 'r', 1, 100, 'Updated body', 'token');
- expect(mockRest.issues.updateComment).toHaveBeenCalledWith({
- owner: 'o',
- repo: 'r',
- comment_id: 100,
- body: 'Updated body',
- });
+ const call = mockRest.issues.updateComment.mock.calls[0][0];
+ expect(call).toMatchObject({ owner: 'o', repo: 'r', comment_id: 100 });
+ expect(call.body).toContain('Updated body');
+ expect(call.body).toContain('Made with');
+ });
+
+ it('appends bugbot watermark when commitSha option is provided', async () => {
+ mockRest.issues.updateComment.mockResolvedValue(undefined);
+ await repo.updateComment('o', 'r', 1, 100, 'Finding update', 'token', { commitSha: 'sha1' });
+ const call = mockRest.issues.updateComment.mock.calls[0][0];
+ expect(call.body).toContain('Finding update');
+ expect(call.body).toContain('Written by');
+ expect(call.body).toContain('sha1');
});
});
diff --git a/src/data/repository/__tests__/merge_repository.test.ts b/src/data/repository/__tests__/merge_repository.test.ts
index 470ab780..9bfd2fd2 100644
--- a/src/data/repository/__tests__/merge_repository.test.ts
+++ b/src/data/repository/__tests__/merge_repository.test.ts
@@ -1,5 +1,8 @@
/**
- * Unit tests for MergeRepository: mergeBranch (PR merge and direct merge fallback).
+ * Unit tests for MergeRepository.mergeBranch: PR creation, waiting for checks per PR,
+ * fallback when the PR has no check runs, timeout, and direct merge fallback.
+ *
+ * Used by the deploy flow (release/hotfix → default and develop). See docs/single-actions/deploy-label-and-merge.mdx.
*/
import { MergeRepository } from '../merge_repository';
@@ -50,6 +53,7 @@ describe('MergeRepository', () => {
mockReposGetCombinedStatusForRef.mockReset();
});
+ describe('PR creation and merge (timeout <= 10 skips waiting for checks)', () => {
it('creates PR, updates body, merges and returns success (timeout <= 10 skips wait)', async () => {
mockPullsCreate.mockResolvedValue({
data: { number: 42 },
@@ -136,7 +140,9 @@ describe('MergeRepository', () => {
expect(result.some(r => r.success === false && r.steps?.some(s => s.includes('Failed to merge')))).toBe(true);
expect(result.length).toBeGreaterThanOrEqual(2);
});
+ });
+ describe('waiting for checks (timeout > 10): per-PR check runs, no checks, timeout', () => {
it('when timeout > 10 waits for check runs (all completed) then merges', async () => {
mockPullsCreate.mockResolvedValue({ data: { number: 1 } });
mockPullsListCommits.mockResolvedValue({ data: [{ commit: { message: 'msg' } }] });
@@ -145,7 +151,7 @@ describe('MergeRepository', () => {
mockChecksListForRef.mockResolvedValue({
data: {
check_runs: [
- { name: 'ci', status: 'completed', conclusion: 'success' },
+ { name: 'ci', status: 'completed', conclusion: 'success', pull_requests: [{ number: 1 }] },
],
},
});
@@ -179,7 +185,7 @@ describe('MergeRepository', () => {
mockChecksListForRef.mockResolvedValue({
data: {
check_runs: [
- { name: 'ci', status: 'completed', conclusion: 'failure' },
+ { name: 'ci', status: 'completed', conclusion: 'failure', pull_requests: [{ number: 1 }] },
],
},
});
@@ -214,6 +220,122 @@ describe('MergeRepository', () => {
expect(mockReposGetCombinedStatusForRef).toHaveBeenCalled();
});
+ it('when timeout > 10 waits only for check runs tied to this PR (ignores runs from other PRs with same head)', async () => {
+ jest.useFakeTimers();
+ mockPullsCreate.mockResolvedValue({ data: { number: 42 } });
+ mockPullsListCommits.mockResolvedValue({ data: [{ commit: { message: 'msg' } }] });
+ mockPullsUpdate.mockResolvedValue({});
+ mockPullsMerge.mockResolvedValue({});
+ // First poll: runs exist but for another PR (e.g. release→master already merged). We must not treat as completed.
+ mockChecksListForRef
+ .mockResolvedValueOnce({
+ data: {
+ check_runs: [
+ { name: 'ci', status: 'completed', conclusion: 'success', pull_requests: [{ number: 1 }] },
+ ],
+ },
+ })
+ .mockResolvedValueOnce({
+ data: {
+ check_runs: [
+ { name: 'ci', status: 'completed', conclusion: 'success', pull_requests: [{ number: 42 }] },
+ ],
+ },
+ });
+ mockReposGetCombinedStatusForRef.mockResolvedValue({
+ data: { state: 'success', statuses: [] },
+ });
+
+ const promise = repo.mergeBranch(
+ 'owner',
+ 'repo',
+ 'release/1.0',
+ 'develop',
+ 30,
+ 'token',
+ );
+ await jest.runAllTimersAsync();
+ const result = await promise;
+
+ jest.useRealTimers();
+ expect(result).toHaveLength(1);
+ expect(result[0].success).toBe(true);
+ expect(mockChecksListForRef).toHaveBeenCalledTimes(2);
+ expect(mockPullsMerge).toHaveBeenCalled();
+ });
+
+ it('when timeout > 10 and no check runs for this PR after a few polls, proceeds to merge (branch may have no required checks)', async () => {
+ jest.useFakeTimers();
+ mockPullsCreate.mockResolvedValue({ data: { number: 99 } });
+ mockPullsListCommits.mockResolvedValue({ data: [] });
+ mockPullsUpdate.mockResolvedValue({});
+ mockPullsMerge.mockResolvedValue({});
+ // Check runs on ref are for another PR only; this PR (99) has none (e.g. develop has no required checks).
+ mockChecksListForRef.mockResolvedValue({
+ data: {
+ check_runs: [
+ { name: 'ci', status: 'completed', conclusion: 'success', pull_requests: [{ number: 1 }] },
+ ],
+ },
+ });
+ mockReposGetCombinedStatusForRef.mockResolvedValue({
+ data: { state: 'success', statuses: [] },
+ });
+
+ const promise = repo.mergeBranch('o', 'r', 'release/1.0', 'develop', 60, 'token');
+ await jest.runAllTimersAsync();
+ const result = await promise;
+
+ jest.useRealTimers();
+ expect(result).toHaveLength(1);
+ expect(result[0].success).toBe(true);
+ expect(mockChecksListForRef).toHaveBeenCalledTimes(3);
+ expect(mockPullsMerge).toHaveBeenCalled();
+ });
+
+ it('when no check runs for this PR after max polls but status checks are pending, falls back to status checks and waits then merges', async () => {
+ jest.useFakeTimers();
+ mockPullsCreate.mockResolvedValue({ data: { number: 99 } });
+ mockPullsListCommits.mockResolvedValue({ data: [] });
+ mockPullsUpdate.mockResolvedValue({});
+ mockPullsMerge.mockResolvedValue({});
+ // Check runs on ref are for another PR only; this PR (99) has none.
+ mockChecksListForRef.mockResolvedValue({
+ data: {
+ check_runs: [
+ { name: 'ci', status: 'completed', conclusion: 'success', pull_requests: [{ number: 1 }] },
+ ],
+ },
+ });
+ // First 4 polls: pending status check; 5th: completed so we proceed to merge.
+ mockReposGetCombinedStatusForRef
+ .mockResolvedValueOnce({
+ data: { state: 'pending', statuses: [{ context: 'ci', state: 'pending' }] },
+ })
+ .mockResolvedValueOnce({
+ data: { state: 'pending', statuses: [{ context: 'ci', state: 'pending' }] },
+ })
+ .mockResolvedValueOnce({
+ data: { state: 'pending', statuses: [{ context: 'ci', state: 'pending' }] },
+ })
+ .mockResolvedValueOnce({
+ data: { state: 'pending', statuses: [{ context: 'ci', state: 'pending' }] },
+ })
+ .mockResolvedValue({
+ data: { state: 'success', statuses: [{ context: 'ci', state: 'success' }] },
+ });
+
+ const promise = repo.mergeBranch('o', 'r', 'release/1.0', 'develop', 60, 'token');
+ await jest.runAllTimersAsync();
+ const result = await promise;
+
+ jest.useRealTimers();
+ expect(result).toHaveLength(1);
+ expect(result[0].success).toBe(true);
+ expect(mockReposGetCombinedStatusForRef).toHaveBeenCalled();
+ expect(mockPullsMerge).toHaveBeenCalled();
+ });
+
it('when timeout > 10 and checks never complete throws then direct merge succeeds', async () => {
jest.useFakeTimers();
mockPullsCreate.mockResolvedValue({ data: { number: 1 } });
@@ -221,7 +343,7 @@ describe('MergeRepository', () => {
mockPullsUpdate.mockResolvedValue({});
mockChecksListForRef.mockResolvedValue({
data: {
- check_runs: [{ name: 'ci', status: 'in_progress', conclusion: null }],
+ check_runs: [{ name: 'ci', status: 'in_progress', conclusion: null, pull_requests: [{ number: 1 }] }],
},
});
mockReposGetCombinedStatusForRef.mockResolvedValue({
@@ -236,4 +358,5 @@ describe('MergeRepository', () => {
jest.useRealTimers();
expect(result.some(r => r.success === true && r.steps?.some(s => s.includes('direct merge')))).toBe(true);
});
+ });
});
diff --git a/src/data/repository/ai_repository.ts b/src/data/repository/ai_repository.ts
index e013eca2..9c1876d3 100644
--- a/src/data/repository/ai_repository.ts
+++ b/src/data/repository/ai_repository.ts
@@ -62,13 +62,6 @@ function ensureNoTrailingSlash(url: string): string {
return url.replace(/\/+$/, '') || url;
}
-function truncate(s: string, maxLen: number): string {
- return s.length <= maxLen ? s : s.slice(0, maxLen) + '...';
-}
-
-const OPENCODE_PROMPT_LOG_PREVIEW_LEN = 500;
-const OPENCODE_PROMPT_LOG_FULL_LEN = 3000;
-
/** Result of validating AI config for OpenCode calls. null when invalid. */
interface OpenCodeConfig {
serverUrl: string;
@@ -157,20 +150,16 @@ function parseJsonFromAgentText(text: string): Record {
} catch (e) {
const msg = e instanceof Error ? e.message : String(e);
logDebugInfo(
- `OpenCode agent response (expectJson): failed to parse extracted JSON. Full text length=${trimmed.length} firstChars=${JSON.stringify(trimmed.slice(0, 200))}`
+ `OpenCode agent response (expectJson): failed to parse extracted JSON. Full text length=${trimmed.length}. Full text:\n${trimmed}`
);
throw new Error(`Agent response is not valid JSON: ${msg}`);
}
}
- const previewLen = 500;
- const msg = trimmed.length > previewLen ? `${trimmed.slice(0, previewLen)}...` : trimmed;
- const fullTruncated = trimmed.length > 3000 ? `${trimmed.slice(0, 3000)}... [total ${trimmed.length} chars]` : trimmed;
logDebugInfo(
- `OpenCode agent response (expectJson): no JSON object found. length=${trimmed.length} preview=${JSON.stringify(msg)}`
+ `OpenCode agent response (expectJson): no JSON object found. length=${trimmed.length}. Full text:\n${trimmed}`
);
- logDebugInfo(`OpenCode agent response (expectJson) full text for debugging:\n${fullTruncated}`);
throw new Error(
- `Agent response is not valid JSON: no JSON object found. Response starts with: ${msg.slice(0, 150)}`
+ `Agent response is not valid JSON: no JSON object found. Response length: ${trimmed.length} chars.`
);
}
}
@@ -188,16 +177,10 @@ function extractPartsByType(parts: unknown, type: string, joinWith: string): str
.trim();
}
-const OPENCODE_RESPONSE_LOG_MAX_LEN = 80000;
-
-/** Parse response as JSON; on empty or invalid body throw a clear error with context. */
+/** Parse response as JSON; on empty or invalid body throw a clear error with context. Logs full body (no truncation). */
async function parseJsonResponse(res: Response, context: string): Promise {
const raw = await res.text();
- const truncated =
- raw.length > OPENCODE_RESPONSE_LOG_MAX_LEN
- ? `${raw.slice(0, OPENCODE_RESPONSE_LOG_MAX_LEN)}... [truncated, total ${raw.length} chars]`
- : raw;
- logDebugInfo(`OpenCode response [${context}] status=${res.status} bodyLength=${raw.length}: ${truncated}`);
+ logDebugInfo(`OpenCode response [${context}] status=${res.status} bodyLength=${raw.length}. Full body:\n${raw}`);
if (!raw || !raw.trim()) {
throw new Error(
`${context}: empty response body (status ${res.status}). The server may have returned nothing or closed the connection early.`
@@ -206,9 +189,8 @@ async function parseJsonResponse(res: Response, context: string): Promise
try {
return JSON.parse(raw) as T;
} catch (parseError) {
- const snippet = raw.length > 200 ? `${raw.slice(0, 200)}...` : raw;
const err = new Error(
- `${context}: invalid JSON (status ${res.status}). Body snippet: ${snippet}`
+ `${context}: invalid JSON (status ${res.status}). Body length: ${raw.length} chars. See debug log for full body.`
);
if (parseError instanceof Error && 'cause' in err) (err as Error & { cause: unknown }).cause = parseError;
throw err;
@@ -225,27 +207,27 @@ function extractReasoningFromParts(parts: unknown): string {
return extractPartsByType(parts, 'reasoning', '\n\n');
}
-/** Max length of per-part text preview in debug log (to avoid huge log lines). */
-const OPENCODE_PART_PREVIEW_LEN = 80;
-
/**
- * Build a short summary of OpenCode message parts for debug logs (types, text lengths, and short preview).
+ * Log OpenCode message parts: summary line and full text of each part (no truncation).
*/
-function summarizePartsForLog(parts: unknown[], context: string): string {
+function logPartsForDebug(parts: unknown[], context: string): void {
if (!Array.isArray(parts) || parts.length === 0) {
- return `${context}: 0 parts`;
+ logDebugInfo(`${context}: 0 parts`);
+ return;
}
- const items = (parts as Array<{ type?: string; text?: string }>).map((p, i) => {
+ const summary = (parts as Array<{ type?: string; text?: string }>).map((p, i) => {
+ const type = p?.type ?? '(missing type)';
+ const len = typeof p?.text === 'string' ? p.text.length : 0;
+ return `[${i}] type=${type} length=${len}`;
+ }).join(' | ');
+ logDebugInfo(`${context}: ${parts.length} part(s) — ${summary}`);
+ (parts as Array<{ type?: string; text?: string }>).forEach((p, i) => {
const type = p?.type ?? '(missing type)';
const text = typeof p?.text === 'string' ? p.text : '';
- const len = text.length;
- const preview =
- len > OPENCODE_PART_PREVIEW_LEN
- ? `${text.slice(0, OPENCODE_PART_PREVIEW_LEN).replace(/\n/g, ' ')}...`
- : text.replace(/\n/g, ' ');
- return `[${i}] type=${type} length=${len}${preview ? ` preview=${JSON.stringify(preview)}` : ''}`;
+ if (text) {
+ logDebugInfo(`OpenCode part [${i}] type=${type} full text:\n${text}`);
+ }
});
- return `${context}: ${parts.length} part(s) — ${items.join(' | ')}`;
}
/** Default OpenCode agent for analysis/planning (read-only, no file edits). */
@@ -331,8 +313,8 @@ async function opencodeMessageWithAgentRaw(
logInfo(
`OpenCode request [agent ${options.agent}] model=${options.providerID}/${options.modelID} promptLength=${options.promptText.length}`
);
- logInfo(`OpenCode sending prompt (preview): ${truncate(options.promptText, OPENCODE_PROMPT_LOG_PREVIEW_LEN)}`);
- logDebugInfo(`OpenCode prompt (full): ${truncate(options.promptText, OPENCODE_PROMPT_LOG_FULL_LEN)}`);
+ logInfo(`OpenCode sending prompt (full):\n${options.promptText}`);
+ logDebugInfo(`OpenCode prompt (full, no truncation):\n${options.promptText}`);
logDebugInfo(
`OpenCode message body: agent=${options.agent}, model=${options.providerID}/${options.modelID}, parts[0].text length=${options.promptText.length}`
);
@@ -382,7 +364,7 @@ async function opencodeMessageWithAgentRaw(
);
const parts = messageData?.parts ?? messageData?.data?.parts ?? [];
const partsArray = Array.isArray(parts) ? parts : [];
- logDebugInfo(summarizePartsForLog(partsArray, `OpenCode agent "${options.agent}" message parts`));
+ logPartsForDebug(partsArray, `OpenCode agent "${options.agent}" message parts`);
const text = extractTextFromParts(partsArray);
logInfo(
`OpenCode response [agent ${options.agent}] responseLength=${text.length} sessionId=${sessionId}`
@@ -467,9 +449,8 @@ export class AiRepository {
if (!text) throw new Error('Empty response text');
const reasoning = options.includeReasoning ? extractReasoningFromParts(parts) : '';
if (options.expectJson && options.schema) {
- const maxLogLen = 5000000;
- const toLog = text.length > maxLogLen ? `${text.slice(0, maxLogLen)}\n... [truncated, total ${text.length} chars]` : text;
- logInfo(`OpenCode agent response (full text, expectJson=true) length=${text.length}:\n${toLog}`);
+ logInfo(`OpenCode agent response (expectJson=true) length=${text.length}`);
+ logDebugInfo(`OpenCode agent response (full text, no truncation) length=${text.length}:\n${text}`);
const parsed = parseJsonFromAgentText(text);
if (options.includeReasoning && reasoning) {
return { ...parsed, reasoning };
diff --git a/src/data/repository/issue_repository.ts b/src/data/repository/issue_repository.ts
index fd264305..a0446c29 100644
--- a/src/data/repository/issue_repository.ts
+++ b/src/data/repository/issue_repository.ts
@@ -1,5 +1,6 @@
import * as core from "@actions/core";
import * as github from "@actions/github";
+import { getCommentWatermark } from "../../utils/comment_watermark";
import { logDebugInfo, logError } from "../../utils/logger";
import { Labels } from "../model/labels";
import { Milestone } from "../model/milestone";
@@ -503,13 +504,18 @@ export class IssueRepository {
issueNumber: number,
comment: string,
token: string,
+ options?: { commitSha?: string },
) => {
+ const watermark = getCommentWatermark(
+ options?.commitSha ? { commitSha: options.commitSha, owner, repo: repository } : undefined
+ );
+ const body = `${comment}\n\n${watermark}`;
const octokit = github.getOctokit(token);
await octokit.rest.issues.createComment({
owner: owner,
repo: repository,
issue_number: issueNumber,
- body: comment,
+ body,
});
logDebugInfo(`Comment added to Issue ${issueNumber}.`);
@@ -522,13 +528,18 @@ export class IssueRepository {
commentId: number,
comment: string,
token: string,
+ options?: { commitSha?: string },
) => {
+ const watermark = getCommentWatermark(
+ options?.commitSha ? { commitSha: options.commitSha, owner, repo: repository } : undefined
+ );
+ const body = `${comment}\n\n${watermark}`;
const octokit = github.getOctokit(token);
await octokit.rest.issues.updateComment({
owner: owner,
repo: repository,
comment_id: commentId,
- body: comment,
+ body,
});
logDebugInfo(`Comment ${commentId} updated in Issue ${issueNumber}.`);
diff --git a/src/data/repository/merge_repository.ts b/src/data/repository/merge_repository.ts
index 5181bdf5..812341f5 100644
--- a/src/data/repository/merge_repository.ts
+++ b/src/data/repository/merge_repository.ts
@@ -3,8 +3,14 @@ import { logDebugInfo, logError } from '../../utils/logger';
import { Result } from '../model/result';
/**
- * Repository for merging branches (via PR or direct merge).
- * Isolated to allow unit tests with mocked Octokit.
+ * Repository for merging branches: creates a PR, waits for that PR's check runs (or status checks),
+ * then merges the PR; on failure, falls back to a direct Git merge.
+ *
+ * Check runs are filtered by PR (pull_requests) so we only wait for the current PR's checks,
+ * not those of another PR sharing the same head (e.g. release→main vs release→develop).
+ * If the PR has no check runs after a short wait, we proceed to merge (branch may have no required checks).
+ *
+ * @see docs/single-actions/deploy-label-and-merge.mdx for the deploy flow and check-wait behaviour.
*/
export class MergeRepository {
@@ -61,10 +67,13 @@ This PR merges **${head}** into **${base}**.
});
const iteration = 10;
+ /** Give workflows a short window to register check runs for this PR; after this, we allow merge with no check runs (e.g. branch has no required checks). */
+ const maxWaitForPrChecksAttempts = 3;
if (timeout > iteration) {
// Wait for checks to complete - can use regular token for reading checks
let checksCompleted = false;
let attempts = 0;
+ let waitForPrChecksAttempts = 0;
const maxAttempts = timeout > iteration ? Math.floor(timeout / iteration) : iteration;
while (!checksCompleted && attempts < maxAttempts) {
@@ -74,6 +83,14 @@ This PR merges **${head}** into **${base}**.
ref: head,
});
+ // Only consider check runs that are for this PR. When the same branch is used in
+ // multiple PRs (e.g. release→master and release→develop), listForRef returns runs
+ // for all PRs; we must wait for runs tied to the current PR or we may see completed
+ // runs from the other PR and merge before this PR's checks have run.
+ const runsForThisPr = (checkRuns.check_runs as Array<{ status: string; conclusion: string | null; name: string; pull_requests?: Array<{ number: number }> }>).filter(
+ run => run.pull_requests?.some(pr => pr.number === pullRequest.number),
+ );
+
// Get commit status checks for the PR head commit
const { data: commitStatus } = await octokit.rest.repos.getCombinedStatusForRef({
owner: owner,
@@ -82,11 +99,11 @@ This PR merges **${head}** into **${base}**.
});
logDebugInfo(`Combined status state: ${commitStatus.state}`);
- logDebugInfo(`Number of check runs: ${checkRuns.check_runs.length}`);
+ logDebugInfo(`Number of check runs for this PR: ${runsForThisPr.length} (total on ref: ${checkRuns.check_runs.length})`);
- // If there are check runs, prioritize those over status checks
- if (checkRuns.check_runs.length > 0) {
- const pendingCheckRuns = checkRuns.check_runs.filter(
+ // If there are check runs for this PR, wait for them to complete
+ if (runsForThisPr.length > 0) {
+ const pendingCheckRuns = runsForThisPr.filter(
check => check.status !== 'completed',
);
@@ -95,7 +112,7 @@ This PR merges **${head}** into **${base}**.
logDebugInfo('All check runs have completed.');
// Verify if all checks passed
- const failedChecks = checkRuns.check_runs.filter(
+ const failedChecks = runsForThisPr.filter(
check => check.conclusion === 'failure',
);
@@ -111,6 +128,39 @@ This PR merges **${head}** into **${base}**.
attempts++;
continue;
}
+ } else if (checkRuns.check_runs.length > 0 && runsForThisPr.length === 0) {
+ // There are runs on the ref but none for this PR. Either workflows for this PR
+ // haven't registered yet, or this PR/base has no required checks.
+ waitForPrChecksAttempts++;
+ if (waitForPrChecksAttempts >= maxWaitForPrChecksAttempts) {
+ // Give up waiting for PR-specific check runs; fall back to status checks
+ // before proceeding to merge (PR may have required status checks).
+ const pendingChecksFallback = commitStatus.statuses.filter(status => {
+ logDebugInfo(`Status check (fallback): ${status.context} (State: ${status.state})`);
+ return status.state === 'pending';
+ });
+
+ if (pendingChecksFallback.length === 0) {
+ checksCompleted = true;
+ logDebugInfo(
+ `No check runs for this PR after ${maxWaitForPrChecksAttempts} polls; no pending status checks; proceeding to merge.`,
+ );
+ } else {
+ logDebugInfo(
+ `No check runs for this PR after ${maxWaitForPrChecksAttempts} polls; falling back to status checks. Waiting for ${pendingChecksFallback.length} status checks to complete.`,
+ );
+ pendingChecksFallback.forEach(check => {
+ logDebugInfo(` - ${check.context} (State: ${check.state})`);
+ });
+ await new Promise(resolve => setTimeout(resolve, iteration * 1000));
+ attempts++;
+ }
+ } else {
+ logDebugInfo('Check runs exist on ref but none for this PR yet; waiting for workflows to register.');
+ await new Promise(resolve => setTimeout(resolve, iteration * 1000));
+ attempts++;
+ }
+ continue;
} else {
// Fall back to status checks if no check runs exist
const pendingChecks = commitStatus.statuses.filter(status => {
diff --git a/src/manager/description/base/__tests__/content_interface.test.ts b/src/manager/description/base/__tests__/content_interface.test.ts
new file mode 100644
index 00000000..ca95800d
--- /dev/null
+++ b/src/manager/description/base/__tests__/content_interface.test.ts
@@ -0,0 +1,179 @@
+import { ContentInterface } from '../content_interface';
+
+jest.mock('../../../../utils/logger', () => ({
+ logError: jest.fn(),
+}));
+
+/** Concrete implementation for testing the abstract ContentInterface. */
+class TestContent extends ContentInterface {
+ constructor(
+ public readonly testId: string,
+ public readonly testVisible: boolean,
+ ) {
+ super();
+ }
+ get id(): string {
+ return this.testId;
+ }
+ get visibleContent(): boolean {
+ return this.testVisible;
+ }
+}
+
+describe('ContentInterface', () => {
+ describe('visibleContent: true (HTML comment style)', () => {
+ const handler = new TestContent('foo', true);
+    const start = '<!-- foo-start -->';
+    const end = '<!-- foo-end -->';
+
+ describe('getContent', () => {
+ it('returns undefined when description is undefined', () => {
+ expect(handler.getContent(undefined)).toBeUndefined();
+ });
+
+ it('returns undefined when start pattern is missing', () => {
+ const desc = `pre\n${end}\npost`;
+ expect(handler.getContent(desc)).toBeUndefined();
+ });
+
+ it('returns undefined when end pattern is missing', () => {
+ const desc = `pre\n${start}\nmid`;
+ expect(handler.getContent(desc)).toBeUndefined();
+ });
+
+ it('returns content between start and end patterns', () => {
+ const desc = `pre\n${start}\ninner\n${end}\npost`;
+ expect(handler.getContent(desc)).toBe('\ninner\n');
+ });
+
+ it('returns first block when multiple blocks exist', () => {
+ const desc = `${start}\nfirst\n${end}\n${start}\nsecond\n${end}`;
+ expect(handler.getContent(desc)).toBe('\nfirst\n');
+ });
+
+ it('returns undefined when only start tag is present', () => {
+ const desc = `pre\n${start}\norphan`;
+ expect(handler.getContent(desc)).toBeUndefined();
+ });
+
+ it('logs and rethrows when extraction throws', () => {
+ const desc = `pre\n${start}\ninner\n${end}\npost`;
+ const originalSplit = String.prototype.split;
+ (jest.spyOn(String.prototype, 'split') as jest.Mock).mockImplementation(
+ function (this: string, separator: unknown, limit?: number) {
+ if (separator === start) {
+ return ['only-one-element'];
+ }
+ return (originalSplit as (sep: string, limit?: number) => string[]).call(
+ this,
+ separator as string,
+ limit,
+ );
+ },
+ );
+ const { logError } = require('../../../../utils/logger');
+
+ expect(() => handler.getContent(desc)).toThrow();
+ expect(logError).toHaveBeenCalledWith(
+ expect.stringMatching(/Error reading issue configuration/),
+ );
+
+ (String.prototype.split as jest.Mock).mockRestore();
+ });
+ });
+
+ describe('updateContent', () => {
+ it('returns undefined when description is undefined', () => {
+ expect(handler.updateContent(undefined, 'x')).toBeUndefined();
+ });
+
+ it('returns undefined when content is undefined', () => {
+ expect(handler.updateContent('body', undefined)).toBeUndefined();
+ });
+
+ it('appends new block when no existing block', () => {
+ const desc = 'some body';
+ const result = handler.updateContent(desc, 'new');
+ expect(result).toBe(`some body\n\n${start}\nnew\n${end}`);
+ });
+
+ it('replaces existing block when block exists', () => {
+ const desc = `pre\n${start}\nold\n${end}\npost`;
+ const result = handler.updateContent(desc, 'new');
+ expect(result).toBe(`pre\n${start}\nnew\n${end}\npost`);
+ });
+
+ it('returns undefined when only start tag exists (cannot add, update fails)', () => {
+ const desc = `pre\n${start}\norphan`;
+ const result = handler.updateContent(desc, 'new');
+ expect(result).toBeUndefined();
+ });
+
+ it('logs and returns undefined when update throws', () => {
+ const desc = `pre\n${start}\nold\n${end}\npost`;
+ const originalSplit = String.prototype.split;
+ (jest.spyOn(String.prototype, 'split') as jest.Mock).mockImplementation(
+ function (this: string, separator: unknown, limit?: number) {
+ if (separator === start) {
+ throw new Error('split failed');
+ }
+ return (originalSplit as (sep: string, limit?: number) => string[]).call(
+ this,
+ separator as string,
+ limit,
+ );
+ },
+ );
+ const { logError } = require('../../../../utils/logger');
+
+ const result = handler.updateContent(desc, 'new');
+
+ expect(result).toBeUndefined();
+ expect(logError).toHaveBeenCalledWith(
+ expect.stringMatching(/Error updating issue description/),
+ );
+
+ (String.prototype.split as jest.Mock).mockRestore();
+ });
+ });
+ });
+
+ describe('visibleContent: false (hidden comment style)', () => {
+ const handler = new TestContent('config', false);
+    const start = '<!-- config-start -->';
+    const end = '<!-- config-end -->';
+
+ describe('getContent', () => {
+ it('returns undefined when description is undefined', () => {
+ expect(handler.getContent(undefined)).toBeUndefined();
+ });
+
+ it('returns undefined when start pattern is missing', () => {
+ expect(handler.getContent(`pre\n${end}\npost`)).toBeUndefined();
+ });
+
+ it('returns undefined when end pattern is missing', () => {
+ expect(handler.getContent(`pre\n${start}\nmid`)).toBeUndefined();
+ });
+
+ it('returns content between start and end patterns', () => {
+ const desc = `pre\n${start}\n{"x":1}\n${end}\npost`;
+ expect(handler.getContent(desc)).toBe('\n{"x":1}\n');
+ });
+ });
+
+ describe('updateContent', () => {
+ it('appends new block when no existing block', () => {
+ const desc = 'body';
+ const result = handler.updateContent(desc, 'data');
+ expect(result).toBe(`body\n\n${start}\ndata\n${end}`);
+ });
+
+ it('replaces existing block when block exists', () => {
+ const desc = `pre\n${start}\nold\n${end}\npost`;
+ const result = handler.updateContent(desc, 'new');
+ expect(result).toBe(`pre\n${start}\nnew\n${end}\npost`);
+ });
+ });
+ });
+});
diff --git a/src/manager/description/base/__tests__/issue_content_interface.test.ts b/src/manager/description/base/__tests__/issue_content_interface.test.ts
new file mode 100644
index 00000000..8dac3c9a
--- /dev/null
+++ b/src/manager/description/base/__tests__/issue_content_interface.test.ts
@@ -0,0 +1,281 @@
+import type { Execution } from '../../../../data/model/execution';
+import { IssueContentInterface } from '../issue_content_interface';
+
+jest.mock('../../../../utils/logger', () => ({
+ logError: jest.fn(),
+}));
+
+const mockGetDescription = jest.fn();
+const mockUpdateDescription = jest.fn();
+
+jest.mock('../../../../data/repository/issue_repository', () => ({
+ IssueRepository: jest.fn().mockImplementation(() => ({
+ getDescription: mockGetDescription,
+ updateDescription: mockUpdateDescription,
+ })),
+}));
+
+/** Concrete implementation for testing IssueContentInterface. */
+class TestIssueContent extends IssueContentInterface {
+ get id(): string {
+ return 'test-block';
+ }
+ get visibleContent(): boolean {
+ return false;
+ }
+}
+
+const START = '<!-- copilot-test-block -->'; // NOTE(review): START/END markers reconstructed — the stripped '<…>' span swallowed both constants (END is used at descriptionWithBlock below); confirm exact text against IssueContentInterface
+const END = '<!-- copilot-test-block-end -->';
+
+function descriptionWithBlock(body: string): string {
+ return `pre\n${START}\n${body}\n${END}\npost`;
+}
+
+function minimalExecution(overrides: Record<string, unknown> = {}): Execution {
+ return {
+ owner: 'o',
+ repo: 'r',
+ tokens: { token: 't' },
+ isIssue: true,
+ isPullRequest: false,
+ isPush: false,
+ isSingleAction: false,
+ issue: { number: 42 },
+ pullRequest: { number: 99 },
+ issueNumber: 42,
+ singleAction: { issue: 123, isIssue: false, isPullRequest: false, isPush: false },
+ ...overrides,
+ } as unknown as Execution;
+}
+
+describe('IssueContentInterface', () => {
+ let handler: TestIssueContent;
+
+ beforeEach(() => {
+ jest.clearAllMocks();
+ handler = new TestIssueContent();
+ });
+
+ describe('internalGetter', () => {
+ it('uses issue.number when isIssue and not single action', async () => {
+ mockGetDescription.mockResolvedValue(descriptionWithBlock('data'));
+ const execution = minimalExecution({ isIssue: true, isSingleAction: false });
+
+ const result = await handler.internalGetter(execution);
+
+ expect(mockGetDescription).toHaveBeenCalledWith('o', 'r', 42, 't');
+ expect(result).toBe('\ndata\n');
+ });
+
+ it('uses pullRequest.number when isPullRequest and not single action', async () => {
+ mockGetDescription.mockResolvedValue(descriptionWithBlock('pr-data'));
+ const execution = minimalExecution({
+ isIssue: false,
+ isPullRequest: true,
+ isSingleAction: false,
+ });
+
+ const result = await handler.internalGetter(execution);
+
+ expect(mockGetDescription).toHaveBeenCalledWith('o', 'r', 99, 't');
+ expect(result).toBe('\npr-data\n');
+ });
+
+ it('uses issueNumber when isPush', async () => {
+ mockGetDescription.mockResolvedValue(descriptionWithBlock('push-data'));
+ const execution = minimalExecution({
+ isIssue: false,
+ isPullRequest: false,
+ isPush: true,
+ issueNumber: 7,
+ });
+
+ const result = await handler.internalGetter(execution);
+
+ expect(mockGetDescription).toHaveBeenCalledWith('o', 'r', 7, 't');
+ expect(result).toBe('\npush-data\n');
+ });
+
+ it('uses issueNumber when isSingleAction', async () => {
+ mockGetDescription.mockResolvedValue(descriptionWithBlock('single'));
+ const execution = minimalExecution({
+ isSingleAction: true,
+ issueNumber: 5,
+ });
+
+ const result = await handler.internalGetter(execution);
+
+ expect(mockGetDescription).toHaveBeenCalledWith('o', 'r', 5, 't');
+ expect(result).toBe('\nsingle\n');
+ });
+
+ it('returns undefined when execution is not issue, PR, push or single action', async () => {
+ const execution = minimalExecution({
+ isIssue: false,
+ isPullRequest: false,
+ isPush: false,
+ isSingleAction: false,
+ });
+
+ const result = await handler.internalGetter(execution);
+
+ expect(mockGetDescription).not.toHaveBeenCalled();
+ expect(result).toBeUndefined();
+ });
+
+ it('returns undefined when getContent finds no block', async () => {
+ mockGetDescription.mockResolvedValue('no block here');
+ const execution = minimalExecution();
+
+ const result = await handler.internalGetter(execution);
+
+ expect(result).toBeUndefined();
+ });
+
+ it('throws when getDescription rejects', async () => {
+ mockGetDescription.mockRejectedValue(new Error('api error'));
+ const execution = minimalExecution();
+
+ await expect(handler.internalGetter(execution)).rejects.toThrow('api error');
+ });
+ });
+
+ describe('internalUpdate', () => {
+ it('fetches description, updates content and calls updateDescription when isIssue', async () => {
+ const desc = descriptionWithBlock('old');
+ mockGetDescription.mockResolvedValue(desc);
+ mockUpdateDescription.mockResolvedValue(undefined);
+ const execution = minimalExecution({ isIssue: true, isSingleAction: false });
+
+ const result = await handler.internalUpdate(execution, 'new');
+
+ expect(mockGetDescription).toHaveBeenCalledWith('o', 'r', 42, 't');
+ expect(result).toContain('\nnew\n');
+ expect(mockUpdateDescription).toHaveBeenCalledWith('o', 'r', 42, expect.any(String), 't');
+ });
+
+ it('uses pullRequest.number when isPullRequest', async () => {
+ mockGetDescription.mockResolvedValue(descriptionWithBlock('x'));
+ mockUpdateDescription.mockResolvedValue(undefined);
+ const execution = minimalExecution({
+ isIssue: false,
+ isPullRequest: true,
+ isSingleAction: false,
+ });
+
+ await handler.internalUpdate(execution, 'y');
+
+ expect(mockGetDescription).toHaveBeenCalledWith('o', 'r', 99, 't');
+ expect(mockUpdateDescription).toHaveBeenCalledWith('o', 'r', 99, expect.any(String), 't');
+ });
+
+ it('uses issueNumber when isPush', async () => {
+ mockGetDescription.mockResolvedValue(descriptionWithBlock('a'));
+ mockUpdateDescription.mockResolvedValue(undefined);
+ const execution = minimalExecution({
+ isIssue: false,
+ isPullRequest: false,
+ isPush: true,
+ issueNumber: 11,
+ });
+
+ await handler.internalUpdate(execution, 'b');
+
+ expect(mockGetDescription).toHaveBeenCalledWith('o', 'r', 11, 't');
+ expect(mockUpdateDescription).toHaveBeenCalledWith('o', 'r', 11, expect.any(String), 't');
+ });
+
+ it('when isSingleAction and isIssue uses issue.number', async () => {
+ mockGetDescription.mockResolvedValue(descriptionWithBlock('c'));
+ mockUpdateDescription.mockResolvedValue(undefined);
+ const execution = minimalExecution({
+ isSingleAction: true,
+ isIssue: true,
+ issue: { number: 88 },
+ });
+
+ await handler.internalUpdate(execution, 'd');
+
+ expect(mockGetDescription).toHaveBeenCalledWith('o', 'r', 88, 't');
+ });
+
+ it('when isSingleAction and isPullRequest uses pullRequest.number', async () => {
+ mockGetDescription.mockResolvedValue(descriptionWithBlock('e'));
+ mockUpdateDescription.mockResolvedValue(undefined);
+ const execution = minimalExecution({
+ isSingleAction: true,
+ isIssue: false,
+ isPullRequest: true,
+ pullRequest: { number: 77 },
+ });
+
+ await handler.internalUpdate(execution, 'f');
+
+ expect(mockGetDescription).toHaveBeenCalledWith('o', 'r', 77, 't');
+ });
+
+ it('when isSingleAction and isPush uses issueNumber', async () => {
+ mockGetDescription.mockResolvedValue(descriptionWithBlock('g'));
+ mockUpdateDescription.mockResolvedValue(undefined);
+ const execution = minimalExecution({
+ isSingleAction: true,
+ isIssue: false,
+ isPullRequest: false,
+ isPush: true,
+ issueNumber: 33,
+ });
+
+ await handler.internalUpdate(execution, 'h');
+
+ expect(mockGetDescription).toHaveBeenCalledWith('o', 'r', 33, 't');
+ });
+
+ it('when isSingleAction and not issue/PR/push uses singleAction.issue', async () => {
+ mockGetDescription.mockResolvedValue(descriptionWithBlock('i'));
+ mockUpdateDescription.mockResolvedValue(undefined);
+ const execution = minimalExecution({
+ isSingleAction: true,
+ isIssue: false,
+ isPullRequest: false,
+ isPush: false,
+ singleAction: { issue: 999, isIssue: false, isPullRequest: false, isPush: false },
+ });
+
+ await handler.internalUpdate(execution, 'j');
+
+ expect(mockGetDescription).toHaveBeenCalledWith('o', 'r', 999, 't');
+ });
+
+ it('returns undefined when execution is not issue, PR, push or single action', async () => {
+ const execution = minimalExecution({
+ isIssue: false,
+ isPullRequest: false,
+ isPush: false,
+ isSingleAction: false,
+ });
+
+ const result = await handler.internalUpdate(execution, 'content');
+
+ expect(mockGetDescription).not.toHaveBeenCalled();
+ expect(result).toBeUndefined();
+ });
+
+ it('returns undefined when updateContent returns undefined', async () => {
+ mockGetDescription.mockResolvedValue('only start