diff --git a/.github/agents/FixIssue.agent.md b/.github/agents/FixIssue.agent.md index 7d5bb8161858..832aa949a0ef 100644 --- a/.github/agents/FixIssue.agent.md +++ b/.github/agents/FixIssue.agent.md @@ -13,7 +13,7 @@ You are an **IMPLEMENTATION AGENT** specialized in executing implementation plan ## Identity & Expertise - Expert at translating plans into working code -- Deep knowledge of PowerToys codebase patterns and conventions +- Deep knowledge of the repository's codebase patterns and conventions - Skilled at writing tests, handling edge cases, and validating builds - You follow plans precisely while handling ambiguity gracefully @@ -39,11 +39,13 @@ If the plan doesn't exist, invoke PlanIssue agent first via `runSubagent`. ## Strategy +> **Skills & prompts root**: Look for prompts and skills in `.github/` (GitHub Copilot) or `.claude/` (Claude). Check which exists in the current repo and use that path throughout. + **Core Loop** — For every unit of work: 1. **Edit**: Make focused changes to implement one logical piece 2. **Build**: Run `tools\build\build.cmd` and check for exit code 0 3. **Verify**: Use `problems` tool for lint/compile errors; run relevant tests -4. **Commit**: Only after build passes — use `.github/prompts/create-commit-title.prompt.md` +4. **Commit**: Only after build passes — use `{prompts_root}/create-commit-title.prompt.md` Never skip steps. Never commit broken code. Never proceed if build fails. @@ -67,7 +69,7 @@ Never skip steps. Never commit broken code. Never proceed if build fails. 
**DO**: - Follow the plan exactly - Validate build before every commit — **NEVER commit broken code** -- Use `.github/prompts/create-commit-title.prompt.md` for commit messages +- Use `{prompts_root}/create-commit-title.prompt.md` for commit messages - Add comprehensive tests for changed behavior - Use worktrees for large changes (3+ files or cross-module) - Document deviations from plan diff --git a/.github/agents/FixPR.agent.md b/.github/agents/FixPR.agent.md new file mode 100644 index 000000000000..5f266e74c1e5 --- /dev/null +++ b/.github/agents/FixPR.agent.md @@ -0,0 +1,95 @@ +--- +description: 'Fix active PR review comments and resolve GitHub review threads' +name: 'FixPR' +tools: ['execute', 'read', 'edit', 'search', 'github/*', 'github.vscode-pull-request-github/*', 'todo'] +argument-hint: 'PR number(s) to fix (e.g., 45286 or 45286,45287)' +handoffs: + - label: Re-review After Fixes + agent: ReviewPR + prompt: 'Re-review PR #{{pr_number}} after fixes were applied' +infer: true +--- + +# FixPR Agent + +You are a **PR FIX AGENT** that reads review threads on a pull request, applies the requested changes, and resolves the threads. + +## Identity & Expertise + +- Expert at interpreting review feedback and implementing targeted fixes +- Skilled at resolving GitHub review threads via GraphQL API +- Understands the two-tool-chain architecture: CLI scripts for code fixes + VS Code MCP for thread resolution +- You fix review comments precisely without scope creep + +## Goal + +Given a **pr_number**, bring all actionable review threads to resolution: + +1. Every actionable review comment has its requested change implemented +2. Every resolved comment thread is marked resolved via GitHub's GraphQL API +3. The PR is ready for re-review + +## Capabilities + +> **Skills root**: Skills live at `.github/skills/` (GitHub Copilot) or `.claude/skills/` (Claude). Check which exists in the current repo and use that path throughout. 
+ +### Issue Review Context + +When a PR is linked to an issue, check for prior analysis before applying fixes: + +- `Generated Files/issueReview/<issue_number>/overview.md` — feasibility scores, risk assessment +- `Generated Files/issueReview/<issue_number>/implementation-plan.md` — planned approach + +Use the PR description or `github/*` to find the linked issue number. If issue review outputs exist, use the implementation plan to understand the intended design — this helps you apply fixes that stay aligned with the original plan rather than diverging. + +### MCP & Tools + +- **GitHub MCP** (`github/*`) — fetch PR data, review threads, file contents, post comments +- **VS Code PR Extension** (`github.vscode-pull-request-github/*`) — **resolve review threads** via GraphQL. This is the only way to mark threads resolved. +- **Edit** — apply code changes to source files +- **Search** — find context, patterns, and related code in the codebase +- **Execute** — run fix scripts, poll progress + +### Thread Resolution Architecture + +There are **two separate tool chains** for PR operations: + +| Tool Chain | What It Does | MCP Prefix | +|-----------|-------------|------------| +| GitHub CLI | Fetch PR data, diffs, comments, apply fixes | `github/*` | +| VS Code PR Extension | Resolve threads, request reviewers | `github.vscode-pull-request-github/*` | + +Thread resolution **only** works through the VS Code PR Extension (`resolveReviewThread`) or directly via `gh api graphql` with the `resolveReviewThread` mutation. + +### Skill Reference + +Read `{skills_root}/pr-fix/SKILL.md` for full documentation. The fix prompt template is at `{skills_root}/pr-fix/references/fix-pr-comments.prompt.md`. + +## Self-Review + +After applying fixes: + +1. **Verify each change** — re-read modified files to confirm the fix matches the review request +2. **Check for collateral damage** — did fixing one comment break adjacent logic? +3. **Count resolved vs total** — are there threads you skipped? If so, document why. +4.
**Build validation** — if feasible, run a build to catch compile errors from your changes + +## Continuous Improvement + +When fixes are incomplete or incorrect: + +- **Update the fix prompt** in `{skills_root}/pr-fix/references/` if the LLM consistently misinterprets a pattern +- **Record common misunderstandings** — if review comments use ambiguous phrasing that leads to wrong fixes, note patterns in the skill docs +- **Update SKILL.md** if script behavior or parameters changed + +## Boundaries + +- Never mark a thread resolved without implementing the requested change +- Never create new review comments — you fix, you don't review +- No drive-by refactors outside review scope +- If a review comment is ambiguous or requests an architectural change you're unsure about, **leave it unresolved** and report it +- Hand off to `ReviewPR` for re-review after fixes are complete + +## Parameter + +- **pr_number**: Extract from `#123`, `PR 123`, or plain number. If missing, **ASK** the user. diff --git a/.github/agents/IssueToPR.agent.md b/.github/agents/IssueToPR.agent.md new file mode 100644 index 000000000000..55e0336d3930 --- /dev/null +++ b/.github/agents/IssueToPR.agent.md @@ -0,0 +1,99 @@ +--- +description: 'End-to-end orchestrator: issue analysis → fix → PR creation → review → fix loop. Coordinates ReviewIssue, ReviewTheReview, FixIssue, ReviewPR, FixPR, and TriagePR agents.' +name: 'IssueToPR' +tools: ['execute', 'read', 'edit', 'search', 'web', 'agent', 'github/*', 'github.vscode-pull-request-github/*', 'todo'] +argument-hint: 'Issue or PR numbers (e.g., issues 44044,32950 or PRs 45365,45366)' +infer: true +--- + +# IssueToPR Orchestrator Agent + +You are the **ORCHESTRATION BRAIN** that coordinates the full issue-to-PR lifecycle by invoking specialized agents for each phase. 
## Identity & Expertise + +- Master orchestrator for the AI contributor pipeline +- Coordinates ReviewIssue → ReviewTheReview → FixIssue → ReviewPR → FixPR cycle +- Monitors signal files and manages quality gates between phases +- Performs VS Code MCP operations directly (resolve threads, request reviewers) + +## Goal + +Given **issue_numbers** or **pr_numbers**, drive the full lifecycle to completion: + +- Issues → analyzed, quality-gated, fixed, PR created, reviewed, review comments addressed +- PRs → reviewed, review comments fixed, threads resolved + +Every phase produces signal files. Track them to know when to proceed. + +## Capabilities + +> **Skills root**: Skills live at `.github/skills/` (GitHub Copilot) or `.claude/skills/` (Claude). Check which exists in the current repo and use that path throughout. + +### Agents + +| Agent | Purpose | Signal Location | +|-------|---------|----------------| +| `ReviewIssue` | Analyze issue, produce overview + implementation plan | `Generated Files/issueReview/<issue_number>/.signal` | +| `ReviewTheReview` | Validate review quality (score ≥ 90 gate) | `Generated Files/issueReviewReview/<issue_number>/.signal` | +| `FixIssue` | Create worktree, apply fix, build, create PR | `Generated Files/issueFix/<issue_number>/.signal` | +| `ReviewPR` | 13-step comprehensive PR review | `Generated Files/prReview/<pr_number>/.signal` | +| `FixPR` | Fix review comments, resolve threads | `Generated Files/prFix/<pr_number>/.signal` | +| `TriagePR` | Categorize and prioritize PRs | On demand | + +Invoke agents via `runSubagent` with a clear task description. Each agent is self-contained.
+ + +### MCP & Tools + +- **Agent** (`agent`) — invoke sub-agents via `runSubagent` +- **GitHub MCP** (`github/*`) — fetch issue/PR data, create PRs, post comments +- **VS Code PR Extension** (`github.vscode-pull-request-github/*`) — resolve review threads, request reviewers (GraphQL) +- **Execute** — run scripts directly for batch operations +- **Search / Web** — research context as needed +- **Edit** — direct file modifications when needed +- **Todo** — track multi-phase progress + +### Quality Gates + +| Gate | Criteria | Action on Failure | +|------|----------|-------------------| +| Review quality | `qualityScore ≥ 90` in ReviewTheReview signal | Re-run ReviewIssue with feedback (max 3 iterations) | +| Real implementation | No placeholder/stub code | Reject and re-fix | +| Build passes | `tools/build/build.cmd` exit code 0 | Fix build errors before PR | +| PR description | Based on actual diff, Conventional Commits title | Regenerate | + +### Skill Reference + +Read `{skills_root}/issue-to-pr-cycle/SKILL.md` for full orchestration documentation. Also see `{skills_root}/parallel-job-orchestrator/SKILL.md` for the execution engine. + +## Self-Review + +After each phase completes: + +1. **Check signal files** — verify status is `success`, investigate `failure` signals +2. **Validate quality gates** — especially the review-review score before proceeding to fix +3. **Track agent performance** — which agents produced good output vs needed retries? +4. **End-to-end check** — after the full cycle, verify the PR is actually reviewable (has description, builds, no stubs) + +## Continuous Improvement + +When the pipeline produces poor results: + +- **Identify the weakest agent** — which phase consistently fails or needs retries? 
+- **Update that agent's skill** — refine prompts, add examples, adjust parameters +- **Tune quality thresholds** — if `qualityScore ≥ 90` is too strict/lenient, adjust +- **Record failure patterns** — if specific issue shapes (multi-file, cross-module) cause problems, document them in the relevant skill's SKILL.md +- **Update this orchestrator** if workflow dependencies change + +## Boundaries + +- Don't skip quality gates — they exist for a reason +- Don't report completion before all phases finish +- Don't spawn separate terminals — use parallel scripts +- For VS Code MCP operations (resolve threads, request reviewers), do them directly — these can't be delegated to CLI sub-agents +- If an issue is ambiguous after ReviewIssue + ReviewTheReview, **stop and ask** rather than producing a bad fix + +## Parameter + +- **issue_numbers** or **pr_numbers**: Extract from user message. If missing, **ASK** the user which issues or PRs to process. diff --git a/.github/agents/PlanIssue.agent.md b/.github/agents/PlanIssue.agent.md index 0c9e61cb9bee..9bf1610edb4e 100644 --- a/.github/agents/PlanIssue.agent.md +++ b/.github/agents/PlanIssue.agent.md @@ -22,7 +22,7 @@ You are a **PLANNING AGENT** specialized in analyzing GitHub issues and producin ## Identity & Expertise - Expert at issue triage, priority scoring, and technical analysis -- Deep knowledge of PowerToys architecture and codebase patterns +- Deep knowledge of the repository's architecture and codebase patterns - Skilled at breaking down problems into actionable implementation steps - You research thoroughly before planning, gathering 80% confidence before drafting @@ -36,7 +36,9 @@ Above is the core interaction with the end user. If you cannot produce the files ## Core Directive -**Follow the template in `.github/prompts/review-issue.prompt.md` exactly.** Read it first, then apply every section as specified. 
+> **Skills & prompts root**: Look for prompts and skills in `.github/` (GitHub Copilot) or `.claude/` (Claude). Check which exists in the current repo and use that path throughout. + +**Follow the template in `{prompts_root}/review-issue.prompt.md` exactly.** (Where `{prompts_root}` is `.github/prompts/` or `.claude/prompts/` — whichever exists.) Read it first, then apply every section as specified. - Fetch issue details: reactions, comments, linked PRs, images, logs - Search related code and similar past fixes @@ -56,7 +58,7 @@ Plans describe what the USER or FixIssue agent will execute later. ## References -- [Review Issue Prompt](../.github/prompts/review-issue.prompt.md) — Template for plan structure +- `{prompts_root}/review-issue.prompt.md` — Template for plan structure - [Architecture Overview](../../doc/devdocs/core/architecture.md) — System design context - [AGENTS.md](../../AGENTS.md) — Full contributor guide diff --git a/.github/agents/ReviewIssue.agent.md b/.github/agents/ReviewIssue.agent.md new file mode 100644 index 000000000000..dca48261d576 --- /dev/null +++ b/.github/agents/ReviewIssue.agent.md @@ -0,0 +1,79 @@ +--- +description: 'Analyzes GitHub issues for feasibility, scoring, and implementation planning' +name: 'ReviewIssue' +tools: ['execute', 'read', 'edit', 'search', 'web', 'github/*', 'agent', 'github-artifacts/*', 'todo'] +argument-hint: 'GitHub issue number (e.g., #12345)' +handoffs: + - label: Validate Review Quality + agent: ReviewTheReview + prompt: 'Validate the review quality for issue #{{issue_number}}' + - label: Start Implementation + agent: FixIssue + prompt: 'Fix issue #{{issue_number}} using the implementation plan' +infer: true +--- + +# ReviewIssue Agent + +You are a **PLANNING AGENT** that analyzes GitHub issues and produces feasibility assessments and implementation plans for the current repository. 
+ +## Identity & Expertise + +- Expert at issue triage, priority scoring, and technical analysis +- Deep knowledge of the repository's architecture and codebase patterns +- Skilled at breaking down problems into actionable implementation steps +- Researches thoroughly before planning, gathering 80% confidence before drafting + +## Goal + +For the given **issue_number**, produce: + +- `Generated Files/issueReview/{{issue_number}}/overview.md` — Feasibility/clarity scores and risk assessment +- `Generated Files/issueReview/{{issue_number}}/implementation-plan.md` — Actionable implementation plan + +You are a PLANNING agent. You never write implementation code or edit source files. + +## Capabilities + +> **Skills root**: Skills live at `.github/skills/` (GitHub Copilot) or `.claude/skills/` (Claude). Check which exists in the current repo and use that path throughout. + +### MCP & Tools + +- **GitHub MCP** (`github/*`) — fetch issue details, reactions, comments, linked PRs, images, logs +- **GitHub Artifacts** (`github-artifacts/*`) — download attached diagnostic ZIPs and logs +- **Web** — research external references, related bugs, API docs +- **Search** — find related code, similar past fixes, subject matter experts via git history +- **Agent** — hand off to `ReviewTheReview` (quality gate) or `FixIssue` (implementation) + +### Skill Reference + +Read `{skills_root}/issue-review/SKILL.md` for full parameters, output format, and signal file schema. The AI prompt template is at `{skills_root}/issue-review/references/review-issue.prompt.md`. + +## Self-Review + +After producing outputs, validate your own work: + +1. **Read back** `overview.md` and `implementation-plan.md` — do scores have evidence? Are file paths real? +2. **Spot-check** that referenced files exist in the codebase (`search` tool) +3. **Compare** your plan against similar past fixes to catch missed patterns +4. 
**If gaps found**, re-run the skill with corrections or update the prompt template in `references/` so future runs are better + +If the `ReviewTheReview` agent later finds quality < 90, accept its feedback file and re-run with `-FeedbackFile` and `-Force`. + +## Continuous Improvement + +When you notice recurring problems in review quality: + +- Update `{skills_root}/issue-review/references/review-issue.prompt.md` to address the gap +- Update `{skills_root}/issue-review/SKILL.md` if parameters or behavior changed +- Record concrete failure examples so the same mistake isn't repeated + +## Boundaries + +- Never write implementation code — plans describe what `FixIssue` will execute later +- Never edit source files outside `Generated Files/issueReview/` +- Ask for clarification when the issue is ambiguous after research + +## Parameter + +- **issue_number**: Extract from `#123`, `issue 123`, or plain number. If missing, **ASK** the user. diff --git a/.github/agents/ReviewPR.agent.md b/.github/agents/ReviewPR.agent.md new file mode 100644 index 000000000000..0df6c2e39476 --- /dev/null +++ b/.github/agents/ReviewPR.agent.md @@ -0,0 +1,105 @@ +--- +description: 'Comprehensive pull request review with 13-step analysis covering functionality, security, performance, accessibility, and more' +name: 'ReviewPR' +tools: ['execute', 'read', 'edit', 'search', 'web', 'github/*', 'todo'] +argument-hint: 'PR number(s) to review (e.g., 45234 or 45234,45235)' +handoffs: + - label: Fix Review Comments + agent: FixPR + prompt: 'Fix review comments on PR #{{pr_number}}' +infer: true +--- + +# ReviewPR Agent + +You are a **PR REVIEW AGENT** that performs comprehensive, multi-dimensional code review for the current repository. 
## Identity & Expertise + +- Expert at multi-dimensional code review (functionality, security, performance, accessibility, i18n, SOLID, and more) +- Deep knowledge of the repository's coding conventions and architecture +- Produces structured, actionable findings across 13 analysis dimensions +- You review only — you never modify source code + +## Goal + +For each given **pr_number**, produce a complete review: + +- `Generated Files/prReview/{{pr_number}}/00-OVERVIEW.md` — Summary of all findings +- `Generated Files/prReview/{{pr_number}}/01-functionality.md` through `13-copilot-guidance.md` — Per-dimension analysis +- `Generated Files/prReview/{{pr_number}}/.signal` — Completion signal + +You are a REVIEW agent. You never edit source code in the repository. + +## Capabilities + +> **Skills root**: Skills live at `.github/skills/` (GitHub Copilot) or `.claude/skills/` (Claude). Check which exists in the current repo and use that path throughout. + +### Issue Review Context + +When a PR is linked to an issue, check for prior analysis before reviewing: + +- `Generated Files/issueReview/<issue_number>/overview.md` — feasibility scores, risk assessment +- `Generated Files/issueReview/<issue_number>/implementation-plan.md` — planned approach +- `Generated Files/issueReviewReview/<issue_number>/reviewTheReview.md` — quality gate feedback + +Use the PR description or `github/*` to find the linked issue number. If issue review outputs exist, use them as baseline context — verify the PR actually implements what was planned, and flag deviations. + +### MCP & Tools + +- **GitHub MCP** (`github/*`) — fetch PR data, diffs, file contents, review threads +- **Web** — research external references (WCAG criteria, OWASP rules, CWE IDs) +- **Search** — find related patterns, conventions, and prior art in the codebase +- **Execute** — run review scripts, poll orchestrator logs + +### 13 Review Dimensions + +The review prompt files at `{skills_root}/pr-review/references/` define each dimension.
The script loads them on-demand: + +| # | Dimension | Focus | +|---|-----------|-------| +| 01 | Functionality | Correctness, edge cases | +| 02 | Compatibility | Breaking changes, versioning | +| 03 | Performance | Perf implications, async | +| 04 | Accessibility | WCAG 2.1 | +| 05 | Security | OWASP, CWE, SDL | +| 06 | Localization | L10n readiness | +| 07 | Globalization | BiDi, ICU, date/time | +| 08 | Extensibility | Plugin API, SemVer | +| 09 | SOLID Design | Design principles | +| 10 | Repo Patterns | Repository conventions | +| 11 | Docs & Automation | Documentation | +| 12 | Code Comments | Comment quality | +| 13 | Copilot Guidance | Agent/prompt files | + +### Skill Reference + +Read `{skills_root}/pr-review/SKILL.md` for full documentation. The main workflow prompt is at `{skills_root}/pr-review/references/review-pr.prompt.md`. + +## Self-Review + +After a review run completes: + +1. **Verify outputs exist** — check that `00-OVERVIEW.md` and the expected step files were produced for each PR +2. **Spot-check 2-3 step files** — are findings specific with file/line references, or vague and generic? +3. **Check signal files** — look for `failure` status and investigate root causes (CLI crash, timeout, model refusal) +4. **Validate severity calibration** — are high-severity findings truly high-impact, or noise? 
+ +## Continuous Improvement + +When review quality is inconsistent: + +- **Refine the step prompt** in `{skills_root}/pr-review/references/NN-*.prompt.md` that produced weak output +- **Update SKILL.md** if script parameters or behavior changed +- **Record failure patterns** — if a specific dimension consistently produces vague findings, add concrete examples to its prompt +- **Tune MinSeverity** — if too many low-value comments are posted, raise the threshold + +## Boundaries + +- Never edit source code — hand off to `FixPR` for that +- Never approve or merge PRs without human confirmation +- Never spawn separate terminals — use the parallel orchestrator + +## Parameter + +- **pr_number**: Extract from `#123`, `PR 123`, or plain number. If missing, **ASK** the user. diff --git a/.github/agents/ReviewTheReview.agent.md b/.github/agents/ReviewTheReview.agent.md new file mode 100644 index 000000000000..b63cc9e9b6ff --- /dev/null +++ b/.github/agents/ReviewTheReview.agent.md @@ -0,0 +1,84 @@ +--- +description: 'Meta-review of issue-review outputs to validate scoring accuracy and implementation plan quality' +name: 'ReviewTheReview' +tools: ['execute', 'read', 'edit', 'search', 'github/*', 'todo'] +argument-hint: 'GitHub issue number whose review to validate (e.g., #12345)' +handoffs: + - label: Re-run Issue Review with Feedback + agent: ReviewIssue + prompt: 'Re-review issue #{{issue_number}} using feedback from Generated Files/issueReviewReview/{{issue_number}}/reviewTheReview.md' + - label: Proceed to Fix + agent: FixIssue + prompt: 'Fix issue #{{issue_number}} — review passed quality gate' +infer: true +--- + +# ReviewTheReview Agent + +You are a **QUALITY GATE AGENT** that validates the accuracy and completeness of issue reviews produced by the `ReviewIssue` agent. 
+ +## Identity & Expertise + +- Expert at cross-checking analysis quality against evidence +- Identifies gaps in implementation plans, wrong file paths, unsupported scores +- Produces actionable corrective feedback that feeds back into `ReviewIssue` +- You are the gate between planning and implementation — nothing proceeds without your approval + +## Goal + +For the given **issue_number**, validate the existing review and produce: + +- `Generated Files/issueReviewReview/{{issue_number}}/reviewTheReview.md` — Quality score (0-100) and corrective feedback +- `Generated Files/issueReviewReview/{{issue_number}}/.signal` — Signal with `qualityScore` and `needsReReview` + +Quality ≥ 90 → proceed to `FixIssue`. Quality < 90 → hand back to `ReviewIssue` with feedback. + +## Capabilities + +> **Skills root**: Skills live at `.github/skills/` (GitHub Copilot) or `.claude/skills/` (Claude). Check which exists in the current repo and use that path throughout. + +### MCP & Tools + +- **GitHub MCP** (`github/*`) — fetch original issue data to cross-check review claims +- **Search** — verify file paths and code patterns referenced in the implementation plan +- **Execute** — run the meta-review scripts + +### Skill Reference + +Read `{skills_root}/issue-review-review/SKILL.md` for parameters and signal schema. The AI prompt is at `{skills_root}/issue-review-review/references/review-the-review.prompt.md`. + +## Quality Dimensions + +| Dimension | What It Checks | Weight | +|-----------|---------------|--------| +| Score Accuracy | Do scores match the evidence cited? | 30% | +| Implementation Correctness | Are the right files/patterns identified? | 25% | +| Risk Assessment | Are risks properly identified and mitigated? | 15% | +| Completeness | All aspects covered (perf, security, a11y, i18n)? | 15% | +| Actionability | Can an AI agent execute the plan as written? | 15% | + +## Self-Review + +After producing the meta-review: + +1. 
**Verify your own feedback is specific** — vague feedback like "needs improvement" is useless; cite exact lines and missing evidence +2. **Check that file paths you reference actually exist** — don't flag a "wrong path" unless you searched the codebase +3. **Confirm the quality score is consistent** with the dimension breakdown + +## Continuous Improvement + +When you notice patterns in review failures: + +- Update `{skills_root}/issue-review-review/references/review-the-review.prompt.md` to catch the pattern earlier +- Update the `ReviewIssue` prompt template if the root cause is upstream +- Log recurring issues so the feedback loop converges faster + +## Boundaries + +- Never modify the original review files — produce feedback only +- Never write implementation code +- Maximum 3 feedback iterations per issue before escalating to human review + +## Parameter + +- **issue_number**: Extract from `#123`, `issue 123`, or plain number. If missing, **ASK** the user. diff --git a/.github/agents/TriagePR.agent.md b/.github/agents/TriagePR.agent.md new file mode 100644 index 000000000000..4615c3812dab --- /dev/null +++ b/.github/agents/TriagePR.agent.md @@ -0,0 +1,100 @@ +--- +description: 'Triage, categorize, and prioritize open pull requests with AI-powered analysis and reporting' +name: 'TriagePR' +tools: ['execute', 'read', 'edit', 'search', 'web', 'github/*', 'todo'] +argument-hint: 'PR numbers to triage (e.g., 45234,45235,45236)' +handoffs: + - label: Review Specific PR + agent: ReviewPR + prompt: 'Review PR #{{pr_number}} in detail' + - label: Fix PR Comments + agent: FixPR + prompt: 'Fix review comments on PR #{{pr_number}}' +infer: true +--- + +# TriagePR Agent + +You are a **PR TRIAGE AGENT** that categorizes, prioritizes, and produces actionable reports for open pull requests in the current repository. 
## Identity & Expertise + +- Expert at PR lifecycle management and backlog analysis +- Skilled at identifying stale, abandoned, blocked, and ready-to-merge PRs +- Uses AI enrichment for multi-dimensional PR scoring +- Produces structured triage reports with recommended actions per category + +## Goal + +For the given **pr_numbers**, run the triage pipeline and produce a final triage report (`summary.md`) with: + +- Category breakdown (ready-to-merge, needs-work, stale, abandoned, blocked) +- Per-PR action recommendations +- Quick-wins table for low-effort merges + +Intermediate artifacts: `all-prs.json`, per-PR review outputs, `ai-enrichment.json`, `categorized-prs.json`. + +## Capabilities + +> **Skills root**: Skills live at `.github/skills/` (GitHub Copilot) or `.claude/skills/` (Claude). Check which exists in the current repo and use that path throughout. + +### Issue Review Context + +When triaging PRs linked to issues, check for prior analysis: + +- `Generated Files/issueReview/<issue_number>/overview.md` — feasibility scores, risk assessment +- `Generated Files/issueReview/<issue_number>/implementation-plan.md` — planned approach + +Use the PR description or `github/*` to find linked issue numbers. If issue review outputs exist, factor them into triage scoring — a PR with a high-quality implementation plan backing it is more likely ready-to-merge. + +### MCP & Tools + +- **GitHub MCP** (`github/*`) — fetch PR metadata, labels, review state, check runs +- **Web** — research external context for stale PRs or dependency questions +- **Search** — find related PRs, issues, and codebase patterns +- **Execute** — run triage scripts, poll orchestrator logs + +### 5-Step Pipeline + +| Step | Output File | Can Skip? | +|------|-------------|-----------| +| 1. Collect | `all-prs.json` | No | +| 2. Review | `prReview/<pr_number>/` | Yes (`-SkipReview`) | +| 3. AI Enrich | `ai-enrichment.json` | Yes (`-SkipAiEnrichment`) | +| 4. Categorize | `categorized-prs.json` | No | +| 5.
Report | `summary.md` | No | + +Each step checks for existing output and skips if present. Use `-Force` to redo. + +### Skill Reference + +Read `{skills_root}/pr-triage/SKILL.md` for full documentation. Step-specific references are at `{skills_root}/pr-triage/references/`. + +## Self-Review + +After triage completes: + +1. **Verify all 5 steps finished** — don't report success if only steps 1-2 completed (the pipeline has 5 steps) +2. **Spot-check AI enrichment** — open `ai-enrichment.json`, verify scores are calibrated (not all max or all zero) +3. **Validate categorization** — do the category assignments make sense for known PRs? +4. **Read `summary.md`** — is the report actionable with clear next-steps per PR? + +## Continuous Improvement + +When triage quality is inconsistent: + +- **Tune enrichment prompts** in `{skills_root}/pr-triage/references/` if scoring dimensions produce noisy results +- **Update categorization rules** in `Invoke-PrCategorization.ps1` if PRs are misclassified +- **Update SKILL.md** if script parameters, steps, or outputs changed +- **Record failure patterns** — if AI enrichment fails for specific PR shapes (huge diffs, draft PRs), add guards + +## Boundaries + +- Never modify source code in PRs — hand off to `ReviewPR` or `FixPR` +- Never close or merge PRs without human confirmation +- For large batches (20+ PRs), launch as a detached process to avoid terminal idle kill +- Don't report completion after Step 2 — wait for all 5 steps + +## Parameter + +- **pr_numbers**: Extract from PR numbers in user message. If missing, **ASK** the user. 
diff --git a/.github/review-tools/Start-PrReviewBatch.ps1 b/.github/review-tools/Start-PrReviewBatch.ps1 new file mode 100644 index 000000000000..c790df4361ea --- /dev/null +++ b/.github/review-tools/Start-PrReviewBatch.ps1 @@ -0,0 +1,156 @@ +param( + [Parameter(Mandatory = $true)] + [string] $CategorizedPrsPath, + + [Parameter(Mandatory = $true)] + [string] $ReviewRoot, + + [int] $MaxConcurrent = 6, + [int] $IdleMinutes = 5, + [int] $MaxRetries = 2, + [int] $PollSeconds = 20 +) + +$ErrorActionPreference = "Stop" + +function Get-ReviewedPrNumbers { + param([string] $Root) + + @(Get-ChildItem $Root -Directory -ErrorAction SilentlyContinue | + Where-Object { Test-Path (Join-Path $_.FullName "00-OVERVIEW.md") } | + ForEach-Object { [int]$_.Name }) +} + +function Get-LatestWriteTime { + param([string] $Folder) + + if (-not (Test-Path $Folder)) { + return $null + } + + $files = Get-ChildItem $Folder -File -ErrorAction SilentlyContinue + if (-not $files) { + return $null + } + + ($files | Sort-Object LastWriteTime -Descending | Select-Object -First 1).LastWriteTime +} + +function Start-PrReviewJob { + param( + [int] $PrNumber, + [string] $WorkingDir + ) + + Start-Job -ScriptBlock { + param($wd, $n) + Set-Location $wd + & copilot -p "Review PR #$n using the review-pr.prompt.md workflow. 
Write all output files to 'Generated Files/prReview/$n/'" --yolo -s 2>&1 + } -ArgumentList $WorkingDir, $PrNumber +} + +if (-not (Test-Path $CategorizedPrsPath)) { + throw "Categorized PRs file not found: $CategorizedPrsPath" +} + +if (-not (Test-Path $ReviewRoot)) { + New-Item -Path $ReviewRoot -ItemType Directory -Force | Out-Null +} + +$data = Get-Content $CategorizedPrsPath -Raw | ConvertFrom-Json +$allPrs = @($data.Prs | ForEach-Object { [int]$_.Number }) +$workingDir = (Get-Location).Path + +$running = @{} +$retries = @{} +$failed = New-Object System.Collections.Generic.HashSet[int] + +Write-Host "Starting review batch: $($allPrs.Count) PRs" -ForegroundColor Cyan + +while ($true) { + $reviewed = Get-ReviewedPrNumbers -Root $ReviewRoot + $remaining = @($allPrs | Where-Object { $_ -notin $reviewed -and -not $failed.Contains($_) }) + + if ($remaining.Count -eq 0 -and $running.Count -eq 0) { + Write-Host "ALL DONE!" -ForegroundColor Green + break + } + + foreach ($entry in @($running.GetEnumerator())) { + $pr = $entry.Key + $job = $entry.Value + $folder = Join-Path $ReviewRoot $pr + $latestWrite = Get-LatestWriteTime -Folder $folder + $idleFor = if ($latestWrite) { (New-TimeSpan -Start $latestWrite -End (Get-Date)).TotalMinutes } else { $null } + + $isDone = $job.State -in @("Completed", "Failed", "Stopped") + $hasOverview = Test-Path (Join-Path $folder "00-OVERVIEW.md") + $isIdleTooLong = $idleFor -ne $null -and $idleFor -ge $IdleMinutes + + if ($isDone -and -not $hasOverview) { + $retries[$pr] = ($retries[$pr] + 1) + if ($retries[$pr] -le $MaxRetries) { + Write-Host "PR #$pr finished without overview. Retrying ($($retries[$pr])/$MaxRetries)..." -ForegroundColor Yellow + Remove-Job $job -Force -ErrorAction SilentlyContinue + $running.Remove($pr) + } else { + Write-Host "PR #$pr failed after $MaxRetries retries." 
-ForegroundColor Red + $null = $failed.Add($pr) + New-Item -Path (Join-Path $folder "__error.flag") -ItemType File -Force | Out-Null + Remove-Job $job -Force -ErrorAction SilentlyContinue + $running.Remove($pr) + } + } elseif (-not $hasOverview -and $isIdleTooLong) { + $retries[$pr] = ($retries[$pr] + 1) + if ($retries[$pr] -le $MaxRetries) { + Write-Host "PR #$pr idle for $([int]$idleFor)m. Restarting ($($retries[$pr])/$MaxRetries)..." -ForegroundColor Yellow + Stop-Job $job -ErrorAction SilentlyContinue + Remove-Job $job -Force -ErrorAction SilentlyContinue + $running.Remove($pr) + } else { + Write-Host "PR #$pr idle repeatedly; giving up after $MaxRetries retries." -ForegroundColor Red + $null = $failed.Add($pr) + New-Item -Path (Join-Path $folder "__error.flag") -ItemType File -Force | Out-Null + Stop-Job $job -ErrorAction SilentlyContinue + Remove-Job $job -Force -ErrorAction SilentlyContinue + $running.Remove($pr) + } + } elseif ($isDone -and $hasOverview) { + Remove-Job $job -Force -ErrorAction SilentlyContinue + $running.Remove($pr) + } + } + + $reviewed = Get-ReviewedPrNumbers -Root $ReviewRoot + $remaining = @($allPrs | Where-Object { $_ -notin $reviewed -and -not $failed.Contains($_) }) + + while ($running.Count -lt $MaxConcurrent -and $remaining.Count -gt 0) { + $next = $remaining | Select-Object -First 1 + $remaining = $remaining | Select-Object -Skip 1 + + if (-not $retries.ContainsKey($next)) { + $retries[$next] = 0 + } + + if ($retries[$next] -gt $MaxRetries) { + continue + } + + $job = Start-PrReviewJob -PrNumber $next -WorkingDir $workingDir + $running[$next] = $job + Write-Host "Started PR #$next (running: $($running.Count))" -ForegroundColor Cyan + } + + $reviewedCount = $reviewed.Count + $pendingCount = $remaining.Count + Write-Host "Progress: $reviewedCount/$($allPrs.Count) complete | Running: $($running.Count) | Pending: $pendingCount | Failed: $($failed.Count)" -ForegroundColor Gray + + if ($remaining.Count -eq 0 -and $running.Count -eq 0) { 
+ if ($failed.Count -gt 0) { + Write-Host "Completed with failures: $($failed.Count)." -ForegroundColor Yellow + } + break + } + + Start-Sleep -Seconds $PollSeconds +} \ No newline at end of file diff --git a/.github/skills/continuous-issue-triage/LICENSE.txt b/.github/skills/continuous-issue-triage/LICENSE.txt new file mode 100644 index 000000000000..c9766a251fed --- /dev/null +++ b/.github/skills/continuous-issue-triage/LICENSE.txt @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to the Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright 2026 Microsoft Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/.github/skills/continuous-issue-triage/SKILL.md b/.github/skills/continuous-issue-triage/SKILL.md new file mode 100644 index 000000000000..a756b0f3293e --- /dev/null +++ b/.github/skills/continuous-issue-triage/SKILL.md @@ -0,0 +1,346 @@ +--- +name: continuous-issue-triage +description: Automated issue triage assistant for periodic (daily/weekly) issue queue management. Use when asked to triage issues, review issue backlog, find trending issues, identify stale issues needing response, categorize unlabeled issues, find issues ready for fix, draft reply messages, check for issues needing clarification, find closeable issues after PR merge, or run periodic issue health checks. Supports both open and closed issues with activity tracking between runs. +license: Complete terms in LICENSE.txt +--- + +# Continuous Issue Triage Skill + +Automated periodic triage of GitHub issues to keep the issue queue healthy. Designed to run daily, twice-weekly, or weekly, tracking activity between runs and categorizing issues by actionable priority. + +## Output Directory + +All artifacts are placed under `Generated Files/triage-issues/` at the repository root (gitignored). 
+ +``` +Generated Files/triage-issues/ +├── triage-state.json # Persistent state between runs +├── current-run/ +│ ├── summary.md # Executive summary for this run +│ ├── trending.md # Trending issues report +│ ├── needs-label.md # Issues missing area labels +│ ├── ready-for-fix.md # Issues confident for fix +│ ├── needs-info.md # Issues needing author feedback +│ ├── needs-clarification.md # Clarification requests (not bugs) +│ ├── closeable.md # Issues ready to close +│ └── draft-replies/ # Pre-drafted reply messages +│ └── issue-XXXXX.md +├── history/ +│ └── YYYY-MM-DD/ # Historical run archives +└── issue-cache/ # Cached issue reviews (reuse review-issue) + └── XXXXX/ + ├── overview.md + └── implementation-plan.md +``` + +## When to Use This Skill + +- Run periodic triage (daily, twice-weekly, weekly) +- Find trending issues with high activity +- Identify unlabeled issues needing categorization +- Find issues ready for implementation +- Draft replies for issues needing clarification +- Identify closeable issues after PR merge/release +- Track follow-up actions between triage sessions +- Review closed issues with new comments + +## Prerequisites + +- GitHub CLI (`gh`) installed and authenticated +- MCP Server: github-mcp-server (optional, for images/attachments) +- Access to `.github/prompts/review-issue.prompt.md` for deep analysis + +## Workflow Overview + +``` +┌─────────────────────────────────┐ +│ 1. Load Previous State │ +│ (triage-state.json) │ +└─────────────────────────────────┘ + ↓ +┌─────────────────────────────────┐ +│ 2. Collect Active Issues │ +│ - Recently updated open │ +│ - Closed with new comments │ +│ - Previously flagged │ +└─────────────────────────────────┘ + ↓ +┌─────────────────────────────────┐ +│ 3. Categorize Issues │ +│ (Apply category rules) │ +└─────────────────────────────────┘ + ↓ +┌─────────────────────────────────┐ +│ 4. 
Deep Analysis (selective) │ +│ (Use review-issue prompt) │ +└─────────────────────────────────┘ + ↓ +┌─────────────────────────────────┐ +│ 5. Generate Reports & Drafts │ +└─────────────────────────────────┘ + ↓ +┌─────────────────────────────────┐ +│ 6. Save State for Next Run │ +└─────────────────────────────────┘ +``` + +## Issue Categories + +Issues are categorized into actionable buckets with prioritization scores: + +| Category | Emoji | Criteria | Human Action | +|----------|-------|----------|--------------| +| **Trending** | 🔥 | 5+ new comments since last run | Review conversation, respond | +| **Needs-Label** | 🏷️ | Missing `Product-*` or `Area-*` label | Apply suggested label | +| **Ready-for-Fix** | ✅ | High clarity, feasible, validated | Assign or implement | +| **Needs-Info** | ❓ | Missing repro, impact, or expected result | Post drafted questions | +| **Needs-Clarification** | 💬 | Question/discussion, not a bug | Post explanation reply | +| **Closeable** | ✔️ | Fixed by PR, released, or resolved | Close with message | +| **Stale-Waiting** | ⏳ | Waiting on author >14 days | Ping or close | +| **Duplicate-Candidate** | 🔁 | Similar to existing issue | Link and close | + +## Detailed Workflow Docs + +Read steps progressively—only load what you need: + +- [Step 1: State Management](./references/step1-state-management.md) +- [Step 2: Issue Collection](./references/step2-collection.md) +- [Step 3: Categorization Rules](./references/step3-categorization.md) +- [Step 4: Deep Analysis](./references/step4-deep-analysis.md) +- [Step 5: Report Generation](./references/step5-reports.md) +- [Step 6: Reply Templates](./references/step6-reply-templates.md) + +## Available Scripts + +| Script | Purpose | +|--------|---------| +| [run-triage.ps1](./scripts/run-triage.ps1) | **Main orchestrator** - runs full triage with parallel Copilot CLI | +| [collect-active-issues.ps1](./scripts/collect-active-issues.ps1) | Fetch issues updated since last run (standalone) | +| 
[categorize-issues.ps1](./scripts/categorize-issues.ps1) | Apply categorization rules (standalone) | +| [generate-summary.ps1](./scripts/generate-summary.ps1) | Create executive summary (standalone) | + +## Quick Start + +1. **First Run**: Creates initial state, analyzes recent activity +2. **Subsequent Runs**: Compares against previous state, highlights changes (delta) + +### Running the Triage + +**PowerShell 7 Required** - Uses parallel processing for efficiency. + +```powershell +# Basic run (weekly, 5 parallel, 5min timeout, 3 retries) +.\.github\skills\continuous-issue-triage\scripts\run-triage.ps1 + +# Daily run with more parallelism +.\.github\skills\continuous-issue-triage\scripts\run-triage.ps1 -RunType daily -MaxParallel 10 + +# With specific model +.\.github\skills\continuous-issue-triage\scripts\run-triage.ps1 -Model "claude-sonnet-4" + +# Force re-analyze all (ignore cache) +.\.github\skills\continuous-issue-triage\scripts\run-triage.ps1 -Force + +# With MCP config +.\.github\skills\continuous-issue-triage\scripts\run-triage.ps1 -McpConfig ".\.github\mcp.json" +``` + +### Parameters + +| Parameter | Default | Description | +|-----------|---------|-------------| +| `-RunType` | weekly | daily, twice-weekly, weekly | +| `-MaxParallel` | 5 | Concurrent Copilot CLI invocations | +| `-TimeoutMinutes` | 5 | Timeout per issue analysis | +| `-MaxRetries` | 3 | Retries on timeout/failure | +| `-Model` | (default) | Copilot model to use | +| `-McpConfig` | (none) | Path to MCP config file | +| `-LookbackDays` | 7 | Days to look back on first run | +| `-Force` | false | Re-analyze all, ignore cache | + +### Example Invocation (via Copilot Chat) + +``` +"Run issue triage" or "Triage issues for this week" +``` + +The skill will: +1. Check for existing `triage-state.json` +2. Collect issues updated since last run (or last 7 days for first run) +3. **Run parallel Copilot CLI analysis** with timeout/retry handling +4. 
Categorize and prioritize (using cached results where valid) +5. Generate actionable reports with draft replies +6. Save state for next run (delta tracking) + +## Parallel Execution Model + +The skill uses PowerShell 7's `ForEach-Object -Parallel` to analyze issues concurrently: + +``` +┌─────────────────────────────────────────────────────────────┐ +│ run-triage.ps1 │ +├─────────────────────────────────────────────────────────────┤ +│ Issue #123 ──┐ │ +│ Issue #124 ──┼── ForEach-Object -Parallel ─┬── Result #123 │ +│ Issue #125 ──┤ (ThrottleLimit: 5) ├── Result #124 │ +│ Issue #126 ──┤ ├── Result #125 │ +│ Issue #127 ──┘ └── Result #126 │ +│ ... │ +│ Each issue: │ +│ ┌─────────────────────────────────────────────────────┐ │ +│ │ copilot -p "Analyze #N..." --yolo │ │ +│ │ ├── Timeout: 5 minutes │ │ +│ │ ├── Retry: up to 3 times │ │ +│ │ └── Output: JSON analysis result │ │ +│ └─────────────────────────────────────────────────────┘ │ +└─────────────────────────────────────────────────────────────┘ +``` + +### Timeout & Retry Handling + +- Each Copilot CLI invocation has a **5 minute timeout** (configurable) +- On timeout: job is killed, waits 10 seconds, retries +- **3 retries maximum** before marking as failed +- Failed analyses are logged and reported separately + +## Delta Tracking + +The skill tracks state between runs to report **what changed**: + +```json +{ + "lastRun": "2026-02-05T10:30:00Z", + "issueSnapshots": { + "12345": { + "lastSeenAt": "2026-02-05T...", + "category": "trending", + "priorityScore": 82 + } + }, + "analysisResults": { + "12345": { + "success": true, + "analyzedAt": "2026-02-05T...", + "data": { ... 
} + } + } +} +``` + +**Delta Report Shows**: +- Issues with **new activity** since last run +- **Newly analyzed** vs **cached** results +- Category **changes** (e.g., was needs-info, now ready-for-fix) +- **Analysis failures** that need retry + +## Output Format + +### Executive Summary (`summary.md`) + +```markdown +# Issue Triage Summary - 2026-02-05 + +**Run Type**: Weekly | **Issues Analyzed**: 47 | **Since**: 2026-01-29 + +## Action Required by Category + +| Category | Count | Top Priority | +|----------|-------|--------------| +| 🔥 Trending | 3 | #12345 (12 new comments) | +| 🏷️ Needs-Label | 5 | #12346 (suggest: FancyZones) | +| ✅ Ready-for-Fix | 2 | #12347 (score: 85/100) | +| ❓ Needs-Info | 8 | #12348 (missing repro) | +| 💬 Needs-Clarification | 4 | #12349 (question about feature) | +| ✔️ Closeable | 6 | #12350 (fixed in v0.99) | + +## Quick Actions + +- [ ] Review #12345 - trending with negative sentiment +- [ ] Label #12346 as Product-FancyZones +- [ ] Assign #12347 to @contributor +- [ ] Post clarification on #12348 (draft ready) +- [ ] Close #12350 with release note link +``` + +## State Schema + +See [State Management](./references/step1-state-management.md) for full schema. 
+ +```json +{ + "version": "1.0", + "lastRun": "2026-02-05T10:30:00Z", + "lastRunType": "weekly", + "issueSnapshots": { + "12345": { + "number": 12345, + "title": "FancyZones: Window snapping issue", + "state": "open", + "lastSeenAt": "2026-02-05T...", + "category": "trending", + "priorityScore": 82 + } + }, + "analysisResults": { + "12345": { + "success": true, + "analyzedAt": "2026-02-05T10:30:00Z", + "data": { + "issueNumber": 12345, + "category": "trending", + "categoryReason": "8 new comments, heated discussion", + "priorityScore": 82, + "suggestedAction": "Review conversation urgently", + "draftReply": "...", + "clarityScore": 75, + "feasibilityScore": 80 + } + } + }, + "statistics": { + "totalRunCount": 12, + "issuesAnalyzed": 234 + } +} +``` + +## Cache Invalidation Rules + +Analysis results are **cached** and reused when: +- Issue has **no new activity** since last analysis +- Analysis is **less than 7 days old** +- `-Force` flag is **not** specified + +Re-analysis triggers: +- New comments on the issue +- Issue state changed +- Cache older than 7 days +- Explicit `-Force` flag + +## Integration with review-issue Prompt + +For issues in **Ready-for-Fix** or complex **Needs-Info** categories, this skill automatically invokes the [review-issue prompt](../../prompts/review-issue.prompt.md) to generate: +- Detailed `overview.md` with scoring +- `implementation-plan.md` for ready issues + +Results are cached in `issue-cache/XXXXX/` and reused across runs. 
+ + ## Troubleshooting + + | Issue | Solution | +|-------|----------| +| No `triage-state.json` | First run—will create initial state | +| PowerShell version error | Requires PowerShell 7+ for `-Parallel` | +| Copilot CLI not found | Install standalone Copilot CLI: `npm install -g @github/copilot` | +| Too many timeouts | Increase `-TimeoutMinutes` or reduce `-MaxParallel` | +| High failure rate | Check `issue-cache/*/error.log` for details | +| Stale cache | Use `-Force` to re-analyze all issues | +| gh rate limit | Wait or reduce `-MaxParallel` | +| Empty analysis results | Check Copilot CLI auth: `gh auth status` | + +## Conventions + +- **Preserve history**: Archive each run to `history/YYYY-MM-DD/` +- **Draft replies**: Always human-review before posting +- **Label suggestions**: Confidence threshold 70% for auto-suggest +- **Closed issues**: Track for 30 days after close for late comments diff --git a/.github/skills/continuous-issue-triage/references/step1-state-management.md b/.github/skills/continuous-issue-triage/references/step1-state-management.md new file mode 100644 index 000000000000..868a274bc3f8 --- /dev/null +++ b/.github/skills/continuous-issue-triage/references/step1-state-management.md @@ -0,0 +1,223 @@ +# Step 1: State Management + +The triage skill maintains persistent state between runs to track issue activity and pending actions.
+ +## State File Location + +``` +Generated Files/triage-issues/triage-state.json +``` + +## Initial State Creation + +On first run (no existing state file), create initial state: + +```powershell +# Check if state exists +$statePath = "Generated Files/triage-issues/triage-state.json" +if (-not (Test-Path $statePath)) { + # First run - create initial state + $initialState = @{ + version = "1.0" + lastRun = $null + lastRunType = $null + issueSnapshots = @{} + pendingFollowUps = @() + closedWithActivity = @() + configuration = @{ + trendingThreshold = 5 + staleWaitingDays = 14 + closedTrackingDays = 30 + labelConfidenceThreshold = 70 + } + } + New-Item -ItemType Directory -Force -Path (Split-Path $statePath) + $initialState | ConvertTo-Json -Depth 10 | Set-Content $statePath +} +``` + +## Full State Schema + +```json +{ + "version": "1.0", + "lastRun": "2026-02-05T10:30:00Z", + "lastRunType": "weekly", + "issueSnapshots": { + "12345": { + "number": 12345, + "title": "FancyZones: Window snapping not working", + "state": "open", + "labels": ["Product-FancyZones", "Issue-Bug"], + "commentCount": 15, + "lastCommentAt": "2026-02-04T15:30:00Z", + "lastCommentAuthor": "user123", + "reactions": { + "thumbsUp": 10, + "thumbsDown": 0, + "heart": 2 + }, + "category": "trending", + "categoryReason": "12 new comments since last run", + "priorityScore": 75, + "pendingAction": "review", + "actionTaken": false, + "actionTakenAt": null, + "draftReplyPath": null, + "linkedPRs": [], + "firstSeenAt": "2026-01-15T...", + "lastAnalyzedAt": "2026-02-01T..." 
+ } + }, + "pendingFollowUps": [ + { + "issueNumber": 12346, + "action": "post-clarification", + "scheduledFor": "2026-02-07T...", + "draftPath": "draft-replies/issue-12346.md", + "status": "pending" + } + ], + "closedWithActivity": [ + { + "issueNumber": 12350, + "closedAt": "2026-01-20T...", + "lastCheckedAt": "2026-02-05T...", + "newCommentsSinceClosed": 2, + "needsReview": true + } + ], + "configuration": { + "trendingThreshold": 5, + "staleWaitingDays": 14, + "closedTrackingDays": 30, + "labelConfidenceThreshold": 70 + }, + "statistics": { + "totalRunCount": 12, + "issuesTriaged": 234, + "repliesPosted": 45, + "issuesClosed": 89 + } +} +``` + +## Loading State + +```powershell +function Load-TriageState { + param([string]$StatePath = "Generated Files/triage-issues/triage-state.json") + + if (Test-Path $StatePath) { + $state = Get-Content $StatePath | ConvertFrom-Json -AsHashtable + Write-Host "Loaded state from $($state.lastRun)" + return $state + } + + Write-Host "No previous state found - initializing fresh run" + return $null +} +``` + +## Saving State + +After each run, update and save the state: + +```powershell +function Save-TriageState { + param( + [hashtable]$State, + [string]$StatePath = "Generated Files/triage-issues/triage-state.json", + [switch]$Archive + ) + + $State.lastRun = (Get-Date).ToUniversalTime().ToString("o") + + # Archive previous run if requested + if ($Archive -and (Test-Path $StatePath)) { + $archiveDate = (Get-Date).ToString("yyyy-MM-dd") + $archivePath = "Generated Files/triage-issues/history/$archiveDate" + New-Item -ItemType Directory -Force -Path $archivePath + Copy-Item $StatePath "$archivePath/triage-state.json" + + # Also archive current-run folder + if (Test-Path "Generated Files/triage-issues/current-run") { + Copy-Item -Recurse "Generated Files/triage-issues/current-run" $archivePath + } + } + + $State | ConvertTo-Json -Depth 10 | Set-Content $StatePath + Write-Host "State saved at $($State.lastRun)" +} +``` + +## State 
Transitions + +### Issue Snapshot Lifecycle + +``` +NEW ISSUE DETECTED + ↓ +┌──────────────────┐ +│ issueSnapshots │ ← Add with initial data +│ category: null │ +└──────────────────┘ + ↓ +CATEGORIZATION PASS + ↓ +┌──────────────────┐ +│ category: set │ ← trending/needs-label/etc. +│ priorityScore │ +│ pendingAction │ +└──────────────────┘ + ↓ +HUMAN TAKES ACTION (external) + ↓ +┌──────────────────┐ +│ actionTaken: true│ ← Mark as handled +│ actionTakenAt │ +└──────────────────┘ + ↓ +NEXT RUN: RE-EVALUATE + ↓ +┌──────────────────┐ +│ category: update │ ← May change category +│ reset action? │ if new activity +└──────────────────┘ +``` + +### Detecting Changes Between Runs + +```powershell +function Get-IssueChanges { + param( + [hashtable]$PreviousSnapshot, + [hashtable]$CurrentData + ) + + $changes = @{ + newComments = $CurrentData.commentCount - $PreviousSnapshot.commentCount + stateChanged = $CurrentData.state -ne $PreviousSnapshot.state + labelsChanged = (Compare-Object $PreviousSnapshot.labels $CurrentData.labels).Count -gt 0 + reactionsChanged = $CurrentData.reactions.thumbsUp -ne $PreviousSnapshot.reactions.thumbsUp + } + + return $changes +} +``` + +## Configuration Options + +| Setting | Default | Description | +|---------|---------|-------------| +| `trendingThreshold` | 5 | Minimum new comments to flag as trending | +| `staleWaitingDays` | 14 | Days waiting on author before stale | +| `closedTrackingDays` | 30 | Days to monitor closed issues for new comments | +| `labelConfidenceThreshold` | 70 | Minimum confidence % for label suggestions | + +## Best Practices + +1. **Always archive before overwriting**: Preserve history for audit trail +2. **Atomic updates**: Update state only after successful run completion +3. **Graceful degradation**: If state is corrupted, allow fresh start +4. 
**Version field**: Enables future schema migrations diff --git a/.github/skills/continuous-issue-triage/references/step2-collection.md b/.github/skills/continuous-issue-triage/references/step2-collection.md new file mode 100644 index 000000000000..564c5ffb0099 --- /dev/null +++ b/.github/skills/continuous-issue-triage/references/step2-collection.md @@ -0,0 +1,225 @@ +# Step 2: Issue Collection + +Collect issues that need triage attention based on activity since last run. + +## Collection Strategy + +### Issue Sources + +1. **Recently Updated Open Issues**: Any open issue with activity since last run +2. **Closed Issues with New Comments**: People may ask questions on closed issues +3. **Previously Flagged Issues**: Issues with pending actions from last run +4. **New Issues**: Issues created since last run + +## GitHub CLI Commands + +### Collect Recently Updated Open Issues + +```powershell +# Get open issues updated since last run +$since = "2026-01-29T00:00:00Z" # From triage-state.json.lastRun + +gh issue list ` + --state open ` + --json number,title,body,author,createdAt,updatedAt,state,labels,milestone,reactions,comments ` + --limit 500 ` + | ConvertFrom-Json ` + | Where-Object { [datetime]$_.updatedAt -gt [datetime]$since } +``` + +### Collect Closed Issues with Recent Activity + +```powershell +# Closed issues that might have new comments +$trackingDays = 30 + +gh issue list ` + --state closed ` + --json number,title,updatedAt,closedAt,comments ` + --limit 200 ` + | ConvertFrom-Json ` + | Where-Object { + $closedDate = [datetime]$_.closedAt + $updatedDate = [datetime]$_.updatedAt + $cutoff = (Get-Date).AddDays(-$trackingDays) + + # Closed within tracking window AND updated after closed + ($closedDate -gt $cutoff) -and ($updatedDate -gt $closedDate) + } +``` + +### Full Issue Details + +For each issue needing analysis, fetch complete data: + +```powershell +function Get-IssueDetails { + param([int]$IssueNumber) + + $issue = gh issue view $IssueNumber ` + 
--json number,title,body,author,createdAt,updatedAt,state,labels,milestone,reactions,comments,linkedPullRequests ` + | ConvertFrom-Json + + return @{ + number = $issue.number + title = $issue.title + body = $issue.body + author = $issue.author.login + state = $issue.state + createdAt = $issue.createdAt + updatedAt = $issue.updatedAt + labels = $issue.labels | ForEach-Object { $_.name } + milestone = $issue.milestone.title + reactions = @{ + thumbsUp = ($issue.reactions | Where-Object { $_.content -eq "THUMBS_UP" }).Count + thumbsDown = ($issue.reactions | Where-Object { $_.content -eq "THUMBS_DOWN" }).Count + heart = ($issue.reactions | Where-Object { $_.content -eq "HEART" }).Count + } + commentCount = $issue.comments.Count + comments = $issue.comments | ForEach-Object { + @{ + author = $_.author.login + createdAt = $_.createdAt + body = $_.body + } + } + linkedPRs = $issue.linkedPullRequests | ForEach-Object { + @{ + number = $_.number + title = $_.title + state = $_.state + mergedAt = $_.mergedAt + } + } + } +} +``` + +## Filtering Logic + +### First Run (No Previous State) + +```powershell +# Collect issues from last 7 days +$lookbackDays = 7 +$since = (Get-Date).AddDays(-$lookbackDays).ToUniversalTime().ToString("o") + +$openIssues = gh issue list --state open --json number,updatedAt --limit 500 ` + | ConvertFrom-Json ` + | Where-Object { [datetime]$_.updatedAt -gt [datetime]$since } + +Write-Host "First run: Found $($openIssues.Count) issues from last $lookbackDays days" +``` + +### Subsequent Runs + +```powershell +function Get-IssuesToTriage { + param( + [hashtable]$State, + [string]$RunType = "weekly" # daily, twice-weekly, weekly + ) + + $since = [datetime]$State.lastRun + $issues = @() + + # 1. Open issues updated since last run + $openUpdated = gh issue list --state open --json number,updatedAt --limit 500 ` + | ConvertFrom-Json ` + | Where-Object { [datetime]$_.updatedAt -gt $since } + $issues += $openUpdated + + # 2. 
Closed issues we're tracking + foreach ($tracked in $State.closedWithActivity) { + $issueData = gh issue view $tracked.issueNumber --json updatedAt,comments | ConvertFrom-Json + if ([datetime]$issueData.updatedAt -gt [datetime]$tracked.lastCheckedAt) { + $issues += @{ number = $tracked.issueNumber; source = "closed-tracking" } + } + } + + # 3. Issues with pending actions (re-check status) + foreach ($pending in $State.pendingFollowUps) { + if ($pending.status -eq "pending") { + $issues += @{ number = $pending.issueNumber; source = "pending-action" } + } + } + + # 4. Issues previously categorized but action not taken + foreach ($snapshot in $State.issueSnapshots.Values) { + if ($snapshot.pendingAction -and -not $snapshot.actionTaken) { + if ($issues.number -notcontains $snapshot.number) { + $issues += @{ number = $snapshot.number; source = "unhandled" } + } + } + } + + return $issues | Sort-Object -Property number -Unique +} +``` + +## Comment Analysis + +For trending detection, analyze comment activity: + +```powershell +function Get-CommentDelta { + param( + [int]$IssueNumber, + [hashtable]$PreviousSnapshot + ) + + $current = gh issue view $IssueNumber --json comments | ConvertFrom-Json + + $previousCount = if ($PreviousSnapshot) { $PreviousSnapshot.commentCount } else { 0 } + $previousLastComment = if ($PreviousSnapshot) { $PreviousSnapshot.lastCommentAt } else { $null } + + $newComments = $current.comments | Where-Object { + -not $previousLastComment -or [datetime]$_.createdAt -gt [datetime]$previousLastComment + } + + return @{ + totalComments = $current.comments.Count + newCommentCount = $newComments.Count + newComments = $newComments | ForEach-Object { + @{ + author = $_.author.login + createdAt = $_.createdAt + bodyPreview = $_.body.Substring(0, [Math]::Min(200, $_.body.Length)) + } + } + lastCommentAt = ($current.comments | Sort-Object createdAt -Descending | Select-Object -First 1).createdAt + lastCommentAuthor = ($current.comments | Sort-Object createdAt 
-Descending | Select-Object -First 1).author.login + } +} +``` + +## Output Format + +Save collected issues to working file: + +```powershell +$collectedIssues | ConvertTo-Json -Depth 10 | Set-Content "Generated Files/triage-issues/current-run/collected-issues.json" +``` + +## Rate Limiting + +GitHub API has rate limits. For large backlogs: + +```powershell +# Check rate limit +gh api rate_limit --jq '.resources.core' + +# Batch requests with delay if needed +$batchSize = 50 +$delaySeconds = 2 + +for ($i = 0; $i -lt $issues.Count; $i += $batchSize) { + $batch = $issues[$i..([Math]::Min($i + $batchSize - 1, $issues.Count - 1))] + # Process batch... + Start-Sleep -Seconds $delaySeconds +} +``` + +## Next Step + +After collection, proceed to [Step 3: Categorization](./step3-categorization.md). diff --git a/.github/skills/continuous-issue-triage/references/step3-categorization.md b/.github/skills/continuous-issue-triage/references/step3-categorization.md new file mode 100644 index 000000000000..8486416fb191 --- /dev/null +++ b/.github/skills/continuous-issue-triage/references/step3-categorization.md @@ -0,0 +1,432 @@ +# Step 3: Categorization Rules + +Apply categorization rules to assign each issue to an actionable bucket. 
+ +## Category Definitions + +| Category | ID | Priority | Criteria | +|----------|-----|----------|----------| +| 🔥 **Trending** | `trending` | 1 | 5+ new comments since last run | +| 🏷️ **Needs-Label** | `needs-label` | 2 | Missing `Product-*` or `Area-*` label | +| ✅ **Ready-for-Fix** | `ready-for-fix` | 3 | High clarity (≥70), feasible (≥60), validated | +| ❓ **Needs-Info** | `needs-info` | 4 | Missing repro, impact, or expected result | +| 💬 **Needs-Clarification** | `needs-clarification` | 5 | Question/discussion, not actionable bug | +| ✔️ **Closeable** | `closeable` | 6 | Fixed by merged PR, or released, or resolved | +| ⏳ **Stale-Waiting** | `stale-waiting` | 7 | Waiting on author >14 days after ask | +| 🔁 **Duplicate-Candidate** | `duplicate-candidate` | 8 | Likely duplicate of existing issue | + +## Categorization Algorithm + +``` +FOR EACH issue in collected_issues: + + # Priority order - first match wins + + 1. CHECK TRENDING + IF new_comments >= 5: + category = "trending" + CONTINUE + + 2. CHECK CLOSEABLE + IF has_merged_PR AND PR_in_released_version: + category = "closeable" + reason = "Fixed in PR #X, released in vY.Z" + CONTINUE + IF state == "open" AND all_linked_PRs_merged: + category = "closeable" + reason = "All linked PRs merged" + CONTINUE + + 3. CHECK NEEDS-LABEL + IF missing_product_or_area_label: + category = "needs-label" + suggested_label = analyze_content() + CONTINUE + + 4. CHECK STALE-WAITING + IF has_label("Needs-Author-Feedback"): + IF days_since_last_author_response > 14: + category = "stale-waiting" + CONTINUE + + 5. CHECK NEEDS-CLARIFICATION (question, not bug) + IF is_question_not_bug(): + category = "needs-clarification" + draft_reply = generate_explanation() + CONTINUE + + 6. CHECK NEEDS-INFO + IF missing_repro_steps OR missing_expected_result OR missing_version: + category = "needs-info" + missing_items = identify_gaps() + draft_questions = generate_questions() + CONTINUE + + 7. 
CHECK READY-FOR-FIX + IF clarity_score >= 70 AND feasibility_score >= 60: + category = "ready-for-fix" + CONTINUE + + 8. CHECK DUPLICATE + IF similar_issues_found AND confidence > 80: + category = "duplicate-candidate" + duplicate_of = [similar_issue_numbers] + CONTINUE + + 9. DEFAULT + category = "review-needed" + # Needs human judgment +``` + +## Category Rule Details + +### 🔥 Trending Detection + +```powershell +function Test-Trending { + param( + [hashtable]$Issue, + [hashtable]$PreviousSnapshot, + [int]$Threshold = 5 + ) + + $previousCount = if ($PreviousSnapshot) { $PreviousSnapshot.commentCount } else { 0 } + $newComments = $Issue.commentCount - $previousCount + + if ($newComments -ge $Threshold) { + return @{ + isTrending = $true + newCommentCount = $newComments + reason = "$newComments new comments since last triage" + sentiment = Get-CommentSentiment $Issue.comments # Optional + } + } + + return @{ isTrending = $false } +} +``` + +### 🏷️ Label Analysis + +```powershell +function Test-NeedsLabel { + param([hashtable]$Issue) + + $productLabels = $Issue.labels | Where-Object { $_ -like "Product-*" } + $areaLabels = $Issue.labels | Where-Object { $_ -like "Area-*" } + + if ($productLabels.Count -eq 0 -and $areaLabels.Count -eq 0) { + # Analyze content to suggest label + $suggestion = Get-LabelSuggestion $Issue + + return @{ + needsLabel = $true + missingType = "product-or-area" + suggestedLabels = $suggestion.labels + confidence = $suggestion.confidence + reason = $suggestion.reason + } + } + + return @{ needsLabel = $false } +} + +function Get-LabelSuggestion { + param([hashtable]$Issue) + + # Keyword mapping to products + $productKeywords = @{ + "Product-FancyZones" = @("fancy zones", "fancyzones", "zone", "snap", "layout", "window arrangement") + "Product-PowerToys Run" = @("run", "launcher", "alt+space", "search", "plugin") + "Product-Color Picker" = @("color picker", "colorpicker", "eyedropper", "hex", "rgb") + "Product-Keyboard Manager" = @("keyboard", 
"remap", "shortcut", "key") + "Product-Mouse Utils" = @("mouse", "crosshairs", "find my mouse", "highlighter", "pointer") + "Product-File Explorer" = @("file explorer", "preview", "thumbnail", "markdown preview", "svg") + "Product-Image Resizer" = @("image resizer", "resize", "bulk resize") + "Product-PowerRename" = @("rename", "power rename", "bulk rename", "regex rename") + "Product-Awake" = @("awake", "keep awake", "prevent sleep", "caffeinate") + "Product-Shortcut Guide" = @("shortcut guide", "win key", "keyboard shortcuts") + "Product-Text Extractor" = @("text extractor", "ocr", "screen text", "copy text from screen") + "Product-Hosts File Editor" = @("hosts", "hosts file", "dns") + "Product-Peek" = @("peek", "quick preview", "spacebar preview") + "Product-Crop And Lock" = @("crop", "crop and lock", "window crop") + "Product-Paste As Plain Text" = @("paste", "plain text", "paste as") + "Product-Registry Preview" = @("registry", "reg file", "registry preview") + "Product-Environment Variables" = @("environment", "env", "variables", "path") + "Product-Command Not Found" = @("command not found", "winget suggest") + "Product-New+" = @("new+", "new plus", "file template") + "Product-Advanced Paste" = @("advanced paste", "ai paste", "clipboard") + "Product-Workspaces" = @("workspaces", "workspace", "project launcher") + "Product-Cmd Palette" = @("command palette", "cmd palette", "palette") + "Product-ZoomIt" = @("zoomit", "zoom it", "screen zoom", "magnifier") + } + + $titleLower = $Issue.title.ToLower() + $bodyLower = if ($Issue.body) { $Issue.body.ToLower() } else { "" } + $combined = "$titleLower $bodyLower" + + $matches = @() + foreach ($product in $productKeywords.Keys) { + $keywords = $productKeywords[$product] + $matchCount = ($keywords | Where-Object { $combined -match $_ }).Count + if ($matchCount -gt 0) { + $matches += @{ + label = $product + matchCount = $matchCount + confidence = [Math]::Min(100, $matchCount * 25 + 25) + } + } + } + + $best = $matches | 
Sort-Object confidence -Descending | Select-Object -First 1 + + if ($best -and $best.confidence -ge 50) { + return @{ + labels = @($best.label) + confidence = $best.confidence + reason = "Matched $($best.matchCount) keywords for $($best.label)" + } + } + + return @{ + labels = @() + confidence = 0 + reason = "No confident label match - needs human review" + } +} +``` + +### ✅ Ready-for-Fix Detection + +Leverage the `review-issue` prompt scores: + +```powershell +function Test-ReadyForFix { + param( + [hashtable]$Issue, + [string]$CachePath = "Generated Files/triage-issues/issue-cache" + ) + + $overviewPath = "$CachePath/$($Issue.number)/overview.md" + + if (-not (Test-Path $overviewPath)) { + # Need to run deep analysis first + return @{ needsAnalysis = $true } + } + + # Parse scores from cached overview + $overview = Get-Content $overviewPath -Raw + $clarityScore = [regex]::Match($overview, 'Requirement Clarity.*?(\d+)/100').Groups[1].Value + $feasibilityScore = [regex]::Match($overview, 'Technical Feasibility.*?(\d+)/100').Groups[1].Value + + if ([int]$clarityScore -ge 70 -and [int]$feasibilityScore -ge 60) { + return @{ + readyForFix = $true + clarityScore = [int]$clarityScore + feasibilityScore = [int]$feasibilityScore + reason = "High clarity ($clarityScore) and feasible ($feasibilityScore)" + } + } + + return @{ readyForFix = $false } +} +``` + +### ❓ Needs-Info Detection + +```powershell +function Test-NeedsInfo { + param([hashtable]$Issue) + + $missingItems = @() + $body = $Issue.body + + # Check for repro steps + if ($body -notmatch '(?i)(steps to reproduce|repro|how to reproduce|reproduction)') { + $missingItems += "reproduction steps" + } + + # Check for expected result + if ($body -notmatch '(?i)(expected|should|supposed to)') { + $missingItems += "expected behavior" + } + + # Check for version + if ($body -notmatch '(?i)(version|v\d+\.\d+|\d+\.\d+\.\d+)') { + $missingItems += "PowerToys version" + } + + # Check for OS version + if ($body -notmatch 
'(?i)(windows 1[01]|win1[01]|22h2|23h2|24h2|build \d+)') { + $missingItems += "Windows version" + } + + # Check for actual result (for bugs) + if ($Issue.labels -contains "Issue-Bug") { + if ($body -notmatch '(?i)(actual|instead|but|however|currently)') { + $missingItems += "actual behavior/result" + } + } + + if ($missingItems.Count -gt 0) { + return @{ + needsInfo = $true + missingItems = $missingItems + reason = "Missing: " + ($missingItems -join ", ") + } + } + + return @{ needsInfo = $false } +} +``` + +### 💬 Needs-Clarification (Not a Bug) + +```powershell +function Test-NeedsClarification { + param([hashtable]$Issue) + + $questionPatterns = @( + '(?i)^(how (do|can|to)|why (does|is|doesn''t)|is (it|there|this) (possible|a way))', + '(?i)\?$', # Ends with question mark + '(?i)(wondering|curious|question|asking)', + '(?i)(is this (intended|by design|expected))', + '(?i)(can (someone|you) (explain|help))' + ) + + $titleAndBody = $Issue.title + " " + $Issue.body + + $isQuestion = $false + foreach ($pattern in $questionPatterns) { + if ($titleAndBody -match $pattern) { + $isQuestion = $true + break + } + } + + # Also check if explicitly marked as question + if ($Issue.labels -contains "Issue-Question" -or $Issue.labels -contains "Type-Question") { + $isQuestion = $true + } + + if ($isQuestion -and ($Issue.labels -notcontains "Issue-Bug")) { + return @{ + needsClarification = $true + type = "question" + reason = "Appears to be a question/inquiry rather than bug report" + } + } + + return @{ needsClarification = $false } +} +``` + +### ✔️ Closeable Detection + +```powershell +function Test-Closeable { + param([hashtable]$Issue) + + $closeReasons = @() + + # Check for merged linked PRs + $mergedPRs = $Issue.linkedPRs | Where-Object { $_.state -eq "MERGED" } + if ($mergedPRs.Count -gt 0) { + $closeReasons += @{ + type = "fixed-by-pr" + prNumbers = $mergedPRs.number + reason = "Fixed by PR(s): #" + ($mergedPRs.number -join ", #") + } + } + + # Check comments for "fixed 
in" or "released in" + $recentComments = $Issue.comments | Sort-Object createdAt -Descending | Select-Object -First 5 + foreach ($comment in $recentComments) { + if ($comment.body -match '(?i)(fixed in|released in|available in|shipped in) v?(\d+\.\d+)') { + $version = $Matches[2] + $closeReasons += @{ + type = "released" + version = $version + reason = "Released in v$version" + } + break + } + } + + # Check if marked as duplicate + if ($Issue.labels -contains "Resolution-Duplicate") { + $closeReasons += @{ + type = "duplicate" + reason = "Marked as duplicate" + } + } + + # Check if marked as won't fix + if ($Issue.labels -contains "Resolution-Won't Fix" -or $Issue.labels -contains "Resolution-By-Design") { + $closeReasons += @{ + type = "wont-fix" + reason = "Marked as won't fix / by design" + } + } + + if ($closeReasons.Count -gt 0) { + return @{ + closeable = $true + reasons = $closeReasons + } + } + + return @{ closeable = $false } +} +``` + +## Priority Scoring + +Combine signals for overall priority within category: + +```powershell +function Get-PriorityScore { + param([hashtable]$Issue) + + $score = 50 # Base score + + # Reaction boost + $thumbsUp = $Issue.reactions.thumbsUp + $score += [Math]::Min(20, $thumbsUp * 2) + + # Comment engagement + $score += [Math]::Min(15, $Issue.commentCount) + + # Recency boost (updated recently) + $daysSinceUpdate = ((Get-Date) - [datetime]$Issue.updatedAt).Days + if ($daysSinceUpdate -le 7) { $score += 10 } + elseif ($daysSinceUpdate -le 30) { $score += 5 } + + # Label boosts + if ($Issue.labels -contains "Priority-High") { $score += 15 } + if ($Issue.labels -match "Regression") { $score += 20 } + if ($Issue.labels -match "Security") { $score += 25 } + + return [Math]::Min(100, $score) +} +``` + +## Output + +Save categorization results: + +```json +{ + "12345": { + "category": "trending", + "categoryReason": "8 new comments since last run", + "priorityScore": 82, + "additionalFlags": ["negative-sentiment"], + 
"suggestedAction": "Review urgent - heated discussion" + } +} +``` + +## Next Step + +Proceed to [Step 4: Deep Analysis](./step4-deep-analysis.md) for complex issues. diff --git a/.github/skills/continuous-issue-triage/references/step4-deep-analysis.md b/.github/skills/continuous-issue-triage/references/step4-deep-analysis.md new file mode 100644 index 000000000000..4d2eafee55b8 --- /dev/null +++ b/.github/skills/continuous-issue-triage/references/step4-deep-analysis.md @@ -0,0 +1,274 @@ +# Step 4: Deep Analysis + +For issues requiring detailed analysis, leverage the `review-issue` prompt to generate comprehensive reviews. + +## When to Run Deep Analysis + +| Category | Deep Analysis? | Reason | +|----------|---------------|--------| +| Trending | Optional | If conversation is contentious | +| Needs-Label | No | Label detection is keyword-based | +| Ready-for-Fix | Yes (cached) | Need scores for validation | +| Needs-Info | Optional | To identify specific gaps | +| Needs-Clarification | No | Simple question detection | +| Closeable | No | Mechanical check | +| Stale-Waiting | No | Time-based | +| Duplicate-Candidate | Optional | Similar issue search | + +## Integration with review-issue Prompt + +The `review-issue` prompt generates two artifacts: +- `overview.md` - Scoring, signals, suggested actions +- `implementation-plan.md` - Technical breakdown + +### Invoking the Prompt + +```markdown +# Within the agent's execution, reference the prompt: + +For issue #{{issue_number}}, I need detailed analysis. + +Use the review-issue prompt at `.github/prompts/review-issue.prompt.md` to generate: +1. `Generated Files/triage-issues/issue-cache/{{issue_number}}/overview.md` +2. `Generated Files/triage-issues/issue-cache/{{issue_number}}/implementation-plan.md` +``` + +### Caching Strategy + +``` +Generated Files/triage-issues/issue-cache/ +├── 12345/ +│ ├── overview.md +│ ├── implementation-plan.md +│ └── metadata.json +└── 12346/ + └── ... 
+```
+
+**metadata.json**:
+```json
+{
+  "issueNumber": 12345,
+  "analyzedAt": "2026-02-05T10:30:00Z",
+  "issueUpdatedAt": "2026-02-04T15:30:00Z",
+  "commentCountAtAnalysis": 15,
+  "isStale": false
+}
+```
+
+### Cache Invalidation
+
+Re-run analysis if:
+1. Issue has new comments since last analysis
+2. Issue state changed (open ↔ closed)
+3. Labels changed significantly
+4. More than 7 days since last analysis
+5. User explicitly requests refresh
+
+```powershell
+function Test-CacheValid {
+    param(
+        [int]$IssueNumber,
+        [hashtable]$CurrentIssueData
+    )
+
+    $cachePath = "Generated Files/triage-issues/issue-cache/$IssueNumber"
+    $metadataPath = "$cachePath/metadata.json"
+
+    if (-not (Test-Path $metadataPath)) {
+        return @{ valid = $false; reason = "No cached analysis" }
+    }
+
+    $metadata = Get-Content $metadataPath | ConvertFrom-Json
+
+    # Check freshness
+    $daysSinceAnalysis = ((Get-Date) - [datetime]$metadata.analyzedAt).Days
+    if ($daysSinceAnalysis -gt 7) {
+        return @{ valid = $false; reason = "Cache older than 7 days" }
+    }
+
+    # Check for new comments
+    if ($CurrentIssueData.commentCount -gt $metadata.commentCountAtAnalysis) {
+        return @{ valid = $false; reason = "New comments added" }
+    }
+
+    # Check for state change — cast both sides to [datetime] so this is a
+    # chronological comparison, not a lexical string comparison
+    if ([datetime]$CurrentIssueData.updatedAt -gt [datetime]$metadata.issueUpdatedAt) {
+        return @{ valid = $false; reason = "Issue updated since analysis" }
+    }
+
+    return @{ valid = $true }
+}
+```
+
+## Selective Analysis
+
+Don't analyze every issue - be selective:
+
+### Batch 1: High-Priority Analysis
+
+Analyze first:
+- Trending issues with negative sentiment
+- Potential ready-for-fix candidates (unclear if ready)
+- Issues with high reaction counts (>10 👍)
+
+### Batch 2: Moderate Priority
+
+Analyze if time permits:
+- Needs-Info issues (to draft better questions)
+- Complex duplicate candidates
+
+### Batch 3: Skip Analysis
+
+Don't analyze:
+- Clear closeable issues
+- Stale-waiting issues
+- Already-analyzed recent issues
+
+## Extracting Scores from 
Analysis + +After running `review-issue`, parse the `overview.md`: + +```powershell +function Get-AnalysisScores { + param([string]$OverviewPath) + + $content = Get-Content $OverviewPath -Raw + + # Extract from the At-a-Glance Score Table + $scores = @{} + + # Business Importance + if ($content -match '\*\*A\) Business Importance\*\*.*?(\d+)/100') { + $scores.businessImportance = [int]$Matches[1] + } + + # Community Excitement + if ($content -match '\*\*B\) Community Excitement\*\*.*?(\d+)/100') { + $scores.communityExcitement = [int]$Matches[1] + } + + # Technical Feasibility + if ($content -match '\*\*C\) Technical Feasibility\*\*.*?(\d+)/100') { + $scores.technicalFeasibility = [int]$Matches[1] + } + + # Requirement Clarity + if ($content -match '\*\*D\) Requirement Clarity\*\*.*?(\d+)/100') { + $scores.requirementClarity = [int]$Matches[1] + } + + # Overall Priority + if ($content -match '\*\*Overall Priority\*\*.*?(\d+)/100') { + $scores.overallPriority = [int]$Matches[1] + } + + # Effort Estimate + if ($content -match '\*\*Effort Estimate\*\*.*?(\d+) days.*?(XS|S|M|L|XL|XXL|Epic)') { + $scores.effortDays = [int]$Matches[1] + $scores.effortTShirt = $Matches[2] + } + + return $scores +} +``` + +## Similar Issue Search + +For duplicate detection, search existing issues: + +```powershell +function Find-SimilarIssues { + param([hashtable]$Issue) + + # Extract key terms from title + $searchTerms = $Issue.title -split '\s+' | Where-Object { $_.Length -gt 3 } + $searchQuery = ($searchTerms | Select-Object -First 5) -join ' ' + + # Search both open and closed + $similar = gh issue list ` + --search "$searchQuery" ` + --state all ` + --json number,title,state,closedAt,labels ` + --limit 10 ` + | ConvertFrom-Json ` + | Where-Object { $_.number -ne $Issue.number } + + # Score similarity + $results = $similar | ForEach-Object { + $similarity = Get-TitleSimilarity $Issue.title $_.title + @{ + number = $_.number + title = $_.title + state = $_.state + closedAt = $_.closedAt 
+ similarityScore = $similarity + } + } | Where-Object { $_.similarityScore -gt 50 } | Sort-Object similarityScore -Descending + + return $results +} + +function Get-TitleSimilarity { + param( + [string]$Title1, + [string]$Title2 + ) + + $words1 = $Title1.ToLower() -split '\W+' | Where-Object { $_.Length -gt 2 } + $words2 = $Title2.ToLower() -split '\W+' | Where-Object { $_.Length -gt 2 } + + $common = ($words1 | Where-Object { $words2 -contains $_ }).Count + $total = [Math]::Max($words1.Count, $words2.Count) + + if ($total -eq 0) { return 0 } + + return [int](($common / $total) * 100) +} +``` + +## MCP Tools for Rich Context + +When available, use MCP tools for additional context: + +### Images (UI issues) + +```markdown +If the issue mentions screenshots or UI problems, use MCP: + +github_issue_images(owner: "microsoft", repo: "PowerToys", issueNumber: 12345) +``` + +### Attachments (Logs) + +```markdown +If the issue mentions logs or diagnostic reports: + +github_issue_attachments( + owner: "microsoft", + repo: "PowerToys", + issueNumber: 12345, + extractFolder: "Generated Files/triage-issues/issue-cache/12345/logs" +) +``` + +## Analysis Output + +Save analysis metadata for state tracking: + +```powershell +$metadata = @{ + issueNumber = $Issue.number + analyzedAt = (Get-Date).ToUniversalTime().ToString("o") + issueUpdatedAt = $Issue.updatedAt + commentCountAtAnalysis = $Issue.commentCount + scores = $extractedScores + suggestedCategory = $determinedCategory +} + +$metadata | ConvertTo-Json | Set-Content "$cachePath/metadata.json" +``` + +## Next Step + +Proceed to [Step 5: Report Generation](./step5-reports.md). 
diff --git a/.github/skills/continuous-issue-triage/references/step5-reports.md b/.github/skills/continuous-issue-triage/references/step5-reports.md new file mode 100644 index 000000000000..db4326c904b3 --- /dev/null +++ b/.github/skills/continuous-issue-triage/references/step5-reports.md @@ -0,0 +1,316 @@ +# Step 5: Report Generation + +Generate actionable reports for each category and an executive summary. + +## Report Structure + +``` +Generated Files/triage-issues/current-run/ +├── summary.md # Executive summary (start here) +├── trending.md # 🔥 Trending issues +├── needs-label.md # 🏷️ Issues missing labels +├── ready-for-fix.md # ✅ Ready for implementation +├── needs-info.md # ❓ Needs author feedback +├── needs-clarification.md # 💬 Questions/discussions +├── closeable.md # ✔️ Can be closed +├── stale-waiting.md # ⏳ Waiting on author +├── duplicate-candidate.md # 🔁 Potential duplicates +└── draft-replies/ # Pre-drafted messages + ├── issue-12345.md + └── issue-12346.md +``` + +## Executive Summary Template + +**File**: `summary.md` + +```markdown +# Issue Triage Summary - {{DATE}} + +**Run Type**: {{RUN_TYPE}} | **Issues Analyzed**: {{TOTAL_COUNT}} | **Since**: {{LAST_RUN_DATE}} + +## 📊 Quick Stats + +| Metric | Value | Change | +|--------|-------|--------| +| Total issues scanned | {{TOTAL}} | {{DELTA}} | +| New issues since last run | {{NEW_COUNT}} | — | +| Issues with new activity | {{ACTIVE_COUNT}} | — | +| Closed issues with comments | {{CLOSED_ACTIVE}} | — | + +## ⚡ Action Required by Category + +| Category | Count | Top Priority | Draft Ready? 
| +|----------|-------|--------------|--------------| +| 🔥 Trending | {{COUNT}} | [#{{NUM}}]({{LINK}}) ({{COMMENTS}} new comments) | — | +| 🏷️ Needs-Label | {{COUNT}} | [#{{NUM}}]({{LINK}}) (suggest: {{LABEL}}) | — | +| ✅ Ready-for-Fix | {{COUNT}} | [#{{NUM}}]({{LINK}}) (score: {{SCORE}}/100) | — | +| ❓ Needs-Info | {{COUNT}} | [#{{NUM}}]({{LINK}}) (missing: {{ITEMS}}) | ✅ | +| 💬 Needs-Clarification | {{COUNT}} | [#{{NUM}}]({{LINK}}) | ✅ | +| ✔️ Closeable | {{COUNT}} | [#{{NUM}}]({{LINK}}) ({{REASON}}) | ✅ | +| ⏳ Stale-Waiting | {{COUNT}} | [#{{NUM}}]({{LINK}}) ({{DAYS}} days) | ✅ | +| 🔁 Duplicate-Candidate | {{COUNT}} | [#{{NUM}}]({{LINK}}) → #{{DUP_OF}} | — | + +## 🎯 Top 5 Priority Actions + +1. **[Urgent]** Review [#{{NUM}}]({{LINK}}) - {{REASON}} +2. **[High]** Post clarification on [#{{NUM}}]({{LINK}}) - draft ready +3. **[High]** Assign [#{{NUM}}]({{LINK}}) - ready for implementation +4. **[Medium]** Label [#{{NUM}}]({{LINK}}) as {{LABEL}} +5. **[Low]** Close [#{{NUM}}]({{LINK}}) - fixed in v{{VERSION}} + +## 📁 Detailed Reports + +- [Trending Issues](./trending.md) +- [Needs Label](./needs-label.md) +- [Ready for Fix](./ready-for-fix.md) +- [Needs Information](./needs-info.md) +- [Needs Clarification](./needs-clarification.md) +- [Closeable](./closeable.md) +- [Stale Waiting](./stale-waiting.md) +- [Duplicate Candidates](./duplicate-candidate.md) + +## 📝 Draft Replies Ready + +{{COUNT}} draft replies prepared in `draft-replies/`: +{{DRAFT_LIST}} + +## ⏭️ Follow-ups from Last Run + +| Issue | Previous Action | Status | +|-------|-----------------|--------| +| [#{{NUM}}]({{LINK}}) | Posted clarification | ✅ Resolved | +| [#{{NUM}}]({{LINK}}) | Requested info | ⏳ No response | +| [#{{NUM}}]({{LINK}}) | Assigned to @{{USER}} | 🔄 In progress | + +--- +*Generated by continuous-issue-triage skill | Next suggested run: {{NEXT_RUN}}* +``` + +## Category Report Templates + +### Trending Report (`trending.md`) + +```markdown +# 🔥 Trending Issues + +Issues with 
significant activity since last triage ({{THRESHOLD}}+ new comments). + +| # | Issue | New Comments | Total | Sentiment | Last Activity | +|---|-------|--------------|-------|-----------|---------------| +| 1 | [#{{NUM}}]({{LINK}}) {{TITLE}} | +{{NEW}} | {{TOTAL}} | {{SENTIMENT}} | {{TIME_AGO}} | + +--- + +## #{{ISSUE_NUM}}: {{TITLE}} + +**Activity**: +{{NEW}} comments ({{TOTAL}} total) | **Sentiment**: {{SENTIMENT}} + +### Recent Discussion Summary + +{{SUMMARY_OF_RECENT_COMMENTS}} + +### Key Participants + +- @{{USER1}} ({{COMMENT_COUNT}} comments) - {{STANCE}} +- @{{USER2}} ({{COMMENT_COUNT}} comments) - {{STANCE}} + +### Recommended Action + +{{RECOMMENDATION}} + +--- +``` + +### Needs-Label Report (`needs-label.md`) + +```markdown +# 🏷️ Issues Missing Area/Product Labels + +These issues need categorization for proper routing. + +| # | Issue | Suggested Label | Confidence | Reason | +|---|-------|-----------------|------------|--------| +| 1 | [#{{NUM}}]({{LINK}}) {{TITLE}} | `{{LABEL}}` | {{CONF}}% | {{REASON}} | + +--- + +## Quick Apply Commands + +```bash +# Apply suggested labels (review first!) +gh issue edit {{NUM}} --add-label "{{LABEL}}" +gh issue edit {{NUM}} --add-label "{{LABEL}}" +``` + +--- + +## Detailed Analysis + +### #{{ISSUE_NUM}}: {{TITLE}} + +**Suggested**: `{{LABEL}}` ({{CONFIDENCE}}% confidence) + +**Why**: {{DETAILED_REASON}} + +**Alternative labels to consider**: {{ALTERNATIVES}} + +--- +``` + +### Ready-for-Fix Report (`ready-for-fix.md`) + +```markdown +# ✅ Issues Ready for Implementation + +High-clarity issues that are technically feasible. 
+ +| # | Issue | Clarity | Feasibility | Effort | Potential Assignee | +|---|-------|---------|-------------|--------|-------------------| +| 1 | [#{{NUM}}]({{LINK}}) {{TITLE}} | {{CLARITY}}/100 | {{FEASIBILITY}}/100 | {{EFFORT}} | @{{USER}} | + +--- + +## #{{ISSUE_NUM}}: {{TITLE}} + +**Scores**: Clarity {{CLARITY}}/100 | Feasibility {{FEASIBILITY}}/100 | Priority {{PRIORITY}}/100 + +**Effort**: {{DAYS}} days ({{TSHIRT}}) + +**Product Area**: {{LABELS}} + +### Problem Summary + +{{BRIEF_PROBLEM}} + +### Implementation Hints + +{{FROM_IMPLEMENTATION_PLAN}} + +### Suggested Assignees + +- @{{USER1}} - {{REASON}} +- @{{USER2}} - {{REASON}} + +**Full Analysis**: [issue-cache/{{NUM}}/overview.md](../issue-cache/{{NUM}}/overview.md) + +--- +``` + +### Needs-Info Report (`needs-info.md`) + +```markdown +# ❓ Issues Needing More Information + +These issues lack details needed for investigation or planning. + +| # | Issue | Missing | Days Open | Draft Ready | +|---|-------|---------|-----------|-------------| +| 1 | [#{{NUM}}]({{LINK}}) {{TITLE}} | {{MISSING}} | {{DAYS}} | [View](./draft-replies/issue-{{NUM}}.md) | + +--- + +## #{{ISSUE_NUM}}: {{TITLE}} + +**Missing Information**: +- [ ] {{ITEM_1}} +- [ ] {{ITEM_2}} +- [ ] {{ITEM_3}} + +**Draft Reply**: [draft-replies/issue-{{NUM}}.md](./draft-replies/issue-{{NUM}}.md) + +### Quick Post + +```bash +gh issue comment {{NUM}} --body-file "Generated Files/triage-issues/current-run/draft-replies/issue-{{NUM}}.md" +gh issue edit {{NUM}} --add-label "Needs-Author-Feedback" +``` + +--- +``` + +### Closeable Report (`closeable.md`) + +```markdown +# ✔️ Issues Ready to Close + +These issues can be closed with appropriate messaging. 
+ +| # | Issue | Close Reason | PR/Version | Draft Ready | +|---|-------|--------------|------------|-------------| +| 1 | [#{{NUM}}]({{LINK}}) {{TITLE}} | {{REASON}} | {{REFERENCE}} | [View](./draft-replies/issue-{{NUM}}.md) | + +--- + +## Batch Close Commands + +```bash +# Review drafts first, then close with message + +# Fixed by PR +gh issue close {{NUM}} --comment "Fixed in #{{PR_NUM}}. This fix is available in v{{VERSION}}. Thank you for reporting!" + +# Duplicate +gh issue close {{NUM}} --comment "Closing as duplicate of #{{DUP_NUM}}. Please follow that issue for updates." + +# By design / Won't fix +gh issue close {{NUM}} --comment "After review, this is working as designed. {{EXPLANATION}}" +``` + +--- + +## #{{ISSUE_NUM}}: {{TITLE}} + +**Reason**: {{DETAILED_REASON}} + +**Draft Close Message**: [draft-replies/issue-{{NUM}}.md](./draft-replies/issue-{{NUM}}.md) + +--- +``` + +## Generation Script + +```powershell +function New-TriageReports { + param( + [hashtable]$CategorizedIssues, + [hashtable]$State, + [string]$OutputPath = "Generated Files/triage-issues/current-run" + ) + + # Ensure directory exists + New-Item -ItemType Directory -Force -Path $OutputPath + New-Item -ItemType Directory -Force -Path "$OutputPath/draft-replies" + + # Group by category + $byCategory = $CategorizedIssues.Values | Group-Object -Property category + + # Generate category reports + foreach ($group in $byCategory) { + $categoryName = $group.Name + $issues = $group.Group | Sort-Object priorityScore -Descending + + $reportPath = "$OutputPath/$categoryName.md" + $reportContent = New-CategoryReport -Category $categoryName -Issues $issues + $reportContent | Set-Content $reportPath + } + + # Generate summary + $summaryContent = New-ExecutiveSummary -Categories $byCategory -State $State + $summaryContent | Set-Content "$OutputPath/summary.md" + + Write-Host "Reports generated at $OutputPath" +} +``` + +## Report Conventions + +1. 
**Always link to GitHub**: Use `[#NUM](https://github.com/microsoft/PowerToys/issues/NUM)` +2. **Include quick commands**: Provide `gh` CLI commands for easy action +3. **Sort by priority**: Highest priority issues first within each category +4. **Cross-reference drafts**: Link to draft replies when available +5. **Show deltas**: Compare to previous run where applicable + +## Next Step + +Proceed to [Step 6: Reply Templates](./step6-reply-templates.md) for draft message generation. diff --git a/.github/skills/continuous-issue-triage/references/step6-reply-templates.md b/.github/skills/continuous-issue-triage/references/step6-reply-templates.md new file mode 100644 index 000000000000..0939471948a9 --- /dev/null +++ b/.github/skills/continuous-issue-triage/references/step6-reply-templates.md @@ -0,0 +1,340 @@ +# Step 6: Reply Templates + +Generate draft replies for issues requiring human response. + +## Draft Reply Location + +``` +Generated Files/triage-issues/current-run/draft-replies/ +├── issue-12345.md # Needs-info draft +├── issue-12346.md # Clarification draft +├── issue-12347.md # Close message draft +└── ... +``` + +## Reply Categories + +| Category | Reply Type | Tone | Key Elements | +|----------|------------|------|--------------| +| Needs-Info | Question list | Friendly, helpful | Specific questions, context why needed | +| Needs-Clarification | Explanation | Educational, patient | Answer the question, link to docs | +| Closeable (fixed) | Thank you + reference | Grateful | PR link, version, appreciation | +| Closeable (duplicate) | Redirect | Brief, helpful | Link to original, explain | +| Closeable (by-design) | Explanation | Respectful | Rationale, alternatives | +| Stale-Waiting | Gentle ping | Patient | Reminder, offer to close | + +## Template: Needs-Info Reply + +**File**: `issue-XXXXX.md` + +```markdown +Hi @{{AUTHOR}}, + +Thank you for reporting this issue! To help us investigate further, could you please provide the following information? 
+ +{{#IF_MISSING_REPRO}} +**Reproduction Steps** +- What exact steps lead to this issue? +- Can you provide a minimal, consistent way to reproduce it? +{{/IF_MISSING_REPRO}} + +{{#IF_MISSING_VERSION}} +**Environment Details** +- PowerToys version (Settings > General > Version): +- Windows version (winver): +- Did this work in a previous version? If so, which one? +{{/IF_MISSING_VERSION}} + +{{#IF_MISSING_EXPECTED}} +**Expected vs Actual Behavior** +- What did you expect to happen? +- What actually happened instead? +{{/IF_MISSING_EXPECTED}} + +{{#IF_MISSING_SCREENSHOTS}} +**Visual Evidence** (if applicable) +- Could you attach a screenshot or screen recording showing the issue? +{{/IF_MISSING_SCREENSHOTS}} + +{{#IF_MISSING_LOGS}} +**Diagnostic Logs** +- Please run PowerToys and reproduce the issue +- Generate a bug report: Settings > General > "Generate Bug Report" +- Attach the resulting ZIP file +{{/IF_MISSING_LOGS}} + +This information will help us reproduce and fix the issue faster. Thanks! +``` + +## Template: Needs-Clarification Reply + +**File**: `issue-XXXXX.md` + +```markdown +Hi @{{AUTHOR}}, + +Thanks for reaching out! Let me help clarify this: + +{{EXPLANATION}} + +{{#IF_BY_DESIGN}} +This behavior is actually by design. Here's the reasoning: +- {{REASON_1}} +- {{REASON_2}} +{{/IF_BY_DESIGN}} + +{{#IF_HOW_TO}} +Here's how you can achieve what you're looking for: +1. {{STEP_1}} +2. {{STEP_2}} +3. {{STEP_3}} +{{/IF_HOW_TO}} + +{{#IF_DOCS_LINK}} +You can find more information in our documentation: +- [{{DOC_TITLE}}]({{DOC_LINK}}) +{{/IF_DOCS_LINK}} + +{{#IF_RELATED_ISSUE}} +There's also an existing discussion about this in #{{RELATED_NUM}} that might be helpful. +{{/IF_RELATED_ISSUE}} + +{{#IF_FEATURE_REQUEST}} +If you'd like to request this as a new feature, I'd suggest: +1. Search existing issues to see if it's already requested +2. 
If not, open a new feature request issue with your use case + +We track feature popularity through 👍 reactions, so feel free to upvote any existing requests that match your needs! +{{/IF_FEATURE_REQUEST}} + +Let me know if you have any other questions! +``` + +## Template: Close (Fixed by PR) + +**File**: `issue-XXXXX.md` + +```markdown +Hi @{{AUTHOR}}, + +Great news! This issue has been addressed in PR #{{PR_NUM}}. + +{{#IF_RELEASED}} +✅ **The fix is now available in PowerToys v{{VERSION}}** + +You can update to the latest version through: +- Microsoft Store (automatic updates) +- GitHub Releases: https://github.com/microsoft/PowerToys/releases/tag/v{{VERSION}} +- WinGet: `winget upgrade Microsoft.PowerToys` +{{/IF_RELEASED}} + +{{#IF_NOT_RELEASED}} +The fix has been merged and will be included in the next release (v{{NEXT_VERSION}}). + +You can track the release progress in our [milestones](https://github.com/microsoft/PowerToys/milestones). +{{/IF_NOT_RELEASED}} + +Thank you for reporting this issue and helping improve PowerToys! 🙏 + +Closing this issue as resolved. If you encounter any further problems, please don't hesitate to open a new issue. +``` + +## Template: Close (Duplicate) + +**File**: `issue-XXXXX.md` + +```markdown +Hi @{{AUTHOR}}, + +Thanks for reporting this! It looks like this issue is a duplicate of #{{ORIGINAL_NUM}}. + +To avoid splitting the discussion, I'm closing this in favor of the original issue. Please: +- 👍 React to #{{ORIGINAL_NUM}} to show your interest +- Add any additional context or reproduction details as a comment there +- Subscribe to #{{ORIGINAL_NUM}} for updates + +{{#IF_DIFFERENT_CONTEXT}} +I noticed your report includes some additional context that might be helpful. I'll add a comment to #{{ORIGINAL_NUM}} referencing this issue. +{{/IF_DIFFERENT_CONTEXT}} + +Thank you for understanding! 
+``` + +## Template: Close (By Design / Won't Fix) + +**File**: `issue-XXXXX.md` + +```markdown +Hi @{{AUTHOR}}, + +Thank you for taking the time to report this and share your feedback. + +After reviewing this issue, we've determined that this behavior is **{{RESOLUTION_TYPE}}**. + +{{#IF_BY_DESIGN}} +### Why This Is By Design + +{{RATIONALE}} + +This design choice was made because: +- {{REASON_1}} +- {{REASON_2}} +{{/IF_BY_DESIGN}} + +{{#IF_WONT_FIX}} +### Why We're Not Addressing This + +{{RATIONALE}} + +We've decided not to implement this change because: +- {{REASON_1}} +- {{REASON_2}} +{{/IF_WONT_FIX}} + +{{#IF_WORKAROUND}} +### Workaround + +In the meantime, you might try: +{{WORKAROUND}} +{{/IF_WORKAROUND}} + +{{#IF_ALTERNATIVE}} +### Alternative Approaches + +You might consider: +- {{ALTERNATIVE_1}} +- {{ALTERNATIVE_2}} +{{/IF_ALTERNATIVE}} + +We appreciate your understanding. If you have additional context that might change our assessment, please let us know! +``` + +## Template: Stale-Waiting Ping + +**File**: `issue-XXXXX.md` + +```markdown +Hi @{{AUTHOR}}, + +We haven't heard back from you in a while. Are you still experiencing this issue? + +{{#IF_WAITING_FOR_INFO}} +We're still waiting for the additional information requested above to help investigate this issue. +{{/IF_WAITING_FOR_INFO}} + +{{#IF_WAITING_FOR_CONFIRMATION}} +Could you confirm if the suggested solution worked for you? +{{/IF_WAITING_FOR_CONFIRMATION}} + +If we don't hear back within the next {{DAYS}} days, we'll close this issue to keep our backlog manageable. You're always welcome to reopen it or create a new issue if the problem persists. + +Thanks for your understanding! 🙏 +``` + +## Template: Closed Issue with New Comment + +**File**: `issue-XXXXX.md` + +```markdown +Hi @{{COMMENTER}}, + +Thanks for your comment! This issue was closed {{TIME_AGO}} because {{CLOSE_REASON}}. 
+ +{{#IF_SAME_ISSUE}} +If you're experiencing the same issue and it's not resolved, please open a new issue with: +- Your PowerToys version +- Steps to reproduce +- Any error messages or screenshots + +This helps us track and prioritize effectively. +{{/IF_SAME_ISSUE}} + +{{#IF_QUESTION}} +Regarding your question: +{{ANSWER}} +{{/IF_QUESTION}} + +{{#IF_DIFFERENT_ISSUE}} +It sounds like you might be experiencing a different issue. Please open a new issue with details about your specific problem so we can help you better. +{{/IF_DIFFERENT_ISSUE}} +``` + +## Draft Generation Logic + +```powershell +function New-DraftReply { + param( + [hashtable]$Issue, + [string]$Category, + [hashtable]$AnalysisData + ) + + $draftPath = "Generated Files/triage-issues/current-run/draft-replies/issue-$($Issue.number).md" + + switch ($Category) { + "needs-info" { + $content = New-NeedsInfoDraft -Issue $Issue -Missing $AnalysisData.missingItems + } + "needs-clarification" { + $content = New-ClarificationDraft -Issue $Issue -QuestionType $AnalysisData.questionType + } + "closeable" { + $content = New-CloseDraft -Issue $Issue -CloseReason $AnalysisData.closeReason + } + "stale-waiting" { + $content = New-StalePingDraft -Issue $Issue -DaysWaiting $AnalysisData.daysWaiting + } + default { + return $null # No draft needed + } + } + + # Add metadata header + $header = @" +--- +issue: $($Issue.number) +title: $($Issue.title) +category: $Category +generated: $(Get-Date -Format "o") +status: draft +--- + +"@ + + ($header + $content) | Set-Content $draftPath + return $draftPath +} +``` + +## Draft Review Checklist + +Before posting any draft: + +- [ ] Read the full issue context +- [ ] Check for recent comments not in analysis +- [ ] Personalize if needed (remove boilerplate feel) +- [ ] Verify links work +- [ ] Ensure tone is appropriate +- [ ] Remove any placeholder text (`{{...}}`) + +## Posting Drafts + +```bash +# Post a single draft +gh issue comment 12345 --body-file "Generated 
Files/triage-issues/current-run/draft-replies/issue-12345.md" + +# Add label if needed +gh issue edit 12345 --add-label "Needs-Author-Feedback" + +# Close with message +gh issue close 12345 --comment "$(cat draft-replies/issue-12345.md)" +``` + +## Best Practices + +1. **Never auto-post**: Always human review before posting +2. **Be empathetic**: Remember there's a person on the other side +3. **Be specific**: Generic responses feel dismissive +4. **Provide value**: Every reply should move the issue forward +5. **Link resources**: Documentation, related issues, PRs +6. **Thank contributors**: Acknowledge their time and effort diff --git a/.github/skills/continuous-issue-triage/scripts/analyze-issues-parallel.ps1 b/.github/skills/continuous-issue-triage/scripts/analyze-issues-parallel.ps1 new file mode 100644 index 000000000000..850afb3a5690 --- /dev/null +++ b/.github/skills/continuous-issue-triage/scripts/analyze-issues-parallel.ps1 @@ -0,0 +1,217 @@ +<# +.SYNOPSIS + Runs Copilot CLI analysis on issues using review-issue prompt. + +.DESCRIPTION + Kicks off GitHub Copilot CLI to analyze each issue using + the review-issue.prompt.md file. Processes sequentially with timeout handling. + +.PARAMETER IssueNumbers + Array of issue numbers to analyze. If not provided, collects from recent activity. + +.PARAMETER TimeoutMinutes + Timeout for each Copilot analysis. Default: 8 + +.PARAMETER MaxRetryCount + Maximum retries on timeout/failure. Default: 3 + +.PARAMETER Model + Copilot model to use (optional). 
+
+.EXAMPLE
+    .\analyze-issues-parallel.ps1 -IssueNumbers @(45201, 45107, 45321)
+
+.EXAMPLE
+    .\analyze-issues-parallel.ps1 -TimeoutMinutes 10 -MaxRetryCount 2
+#>
+
+[CmdletBinding()]
+param(
+    [Parameter()]
+    [int[]]$IssueNumbers,
+
+    [Parameter()]
+    [int]$TimeoutMinutes = 8,  # NOTE(review): declared but never enforced below — the copilot invocation runs without a timeout; confirm intent
+
+    [Parameter()]
+    [int]$MaxRetryCount = 3,
+
+    [Parameter()]
+    [string]$Model,
+
+    [Parameter()]
+    [int]$LookbackDays = 14,
+
+    [Parameter()]
+    [int]$MaxIssues = 15
+)
+
+$ErrorActionPreference = "Stop"
+$repoRoot = (git rev-parse --show-toplevel 2>$null); if (-not $repoRoot) { $repoRoot = (Get-Location).Path }; $repoRoot = (Resolve-Path $repoRoot).Path
+
+# Resolve config directory name (.github or .claude) from script location
+$_cfgDir = if ($PSScriptRoot -match '[\\/](\.github|\.claude)[\\/]') { $Matches[1] } else { '.github' }
+$triageRoot = Join-Path $repoRoot "Generated Files\triage-issues"
+$issueCachePath = Join-Path $triageRoot "issue-cache"
+$promptPath = Join-Path $repoRoot "$_cfgDir\prompts\review-issue.prompt.md"  # NOTE(review): unused — the prompt text below interpolates $_cfgDir directly; verify before removing
+
+# Ensure directories exist
+if (-not (Test-Path $issueCachePath)) {
+    New-Item -ItemType Directory -Path $issueCachePath -Force | Out-Null
+}
+
+Write-Host "═══════════════════════════════════════════════════════════════" -ForegroundColor Cyan
+Write-Host "  Issue Analysis with Copilot CLI" -ForegroundColor Cyan
+Write-Host "  Using: review-issue.prompt.md" -ForegroundColor Cyan
+Write-Host "═══════════════════════════════════════════════════════════════" -ForegroundColor Cyan
+Write-Host ""
+
+# If no issues provided, collect from recent activity
+if (-not $IssueNumbers -or $IssueNumbers.Count -eq 0) {
+    Write-Host "Collecting issues from last $LookbackDays days..." 
-ForegroundColor Yellow
+
+    $issues = gh issue list --state open --json number,title,comments,updatedAt --limit 200 | ConvertFrom-Json
+    $recent = $issues | Where-Object { [datetime]$_.updatedAt -gt (Get-Date).AddDays(-$LookbackDays) }
+
+    # Prioritize: trending first, then by recency. -Descending applies to BOTH sort keys,
+    # so the comment count must NOT be negated (negating it would put least-discussed issues first).
+    $prioritized = $recent | Sort-Object { $_.comments.Count }, { [datetime]$_.updatedAt } -Descending
+    $IssueNumbers = ($prioritized | Select-Object -First $MaxIssues).number
+
+    Write-Host "  Found $($recent.Count) recent issues, selected top $($IssueNumbers.Count) for analysis" -ForegroundColor Green
+}
+
+Write-Host ""
+Write-Host "Issues to analyze: $($IssueNumbers -join ', ')" -ForegroundColor Cyan
+Write-Host "Timeout: ${TimeoutMinutes}m | Retries: $MaxRetryCount" -ForegroundColor Gray
+Write-Host ""
+
+# Results tracking
+$results = @{}
+$startTime = Get-Date
+$totalIssues = $IssueNumbers.Count
+$current = 0
+
+foreach ($issueNum in $IssueNumbers) {
+    $current++
+    $issueDir = Join-Path $issueCachePath $issueNum
+    if (-not (Test-Path $issueDir)) {
+        New-Item -ItemType Directory -Path $issueDir -Force | Out-Null
+    }
+
+    $logFile = Join-Path $issueDir "analysis.log"
+    $errorFile = Join-Path $issueDir "error.log"
+    $statusFile = Join-Path $issueDir "status.json"
+
+    Write-Host ""
+    Write-Host "[$current/$totalIssues] #$issueNum - Beginning analysis..." -ForegroundColor Yellow
+
+    $success = $false
+    $lastError = $null
+    $retryCount = 0
+
+    for ($retry = 0; $retry -lt $MaxRetryCount -and -not $success; $retry++) {
+        $retryCount = $retry + 1
+
+        if ($retry -gt 0) {
+            Write-Host "  [RETRY] Attempt $retryCount/$MaxRetryCount (waiting 10s)..." 
-ForegroundColor Yellow + Start-Sleep -Seconds 10 + } + + try { + # Build the prompt - use the review-issue prompt directly + $prompt = @" +Analyze GitHub issue #$issueNum using the methodology from $_cfgDir/prompts/review-issue.prompt.md + +First, fetch the issue data: +gh issue view $issueNum --json number,title,body,author,createdAt,updatedAt,state,labels,milestone,reactions,comments,linkedPullRequests + +Then produce a concise JSON summary with this structure (output ONLY the JSON): +{ + "issueNumber": $issueNum, + "title": "issue title", + "category": "trending|needs-label|ready-for-fix|needs-info|needs-clarification|closeable|stale-waiting|duplicate-candidate|review-needed", + "categoryReason": "brief explanation", + "priorityScore": 0-100, + "clarityScore": 0-100, + "feasibilityScore": 0-100, + "suggestedAction": "what human should do", + "suggestedLabels": ["label1", "label2"], + "missingInfo": ["item1", "item2"], + "draftReply": "if needs-info or needs-clarification, draft the reply" +} +"@ + + # Build Copilot CLI arguments + $copilotArgs = @('-p', $prompt, '--yolo', '--agent', 'ReviewIssue') + if ($Model) { + $copilotArgs += @('--model', $Model) + } + + Write-Host " Running copilot CLI..." 
-ForegroundColor Gray + + # Run copilot directly (not in job) + $output = & copilot @copilotArgs 2>&1 + $outputStr = $output | Out-String + + # Save the output + $outputStr | Out-File -FilePath $logFile -Force + + # Check for valid output + if ($outputStr.Length -gt 200) { + $success = $true + Write-Host " [SUCCESS] Analysis complete ($($outputStr.Length) chars)" -ForegroundColor Green + } + else { + $lastError = "Output too short ($($outputStr.Length) chars)" + Write-Host " [WARN] $lastError" -ForegroundColor Yellow + } + } + catch { + $lastError = $_.Exception.Message + Write-Host " [ERROR] $lastError" -ForegroundColor Red + } + } + + # Save status + $status = @{ + issueNumber = $issueNum + success = $success + attempts = $retryCount + lastError = $lastError + analyzedAt = (Get-Date).ToUniversalTime().ToString("o") + } + $status | ConvertTo-Json | Out-File -FilePath $statusFile -Force + $results[$issueNum] = $status + + if (-not $success) { + $lastError | Out-File -FilePath $errorFile -Force + Write-Host " [FAILED] All $MaxRetryCount attempts failed: $lastError" -ForegroundColor Red + } +} + +$elapsed = (Get-Date) - $startTime + +Write-Host "" +Write-Host "═══════════════════════════════════════════════════════════════" -ForegroundColor Cyan +Write-Host " Analysis Complete" -ForegroundColor Cyan +Write-Host "═══════════════════════════════════════════════════════════════" -ForegroundColor Cyan +Write-Host "" +Write-Host "Duration: $([math]::Round($elapsed.TotalMinutes, 1)) minutes" -ForegroundColor Gray +Write-Host "Total issues: $($IssueNumbers.Count)" -ForegroundColor Gray + +$successCount = ($results.Values | Where-Object { $_.success }).Count +$failCount = ($results.Values | Where-Object { -not $_.success }).Count + +Write-Host "Successful: $successCount" -ForegroundColor Green +Write-Host "Failed: $failCount" -ForegroundColor $(if ($failCount -gt 0) { 'Red' } else { 'Gray' }) + +if ($failCount -gt 0) { + Write-Host "" + Write-Host "Failed issues:" 
-ForegroundColor Red + $results.Values | Where-Object { -not $_.success } | ForEach-Object { + Write-Host " #$($_.issueNumber): $($_.lastError)" -ForegroundColor Red + } +} + +Write-Host "" +Write-Host "Results saved to: $issueCachePath" -ForegroundColor Cyan diff --git a/.github/skills/continuous-issue-triage/scripts/categorize-issues.ps1 b/.github/skills/continuous-issue-triage/scripts/categorize-issues.ps1 new file mode 100644 index 000000000000..f7ba130033a9 --- /dev/null +++ b/.github/skills/continuous-issue-triage/scripts/categorize-issues.ps1 @@ -0,0 +1,376 @@ +<# +.SYNOPSIS + Categorizes collected issues into actionable buckets. + +.DESCRIPTION + Applies categorization rules to issues collected by collect-active-issues.ps1. + Outputs categorized results with priority scores and suggested actions. + +.PARAMETER InputPath + Path to collected issues JSON. Default: Generated Files/triage-issues/current-run/collected-issues.json + +.PARAMETER StatePath + Path to triage state JSON. Default: Generated Files/triage-issues/triage-state.json + +.PARAMETER OutputPath + Path to save categorized results. Default: Generated Files/triage-issues/current-run/categorized-issues.json + +.PARAMETER TrendingThreshold + Minimum new comments to flag as trending. 
Default: 5 + +.EXAMPLE + .\categorize-issues.ps1 + +.EXAMPLE + .\categorize-issues.ps1 -TrendingThreshold 10 +#> + +param( + [Parameter()] + [string]$InputPath = "Generated Files/triage-issues/current-run/collected-issues.json", + + [Parameter()] + [string]$StatePath = "Generated Files/triage-issues/triage-state.json", + + [Parameter()] + [string]$OutputPath = "Generated Files/triage-issues/current-run/categorized-issues.json", + + [Parameter()] + [int]$TrendingThreshold = 5 +) + +$ErrorActionPreference = "Stop" + +# Product keyword mapping +$ProductKeywords = @{ + "Product-FancyZones" = @("fancy zones", "fancyzones", "zone", "snap", "layout", "window arrangement", "virtual desktop") + "Product-PowerToys Run" = @("run", "launcher", "alt+space", "alt space", "search", "plugin", "powertoys run") + "Product-Color Picker" = @("color picker", "colorpicker", "eyedropper", "hex", "rgb", "color code") + "Product-Keyboard Manager" = @("keyboard", "remap", "shortcut", "key mapping", "keyboard manager") + "Product-Mouse Utils" = @("mouse", "crosshairs", "find my mouse", "highlighter", "pointer", "mouse without borders") + "Product-File Explorer" = @("file explorer", "preview", "thumbnail", "markdown preview", "svg preview", "preview pane") + "Product-Image Resizer" = @("image resizer", "resize image", "bulk resize", "resize pictures") + "Product-PowerRename" = @("rename", "power rename", "powerrename", "bulk rename", "regex rename") + "Product-Awake" = @("awake", "keep awake", "prevent sleep", "caffeinate", "stay awake") + "Product-Shortcut Guide" = @("shortcut guide", "win key", "windows key guide") + "Product-Text Extractor" = @("text extractor", "ocr", "screen text", "copy text from screen") + "Product-Hosts File Editor" = @("hosts", "hosts file", "dns mapping") + "Product-Peek" = @("peek", "quick preview", "spacebar preview", "file peek") + "Product-Crop And Lock" = @("crop", "crop and lock", "window crop", "cropped window") + "Product-Paste As Plain Text" = @("paste", 
"plain text", "paste as plain") + "Product-Registry Preview" = @("registry", "reg file", "registry preview") + "Product-Environment Variables" = @("environment", "env variable", "path variable", "system variable") + "Product-Command Not Found" = @("command not found", "winget suggest", "command suggestion") + "Product-New+" = @("new\+", "newplus", "file template", "new file") + "Product-Advanced Paste" = @("advanced paste", "ai paste", "clipboard ai", "smart paste") + "Product-Workspaces" = @("workspaces", "workspace launcher", "project layout") + "Product-Cmd Palette" = @("command palette", "cmd palette", "quick command") + "Product-ZoomIt" = @("zoomit", "zoom it", "screen zoom", "presentation zoom") +} + +# Load collected issues +if (-not (Test-Path $InputPath)) { + Write-Error "Input file not found: $InputPath. Run collect-active-issues.ps1 first." + exit 1 +} + +$collected = Get-Content $InputPath | ConvertFrom-Json + +# Load previous state +$previousState = $null +if (Test-Path $StatePath) { + $previousState = Get-Content $StatePath | ConvertFrom-Json +} + +function Get-IssueDetails { + param([int]$IssueNumber) + + $json = gh issue view $IssueNumber ` + --json number,title,body,author,createdAt,updatedAt,state,labels,milestone,reactions,comments,linkedPullRequests 2>$null + + if (-not $json) { return $null } + + $issue = $json | ConvertFrom-Json + + return @{ + number = $issue.number + title = $issue.title + body = $issue.body + author = $issue.author.login + state = $issue.state + createdAt = $issue.createdAt + updatedAt = $issue.updatedAt + labels = @($issue.labels | ForEach-Object { $_.name }) + milestone = $issue.milestone.title + reactions = @{ + thumbsUp = ($issue.reactions | Where-Object { $_.content -eq "THUMBS_UP" }).Count + thumbsDown = ($issue.reactions | Where-Object { $_.content -eq "THUMBS_DOWN" }).Count + heart = ($issue.reactions | Where-Object { $_.content -eq "HEART" }).Count + } + commentCount = $issue.comments.Count + comments = 
@($issue.comments | ForEach-Object {
+            @{
+                author = $_.author.login
+                createdAt = $_.createdAt
+                body = $_.body
+            }
+        })
+        linkedPRs = @($issue.linkedPullRequests | ForEach-Object {
+            @{
+                number = $_.number
+                state = $_.state
+                mergedAt = $_.mergedAt
+            }
+        })
+    }
+}
+
+function Get-LabelSuggestion {
+    param([hashtable]$Issue)
+
+    $titleLower = $Issue.title.ToLower()
+    $bodyLower = if ($Issue.body) { $Issue.body.ToLower() } else { "" }
+    $combined = "$titleLower $bodyLower"
+
+    $candidates = @()  # do not name this $matches: the -match operator below clobbers the automatic $Matches variable, corrupting the accumulator
+    foreach ($product in $ProductKeywords.Keys) {
+        $keywords = $ProductKeywords[$product]
+        $matchCount = ($keywords | Where-Object { $combined -match $_ }).Count  # NOTE(review): keywords are regex — entries like "alt+space" are unescaped; confirm intended
+        if ($matchCount -gt 0) {
+            $candidates += @{
+                label = $product
+                matchCount = $matchCount
+                confidence = [Math]::Min(100, $matchCount * 25 + 25)
+            }
+        }
+    }
+
+    $best = $candidates | Sort-Object confidence -Descending | Select-Object -First 1
+
+    if ($best -and $best.confidence -ge 50) {
+        return @{
+            labels = @($best.label)
+            confidence = $best.confidence
+            reason = "Matched $($best.matchCount) keywords"
+        }
+    }
+
+    return @{ labels = @(); confidence = 0; reason = "No confident match" }
+}
+
+function Get-PriorityScore {
+    param([hashtable]$Issue)
+
+    $score = 50
+
+    # Reactions
+    $score += [Math]::Min(20, $Issue.reactions.thumbsUp * 2)
+
+    # Comments
+    $score += [Math]::Min(15, $Issue.commentCount)
+
+    # Recency
+    $daysSinceUpdate = ((Get-Date) - [datetime]$Issue.updatedAt).Days
+    if ($daysSinceUpdate -le 7) { $score += 10 }
+    elseif ($daysSinceUpdate -le 30) { $score += 5 }
+
+    # Labels
+    if ($Issue.labels -contains "Priority-High") { $score += 15 }
+    if ($Issue.labels -match "Regression") { $score += 20 }
+    if ($Issue.labels -match "Security") { $score += 25 }
+
+    return [Math]::Min(100, $score)
+}
+
+# Process each issue
+$categorized = @{}
+$issueCount = $collected.issues.Count
+$current = 0
+
+Write-Host "Categorizing $issueCount issues..." 
+Write-Host "" + +foreach ($collectedIssue in $collected.issues) { + $current++ + $issueNum = $collectedIssue.number + + Write-Host "[$current/$issueCount] Processing #$issueNum..." + + # Get full issue details + $issue = Get-IssueDetails -IssueNumber $issueNum + if (-not $issue) { + Write-Host " Warning: Could not fetch issue #$issueNum" + continue + } + + # Get previous snapshot + $previousSnapshot = $null + if ($previousState -and $previousState.issueSnapshots.$issueNum) { + $previousSnapshot = $previousState.issueSnapshots.$issueNum + } + + # Calculate new comments + $previousCommentCount = if ($previousSnapshot) { $previousSnapshot.commentCount } else { 0 } + $newComments = $issue.commentCount - $previousCommentCount + + # Categorize (priority order - first match wins) + $category = $null + $categoryReason = $null + $suggestedAction = $null + $additionalData = @{} + + # 1. Trending + if ($newComments -ge $TrendingThreshold) { + $category = "trending" + $categoryReason = "$newComments new comments since last run" + $suggestedAction = "Review conversation urgently" + } + + # 2. Closeable (check for merged PRs) + if (-not $category) { + $mergedPRs = $issue.linkedPRs | Where-Object { $_.state -eq "MERGED" } + if ($mergedPRs.Count -gt 0 -and $issue.state -eq "OPEN") { + $category = "closeable" + $categoryReason = "Has merged PR(s): #" + ($mergedPRs.number -join ", #") + $suggestedAction = "Close with thank you message" + $additionalData.mergedPRs = $mergedPRs.number + } + } + + # 3. 
Needs-Label + if (-not $category) { + $productLabels = $issue.labels | Where-Object { $_ -like "Product-*" } + $areaLabels = $issue.labels | Where-Object { $_ -like "Area-*" } + + if ($productLabels.Count -eq 0 -and $areaLabels.Count -eq 0) { + $suggestion = Get-LabelSuggestion -Issue $issue + $category = "needs-label" + $categoryReason = "Missing Product/Area label" + $suggestedAction = "Apply label: $($suggestion.labels -join ', ')" + $additionalData.suggestedLabels = $suggestion.labels + $additionalData.labelConfidence = $suggestion.confidence + } + } + + # 4. Stale-Waiting + if (-not $category) { + if ($issue.labels -contains "Needs-Author-Feedback") { + $lastAuthorComment = $issue.comments | + Where-Object { $_.author -eq $issue.author } | + Sort-Object createdAt -Descending | + Select-Object -First 1 + + if ($lastAuthorComment) { + $daysSince = ((Get-Date) - [datetime]$lastAuthorComment.createdAt).Days + if ($daysSince -gt 14) { + $category = "stale-waiting" + $categoryReason = "Waiting on author for $daysSince days" + $suggestedAction = "Ping or close" + $additionalData.daysWaiting = $daysSince + } + } + } + } + + # 5. Needs-Clarification (question, not bug) + if (-not $category) { + $isQuestion = $false + $titleAndBody = "$($issue.title) $($issue.body)" + + if ($titleAndBody -match '\?$' -or + $titleAndBody -match '(?i)(how (do|can|to)|why (does|is)|is (it|there) possible)' -or + $issue.labels -contains "Issue-Question") { + $isQuestion = $true + } + + if ($isQuestion -and ($issue.labels -notcontains "Issue-Bug")) { + $category = "needs-clarification" + $categoryReason = "Appears to be a question/inquiry" + $suggestedAction = "Draft explanation reply" + } + } + + # 6. 
Needs-Info + if (-not $category) { + $missingItems = @() + $body = $issue.body + + if ($body -and $body.Length -gt 0) { + if ($body -notmatch '(?i)(steps to reproduce|repro|how to reproduce)') { + $missingItems += "repro steps" + } + if ($body -notmatch '(?i)(expected|should|supposed to)') { + $missingItems += "expected behavior" + } + if ($body -notmatch '(?i)(version|v\d+\.\d+)') { + $missingItems += "PowerToys version" + } + } else { + $missingItems += "description" + } + + if ($missingItems.Count -gt 0) { + $category = "needs-info" + $categoryReason = "Missing: " + ($missingItems -join ", ") + $suggestedAction = "Post clarifying questions" + $additionalData.missingItems = $missingItems + } + } + + # 7. Default: review-needed + if (-not $category) { + $category = "review-needed" + $categoryReason = "Needs human review for categorization" + $suggestedAction = "Manual triage" + } + + # Calculate priority score + $priorityScore = Get-PriorityScore -Issue $issue + + # Store result + $categorized[$issueNum] = @{ + number = $issue.number + title = $issue.title + state = $issue.state + labels = $issue.labels + category = $category + categoryReason = $categoryReason + priorityScore = $priorityScore + suggestedAction = $suggestedAction + newComments = $newComments + totalComments = $issue.commentCount + reactions = $issue.reactions + updatedAt = $issue.updatedAt + additionalData = $additionalData + } + + Write-Host " -> $category (priority: $priorityScore)" +} + +# Group by category for summary +$byCategory = $categorized.Values | Group-Object category + +Write-Host "" +Write-Host "=== Categorization Summary ===" +foreach ($group in $byCategory | Sort-Object Count -Descending) { + Write-Host " $($group.Name): $($group.Count) issues" +} + +# Save results +$output = @{ + categorizedAt = (Get-Date).ToUniversalTime().ToString("o") + totalCategorized = $categorized.Count + byCategory = @{} + issues = $categorized +} + +foreach ($group in $byCategory) { + 
$output.byCategory[$group.Name] = @{ + count = $group.Count + topIssues = @($group.Group | Sort-Object priorityScore -Descending | Select-Object -First 3 | ForEach-Object { $_.number }) + } +} + +$output | ConvertTo-Json -Depth 10 | Set-Content $OutputPath +Write-Host "" +Write-Host "Results saved to: $OutputPath" diff --git a/.github/skills/continuous-issue-triage/scripts/collect-active-issues.ps1 b/.github/skills/continuous-issue-triage/scripts/collect-active-issues.ps1 new file mode 100644 index 000000000000..ff380d76abed --- /dev/null +++ b/.github/skills/continuous-issue-triage/scripts/collect-active-issues.ps1 @@ -0,0 +1,188 @@ +<# +.SYNOPSIS + Collects GitHub issues with activity since the last triage run. + +.DESCRIPTION + Fetches open issues updated since the last run, closed issues with new comments, + and issues with pending follow-up actions. + +.PARAMETER Since + ISO 8601 datetime string. Collect issues updated after this time. + If not specified, reads from triage-state.json. + +.PARAMETER LookbackDays + For first run (no state), how many days to look back. Default: 7. + +.PARAMETER OutputPath + Path to save collected issues JSON. Default: Generated Files/triage-issues/current-run/collected-issues.json + +.PARAMETER Limit + Maximum issues to collect per query. Default: 500. 
+
+.EXAMPLE
+    .\collect-active-issues.ps1
+
+.EXAMPLE
+    .\collect-active-issues.ps1 -Since "2026-01-29T00:00:00Z" -Limit 100
+#>
+
+param(
+    [Parameter()]
+    [string]$Since,
+
+    [Parameter()]
+    [int]$LookbackDays = 7,
+
+    [Parameter()]
+    [string]$OutputPath = "Generated Files/triage-issues/current-run/collected-issues.json",
+
+    [Parameter()]
+    [int]$Limit = 500
+)
+
+$ErrorActionPreference = "Stop"
+$statePath = "Generated Files/triage-issues/triage-state.json"  # hoisted: step 3 below reads $statePath even when -Since is supplied
+# Determine the "since" timestamp
+if (-not $Since) {
+    if (Test-Path $statePath) {
+        $state = Get-Content $statePath | ConvertFrom-Json
+        if ($state.lastRun) {
+            $Since = $state.lastRun
+            Write-Host "Using last run timestamp: $Since"
+        }
+    }
+
+    if (-not $Since) {
+        $Since = (Get-Date).AddDays(-$LookbackDays).ToUniversalTime().ToString("o")
+        Write-Host "First run - looking back $LookbackDays days to: $Since"
+    }
+}
+
+$sinceDate = [datetime]$Since
+
+# Ensure output directory exists
+$outputDir = Split-Path $OutputPath -Parent
+if (-not (Test-Path $outputDir)) {
+    New-Item -ItemType Directory -Force -Path $outputDir | Out-Null
+}
+
+$collectedIssues = @()
+
+# 1. Collect open issues updated since last run
+Write-Host "Fetching open issues updated since $Since..."
+$openIssues = gh issue list `
+    --state open `
+    --json number,title,updatedAt `
+    --limit $Limit 2>$null | ConvertFrom-Json
+
+$filteredOpen = $openIssues | Where-Object {
+    [datetime]$_.updatedAt -gt $sinceDate
+}
+Write-Host "  Found $($filteredOpen.Count) open issues with recent activity"
+
+foreach ($issue in $filteredOpen) {
+    $collectedIssues += @{
+        number = $issue.number
+        title = $issue.title
+        source = "open-updated"
+        updatedAt = $issue.updatedAt
+    }
+}
+
+# 2. Collect closed issues with recent activity (within tracking window)
+Write-Host "Fetching closed issues with recent comments..." 
+$trackingDays = 30 +$trackingCutoff = (Get-Date).AddDays(-$trackingDays) + +$closedIssues = gh issue list ` + --state closed ` + --json number,title,updatedAt,closedAt ` + --limit 200 2>$null | ConvertFrom-Json + +$activeClosedIssues = $closedIssues | Where-Object { + $closedAt = [datetime]$_.closedAt + $updatedAt = [datetime]$_.updatedAt + # Closed within tracking window AND updated after being closed + ($closedAt -gt $trackingCutoff) -and ($updatedAt -gt $closedAt) +} +Write-Host " Found $($activeClosedIssues.Count) closed issues with post-close activity" + +foreach ($issue in $activeClosedIssues) { + $collectedIssues += @{ + number = $issue.number + title = $issue.title + source = "closed-with-activity" + updatedAt = $issue.updatedAt + closedAt = $issue.closedAt + } +} + +# 3. Check pending follow-ups from state +if (Test-Path $statePath) { + $state = Get-Content $statePath | ConvertFrom-Json + + if ($state.pendingFollowUps) { + Write-Host "Checking $($state.pendingFollowUps.Count) pending follow-ups..." 
+ foreach ($pending in $state.pendingFollowUps) { + if ($pending.status -eq "pending") { + if ($collectedIssues.number -notcontains $pending.issueNumber) { + $collectedIssues += @{ + number = $pending.issueNumber + source = "pending-followup" + action = $pending.action + } + } + } + } + } + + # Check unhandled issues from previous run + if ($state.issueSnapshots) { + $unhandled = $state.issueSnapshots.PSObject.Properties | Where-Object { + $snapshot = $_.Value + $snapshot.pendingAction -and -not $snapshot.actionTaken + } + + if ($unhandled) { + Write-Host "Found $($unhandled.Count) unhandled issues from previous run" + foreach ($prop in $unhandled) { + $snapshot = $prop.Value + if ($collectedIssues.number -notcontains $snapshot.number) { + $collectedIssues += @{ + number = $snapshot.number + title = $snapshot.title + source = "unhandled-previous" + previousCategory = $snapshot.category + } + } + } + } + } +} + +# Deduplicate by issue number +$uniqueIssues = $collectedIssues | Group-Object number | ForEach-Object { + $_.Group | Select-Object -First 1 +} + +# Summary +Write-Host "" +Write-Host "=== Collection Summary ===" +Write-Host "Total unique issues: $($uniqueIssues.Count)" +Write-Host " - Open with activity: $(($uniqueIssues | Where-Object { $_.source -eq 'open-updated' }).Count)" +Write-Host " - Closed with activity: $(($uniqueIssues | Where-Object { $_.source -eq 'closed-with-activity' }).Count)" +Write-Host " - Pending follow-ups: $(($uniqueIssues | Where-Object { $_.source -eq 'pending-followup' }).Count)" +Write-Host " - Unhandled previous: $(($uniqueIssues | Where-Object { $_.source -eq 'unhandled-previous' }).Count)" + +# Save results +$output = @{ + collectedAt = (Get-Date).ToUniversalTime().ToString("o") + since = $Since + totalCount = $uniqueIssues.Count + issues = $uniqueIssues +} + +$output | ConvertTo-Json -Depth 10 | Set-Content $OutputPath +Write-Host "" +Write-Host "Results saved to: $OutputPath" diff --git 
a/.github/skills/continuous-issue-triage/scripts/generate-summary.ps1 b/.github/skills/continuous-issue-triage/scripts/generate-summary.ps1 new file mode 100644 index 000000000000..5c59efefc0b5 --- /dev/null +++ b/.github/skills/continuous-issue-triage/scripts/generate-summary.ps1 @@ -0,0 +1,210 @@ +<# +.SYNOPSIS + Generates executive summary and category reports from categorized issues. + +.DESCRIPTION + Creates markdown reports for each category and an executive summary + for the current triage run. + +.PARAMETER InputPath + Path to categorized issues JSON. Default: Generated Files/triage-issues/current-run/categorized-issues.json + +.PARAMETER OutputPath + Directory for generated reports. Default: Generated Files/triage-issues/current-run + +.PARAMETER RepoUrl + GitHub repository URL for issue links. Default: https://github.com/microsoft/PowerToys/issues + +.EXAMPLE + .\generate-summary.ps1 +#> + +param( + [Parameter()] + [string]$InputPath = "Generated Files/triage-issues/current-run/categorized-issues.json", + + [Parameter()] + [string]$OutputPath = "Generated Files/triage-issues/current-run", + + [Parameter()] + [string]$RepoUrl = "https://github.com/microsoft/PowerToys/issues" +) + +$ErrorActionPreference = "Stop" + +# Category display info +$CategoryInfo = @{ + "trending" = @{ emoji = "🔥"; name = "Trending"; priority = 1 } + "needs-label" = @{ emoji = "🏷️"; name = "Needs-Label"; priority = 2 } + "ready-for-fix" = @{ emoji = "✅"; name = "Ready-for-Fix"; priority = 3 } + "needs-info" = @{ emoji = "❓"; name = "Needs-Info"; priority = 4 } + "needs-clarification" = @{ emoji = "💬"; name = "Needs-Clarification"; priority = 5 } + "closeable" = @{ emoji = "✔️"; name = "Closeable"; priority = 6 } + "stale-waiting" = @{ emoji = "⏳"; name = "Stale-Waiting"; priority = 7 } + "duplicate-candidate" = @{ emoji = "🔁"; name = "Duplicate-Candidate"; priority = 8 } + "review-needed" = @{ emoji = "👀"; name = "Review-Needed"; priority = 9 } +} + +# Load categorized issues +if 
(-not (Test-Path $InputPath)) { + Write-Error "Input file not found: $InputPath. Run categorize-issues.ps1 first." + exit 1 +} + +$data = Get-Content $InputPath | ConvertFrom-Json -AsHashtable + +# Ensure output directories +New-Item -ItemType Directory -Force -Path $OutputPath | Out-Null +New-Item -ItemType Directory -Force -Path "$OutputPath/draft-replies" | Out-Null + +# Group issues by category +$byCategory = @{} +foreach ($issueNum in $data.issues.Keys) { + $issue = $data.issues[$issueNum] + $cat = $issue.category + if (-not $byCategory[$cat]) { + $byCategory[$cat] = @() + } + $byCategory[$cat] += $issue +} + +# Sort each category by priority +foreach ($cat in $byCategory.Keys) { + $byCategory[$cat] = $byCategory[$cat] | Sort-Object priorityScore -Descending +} + +# Generate Executive Summary +$summaryLines = @() +$summaryLines += "# Issue Triage Summary - $(Get-Date -Format 'yyyy-MM-dd')" +$summaryLines += "" +$summaryLines += "**Run Time**: $(Get-Date -Format 'HH:mm UTC') | **Issues Analyzed**: $($data.totalCategorized)" +$summaryLines += "" +$summaryLines += "## ⚡ Action Required by Category" +$summaryLines += "" +$summaryLines += "| Category | Count | Top Priority | Suggested Action |" +$summaryLines += "|----------|-------|--------------|------------------|" + +foreach ($catId in $CategoryInfo.Keys | Sort-Object { $CategoryInfo[$_].priority }) { + $info = $CategoryInfo[$catId] + $issues = $byCategory[$catId] + + if ($issues -and $issues.Count -gt 0) { + $top = $issues[0] + $topLink = "[#$($top.number)]($RepoUrl/$($top.number))" + $topInfo = $top.categoryReason + if ($topInfo.Length -gt 40) { $topInfo = $topInfo.Substring(0, 37) + "..." 
} + + $summaryLines += "| $($info.emoji) $($info.name) | $($issues.Count) | $topLink | $topInfo |" + } +} + +$summaryLines += "" +$summaryLines += "## 🎯 Top 10 Priority Actions" +$summaryLines += "" + +# Get top 10 across all categories +$allIssues = @() +foreach ($cat in $byCategory.Keys) { + $allIssues += $byCategory[$cat] +} +$topIssues = $allIssues | Sort-Object priorityScore -Descending | Select-Object -First 10 + +$priority = 1 +foreach ($issue in $topIssues) { + $info = $CategoryInfo[$issue.category] + $urgency = if ($issue.priorityScore -ge 80) { "**[Urgent]**" } + elseif ($issue.priorityScore -ge 60) { "**[High]**" } + elseif ($issue.priorityScore -ge 40) { "[Medium]" } + else { "[Low]" } + + $summaryLines += "$priority. $urgency $($info.emoji) [#$($issue.number)]($RepoUrl/$($issue.number)) - $($issue.categoryReason)" + $priority++ +} + +$summaryLines += "" +$summaryLines += "## 📁 Detailed Reports" +$summaryLines += "" + +foreach ($catId in $CategoryInfo.Keys | Sort-Object { $CategoryInfo[$_].priority }) { + $info = $CategoryInfo[$catId] + $issues = $byCategory[$catId] + + if ($issues -and $issues.Count -gt 0) { + $summaryLines += "- [$($info.emoji) $($info.name)](./$catId.md) ($($issues.Count) issues)" + } +} + +$summaryLines += "" +$summaryLines += "---" +$summaryLines += "*Generated by continuous-issue-triage skill*" + +$summaryLines -join "`n" | Set-Content "$OutputPath/summary.md" +Write-Host "Generated: summary.md" + +# Generate individual category reports +foreach ($catId in $byCategory.Keys) { + $info = $CategoryInfo[$catId] + $issues = $byCategory[$catId] + + if (-not $issues -or $issues.Count -eq 0) { continue } + + $reportLines = @() + $reportLines += "# $($info.emoji) $($info.name) Issues" + $reportLines += "" + $reportLines += "**Total**: $($issues.Count) issues" + $reportLines += "" + $reportLines += "## Overview" + $reportLines += "" + $reportLines += "| # | Issue | Priority | Reason | Labels |" + $reportLines += 
"|---|-------|----------|--------|--------|" + + foreach ($issue in $issues) { + $labelStr = ($issue.labels | Select-Object -First 3) -join ", " + if ($issue.labels.Count -gt 3) { $labelStr += "..." } + $reason = $issue.categoryReason + if ($reason.Length -gt 50) { $reason = $reason.Substring(0, 47) + "..." } + + $reportLines += "| [#$($issue.number)]($RepoUrl/$($issue.number)) | $($issue.title.Substring(0, [Math]::Min(50, $issue.title.Length))) | $($issue.priorityScore)/100 | $reason | $labelStr |" + } + + $reportLines += "" + $reportLines += "## Detailed Breakdown" + $reportLines += "" + + foreach ($issue in $issues) { + $reportLines += "### [#$($issue.number)]($RepoUrl/$($issue.number)): $($issue.title)" + $reportLines += "" + $reportLines += "- **Priority Score**: $($issue.priorityScore)/100" + $reportLines += "- **Category Reason**: $($issue.categoryReason)" + $reportLines += "- **Suggested Action**: $($issue.suggestedAction)" + $reportLines += "- **Reactions**: 👍 $($issue.reactions.thumbsUp) | ❤️ $($issue.reactions.heart)" + $reportLines += "- **Comments**: $($issue.totalComments) total ($($issue.newComments) new)" + $reportLines += "- **Labels**: $($issue.labels -join ', ')" + + if ($issue.additionalData) { + if ($issue.additionalData.suggestedLabels) { + $reportLines += "- **Suggested Labels**: $($issue.additionalData.suggestedLabels -join ', ') (confidence: $($issue.additionalData.labelConfidence)%)" + } + if ($issue.additionalData.missingItems) { + $reportLines += "- **Missing Info**: $($issue.additionalData.missingItems -join ', ')" + } + if ($issue.additionalData.mergedPRs) { + $reportLines += "- **Merged PRs**: #$($issue.additionalData.mergedPRs -join ', #')" + } + if ($issue.additionalData.daysWaiting) { + $reportLines += "- **Days Waiting**: $($issue.additionalData.daysWaiting)" + } + } + + $reportLines += "" + $reportLines += "---" + $reportLines += "" + } + + $reportLines -join "`n" | Set-Content "$OutputPath/$catId.md" + Write-Host "Generated: 
$catId.md" +} + +Write-Host "" +Write-Host "All reports generated in: $OutputPath" +Write-Host "Start with: summary.md" diff --git a/.github/skills/continuous-issue-triage/scripts/run-triage.ps1 b/.github/skills/continuous-issue-triage/scripts/run-triage.ps1 new file mode 100644 index 000000000000..0acae9f63ded --- /dev/null +++ b/.github/skills/continuous-issue-triage/scripts/run-triage.ps1 @@ -0,0 +1,692 @@ +<# +.SYNOPSIS + Runs continuous issue triage using GitHub Copilot CLI with parallel processing. + +.DESCRIPTION + Orchestrates the full triage workflow: + 1. Collects active issues + 2. Analyzes issues in parallel using Copilot CLI + 3. Categorizes results + 4. Generates reports + 5. Updates state for delta tracking + +.PARAMETER RunType + Type of triage run: daily, twice-weekly, weekly. Default: weekly + +.PARAMETER MaxParallel + Maximum parallel Copilot CLI invocations. Default: 5 + +.PARAMETER TimeoutMinutes + Timeout for each Copilot analysis. Default: 5 + +.PARAMETER MaxRetries + Maximum retries on timeout. Default: 3 + +.PARAMETER Model + Copilot model to use (optional). + +.PARAMETER McpConfig + Path to MCP config file (optional). + +.PARAMETER LookbackDays + For first run, days to look back. Default: 7 + +.PARAMETER Force + Force re-analysis of all issues, ignoring cache. 
+ +.EXAMPLE + .\run-triage.ps1 + +.EXAMPLE + .\run-triage.ps1 -RunType daily -MaxParallel 10 -Model "claude-sonnet-4" +#> + +[CmdletBinding()] +param( + [Parameter()] + [ValidateSet("daily", "twice-weekly", "weekly")] + [string]$RunType = "weekly", + + [Parameter()] + [int]$MaxParallel = 5, + + [Parameter()] + [int]$TimeoutMinutes = 5, + + [Parameter()] + [int]$MaxRetries = 3, + + [Parameter()] + [string]$Model, + + [Parameter()] + [string]$McpConfig, + + [Parameter()] + [int]$LookbackDays = 7, + + [Parameter()] + [switch]$Force +) + +$ErrorActionPreference = "Stop" +$repoRoot = git rev-parse --show-toplevel 2>$null + +# Resolve config directory name (.github or .claude) from script location +$_cfgDir = if ($PSScriptRoot -match '[\\/](\.github|\.claude)[\\/]') { $Matches[1] } else { '.github' } +if (-not $repoRoot) { + $repoRoot = (Get-Location).Path +} + +# Paths +$triageRoot = Join-Path $repoRoot "Generated Files/triage-issues" +$currentRunPath = Join-Path $triageRoot "current-run" +$statePath = Join-Path $triageRoot "triage-state.json" +$issueCachePath = Join-Path $triageRoot "issue-cache" +$historyPath = Join-Path $triageRoot "history" + +# Ensure directories exist +@($triageRoot, $currentRunPath, $issueCachePath, $historyPath) | ForEach-Object { + if (-not (Test-Path $_)) { + New-Item -ItemType Directory -Path $_ -Force | Out-Null + } +} + +Write-Host "═══════════════════════════════════════════════════════════════" -ForegroundColor Cyan +Write-Host " PowerToys Issue Triage - $RunType run" -ForegroundColor Cyan +Write-Host " Started: $(Get-Date -Format 'yyyy-MM-dd HH:mm:ss')" -ForegroundColor Cyan +Write-Host "═══════════════════════════════════════════════════════════════" -ForegroundColor Cyan +Write-Host "" + +#region State Management +Write-Host "[1/6] Loading previous state..." 
-ForegroundColor Yellow + +$state = $null +if (Test-Path $statePath) { + $state = Get-Content $statePath -Raw | ConvertFrom-Json -AsHashtable + Write-Host " ✓ Loaded state from: $($state.lastRun)" -ForegroundColor Green + Write-Host " Previous run type: $($state.lastRunType)" -ForegroundColor Gray + Write-Host " Known issues: $($state.issueSnapshots.Count)" -ForegroundColor Gray +} else { + Write-Host " First run - initializing fresh state" -ForegroundColor Yellow + $state = @{ + version = "1.0" + lastRun = $null + lastRunType = $null + issueSnapshots = @{} + pendingFollowUps = @() + closedWithActivity = @() + analysisResults = @{} + statistics = @{ + totalRunCount = 0 + issuesAnalyzed = 0 + repliesPosted = 0 + issuesClosed = 0 + } + } +} +#endregion + +#region Issue Collection +Write-Host "" +Write-Host "[2/6] Collecting active issues..." -ForegroundColor Yellow + +$since = if ($state.lastRun) { $state.lastRun } else { (Get-Date).AddDays(-$LookbackDays).ToUniversalTime().ToString("o") } +Write-Host " Looking for issues updated since: $since" -ForegroundColor Gray + +# Collect open issues with recent activity +$openIssuesJson = gh issue list --state open --json number,title,updatedAt,labels --limit 500 2>$null +$openIssues = $openIssuesJson | ConvertFrom-Json | Where-Object { + [datetime]$_.updatedAt -gt [datetime]$since +} + +# Collect closed issues with post-close activity (within 30 days) +$closedIssuesJson = gh issue list --state closed --json number,title,updatedAt,closedAt --limit 200 2>$null +$closedIssues = $closedIssuesJson | ConvertFrom-Json | Where-Object { + $closedAt = [datetime]$_.closedAt + $updatedAt = [datetime]$_.updatedAt + $cutoff = (Get-Date).AddDays(-30) + ($closedAt -gt $cutoff) -and ($updatedAt -gt $closedAt) +} + +# Combine and dedupe +$allIssues = @() +$allIssues += $openIssues | ForEach-Object { @{ number = $_.number; title = $_.title; state = "open"; updatedAt = $_.updatedAt } } +$allIssues += $closedIssues | ForEach-Object { @{ number = 
$_.number; title = $_.title; state = "closed"; updatedAt = $_.updatedAt } } + +# Add pending follow-ups from previous run +if ($state.pendingFollowUps) { + foreach ($pending in $state.pendingFollowUps) { + if ($pending.status -eq "pending" -and ($allIssues.number -notcontains $pending.issueNumber)) { + $allIssues += @{ number = $pending.issueNumber; title = "pending-followup"; state = "unknown" } + } + } +} + +$uniqueIssues = $allIssues | Group-Object number | ForEach-Object { $_.Group | Select-Object -First 1 } + +Write-Host " ✓ Found $($uniqueIssues.Count) issues to analyze" -ForegroundColor Green +Write-Host " - Open with activity: $(($uniqueIssues | Where-Object { $_.state -eq 'open' }).Count)" -ForegroundColor Gray +Write-Host " - Closed with activity: $(($uniqueIssues | Where-Object { $_.state -eq 'closed' }).Count)" -ForegroundColor Gray +#endregion + +#region Filter for Analysis +Write-Host "" +Write-Host "[3/6] Filtering issues for analysis..." -ForegroundColor Yellow + +$issuesToAnalyze = @() +foreach ($issue in $uniqueIssues) { + $issueNum = $issue.number + $cached = $state.analysisResults[$issueNum.ToString()] + + $needsAnalysis = $false + $reason = "" + + if ($Force) { + $needsAnalysis = $true + $reason = "forced" + } + elseif (-not $cached) { + $needsAnalysis = $true + $reason = "new" + } + elseif ($cached.analyzedAt) { + $daysSinceAnalysis = ((Get-Date) - [datetime]$cached.analyzedAt).Days + if ($daysSinceAnalysis -gt 7) { + $needsAnalysis = $true + $reason = "stale-cache" + } + elseif ($cached.commentCountAtAnalysis -and $state.issueSnapshots[$issueNum.ToString()]) { + $previousCount = $state.issueSnapshots[$issueNum.ToString()].commentCount + if ($cached.commentCountAtAnalysis -lt $previousCount) { + $needsAnalysis = $true + $reason = "new-comments" + } + } + } + + if ($needsAnalysis) { + $issuesToAnalyze += @{ + number = $issueNum + title = $issue.title + state = $issue.state + reason = $reason + } + } +} + +Write-Host " ✓ 
$($issuesToAnalyze.Count) issues need analysis" -ForegroundColor Green +Write-Host " ✓ $($uniqueIssues.Count - $issuesToAnalyze.Count) issues using cached results" -ForegroundColor Gray +#endregion + +#region Parallel Copilot Analysis +Write-Host "" +Write-Host "[4/6] Running parallel Copilot analysis..." -ForegroundColor Yellow +Write-Host " Max parallel: $MaxParallel | Timeout: ${TimeoutMinutes}m | Max retries: $MaxRetries" -ForegroundColor Gray +Write-Host "" + +# Prepare the prompt template +$promptTemplate = @" +Analyze GitHub issue #ISSUE_NUMBER for PowerToys triage. + +Use the review-issue prompt methodology from $_cfgDir/prompts/review-issue.prompt.md. + +Output a JSON summary to stdout with this structure: +{ + "issueNumber": ISSUE_NUMBER, + "category": "trending|needs-label|ready-for-fix|needs-info|needs-clarification|closeable|stale-waiting|duplicate-candidate|review-needed", + "categoryReason": "brief explanation", + "priorityScore": 0-100, + "suggestedAction": "what human should do", + "suggestedLabels": ["label1", "label2"], + "labelConfidence": 0-100, + "missingInfo": ["item1", "item2"], + "similarIssues": [12345, 12346], + "potentialAssignees": ["@user1", "@user2"], + "draftReply": "if needs-info or needs-clarification, draft the reply message here", + "clarityScore": 0-100, + "feasibilityScore": 0-100, + "newCommentsSummary": "brief summary of recent discussion if trending" +} + +Focus on actionable triage. Be concise. 
+"@ + +# Thread-safe collections for results +$analysisResults = [System.Collections.Concurrent.ConcurrentDictionary[string, object]]::new() +$analysisErrors = [System.Collections.Concurrent.ConcurrentBag[object]]::new() + +# Progress tracking +$totalIssues = $issuesToAnalyze.Count +$completedCount = [ref]0 +$startTime = Get-Date + +if ($totalIssues -gt 0) { + $issuesToAnalyze | ForEach-Object -ThrottleLimit $MaxParallel -Parallel { + $issue = $_ + $issueNum = $issue.number + $results = $using:analysisResults + $errors = $using:analysisErrors + $completed = $using:completedCount + $total = $using:totalIssues + $timeoutMin = $using:TimeoutMinutes + $maxRetry = $using:MaxRetries + $model = $using:Model + $mcpCfg = $using:McpConfig + $template = $using:promptTemplate + $root = $using:repoRoot + $cachePath = $using:issueCachePath + + $prompt = $template -replace 'ISSUE_NUMBER', $issueNum + $logDir = Join-Path $cachePath $issueNum + if (-not (Test-Path $logDir)) { + New-Item -ItemType Directory -Path $logDir -Force | Out-Null + } + + $success = $false + $lastError = $null + $output = $null + + for ($retry = 0; $retry -lt $maxRetry -and -not $success; $retry++) { + if ($retry -gt 0) { + Write-Host " ⟳ Retry $retry/$maxRetry for #$issueNum" -ForegroundColor Yellow + Start-Sleep -Seconds 10 + } + + try { + # Build Copilot CLI arguments + $copilotArgs = @() + if ($mcpCfg) { + $copilotArgs += @('--additional-mcp-config', $mcpCfg) + } + $copilotArgs += @('-p', $prompt, '--yolo', '--agent', 'ReviewIssue') + if ($model) { + $copilotArgs += @('--model', $model) + } + + # Run with timeout + $job = Start-Job -ScriptBlock { + param($args) + & copilot @args 2>&1 + } -ArgumentList (,$copilotArgs) + + $timeoutSec = $timeoutMin * 60 + $jobResult = $job | Wait-Job -Timeout $timeoutSec + + if ($job.State -eq 'Running') { + # Timeout - kill the job + $job | Stop-Job -PassThru | Remove-Job -Force + $lastError = "Timeout after ${timeoutMin} minutes" + } else { + $output = $job | Receive-Job 
+ $job | Remove-Job -Force + + # Check for valid output + if ($output) { + $outputStr = $output -join "`n" + # Try to extract JSON from output + if ($outputStr -match '\{[\s\S]*"issueNumber"[\s\S]*\}') { + $success = $true + } else { + $lastError = "No valid JSON in output" + } + } else { + $lastError = "Empty output from Copilot" + } + } + } + catch { + $lastError = $_.Exception.Message + } + } + + # Update progress + [System.Threading.Interlocked]::Increment($completed) | Out-Null + $pct = [math]::Round(($completed.Value / $total) * 100) + + if ($success) { + # Save output and parse result + $outputStr = $output -join "`n" + $outputStr | Out-File -FilePath (Join-Path $logDir "analysis.log") -Force + + # Try to extract JSON + try { + if ($outputStr -match '(\{[\s\S]*"issueNumber"[\s\S]*\})') { + $jsonStr = $Matches[1] + $parsed = $jsonStr | ConvertFrom-Json -AsHashtable + $results[$issueNum.ToString()] = @{ + success = $true + data = $parsed + analyzedAt = (Get-Date).ToUniversalTime().ToString("o") + } + Write-Host " [$pct%] ✓ #$issueNum - $($parsed.category)" -ForegroundColor Green + } + } + catch { + $errors.Add(@{ issueNumber = $issueNum; error = "JSON parse error: $_" }) + Write-Host " [$pct%] ⚠ #$issueNum - JSON parse failed" -ForegroundColor Yellow + } + } else { + # Log error + $lastError | Out-File -FilePath (Join-Path $logDir "error.log") -Force + $errors.Add(@{ issueNumber = $issueNum; error = $lastError; retries = $maxRetry }) + Write-Host " [$pct%] ✗ #$issueNum - $lastError" -ForegroundColor Red + } + } +} + +$elapsed = (Get-Date) - $startTime +Write-Host "" +Write-Host " Analysis complete in $([math]::Round($elapsed.TotalMinutes, 1)) minutes" -ForegroundColor Cyan +Write-Host " ✓ Successful: $($analysisResults.Count)" -ForegroundColor Green +Write-Host " ✗ Failed: $($analysisErrors.Count)" -ForegroundColor $(if ($analysisErrors.Count -gt 0) { 'Red' } else { 'Gray' }) +#endregion + +#region Merge Results & Categorize +Write-Host "" +Write-Host "[5/6] 
Merging results and updating state..." -ForegroundColor Yellow + +# Merge new analysis with cached results +$allResults = @{} + +# Add cached results +foreach ($key in $state.analysisResults.Keys) { + if (-not $analysisResults.ContainsKey($key)) { + $allResults[$key] = $state.analysisResults[$key] + } +} + +# Add new results +foreach ($key in $analysisResults.Keys) { + $allResults[$key] = $analysisResults[$key] +} + +# Categorize for reporting +$categorized = @{ + trending = @() + "needs-label" = @() + "ready-for-fix" = @() + "needs-info" = @() + "needs-clarification" = @() + closeable = @() + "stale-waiting" = @() + "duplicate-candidate" = @() + "review-needed" = @() +} + +foreach ($key in $allResults.Keys) { + $result = $allResults[$key] + if ($result.success -and $result.data) { + $data = $result.data + $category = $data.category + if ($categorized.ContainsKey($category)) { + $categorized[$category] += $data + } else { + $categorized["review-needed"] += $data + } + } +} + +# Sort each category by priority +foreach ($cat in $categorized.Keys) { + $categorized[$cat] = $categorized[$cat] | Sort-Object { -[int]$_.priorityScore } +} + +Write-Host " Categorization complete:" -ForegroundColor Green +foreach ($cat in $categorized.Keys | Sort-Object { $categorized[$_].Count } -Descending) { + if ($categorized[$cat].Count -gt 0) { + Write-Host " - $cat`: $($categorized[$cat].Count)" -ForegroundColor Gray + } +} +#endregion + +#region Generate Reports +Write-Host "" +Write-Host "[6/6] Generating reports..." 
-ForegroundColor Yellow + +# Archive previous run +$archiveDate = Get-Date -Format "yyyy-MM-dd_HHmm" +$archivePath = Join-Path $historyPath $archiveDate +if (Test-Path "$currentRunPath/summary.md") { + New-Item -ItemType Directory -Path $archivePath -Force | Out-Null + Copy-Item -Path "$currentRunPath/*" -Destination $archivePath -Recurse -Force + Write-Host " ✓ Archived previous run to: $archiveDate" -ForegroundColor Gray +} + +# Clean current run +if (Test-Path $currentRunPath) { + Remove-Item -Path "$currentRunPath/*" -Recurse -Force -ErrorAction SilentlyContinue +} +New-Item -ItemType Directory -Path "$currentRunPath/draft-replies" -Force | Out-Null + +# Category info for display +$categoryInfo = @{ + "trending" = @{ emoji = "🔥"; name = "Trending" } + "needs-label" = @{ emoji = "🏷️"; name = "Needs-Label" } + "ready-for-fix" = @{ emoji = "✅"; name = "Ready-for-Fix" } + "needs-info" = @{ emoji = "❓"; name = "Needs-Info" } + "needs-clarification" = @{ emoji = "💬"; name = "Needs-Clarification" } + "closeable" = @{ emoji = "✔️"; name = "Closeable" } + "stale-waiting" = @{ emoji = "⏳"; name = "Stale-Waiting" } + "duplicate-candidate" = @{ emoji = "🔁"; name = "Duplicate-Candidate" } + "review-needed" = @{ emoji = "👀"; name = "Review-Needed" } +} + +$repoUrl = "https://github.com/microsoft/PowerToys/issues" + +# Generate summary.md +$summary = @" +# Issue Triage Summary - $(Get-Date -Format 'yyyy-MM-dd') + +**Run Type**: $RunType | **Time**: $(Get-Date -Format 'HH:mm UTC') | **Duration**: $([math]::Round($elapsed.TotalMinutes, 1)) min + +## 📊 Delta Since Last Run + +| Metric | Value | +|--------|-------| +| Issues with new activity | $($uniqueIssues.Count) | +| Newly analyzed | $($analysisResults.Count) | +| Using cached analysis | $($allResults.Count - $analysisResults.Count) | +| Analysis failures | $($analysisErrors.Count) | + +## ⚡ Action Required by Category + +| Category | Count | Top Priority | Score | +|----------|-------|--------------|-------| + +"@ + 
+foreach ($cat in @("trending", "needs-label", "ready-for-fix", "needs-info", "needs-clarification", "closeable", "stale-waiting", "duplicate-candidate", "review-needed")) { + $info = $categoryInfo[$cat] + $issues = $categorized[$cat] + if ($issues.Count -gt 0) { + $top = $issues[0] + $summary += "| $($info.emoji) $($info.name) | $($issues.Count) | [#$($top.issueNumber)]($repoUrl/$($top.issueNumber)) | $($top.priorityScore)/100 |`n" + } +} + +$summary += @" + +## 🎯 Top 10 Priority Actions + +"@ + +# Get top 10 across all categories +$allIssueData = @() +foreach ($cat in $categorized.Keys) { + $allIssueData += $categorized[$cat] +} +$topIssues = $allIssueData | Sort-Object { -[int]$_.priorityScore } | Select-Object -First 10 + +$priority = 1 +foreach ($issue in $topIssues) { + $info = $categoryInfo[$issue.category] + $urgency = if ([int]$issue.priorityScore -ge 80) { "**[Urgent]**" } + elseif ([int]$issue.priorityScore -ge 60) { "**[High]**" } + elseif ([int]$issue.priorityScore -ge 40) { "[Medium]" } + else { "[Low]" } + + $summary += "$priority. 
$urgency $($info.emoji) [#$($issue.issueNumber)]($repoUrl/$($issue.issueNumber)) - $($issue.categoryReason)`n" + $priority++ +} + +$summary += @" + +## 📁 Detailed Reports + +"@ + +foreach ($cat in @("trending", "needs-label", "ready-for-fix", "needs-info", "needs-clarification", "closeable", "stale-waiting", "duplicate-candidate")) { + $info = $categoryInfo[$cat] + if ($categorized[$cat].Count -gt 0) { + $summary += "- [$($info.emoji) $($info.name)](./$cat.md) ($($categorized[$cat].Count) issues)`n" + } +} + +$summary += @" + +## 📝 Draft Replies Ready + +"@ + +$draftsWritten = 0 +foreach ($cat in @("needs-info", "needs-clarification", "closeable", "stale-waiting")) { + foreach ($issue in $categorized[$cat]) { + if ($issue.draftReply) { + $draftPath = Join-Path "$currentRunPath/draft-replies" "issue-$($issue.issueNumber).md" + $draftContent = @" +--- +issue: $($issue.issueNumber) +category: $($issue.category) +generated: $(Get-Date -Format "o") +--- + +$($issue.draftReply) +"@ + $draftContent | Out-File -FilePath $draftPath -Force + $draftsWritten++ + } + } +} + +$summary += "**$draftsWritten** draft replies ready in ``draft-replies/```n`n" + +if ($analysisErrors.Count -gt 0) { + $summary += @" + +## ⚠️ Analysis Failures + +| Issue | Error | +|-------|-------| + +"@ + foreach ($err in $analysisErrors) { + $summary += "| #$($err.issueNumber) | $($err.error) |`n" + } +} + +$summary += @" + +--- +*Generated by continuous-issue-triage skill* +*Next suggested run: $(Get-Date (Get-Date).AddDays($(if ($RunType -eq 'daily') { 1 } elseif ($RunType -eq 'twice-weekly') { 3 } else { 7 })) -Format 'yyyy-MM-dd')* +"@ + +$summary | Out-File -FilePath "$currentRunPath/summary.md" -Force +Write-Host " ✓ Generated: summary.md" -ForegroundColor Green + +# Generate category reports +foreach ($cat in $categorized.Keys) { + $issues = $categorized[$cat] + if ($issues.Count -eq 0) { continue } + + $info = $categoryInfo[$cat] + $report = @" +# $($info.emoji) $($info.name) Issues + 
+**Total**: $($issues.Count) issues + +## Overview + +| # | Issue | Priority | Reason | Suggested Action | +|---|-------|----------|--------|------------------| + +"@ + + foreach ($issue in $issues) { + $reason = if ($issue.categoryReason.Length -gt 40) { $issue.categoryReason.Substring(0, 37) + "..." } else { $issue.categoryReason } + $action = if ($issue.suggestedAction.Length -gt 40) { $issue.suggestedAction.Substring(0, 37) + "..." } else { $issue.suggestedAction } + $report += "| [#$($issue.issueNumber)]($repoUrl/$($issue.issueNumber)) | $($issue.priorityScore)/100 | $reason | $action |`n" + } + + $report += "`n## Detailed Breakdown`n`n" + + foreach ($issue in $issues) { + $report += @" +### [#$($issue.issueNumber)]($repoUrl/$($issue.issueNumber)) + +- **Priority Score**: $($issue.priorityScore)/100 +- **Category Reason**: $($issue.categoryReason) +- **Suggested Action**: $($issue.suggestedAction) +- **Clarity Score**: $($issue.clarityScore)/100 +- **Feasibility Score**: $($issue.feasibilityScore)/100 + +"@ + if ($issue.suggestedLabels -and $issue.suggestedLabels.Count -gt 0) { + $report += "- **Suggested Labels**: $($issue.suggestedLabels -join ', ') (confidence: $($issue.labelConfidence)%)`n" + } + if ($issue.missingInfo -and $issue.missingInfo.Count -gt 0) { + $report += "- **Missing Info**: $($issue.missingInfo -join ', ')`n" + } + if ($issue.potentialAssignees -and $issue.potentialAssignees.Count -gt 0) { + $report += "- **Potential Assignees**: $($issue.potentialAssignees -join ', ')`n" + } + if ($issue.similarIssues -and $issue.similarIssues.Count -gt 0) { + $report += "- **Similar Issues**: #$($issue.similarIssues -join ', #')`n" + } + if ($issue.draftReply) { + $report += "- **Draft Reply**: [View](./draft-replies/issue-$($issue.issueNumber).md)`n" + } + $report += "`n---`n`n" + } + + $report | Out-File -FilePath "$currentRunPath/$cat.md" -Force + Write-Host " ✓ Generated: $cat.md ($($issues.Count) issues)" -ForegroundColor Green +} +#endregion + 
+#region Save State +Write-Host "" +Write-Host "Saving state for next run..." -ForegroundColor Yellow + +# Update issue snapshots +foreach ($issue in $uniqueIssues) { + $issueNum = $issue.number.ToString() + $result = $allResults[$issueNum] + + $state.issueSnapshots[$issueNum] = @{ + number = $issue.number + title = $issue.title + state = $issue.state + lastSeenAt = (Get-Date).ToUniversalTime().ToString("o") + category = if ($result.data) { $result.data.category } else { "unknown" } + priorityScore = if ($result.data) { $result.data.priorityScore } else { 0 } + } +} + +$state.lastRun = (Get-Date).ToUniversalTime().ToString("o") +$state.lastRunType = $RunType +$state.analysisResults = $allResults +$state.statistics.totalRunCount++ +$state.statistics.issuesAnalyzed += $analysisResults.Count + +$state | ConvertTo-Json -Depth 10 | Out-File -FilePath $statePath -Force +Write-Host " ✓ State saved" -ForegroundColor Green +#endregion + +Write-Host "" +Write-Host "═══════════════════════════════════════════════════════════════" -ForegroundColor Cyan +Write-Host " Triage complete!" -ForegroundColor Cyan +Write-Host " Reports: $currentRunPath" -ForegroundColor Cyan +Write-Host " Start with: summary.md" -ForegroundColor Cyan +Write-Host "═══════════════════════════════════════════════════════════════" -ForegroundColor Cyan diff --git a/.github/skills/issue-fix/LICENSE.txt b/.github/skills/issue-fix/LICENSE.txt new file mode 100644 index 000000000000..22aed37e650b --- /dev/null +++ b/.github/skills/issue-fix/LICENSE.txt @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) Microsoft Corporation. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/.github/skills/issue-fix/SKILL.md b/.github/skills/issue-fix/SKILL.md new file mode 100644 index 000000000000..559c39589f22 --- /dev/null +++ b/.github/skills/issue-fix/SKILL.md @@ -0,0 +1,220 @@ +--- +name: issue-fix +description: Automatically fix GitHub issues and create PRs. Use when asked to fix an issue, implement a feature from an issue, auto-fix an issue, apply implementation plan, create code changes for an issue, resolve a GitHub issue, or submit a PR for an issue. Creates isolated git worktree, applies AI-generated fixes, commits changes, and creates pull requests. +license: Complete terms in LICENSE.txt +--- + +# Issue Fix Skill + +Automatically fix GitHub issues by creating isolated worktrees, applying AI-generated code changes, and creating pull requests - the complete issue-to-PR workflow. 
+
+## Skill Contents
+
+This skill is **self-contained** with all required resources:
+
+```
+.github/skills/issue-fix/
+├── SKILL.md # This file
+├── LICENSE.txt # MIT License
+├── scripts/
+│ ├── Start-IssueAutoFix.ps1 # Main fix script (creates worktree, applies fix)
+│ ├── Start-IssueFixParallel.ps1 # Parallel runner (single terminal)
+│ ├── Get-WorktreeStatus.ps1 # Worktree status helper
+│ ├── Submit-IssueFix.ps1 # Commit and create PR
+│ └── IssueReviewLib.ps1 # Shared helpers
+└── references/
+ ├── fix-issue.prompt.md # AI prompt for fixing
+ ├── create-commit-title.prompt.md # AI prompt for commit messages
+ ├── create-pr-summary.prompt.md # AI prompt for PR descriptions
+ └── mcp-config.json # MCP configuration
+```
+
+## Output
+
+- **Worktrees**: Created at drive root level `Q:/PowerToys-xxxx/`
+- **PRs**: Created on GitHub linking to the original issue
+- **Signal file**: `Generated Files/issueFix/<IssueNumber>/.signal`
+
+## Signal File
+
+On completion, a `.signal` file is created for orchestrator coordination:
+
+```json
+{
+ "status": "success",
+ "issueNumber": 45363,
+ "timestamp": "2026-02-04T10:05:23Z",
+ "worktreePath": "Q:/PowerToys-ab12"
+}
+```
+
+Status values: `success`, `failure`
+
+## When to Use This Skill
+
+- Fix a specific GitHub issue automatically
+- Implement a feature described in an issue
+- Apply an existing implementation plan
+- Create code changes and submit PR for an issue
+- Auto-fix high-confidence issues end-to-end
+
+## Prerequisites
+
+- GitHub CLI (`gh`) installed and authenticated
+- Issue must be reviewed first (use `issue-review` skill)
+- PowerShell 7+ for running scripts
+- Copilot CLI or Claude CLI installed
+
+## Required Variables
+
+| Variable | Description | Example |
+|----------|-------------|---------|
+| `{{IssueNumber}}` | GitHub issue number to fix | `44044` |
+
+## Workflow
+
+### Step 1: Ensure Issue is Reviewed
+
+If not already reviewed, use the `issue-review` skill first.
+ +### Step 2: Run Auto-Fix + +```powershell +# Create worktree and apply fix +.github/skills/issue-fix/scripts/Start-IssueAutoFix.ps1 -IssueNumber {{IssueNumber}} -CLIType copilot -Force +``` + +This will: +1. Create a new git worktree with branch `issue/{{IssueNumber}}` +2. Copy the review files to the worktree +3. Launch Copilot CLI to implement the fix +4. Build and verify the changes + +### Step 3: Submit PR + +```powershell +# Commit changes and create PR +.github/skills/issue-fix/scripts/Submit-IssueFix.ps1 -IssueNumber {{IssueNumber}} -CLIType copilot -Force +``` + +This will: +1. Generate AI commit message +2. Commit all changes +3. Push to origin +4. Create PR with AI-generated description +5. Link PR to issue with "Fixes #{{IssueNumber}}" + +### One-Step Alternative + +To fix AND submit in one command: + +```powershell +.github/skills/issue-fix/scripts/Start-IssueAutoFix.ps1 -IssueNumber {{IssueNumber}} -CLIType copilot -CreatePR -Force +``` + +## CLI Options + +### Start-IssueAutoFix.ps1 + +| Parameter | Description | Default | +|-----------|-------------|---------| +| `-IssueNumber` | Issue to fix | Required | +| `-CLIType` | AI CLI: `copilot` or `claude` | `copilot` | +| `-Model` | Copilot model (e.g., `gpt-5.2-codex`) | (optional) | +| `-CreatePR` | Auto-create PR after fix | `false` | +| `-SkipWorktree` | Fix in current repo (no worktree) | `false` | +| `-Force` | Skip confirmation prompts | `false` | + +### Submit-IssueFix.ps1 + +| Parameter | Description | Default | +|-----------|-------------|---------| +| `-IssueNumber` | Issue to submit | Required | +| `-CLIType` | AI CLI: `copilot`, `claude`, `manual` | `copilot` | +| `-Draft` | Create as draft PR | `false` | +| `-SkipCommit` | Skip commit (changes already committed) | `false` | +| `-Force` | Skip confirmation prompts | `false` | + +## Batch Processing + +Fix multiple issues: + +```powershell +# Fix multiple issues (creates worktrees, applies fixes) 
+.github/skills/issue-fix/scripts/Start-IssueAutoFix.ps1 -IssueNumbers 44044, 32950 -CLIType copilot -Force
+
+# Submit all fixed issues as PRs
+.github/skills/issue-fix/scripts/Submit-IssueFix.ps1 -CLIType copilot -Force
+```
+
+## Parallel Execution (IMPORTANT)
+
+**DO NOT** spawn separate terminals for each issue. Use the dedicated scripts:
+
+```powershell
+# Run fixes in parallel (single terminal)
+.github/skills/issue-fix/scripts/Start-IssueFixParallel.ps1 -IssueNumbers 28726,13336,27507,3054,37800 -CLIType copilot -ThrottleLimit 5 -Force
+
+# Check worktree status
+.github/skills/issue-fix/scripts/Get-WorktreeStatus.ps1
+```
+
+This allows:
+- Tracking all jobs in one place
+- Waiting for completion with proper synchronization
+- Controlling parallelism with `-ThrottleLimit`
+- Combined output visibility
+
+## Troubleshooting
+
+| Problem | Solution |
+|---------|----------|
+| Worktree already exists | Use existing worktree or `git worktree remove <path>` |
+| No implementation plan | Use `issue-review` skill first |
+| Build failures | Check build logs, may need manual intervention |
+| PR already exists | Script will skip, check existing PR |
+| CLI not found | Install Copilot CLI |
+
+## PR Creation Requirements (CRITICAL)
+
+**NEVER create PRs with placeholder/stub code.** Every PR must have:
+
+1. **Real implementation** - Actual working code that addresses the issue
+2. **Proper title** - Follow `create-commit-title.prompt.md` (Conventional Commits)
+3.
**Full description** - Follow `create-pr-summary.prompt.md` based on actual diff + +### PR Title Format (Conventional Commits) +``` +feat(module): add feature description +fix(module): fix bug description +docs(module): update documentation +``` + +### PR Description Must Include +- Summary of changes (from actual diff) +- `Fixes #IssueNumber` link +- Checklist items marked appropriately +- Validation steps performed + +**Example of BAD PR (never do this):** +``` +Title: fix: address issue #12345 +Body: Fixes #12345 +Code: class Fix12345 { public void Apply() { } } // EMPTY STUB! +``` + +**Example of GOOD PR:** +``` +Title: feat(peek): add symbolic link resolution for PDF/HTML files +Body: ## Summary +Adds SymlinkResolver helper to resolve symlinks... +[Full description based on create-pr-summary.prompt.md] +``` + +## Related Skills + +| Skill | Purpose | +|-------|---------| +| `issue-review` | Review issues, generate implementation plans | +| `pr-review` | Review the created PR | +| `pr-fix` | Fix PR review comments | diff --git a/.github/skills/issue-fix/references/create-commit-title.prompt.md b/.github/skills/issue-fix/references/create-commit-title.prompt.md new file mode 100644 index 000000000000..f61285c30471 --- /dev/null +++ b/.github/skills/issue-fix/references/create-commit-title.prompt.md @@ -0,0 +1,49 @@ +--- +agent: 'agent' +description: 'Generate an 80-character git commit title for the local diff' +--- + +# Generate Commit Title + +## Purpose +Provide a single-line, ready-to-paste git commit title (<= 80 characters) that reflects the most important local changes since `HEAD`. + +## Input to collect +- Run exactly one command to view the local diff: + ```@terminal + git diff HEAD + ``` + +## How to decide the title +1. From the diff, find the dominant area (e.g., `src/modules/*`, `doc/devdocs/**`) and the change type (bug fix, docs update, config tweak). +2. 
Draft an imperative, plain-ASCII title that: + - Mentions the primary component when obvious (e.g., `FancyZones:` or `Docs:`) + - Stays within 80 characters and has no trailing punctuation + +## Final output +- Reply with only the commit title on a single line—no extra text. + +## PR title convention (when asked) +Use Conventional Commits style: + +`(): ` + +**Allowed types** +- feat, fix, docs, refactor, perf, test, build, ci, chore + +**Scope rules** +- Use a short, PowerToys-focused scope (one word preferred). Common scopes: + - Core: `runner`, `settings-ui`, `common`, `docs`, `build`, `ci`, `installer`, `gpo`, `dsc` + - Modules: `fancyzones`, `powerrename`, `awake`, `colorpicker`, `imageresizer`, `keyboardmanager`, `mouseutils`, `peek`, `hosts`, `file-locksmith`, `screen-ruler`, `text-extractor`, `cropandlock`, `paste`, `powerlauncher` +- If unclear, pick the closest module or subsystem; omit only if unavoidable + +**Summary rules** +- Imperative, present tense (“add”, “update”, “remove”, “fix”) +- Keep it <= 72 characters when possible; be specific, avoid “misc changes” + +**Examples** +- `feat(fancyzones): add canvas template duplication` +- `fix(mouseutils): guard crosshair toggle when dpi info missing` +- `docs(runner): document tray icon states` +- `build(installer): align wix v5 suffix flag` +- `ci(ci): cache pipeline artifacts for x64` diff --git a/.github/skills/issue-fix/references/create-pr-summary.prompt.md b/.github/skills/issue-fix/references/create-pr-summary.prompt.md new file mode 100644 index 000000000000..9e47c2fc3c39 --- /dev/null +++ b/.github/skills/issue-fix/references/create-pr-summary.prompt.md @@ -0,0 +1,24 @@ +--- +agent: 'agent' +description: 'Generate a PowerToys-ready pull request description from the local diff' +--- + +# Generate PR Summary + +**Goal:** Produce a ready-to-paste PR title and description that follows PowerToys conventions by comparing the current branch against a user-selected target branch. 
+
+**Repo guardrails:**
+- Treat `.github/pull_request_template.md` as the single source of truth; load it at runtime instead of embedding hardcoded content in this prompt.
+- Preserve section order from the template but only surface checklist lines that are relevant for the detected changes, filling them with `[x]`/`[ ]` as appropriate.
+- Cite touched paths with inline backticks, matching the guidance in `.github/copilot-instructions.md`.
+- Call out test coverage explicitly: list automated tests run (unit/UI) or state why they are not applicable.
+
+**Workflow:**
+1. Determine the target branch from user context; default to `main` when no branch is supplied.
+2. Run `git status --short` once to surface uncommitted files that may influence the summary.
+3. Run `git diff <target-branch>...HEAD` a single time to review the detailed changes. Only when confidence stays low dig deeper with focused calls such as `git diff <target-branch>...HEAD -- <path>`.
+4. From the diff, capture impacted areas, key file changes, behavioral risks, migrations, and noteworthy edge cases.
+5. Confirm validation: list tests executed with results or state why tests were skipped in line with repo guidance.
+6. Load `.github/pull_request_template.md`, mirror its section order, and populate it with the gathered facts. Include only relevant checklist entries, marking them `[x]/[ ]` and noting any intentional omissions as "N/A".
+7. Present the filled template inside a fenced ```markdown code block with no extra commentary so it is ready to paste into a PR, clearly flagging any placeholders that still need user input.
+8. Prepend the PR title above the filled template, applying the Conventional Commit type/scope rules from `.github/prompts/create-commit-title.prompt.md`; pick the dominant component from the diff and keep the title concise and imperative.
diff --git a/.github/skills/issue-fix/references/fix-issue.prompt.md b/.github/skills/issue-fix/references/fix-issue.prompt.md new file mode 100644 index 000000000000..9b758c4e8dc8 --- /dev/null +++ b/.github/skills/issue-fix/references/fix-issue.prompt.md @@ -0,0 +1,72 @@ +--- +agent: 'agent' +description: 'Execute the fix for a GitHub issue using the previously generated implementation plan' +--- + +# Fix GitHub Issue + +## Dependencies +Source review prompt (for generating the implementation plan if missing): +- .github/prompts/review-issue.prompt.md + +Required plan file (single source of truth): +- Generated Files/issueReview/{{issue_number}}/implementation-plan.md + +## Dependency Handling +1) If `implementation-plan.md` exists → proceed. +2) If missing → run the review prompt: + - Invoke: `.github/prompts/review-issue.prompt.md` + - Pass: `issue_number={{issue_number}}` + - Then re-check for `implementation-plan.md`. +3) If still missing → stop and generate: + - `Generated Files/issueFix/{{issue_number}}/manual-steps.md` containing: + “implementation-plan.md not found; please run .github/prompts/review-issue.prompt.md for #{{issue_number}}.” + +# GOAL +For **#{{issue_number}}**: +- Use implementation-plan.md as the single authority. +- Apply code and test changes directly in the repository. +- Produce a PR-ready description. + +# OUTPUT FILES +1) Generated Files/issueFix/{{issue_number}}/pr-description.md +2) Generated Files/issueFix/{{issue_number}}/manual-steps.md # only if human interaction or external setup is required + +# EXECUTION RULES +1) Read implementation-plan.md and execute: + - Layers & Files → edit/create as listed + - Pattern Choices → follow repository conventions + - Fundamentals (perf, security, compatibility, accessibility) + - Logging & Exceptions + - Telemetry (only if explicitly included in the plan) + - Risks & Mitigations + - Tests to Add +2) Locate affected files via `rg` or `git grep`. 
+3) Add/update tests to enforce the fixed behavior.
+4) If any ambiguity exists, add:
+// TODO(Human input needed):
+5) Verify locally: build & tests run successfully.
+
+# pr-description.md should include:
+- Title: `Fix: <short summary> (#{{issue_number}})`
+- What changed and why the fix works
+- Files or modules touched
+- Risks & mitigations (implemented)
+- Tests added/updated and how to run them
+- Telemetry behavior (if applicable)
+- Validation / reproduction steps
+- `Closes #{{issue_number}}`
+
+# manual-steps.md (only if needed)
+- List required human actions: secrets, config, approvals, missing info, or code comments requiring human decisions.
+
+# IMPORTANT
+- Apply code and tests directly; do not produce patch files.
+- Follow implementation-plan.md as the source of truth.
+- Insert comments for human review where a decision or input is required.
+- Use repository conventions and deterministic, minimal changes.
+
+# FINALIZE
+- Write pr-description.md
+- Write manual-steps.md only if needed
+- Print concise success message or note items requiring human interaction
diff --git a/.github/skills/issue-fix/references/mcp-config.json b/.github/skills/issue-fix/references/mcp-config.json
new file mode 100644
index 000000000000..5af15d54218c
--- /dev/null
+++ b/.github/skills/issue-fix/references/mcp-config.json
@@ -0,0 +1,9 @@
+{
+ "mcpServers": {
+ "github-artifacts": {
+ "command": "cmd",
+ "args": ["/c", "for /f %i in ('git rev-parse --show-toplevel') do node %i/tools/mcp/github-artifacts/launch.js"],
+ "tools": ["*"]
+ }
+ }
+}
diff --git a/.github/skills/issue-fix/scripts/Get-WorktreeStatus.ps1 b/.github/skills/issue-fix/scripts/Get-WorktreeStatus.ps1
new file mode 100644
index 000000000000..6f5b68c8c77b
--- /dev/null
+++ b/.github/skills/issue-fix/scripts/Get-WorktreeStatus.ps1
@@ -0,0 +1,22 @@
+<#
+.SYNOPSIS
+ Show commit/uncommitted status for issue/* worktrees.
+#> +[CmdletBinding()] +param() + +$repoRoot = Resolve-Path (Join-Path $PSScriptRoot '..\..\..\..') +Set-Location $repoRoot + +git worktree list | Select-String "issue/" | ForEach-Object { + $path = ($_ -split "\s+")[0] + $branch = ($_ -split "\s+")[2] -replace "\[|\]","" + $ahead = (git -C $path rev-list main..HEAD --count 2>$null) + $uncommitted = (git -C $path status --porcelain 2>$null | Measure-Object).Count + [pscustomobject]@{ + Branch = $branch + CommitsAhead = $ahead + Uncommitted = $uncommitted + Path = $path + } +} \ No newline at end of file diff --git a/.github/skills/issue-fix/scripts/IssueReviewLib.ps1 b/.github/skills/issue-fix/scripts/IssueReviewLib.ps1 new file mode 100644 index 000000000000..d4195c70200c --- /dev/null +++ b/.github/skills/issue-fix/scripts/IssueReviewLib.ps1 @@ -0,0 +1,644 @@ +# IssueReviewLib.ps1 - Helpers for issue auto-fix workflow +# Part of the PowerToys GitHub Copilot/Claude Code issue review system +# This is a trimmed version with only what issue-fix needs + +#region Console Output Helpers +function Info { param([string]$Message) Write-Host $Message -ForegroundColor Cyan } +function Warn { param([string]$Message) Write-Host $Message -ForegroundColor Yellow } +function Err { param([string]$Message) Write-Host $Message -ForegroundColor Red } +function Success { param([string]$Message) Write-Host $Message -ForegroundColor Green } +#endregion + +#region Repository Helpers +function Get-RepoRoot { + $root = git rev-parse --show-toplevel 2>$null + if (-not $root) { throw 'Not inside a git repository.' 
} + return (Resolve-Path $root).Path +} + +function Get-GeneratedFilesPath { + param([string]$RepoRoot) + return Join-Path $RepoRoot 'Generated Files' +} + +function Get-IssueReviewPath { + param( + [string]$RepoRoot, + [int]$IssueNumber + ) + $genFiles = Get-GeneratedFilesPath -RepoRoot $RepoRoot + return Join-Path $genFiles "issueReview/$IssueNumber" +} + +function Ensure-DirectoryExists { + param([string]$Path) + if (-not (Test-Path $Path)) { + New-Item -ItemType Directory -Path $Path -Force | Out-Null + } +} +#endregion + +#region CLI Detection +function Get-AvailableCLI { + <# + .SYNOPSIS + Detect which AI CLI is available: GitHub Copilot CLI or Claude Code. + #> + + # Check for standalone GitHub Copilot CLI + $copilotCLI = Get-Command 'copilot' -ErrorAction SilentlyContinue + if ($copilotCLI) { + return @{ Name = 'GitHub Copilot CLI'; Command = 'copilot'; Type = 'copilot' } + } + + # Check for Claude Code CLI + $claudeCode = Get-Command 'claude' -ErrorAction SilentlyContinue + if ($claudeCode) { + return @{ Name = 'Claude Code CLI'; Command = 'claude'; Type = 'claude' } + } + + # Check for GitHub Copilot CLI via gh extension + $ghCopilot = Get-Command 'gh' -ErrorAction SilentlyContinue + if ($ghCopilot) { + $copilotCheck = gh extension list 2>&1 | Select-String -Pattern 'copilot' + if ($copilotCheck) { + return @{ Name = 'GitHub Copilot CLI (gh extension)'; Command = 'gh'; Type = 'gh-copilot' } + } + } + + # Check for VS Code CLI + $code = Get-Command 'code' -ErrorAction SilentlyContinue + if ($code) { + return @{ Name = 'VS Code (Copilot Chat)'; Command = 'code'; Type = 'vscode' } + } + + return $null +} +#endregion + +#region Issue Review Results Helpers +function Get-IssueReviewResult { + <# + .SYNOPSIS + Check if an issue has been reviewed and get its results. 
+ #> + param( + [Parameter(Mandatory)] + [int]$IssueNumber, + [Parameter(Mandatory)] + [string]$RepoRoot + ) + + $reviewPath = Get-IssueReviewPath -RepoRoot $RepoRoot -IssueNumber $IssueNumber + + $result = @{ + IssueNumber = $IssueNumber + Path = $reviewPath + HasOverview = $false + HasImplementationPlan = $false + OverviewPath = $null + ImplementationPlanPath = $null + } + + $overviewPath = Join-Path $reviewPath 'overview.md' + $implPlanPath = Join-Path $reviewPath 'implementation-plan.md' + + if (Test-Path $overviewPath) { + $result.HasOverview = $true + $result.OverviewPath = $overviewPath + } + + if (Test-Path $implPlanPath) { + $result.HasImplementationPlan = $true + $result.ImplementationPlanPath = $implPlanPath + } + + return $result +} + +function Get-HighConfidenceIssues { + <# + .SYNOPSIS + Find issues with high confidence for auto-fix based on review results. + #> + param( + [Parameter(Mandatory)] + [string]$RepoRoot, + [int]$MinFeasibilityScore = 70, + [int]$MinClarityScore = 60, + [int]$MaxEffortDays = 2, + [int[]]$FilterIssueNumbers = @() + ) + + $genFiles = Get-GeneratedFilesPath -RepoRoot $RepoRoot + $reviewDir = Join-Path $genFiles 'issueReview' + + if (-not (Test-Path $reviewDir)) { + return @() + } + + $highConfidence = @() + + Get-ChildItem -Path $reviewDir -Directory | ForEach-Object { + $issueNum = [int]$_.Name + + if ($FilterIssueNumbers.Count -gt 0 -and $issueNum -notin $FilterIssueNumbers) { + return + } + + $overviewPath = Join-Path $_.FullName 'overview.md' + $implPlanPath = Join-Path $_.FullName 'implementation-plan.md' + + if (-not (Test-Path $overviewPath) -or -not (Test-Path $implPlanPath)) { + return + } + + $overview = Get-Content $overviewPath -Raw + + $feasibility = 0 + $clarity = 0 + $effortDays = 999 + + if ($overview -match 'Technical Feasibility[^\d]*(\d+)/100') { + $feasibility = [int]$Matches[1] + } + if ($overview -match 'Requirement Clarity[^\d]*(\d+)/100') { + $clarity = [int]$Matches[1] + } + if ($overview -match 
'Effort Estimate[^|]*\|\s*[\d.]+(?:-(\d+))?\s*days?') { + if ($Matches[1]) { + $effortDays = [int]$Matches[1] + } elseif ($overview -match 'Effort Estimate[^|]*\|\s*(\d+)\s*days?') { + $effortDays = [int]$Matches[1] + } + } + if ($overview -match 'Effort Estimate[^|]*\|[^|]*\|\s*(XS|S)\b') { + if ($Matches[1] -eq 'XS') { $effortDays = 1 } else { $effortDays = 2 } + } elseif ($overview -match 'Effort Estimate[^|]*\|[^|]*\(XS\)') { + $effortDays = 1 + } elseif ($overview -match 'Effort Estimate[^|]*\|[^|]*\(S\)') { + $effortDays = 2 + } + + if ($feasibility -ge $MinFeasibilityScore -and + $clarity -ge $MinClarityScore -and + $effortDays -le $MaxEffortDays) { + + $highConfidence += @{ + IssueNumber = $issueNum + FeasibilityScore = $feasibility + ClarityScore = $clarity + EffortDays = $effortDays + OverviewPath = $overviewPath + ImplementationPlanPath = $implPlanPath + } + } + } + + return $highConfidence | Sort-Object -Property FeasibilityScore -Descending +} +#endregion + +#region Release & PR Status Helpers +function Get-PRReleaseStatus { + <# + .SYNOPSIS + Check if a PR has been merged and released. + .DESCRIPTION + Queries GitHub to determine: + 1. If the PR is merged + 2. 
What release (if any) contains the merge commit + .OUTPUTS + @{ + PRNumber = + IsMerged = $true | $false + MergeCommit = + ReleasedIn = # e.g., "v0.90.0" + IsReleased = $true | $false + } + #> + param( + [Parameter(Mandatory)] + [int]$PRNumber, + [string]$Repo = 'microsoft/PowerToys' + ) + + $result = @{ + PRNumber = $PRNumber + IsMerged = $false + MergeCommit = $null + ReleasedIn = $null + IsReleased = $false + } + + try { + # Get PR details from GitHub + $prJson = gh pr view $PRNumber --repo $Repo --json state,mergeCommit,mergedAt 2>$null + if (-not $prJson) { + return $result + } + + $pr = $prJson | ConvertFrom-Json + + if ($pr.state -eq 'MERGED' -and $pr.mergeCommit) { + $result.IsMerged = $true + $result.MergeCommit = $pr.mergeCommit.oid + + # Check which release tags contain this commit + # Use git tag --contains to find tags that include the merge commit + $tags = git tag --contains $result.MergeCommit 2>$null + + if ($tags) { + # Filter to release tags (v0.XX.X pattern) and get the earliest one + $releaseTags = $tags | Where-Object { $_ -match '^v\d+\.\d+\.\d+$' } | Sort-Object + if ($releaseTags) { + $result.ReleasedIn = $releaseTags | Select-Object -First 1 + $result.IsReleased = $true + } + } + } + } + catch { + # Silently fail - will return default "not merged" status + } + + return $result +} + +function Get-LatestRelease { + <# + .SYNOPSIS + Get the latest release version of PowerToys. 
+ #> + param( + [string]$Repo = 'microsoft/PowerToys' + ) + + try { + $releaseJson = gh release view --repo $Repo --json tagName 2>$null + if ($releaseJson) { + $release = $releaseJson | ConvertFrom-Json + return $release.tagName + } + } + catch { + # Fallback: try to get from git tags + $latestTag = git describe --tags --abbrev=0 2>$null + if ($latestTag) { + return $latestTag + } + } + return $null +} +#endregion + +#region Implementation Plan Analysis +function Get-ImplementationPlanStatus { + <# + .SYNOPSIS + Parse implementation-plan.md to determine the recommended action. + .DESCRIPTION + Reads the implementation plan and extracts the status/recommendation. + For "already resolved" issues, also checks if the fix has been released. + Returns an object indicating what action should be taken. + .OUTPUTS + @{ + Status = 'AlreadyResolved' | 'FixedButUnreleased' | 'NeedsClarification' | 'Duplicate' | 'WontFix' | 'ReadyToImplement' | 'Unknown' + Action = 'CloseIssue' | 'AddComment' | 'LinkDuplicate' | 'ImplementFix' | 'Skip' + Reason = + RelatedPR = + ReleasedIn = + DuplicateOf = + CommentText = + } + #> + param( + [Parameter(Mandatory)] + [string]$ImplementationPlanPath, + [switch]$SkipReleaseCheck + ) + + $result = @{ + Status = 'Unknown' + Action = 'Skip' + Reason = 'Could not determine status from implementation plan' + RelatedPR = $null + ReleasedIn = $null + DuplicateOf = $null + CommentText = $null + } + + if (-not (Test-Path $ImplementationPlanPath)) { + $result.Reason = 'Implementation plan file not found' + return $result + } + + $content = Get-Content $ImplementationPlanPath -Raw + + # Check for ALREADY RESOLVED status + if ($content -match '(?i)STATUS:\s*ALREADY\s+RESOLVED' -or + $content -match '(?i)⚠️\s*STATUS:\s*ALREADY\s+RESOLVED' -or + $content -match '(?i)This issue has been fixed by' -or + $content -match '(?i)No implementation work is needed') { + + # Try to extract the PR number + $prNumber = $null + if ($content -match '\[PR #(\d+)\]' -or 
$content -match 'PR #(\d+)' -or $content -match '/pull/(\d+)') { + $prNumber = [int]$Matches[1] + $result.RelatedPR = $prNumber + } + + # Check if the fix has been released + if ($prNumber -and -not $SkipReleaseCheck) { + $prStatus = Get-PRReleaseStatus -PRNumber $prNumber + + if ($prStatus.IsReleased) { + # Fix is released - safe to close + $result.Status = 'AlreadyResolved' + $result.Action = 'CloseIssue' + $result.ReleasedIn = $prStatus.ReleasedIn + $result.Reason = "Issue fixed by PR #$prNumber, released in $($prStatus.ReleasedIn)" + $result.CommentText = @" +This issue has been fixed by PR #$prNumber and is available in **$($prStatus.ReleasedIn)**. + +Please update to the latest version. If you're still experiencing this issue after updating, please reopen with additional details. +"@ + } + elseif ($prStatus.IsMerged) { + # PR merged but not yet released - add comment but don't close + $result.Status = 'FixedButUnreleased' + $result.Action = 'AddComment' + $result.Reason = "Issue fixed by PR #$prNumber, but not yet released" + $result.CommentText = @" +This issue has been fixed by PR #$prNumber, which has been merged but **not yet released**. + +The fix will be available in the next PowerToys release. You can: +- Wait for the next official release +- Build from source to get the fix immediately + +We'll close this issue once the fix is released. +"@ + } + else { + # PR exists but not merged - treat as ready to implement (PR might have been reverted) + $result.Status = 'ReadyToImplement' + $result.Action = 'ImplementFix' + $result.Reason = "PR #$prNumber exists but is not merged - may need reimplementation" + } + } + elseif ($prNumber) { + # Skip release check requested or no PR number - assume it's resolved + $result.Status = 'AlreadyResolved' + $result.Action = 'CloseIssue' + $result.Reason = 'Issue has already been fixed' + $result.CommentText = "This issue has been fixed by PR #$prNumber. Closing as resolved." 
+ } + else { + # No PR number found - just mark as resolved with generic message + $result.Status = 'AlreadyResolved' + $result.Action = 'CloseIssue' + $result.Reason = 'Issue appears to have been resolved' + $result.CommentText = "Based on analysis, this issue appears to have already been resolved. Please verify and reopen if the issue persists." + } + + return $result + } + + # Check for DUPLICATE status + if ($content -match '(?i)STATUS:\s*DUPLICATE' -or + $content -match '(?i)This is a duplicate of' -or + $content -match '(?i)duplicate of #(\d+)') { + + $result.Status = 'Duplicate' + $result.Action = 'LinkDuplicate' + $result.Reason = 'Issue is a duplicate' + + # Try to extract the duplicate issue number + if ($content -match 'duplicate of #(\d+)' -or $content -match '#(\d+)') { + $result.DuplicateOf = [int]$Matches[1] + $result.CommentText = "This appears to be a duplicate of #$($result.DuplicateOf)." + } + + return $result + } + + # Check for NEEDS CLARIFICATION status + if ($content -match '(?i)STATUS:\s*NEEDS?\s+CLARIFICATION' -or + $content -match '(?i)STATUS:\s*NEEDS?\s+MORE\s+INFO' -or + $content -match '(?i)cannot proceed without' -or + $content -match '(?i)need(?:s)? more information') { + + $result.Status = 'NeedsClarification' + $result.Action = 'AddComment' + $result.Reason = 'Issue needs more information from reporter' + + # Try to extract what information is needed + if ($content -match '(?i)(?:need(?:s)?|require(?:s)?|missing)[:\s]+([^\n]+)') { + $result.CommentText = "Additional information is needed to proceed with this issue: $($Matches[1].Trim())" + } else { + $result.CommentText = "Could you please provide more details about this issue? Specifically, steps to reproduce and expected vs actual behavior would help." 
+ } + + return $result + } + + # Check for WONT FIX / NOT FEASIBLE status + if ($content -match '(?i)STATUS:\s*(?:WONT?\s+FIX|NOT\s+FEASIBLE|REJECTED)' -or + $content -match '(?i)(?:not|cannot be) (?:feasible|implemented)' -or + $content -match '(?i)recommend(?:ed)?\s+(?:to\s+)?close') { + + $result.Status = 'WontFix' + $result.Action = 'AddComment' + $result.Reason = 'Issue is not feasible or recommended to close' + + # Try to extract the reason + if ($content -match '(?i)(?:because|reason|due to)[:\s]+([^\n]+)') { + $result.CommentText = "After analysis, this issue cannot be implemented: $($Matches[1].Trim())" + } + + return $result + } + + # Check for external dependency / blocked status + if ($content -match '(?i)STATUS:\s*BLOCKED' -or + $content -match '(?i)blocked by' -or + $content -match '(?i)depends on external' -or + $content -match '(?i)waiting for upstream') { + + $result.Status = 'Blocked' + $result.Action = 'AddComment' + $result.Reason = 'Issue is blocked by external dependency' + + return $result + } + + # Check for READY TO IMPLEMENT (positive signals) + if ($content -match '(?i)## \d+\)\s*Task Breakdown' -or + $content -match '(?i)implementation steps' -or + $content -match '(?i)## Layers & Files' -or + ($content -match '(?i)Feasibility' -and $content -notmatch '(?i)not\s+feasible')) { + + $result.Status = 'ReadyToImplement' + $result.Action = 'ImplementFix' + $result.Reason = 'Implementation plan is ready' + + return $result + } + + # Default: if we have a detailed plan, assume it's ready + if ($content.Length -gt 500 -and $content -match '(?i)##') { + $result.Status = 'ReadyToImplement' + $result.Action = 'ImplementFix' + $result.Reason = 'Implementation plan appears complete' + } + + return $result +} + +function Invoke-ImplementationPlanAction { + <# + .SYNOPSIS + Execute the recommended action from the implementation plan analysis. 
+ .DESCRIPTION + Based on the status from Get-ImplementationPlanStatus, takes appropriate action: + - CloseIssue: Closes the issue with a comment + - AddComment: Adds a comment to the issue + - LinkDuplicate: Marks as duplicate + - ImplementFix: Returns $true to indicate code fix should proceed + - Skip: Returns $false + .OUTPUTS + @{ + ActionTaken = + ShouldProceedWithFix = $true | $false + Success = $true | $false + } + #> + param( + [Parameter(Mandatory)] + [int]$IssueNumber, + [Parameter(Mandatory)] + [hashtable]$PlanStatus, + [switch]$DryRun + ) + + $result = @{ + ActionTaken = 'None' + ShouldProceedWithFix = $false + Success = $true + } + + switch ($PlanStatus.Action) { + 'ImplementFix' { + $result.ActionTaken = 'Proceeding with code fix' + $result.ShouldProceedWithFix = $true + Info "[Issue #$IssueNumber] Status: $($PlanStatus.Status) - $($PlanStatus.Reason)" + } + + 'CloseIssue' { + $result.ActionTaken = "Closing issue: $($PlanStatus.Reason)" + Info "[Issue #$IssueNumber] $($PlanStatus.Status): $($PlanStatus.Reason)" + + if (-not $DryRun) { + $comment = $PlanStatus.CommentText + if (-not $comment) { + $comment = "Closing based on automated analysis: $($PlanStatus.Reason)" + } + + try { + # Check if issue is already closed + $issueState = gh issue view $IssueNumber --json state 2>$null | ConvertFrom-Json + if ($issueState.state -eq 'CLOSED') { + Info "[Issue #$IssueNumber] Already closed, skipping" + $result.ActionTaken = "Already closed" + return $result + } + + # Close the issue with comment (single operation to avoid duplicates) + gh issue close $IssueNumber --reason "completed" --comment $comment 2>&1 | Out-Null + + Success "[Issue #$IssueNumber] ✓ Closed with comment" + } + catch { + Err "[Issue #$IssueNumber] Failed to close: $($_.Exception.Message)" + $result.Success = $false + } + } else { + Info "[Issue #$IssueNumber] (DryRun) Would close with: $($PlanStatus.CommentText)" + } + } + + 'AddComment' { + $result.ActionTaken = "Adding comment: 
$($PlanStatus.Reason)" + Info "[Issue #$IssueNumber] $($PlanStatus.Status): $($PlanStatus.Reason)" + + if (-not $DryRun -and $PlanStatus.CommentText) { + try { + gh issue comment $IssueNumber --body $PlanStatus.CommentText 2>&1 | Out-Null + Success "[Issue #$IssueNumber] ✓ Comment added" + } + catch { + Err "[Issue #$IssueNumber] Failed to add comment: $($_.Exception.Message)" + $result.Success = $false + } + } else { + Info "[Issue #$IssueNumber] (DryRun) Would comment: $($PlanStatus.CommentText)" + } + } + + 'LinkDuplicate' { + $result.ActionTaken = "Marking as duplicate of #$($PlanStatus.DuplicateOf)" + Info "[Issue #$IssueNumber] Duplicate of #$($PlanStatus.DuplicateOf)" + + if (-not $DryRun -and $PlanStatus.DuplicateOf) { + try { + gh issue close $IssueNumber --reason "not_planned" --comment "Closing as duplicate of #$($PlanStatus.DuplicateOf)" 2>&1 | Out-Null + Success "[Issue #$IssueNumber] ✓ Closed as duplicate" + } + catch { + Err "[Issue #$IssueNumber] Failed to close as duplicate: $($_.Exception.Message)" + $result.Success = $false + } + } + } + + 'Skip' { + $result.ActionTaken = "Skipped: $($PlanStatus.Reason)" + Warn "[Issue #$IssueNumber] Skipping: $($PlanStatus.Reason)" + } + } + + return $result +} +#endregion + +#region Worktree Integration +function Copy-IssueReviewToWorktree { + <# + .SYNOPSIS + Copy the Generated Files for an issue to a worktree. 
+    #>
+    param(
+        [Parameter(Mandatory)]
+        [int]$IssueNumber,
+        [Parameter(Mandatory)]
+        [string]$SourceRepoRoot,
+        [Parameter(Mandatory)]
+        [string]$WorktreePath
+    )
+
+    $sourceReviewPath = Get-IssueReviewPath -RepoRoot $SourceRepoRoot -IssueNumber $IssueNumber
+    $destReviewPath = Get-IssueReviewPath -RepoRoot $WorktreePath -IssueNumber $IssueNumber
+
+    if (-not (Test-Path $sourceReviewPath)) {
+        throw "Issue review files not found at: $sourceReviewPath"
+    }
+
+    Ensure-DirectoryExists -Path $destReviewPath
+
+    Copy-Item -Path "$sourceReviewPath\*" -Destination $destReviewPath -Recurse -Force
+
+    Info "Copied issue review files to: $destReviewPath"
+
+    return $destReviewPath
+}
+#endregion
diff --git a/.github/skills/issue-fix/scripts/Start-IssueAutoFix.ps1 b/.github/skills/issue-fix/scripts/Start-IssueAutoFix.ps1
new file mode 100644
index 000000000000..a97118abeaa5
--- /dev/null
+++ b/.github/skills/issue-fix/scripts/Start-IssueAutoFix.ps1
@@ -0,0 +1,581 @@
+<#!
+.SYNOPSIS
+    Auto-fix high-confidence issues using worktrees and AI CLI.
+
+.DESCRIPTION
+    Finds issues with high confidence scores from the review results, creates worktrees
+    for each, copies the Generated Files, and kicks off the FixIssue agent to implement fixes.
+
+.PARAMETER IssueNumber
+    Specific issue number to fix. If not specified, finds high-confidence issues automatically.
+
+.PARAMETER MinFeasibilityScore
+    Minimum Technical Feasibility score (0-100). Default: 70.
+
+.PARAMETER MinClarityScore
+    Minimum Requirement Clarity score (0-100). Default: 60.
+
+.PARAMETER MaxEffortDays
+    Maximum effort estimate in days. Default: 2 (Small fixes).
+
+.PARAMETER MaxConcurrent
+    Maximum parallel fix jobs. Default: 5 (worktrees are resource-intensive).
+
+.PARAMETER CLIType
+    AI CLI to use: claude, copilot, gh-copilot, or vscode. Auto-detected if not specified.
+
+.PARAMETER Model
+    Copilot CLI model to use (e.g., gpt-5.2-codex).
+
+.PARAMETER DryRun
+    List issues without starting fixes.
+ +.PARAMETER SkipWorktree + Fix in the current repository instead of creating worktrees (useful for single issue). + +.PARAMETER VSCodeProfile + VS Code profile to use when opening worktrees. Default: Default. + +.PARAMETER AutoCommit + Automatically commit changes after successful fix. + +.PARAMETER CreatePR + Automatically create a pull request after successful fix. + +.EXAMPLE + # Fix a specific issue + ./Start-IssueAutoFix.ps1 -IssueNumber 12345 + +.EXAMPLE + # Find and fix all high-confidence issues (dry run) + ./Start-IssueAutoFix.ps1 -DryRun + +.EXAMPLE + # Fix issues with very high confidence + ./Start-IssueAutoFix.ps1 -MinFeasibilityScore 80 -MinClarityScore 70 -MaxEffortDays 1 + +.EXAMPLE + # Fix single issue in current repo (no worktree) + ./Start-IssueAutoFix.ps1 -IssueNumber 12345 -SkipWorktree + +.NOTES + Prerequisites: + - Run Start-BulkIssueReview.ps1 first to generate review files + - GitHub CLI (gh) authenticated + - Claude Code CLI or VS Code with Copilot + + Results: + - Worktrees created at ../-/ + - Generated Files copied to each worktree + - Fix agent invoked in each worktree +#> + +[CmdletBinding()] +param( + [int]$IssueNumber, + + [int]$MinFeasibilityScore = 70, + + [int]$MinClarityScore = 60, + + [int]$MaxEffortDays = 2, + + [int]$MaxConcurrent = 5, + + [ValidateSet('claude', 'copilot', 'gh-copilot', 'vscode', 'auto')] + [string]$CLIType = 'auto', + + [string]$Model, + + [switch]$DryRun, + + [switch]$SkipWorktree, + + [Alias('Profile')] + [string]$VSCodeProfile = 'Default', + + [switch]$AutoCommit, + + [switch]$CreatePR, + + [switch]$Force, + + [switch]$Help +) + +# Load libraries +$scriptDir = Split-Path -Parent $MyInvocation.MyCommand.Path +. 
"$scriptDir/IssueReviewLib.ps1" + +# Resolve config directory name (.github or .claude) from script location +$_cfgDir = if ($PSScriptRoot -match '[\\/](\.github|\.claude)[\\/]') { $Matches[1] } else { '.github' } + +# Load worktree library from tools/build +$repoRoot = Get-RepoRoot +$worktreeLib = Join-Path $repoRoot 'tools/build/WorktreeLib.ps1' +if (Test-Path $worktreeLib) { + . $worktreeLib +} + +# Show help +if ($Help) { + Get-Help $MyInvocation.MyCommand.Path -Full + return +} + +function Start-IssueFixInWorktree { + <# + .SYNOPSIS + Analyze implementation plan and either take action or create worktree for fix. + .DESCRIPTION + First analyzes the implementation plan to determine if: + - Issue is already resolved (close it) + - Issue needs clarification (add comment) + - Issue is a duplicate (close as duplicate) + - Issue is ready to implement (create worktree and fix) + #> + param( + [Parameter(Mandatory)] + [int]$IssueNumber, + [Parameter(Mandatory)] + [string]$SourceRepoRoot, + [string]$CLIType = 'claude', + [string]$Model, + [string]$VSCodeProfile = 'Default', + [switch]$SkipWorktree, + [switch]$DryRun + ) + + $issueReviewPath = Get-IssueReviewPath -RepoRoot $SourceRepoRoot -IssueNumber $IssueNumber + $overviewPath = Join-Path $issueReviewPath 'overview.md' + $implPlanPath = Join-Path $issueReviewPath 'implementation-plan.md' + + # Verify review files exist + if (-not (Test-Path $overviewPath)) { + throw "No overview.md found for issue #$IssueNumber. Run Start-BulkIssueReview.ps1 first." + } + if (-not (Test-Path $implPlanPath)) { + throw "No implementation-plan.md found for issue #$IssueNumber. Run Start-BulkIssueReview.ps1 first." + } + + # ===================================== + # STEP 1: Analyze the implementation plan + # ===================================== + Info "Analyzing implementation plan for issue #$IssueNumber..." 
+ $planStatus = Get-ImplementationPlanStatus -ImplementationPlanPath $implPlanPath + + # ===================================== + # STEP 2: Execute the recommended action + # ===================================== + $actionResult = Invoke-ImplementationPlanAction -IssueNumber $IssueNumber -PlanStatus $planStatus -DryRun:$DryRun + + # If we shouldn't proceed with fix, return early + if (-not $actionResult.ShouldProceedWithFix) { + return @{ + IssueNumber = $IssueNumber + WorktreePath = $null + Success = $actionResult.Success + ActionTaken = $actionResult.ActionTaken + SkippedCodeFix = $true + } + } + + # ===================================== + # STEP 3: Proceed with code fix + # ===================================== + + $workingDir = $SourceRepoRoot + + if (-not $SkipWorktree) { + # Use the simplified New-WorktreeFromIssue.cmd which only needs issue number + $worktreeCmd = Join-Path $SourceRepoRoot 'tools/build/New-WorktreeFromIssue.cmd' + + Info "Creating worktree for issue #$IssueNumber..." + + # Call the cmd script with issue number and -NoVSCode for automation + & cmd /c $worktreeCmd $IssueNumber -NoVSCode + + if ($LASTEXITCODE -ne 0) { + throw "Failed to create worktree for issue #$IssueNumber" + } + + # Find the created worktree + $entries = Get-WorktreeEntries + $worktreeEntry = $entries | Where-Object { $_.Branch -like "issue/$IssueNumber*" } | Select-Object -First 1 + + if (-not $worktreeEntry) { + throw "Failed to find worktree for issue #$IssueNumber" + } + + $workingDir = $worktreeEntry.Path + Info "Worktree created at: $workingDir" + + # Copy Generated Files to worktree + Info "Copying review files to worktree..." + $destReviewPath = Copy-IssueReviewToWorktree -IssueNumber $IssueNumber -SourceRepoRoot $SourceRepoRoot -WorktreePath $workingDir + Info "Review files copied to: $destReviewPath" + + # Copy config dirs to worktree (agents, skills, instructions, prompts, top-level md) + # These aren't on the issue branch so the CLI can't find them without this. 
+ $sourceCfg = Join-Path $SourceRepoRoot $_cfgDir + $destCfg = Join-Path $workingDir $_cfgDir + if (Test-Path $sourceCfg) { + if (-not (Test-Path $destCfg)) { + New-Item -ItemType Directory -Path $destCfg -Force | Out-Null + } + foreach ($sub in @('agents', 'skills', 'instructions', 'prompts')) { + $src = Join-Path $sourceCfg $sub + $dst = Join-Path $destCfg $sub + if ((Test-Path $src) -and -not (Test-Path $dst)) { + Copy-Item -Path $src -Destination $dst -Recurse -Force + Info "Copied $_cfgDir/$sub to worktree" + } + } + foreach ($mdFile in @('copilot-instructions.md', 'CLAUDE.md')) { + $src = Join-Path $sourceCfg $mdFile + $dst = Join-Path $destCfg $mdFile + if ((Test-Path $src) -and -not (Test-Path $dst)) { + Copy-Item -Path $src -Destination $dst -Force + Info "Copied $_cfgDir/$mdFile to worktree" + } + } + } + } + + # Build the prompt for the fix agent + $prompt = @" +You are the FixIssue agent. Fix GitHub issue #$IssueNumber. + +The implementation plan is at: Generated Files/issueReview/$IssueNumber/implementation-plan.md +The overview is at: Generated Files/issueReview/$IssueNumber/overview.md + +Follow the implementation plan exactly. Build and verify after each change. +"@ + + # Start the fix agent + Info "Starting fix agent for issue #$IssueNumber in $workingDir..." 
+ + # MCP config for github-artifacts tools (relative to repo root) + $mcpConfig = "@$_cfgDir/skills/issue-fix/references/mcp-config.json" + + switch ($CLIType) { + 'copilot' { + # GitHub Copilot CLI (standalone copilot command) + # -p: Non-interactive prompt mode (exits after completion) + # --yolo: Enable all permissions for automated execution + # -s: Silent mode - output only agent response + # --additional-mcp-config: Load github-artifacts MCP for image/attachment analysis + $copilotArgs = @( + '--additional-mcp-config', $mcpConfig, + '-p', $prompt, + '--yolo', + '-s', + '--agent', 'FixIssue' + ) + if ($Model) { + $copilotArgs += @('--model', $Model) + } + Info "Running: copilot $($copilotArgs -join ' ')" + Push-Location $workingDir + try { + & copilot @copilotArgs + if ($LASTEXITCODE -ne 0) { + Warn "Copilot exited with code $LASTEXITCODE" + } + } finally { + Pop-Location + } + } + 'claude' { + $claudeArgs = @( + '--print', + '--dangerously-skip-permissions', + '--agent', 'FixIssue', + '--prompt', $prompt + ) + Start-Process -FilePath 'claude' -ArgumentList $claudeArgs -WorkingDirectory $workingDir -Wait -NoNewWindow + } + 'gh-copilot' { + # Use GitHub Copilot CLI via gh extension + # gh copilot suggest requires interactive mode, so we open VS Code with the prompt + Info "GitHub Copilot CLI detected. Opening VS Code with prompt..." + + # Create a prompt file in the worktree for easy access + $promptFile = Join-Path $workingDir "Generated Files/issueReview/$IssueNumber/fix-prompt.md" + $promptContent = @" +# Fix Issue #$IssueNumber + +## Instructions + +$prompt + +## Quick Start + +1. Read the implementation plan: ``Generated Files/issueReview/$IssueNumber/implementation-plan.md`` +2. Read the overview: ``Generated Files/issueReview/$IssueNumber/overview.md`` +3. Follow the plan step by step +4. 
Build and test after each change +"@ + Set-Content -Path $promptFile -Value $promptContent -Force + + # Open VS Code with the worktree + code --new-window $workingDir --profile $VSCodeProfile + Info "VS Code opened at $workingDir" + Info "Prompt file created at: $promptFile" + Info "Use GitHub Copilot in VS Code to implement the fix." + } + 'vscode' { + # Open VS Code and let user manually trigger the fix + code --new-window $workingDir --profile $VSCodeProfile + Info "VS Code opened at $workingDir. Use Copilot to implement the fix." + } + default { + Warn "CLI type '$CLIType' not fully supported for auto-fix. Opening VS Code..." + code --new-window $workingDir --profile $VSCodeProfile + } + } + + # Check if any changes were actually made + $hasChanges = $false + Push-Location $workingDir + try { + $uncommitted = git status --porcelain 2>$null + $commitsAhead = git rev-list main..HEAD --count 2>$null + if ($uncommitted -or ($commitsAhead -gt 0)) { + $hasChanges = $true + } + } finally { + Pop-Location + } + + return @{ + IssueNumber = $IssueNumber + WorktreePath = $workingDir + Success = $true + ActionTaken = 'CodeFixAttempted' + SkippedCodeFix = $false + HasChanges = $hasChanges + } +} + +#region Main Script +try { + Info "Repository root: $repoRoot" + + # Detect or validate CLI + if ($CLIType -eq 'auto') { + $cli = Get-AvailableCLI + if ($cli) { + $CLIType = $cli.Type + Info "Auto-detected CLI: $($cli.Name)" + } else { + $CLIType = 'vscode' + Info "No CLI detected, will use VS Code" + } + } + + # Find issues to fix + $issuesToFix = @() + + if ($IssueNumber) { + # Single issue specified + $reviewResult = Get-IssueReviewResult -IssueNumber $IssueNumber -RepoRoot $repoRoot + if (-not $reviewResult.HasOverview -or -not $reviewResult.HasImplementationPlan) { + throw "Issue #$IssueNumber does not have review files. Run Start-BulkIssueReview.ps1 first." 
+ } + $issuesToFix += @{ + IssueNumber = $IssueNumber + OverviewPath = $reviewResult.OverviewPath + ImplementationPlanPath = $reviewResult.ImplementationPlanPath + } + } else { + # Find high-confidence issues + Info "`nSearching for high-confidence issues..." + Info " Min Feasibility Score: $MinFeasibilityScore" + Info " Min Clarity Score: $MinClarityScore" + Info " Max Effort: $MaxEffortDays days" + + $highConfidence = Get-HighConfidenceIssues ` + -RepoRoot $repoRoot ` + -MinFeasibilityScore $MinFeasibilityScore ` + -MinClarityScore $MinClarityScore ` + -MaxEffortDays $MaxEffortDays + + if ($highConfidence.Count -eq 0) { + Warn "No high-confidence issues found matching criteria." + Info "Try lowering the score thresholds or increasing MaxEffortDays." + return + } + + $issuesToFix = $highConfidence + } + + Info "`nIssues ready for auto-fix: $($issuesToFix.Count)" + Info ("-" * 80) + foreach ($issue in $issuesToFix) { + $scores = "" + if ($issue.FeasibilityScore) { + $scores = " [Feasibility: $($issue.FeasibilityScore), Clarity: $($issue.ClarityScore), Effort: $($issue.EffortDays)d]" + } + Info ("#{0,-6}{1}" -f $issue.IssueNumber, $scores) + } + Info ("-" * 80) + + # In DryRun mode, still analyze plans but don't take action + if ($DryRun) { + Info "`nAnalyzing implementation plans (dry run)..." 
+ foreach ($issue in $issuesToFix) { + $implPlanPath = Join-Path (Get-IssueReviewPath -RepoRoot $repoRoot -IssueNumber $issue.IssueNumber) 'implementation-plan.md' + if (Test-Path $implPlanPath) { + $planStatus = Get-ImplementationPlanStatus -ImplementationPlanPath $implPlanPath + $color = switch ($planStatus.Action) { + 'ImplementFix' { 'Green' } + 'CloseIssue' { 'Yellow' } + 'AddComment' { 'Cyan' } + 'LinkDuplicate' { 'Magenta' } + default { 'Gray' } + } + Write-Host (" #{0,-6} [{1,-20}] -> {2}" -f $issue.IssueNumber, $planStatus.Status, $planStatus.Action) -ForegroundColor $color + if ($planStatus.RelatedPR) { + $prInfo = "PR #$($planStatus.RelatedPR)" + if ($planStatus.ReleasedIn) { + $prInfo += " (released in $($planStatus.ReleasedIn))" + } elseif ($planStatus.Status -eq 'FixedButUnreleased') { + $prInfo += " (merged, awaiting release)" + } + Write-Host " $prInfo" -ForegroundColor DarkGray + } + if ($planStatus.DuplicateOf) { + Write-Host " Duplicate of #$($planStatus.DuplicateOf)" -ForegroundColor DarkGray + } + } + } + Warn "`nDry run mode - no actions taken." + return + } + + # Confirm before proceeding (skip if -Force) + if (-not $Force) { + $confirm = Read-Host "`nProceed with fixing $($issuesToFix.Count) issues? (y/N)" + if ($confirm -notmatch '^[yY]') { + Info "Cancelled." 
+ return + } + } + + # Process issues + $results = @{ + Succeeded = @() + Failed = @() + AlreadyResolved = @() + AwaitingRelease = @() + NeedsClarification = @() + Duplicates = @() + NoChanges = @() + } + + foreach ($issue in $issuesToFix) { + try { + Info "`n" + ("=" * 60) + Info "PROCESSING ISSUE #$($issue.IssueNumber)" + Info ("=" * 60) + + $result = Start-IssueFixInWorktree ` + -IssueNumber $issue.IssueNumber ` + -SourceRepoRoot $repoRoot ` + -CLIType $CLIType ` + -Model $Model ` + -VSCodeProfile $VSCodeProfile ` + -SkipWorktree:$SkipWorktree ` + -DryRun:$DryRun + + if ($result.SkippedCodeFix) { + # Action was taken but no code fix (e.g., closed issue, added comment) + switch -Wildcard ($result.ActionTaken) { + '*Closing*' { $results.AlreadyResolved += $issue.IssueNumber } + '*clarification*' { $results.NeedsClarification += $issue.IssueNumber } + '*duplicate*' { $results.Duplicates += $issue.IssueNumber } + '*merged*awaiting*' { $results.AwaitingRelease += $issue.IssueNumber } + '*merged but not yet released*' { $results.AwaitingRelease += $issue.IssueNumber } + default { $results.Succeeded += $issue.IssueNumber } + } + Success "✓ Issue #$($issue.IssueNumber) handled: $($result.ActionTaken)" + } + elseif ($result.HasChanges) { + $results.Succeeded += $issue.IssueNumber + Success "✓ Issue #$($issue.IssueNumber) fix completed with changes" + } + else { + $results.NoChanges += $issue.IssueNumber + Warn "⚠ Issue #$($issue.IssueNumber) fix ran but no code changes were made" + } + } + catch { + Err "✗ Issue #$($issue.IssueNumber) failed: $($_.Exception.Message)" + $results.Failed += $issue.IssueNumber + } + } + + # Summary + Info "`n" + ("=" * 80) + Info "AUTO-FIX COMPLETE" + Info ("=" * 80) + Info "Total issues: $($issuesToFix.Count)" + if ($results.Succeeded.Count -gt 0) { + Success "Code fixes: $($results.Succeeded.Count)" + } + if ($results.AlreadyResolved.Count -gt 0) { + Success "Already resolved: $($results.AlreadyResolved.Count) (issues closed)" + } + if 
($results.AwaitingRelease.Count -gt 0) { + Info "Awaiting release: $($results.AwaitingRelease.Count) (fix merged, pending release)" + } + if ($results.NeedsClarification.Count -gt 0) { + Warn "Need clarification: $($results.NeedsClarification.Count) (comments added)" + } + if ($results.Duplicates.Count -gt 0) { + Warn "Duplicates: $($results.Duplicates.Count) (issues closed)" + } + if ($results.NoChanges.Count -gt 0) { + Warn "No changes made: $($results.NoChanges.Count)" + } + if ($results.Failed.Count -gt 0) { + Err "Failed: $($results.Failed.Count)" + Err "Failed issues: $($results.Failed -join ', ')" + } + Info ("=" * 80) + + if (-not $SkipWorktree -and ($results.Succeeded.Count -gt 0 -or $results.NoChanges.Count -gt 0)) { + Info "`nWorktrees created. Use 'git worktree list' to see all worktrees." + Info "To clean up: Delete-Worktree.ps1 -Branch issue/" + } + + # Write signal files for orchestrator + $genFiles = Get-GeneratedFilesPath -RepoRoot $repoRoot + foreach ($issueNum in $results.Succeeded) { + $signalDir = Join-Path $genFiles "issueFix/$issueNum" + if (-not (Test-Path $signalDir)) { New-Item -ItemType Directory -Path $signalDir -Force | Out-Null } + @{ + status = "success" + issueNumber = $issueNum + timestamp = (Get-Date).ToString("o") + worktreePath = (git worktree list --porcelain | Select-String "worktree.*issue.$issueNum" | ForEach-Object { $_.Line -replace 'worktree ', '' }) + } | ConvertTo-Json | Set-Content "$signalDir/.signal" -Force + } + foreach ($issueNum in $results.Failed) { + $signalDir = Join-Path $genFiles "issueFix/$issueNum" + if (-not (Test-Path $signalDir)) { New-Item -ItemType Directory -Path $signalDir -Force | Out-Null } + @{ + status = "failure" + issueNumber = $issueNum + timestamp = (Get-Date).ToString("o") + } | ConvertTo-Json | Set-Content "$signalDir/.signal" -Force + } + + return $results +} +catch { + Err "Error: $($_.Exception.Message)" + exit 1 +} +#endregion diff --git 
a/.github/skills/issue-fix/scripts/Start-IssueFixParallel.ps1 b/.github/skills/issue-fix/scripts/Start-IssueFixParallel.ps1 new file mode 100644 index 000000000000..ca612633e472 --- /dev/null +++ b/.github/skills/issue-fix/scripts/Start-IssueFixParallel.ps1 @@ -0,0 +1,86 @@ +<# +.SYNOPSIS + Run issue-fix in parallel from a single terminal. + +.PARAMETER IssueNumbers + Issue numbers to fix. + +.PARAMETER ThrottleLimit + Maximum parallel tasks. + +.PARAMETER CLIType + AI CLI type (copilot/claude/gh-copilot/vscode/auto). + +.PARAMETER Model + Copilot CLI model to use (e.g., gpt-5.2-codex). + +.PARAMETER Force + Skip confirmation prompts in Start-IssueAutoFix.ps1. +#> +[CmdletBinding()] +param( + [Parameter(Mandatory)] + [int[]]$IssueNumbers, + + [int]$ThrottleLimit = 5, + + [ValidateSet('claude', 'copilot', 'gh-copilot', 'vscode', 'auto')] + [string]$CLIType = 'copilot', + + [string]$Model, + + [switch]$Force +) + +$repoRoot = Resolve-Path (Join-Path $PSScriptRoot '..\..\..\..') + +# Resolve config directory name (.github or .claude) from script location +$_cfgDir = if ($PSScriptRoot -match '[\\/](\.github|\.claude)[\\/]') { $Matches[1] } else { '.github' } +$scriptPath = Join-Path $repoRoot "$_cfgDir\skills\issue-fix\scripts\Start-IssueAutoFix.ps1" + +$results = $IssueNumbers | ForEach-Object -Parallel { + $issue = $PSItem + $repoRoot = $using:repoRoot + $scriptPath = $using:scriptPath + $cliType = $using:CLIType + $model = $using:Model + $force = $using:Force + + Set-Location $repoRoot + + if (-not $issue) { + return [pscustomobject]@{ + IssueNumber = $issue + ExitCode = 1 + Error = 'Issue number is empty.' 
+ } + } + + $params = @{ + IssueNumber = [int]$issue + CLIType = $cliType + } + if ($model) { + $params.Model = $model + } + if ($force) { + $params.Force = $true + } + + try { + & $scriptPath @params | Out-Default + [pscustomobject]@{ + IssueNumber = $issue + ExitCode = $LASTEXITCODE + } + } + catch { + [pscustomobject]@{ + IssueNumber = $issue + ExitCode = 1 + Error = $_.Exception.Message + } + } +} -ThrottleLimit $ThrottleLimit + +$results diff --git a/.github/skills/issue-fix/scripts/Submit-IssueFix.ps1 b/.github/skills/issue-fix/scripts/Submit-IssueFix.ps1 new file mode 100644 index 000000000000..6db8b64c64b6 --- /dev/null +++ b/.github/skills/issue-fix/scripts/Submit-IssueFix.ps1 @@ -0,0 +1,562 @@ +<#! +.SYNOPSIS + Commit and create PRs for completed issue fixes in worktrees. + +.DESCRIPTION + For each specified issue (or all issue worktrees), commits changes using AI-generated + commit messages and creates PRs with AI-generated summaries, linking to the original issue. + +.PARAMETER IssueNumbers + Array of issue numbers to submit. If not specified, processes all issue/* worktrees. + +.PARAMETER DryRun + Show what would be done without actually committing or creating PRs. + +.PARAMETER SkipCommit + Skip the commit step (assume changes are already committed). + +.PARAMETER SkipPush + Skip pushing to remote (useful for testing). + +.PARAMETER TargetBranch + Target branch for the PR. Default: main. + +.PARAMETER CLIType + AI CLI to use for generating messages: copilot, claude, or manual. Default: copilot. + +.PARAMETER Draft + Create PRs as drafts. 
+
+.EXAMPLE
+    # Submit all issue worktrees
+    ./Submit-IssueFix.ps1
+
+.EXAMPLE
+    # Submit specific issues
+    ./Submit-IssueFix.ps1 -IssueNumbers 44044, 44480
+
+.EXAMPLE
+    # Dry run to see what would happen
+    ./Submit-IssueFix.ps1 -DryRun
+
+.EXAMPLE
+    # Create draft PRs
+    ./Submit-IssueFix.ps1 -Draft
+
+.NOTES
+    Prerequisites:
+    - Worktrees created by Start-IssueAutoFix.ps1
+    - Changes made in the worktrees
+    - GitHub CLI (gh) authenticated
+    - Copilot CLI or Claude Code CLI
+#>
+
+[CmdletBinding()]
+param(
+    [int[]]$IssueNumbers,
+
+    [switch]$DryRun,
+
+    [switch]$SkipCommit,
+
+    [switch]$SkipPush,
+
+    [string]$TargetBranch = 'main',
+
+    [ValidateSet('copilot', 'claude', 'manual')]
+    [string]$CLIType = 'copilot',
+
+    [switch]$Draft,
+
+    [switch]$Force,
+
+    [switch]$Help
+)
+
+# Load libraries
+$scriptDir = Split-Path -Parent $MyInvocation.MyCommand.Path
+. "$scriptDir/IssueReviewLib.ps1"
+
+# Load worktree library
+$repoRoot = Get-RepoRoot
+
+# Resolve config directory name (.github or .claude) from script location
+$_cfgDir = if ($PSScriptRoot -match '[\\/](\.github|\.claude)[\\/]') { $Matches[1] } else { '.github' }
+$worktreeLib = Join-Path $repoRoot 'tools/build/WorktreeLib.ps1'
+if (Test-Path $worktreeLib) {
+    . $worktreeLib
+}
+
+if ($Help) {
+    Get-Help $MyInvocation.MyCommand.Path -Full
+    return
+}
+
+function Get-AIGeneratedCommitTitle {
+    <#
+    .SYNOPSIS
+        Generate commit title using AI CLI with create-commit-title prompt.
+    #>
+    param(
+        [Parameter(Mandatory)]
+        [string]$WorktreePath,
+        [string]$CLIType = 'copilot'
+    )
+
+    $promptFile = Join-Path $repoRoot "$_cfgDir/prompts/create-commit-title.prompt.md"
+    if (-not (Test-Path $promptFile)) {
+        throw "Prompt file not found: $promptFile"
+    }
+
+    $prompt = "Follow the instructions in $_cfgDir/prompts/create-commit-title.prompt.md to generate a commit title for the current changes. Output ONLY the commit title, nothing else."
+ + # MCP config for github-artifacts tools (relative to repo root) + $mcpConfig = "@$_cfgDir/skills/issue-fix/references/mcp-config.json" + + Push-Location $WorktreePath + try { + switch ($CLIType) { + 'copilot' { + $result = & copilot --additional-mcp-config $mcpConfig -p $prompt --yolo -s --agent FixIssue 2>&1 + # Extract just the title line (last non-empty line that looks like a title) + $lines = $result -split "`n" | Where-Object { $_.Trim() -and $_ -notmatch '^\s*```' -and $_ -notmatch '^\s*#' } + $title = $lines | Select-Object -Last 1 + return $title.Trim() + } + 'claude' { + $result = & claude --print --dangerously-skip-permissions --agent FixIssue --prompt $prompt 2>&1 + $lines = $result -split "`n" | Where-Object { $_.Trim() -and $_ -notmatch '^\s*```' } + $title = $lines | Select-Object -Last 1 + return $title.Trim() + } + 'manual' { + # Show diff and ask user for title + git diff HEAD --stat + return Read-Host "Enter commit title" + } + } + } finally { + Pop-Location + } +} + +function Get-AIGeneratedPRSummary { + <# + .SYNOPSIS + Generate PR summary using AI CLI with create-pr-summary prompt. + #> + param( + [Parameter(Mandatory)] + [string]$WorktreePath, + [Parameter(Mandatory)] + [int]$IssueNumber, + [string]$TargetBranch = 'main', + [string]$CLIType = 'copilot' + ) + + $prompt = @" +Follow the instructions in $_cfgDir/prompts/create-pr-summary.prompt.md to generate a PR summary. +Target branch: $TargetBranch +This PR fixes issue #$IssueNumber. + +IMPORTANT: +1. Output the PR title on the first line +2. Then output the PR body in markdown format +3. 
Make sure to include "Fixes #$IssueNumber" in the body to auto-link the issue +"@ + + # MCP config for github-artifacts tools (relative to repo root) + $mcpConfig = "@$_cfgDir/skills/issue-fix/references/mcp-config.json" + + Push-Location $WorktreePath + try { + switch ($CLIType) { + 'copilot' { + $result = & copilot --additional-mcp-config $mcpConfig -p $prompt --yolo -s --agent FixIssue 2>&1 + return $result -join "`n" + } + 'claude' { + $result = & claude --print --dangerously-skip-permissions --agent FixIssue --prompt $prompt 2>&1 + return $result -join "`n" + } + 'manual' { + git diff "$TargetBranch...HEAD" --stat + $title = Read-Host "Enter PR title" + $body = Read-Host "Enter PR body (or press Enter for default)" + if (-not $body) { + $body = "Fixes #$IssueNumber" + } + return "$title`n`n$body" + } + } + } finally { + Pop-Location + } +} + +function Parse-PRContent { + <# + .SYNOPSIS + Parse AI output to extract PR title and body. + Expected format: + Line 1: feat(scope): title text + Line 2+: ```markdown + ## Summary... + ``` + #> + param( + [Parameter(Mandatory)] + [string]$Content, + [int]$IssueNumber + ) + + $lines = $Content -split "`n" + + # Title is the FIRST line that looks like a conventional commit + # Body is the content INSIDE the ```markdown ... 
``` block + $title = $null + $body = $null + + # Find title - first line matching conventional commit format + foreach ($line in $lines) { + $trimmed = $line.Trim() + if ($trimmed -match '^(feat|fix|docs|refactor|perf|test|build|ci|chore)(\([^)]+\))?:') { + $title = $trimmed -replace '^#+\s*', '' + break + } + } + + # Fallback title + if (-not $title) { + $title = "fix: address issue #$IssueNumber" + } + + # Extract body from markdown code block + $fullContent = $Content + if ($fullContent -match '```markdown\r?\n([\s\S]*?)\r?\n```') { + $body = $Matches[1].Trim() + } else { + # No markdown block - use everything after the title line + $titleIndex = [array]::IndexOf($lines, ($lines | Where-Object { $_.Trim() -eq $title } | Select-Object -First 1)) + if ($titleIndex -ge 0 -and $titleIndex -lt $lines.Count - 1) { + $body = ($lines[($titleIndex + 1)..($lines.Count - 1)] -join "`n").Trim() + # Clean up any remaining code fences + $body = $body -replace '^```\w*\r?\n', '' -replace '\r?\n```\s*$', '' + } else { + $body = "" + } + } + + # Ensure issue link is present + if ($body -notmatch "Fixes\s*#$IssueNumber" -and $body -notmatch "Closes\s*#$IssueNumber" -and $body -notmatch "Resolves\s*#$IssueNumber") { + $body = "$body`n`nFixes #$IssueNumber" + } + + return @{ + Title = $title + Body = $body + } +} + +function Submit-IssueFix { + <# + .SYNOPSIS + Commit changes, push, and create PR for a single issue. 
+ #> + param( + [Parameter(Mandatory)] + [int]$IssueNumber, + [Parameter(Mandatory)] + [string]$WorktreePath, + [Parameter(Mandatory)] + [string]$Branch, + [string]$TargetBranch = 'main', + [string]$CLIType = 'copilot', + [switch]$DryRun, + [switch]$SkipCommit, + [switch]$SkipPush, + [switch]$Draft + ) + + Push-Location $WorktreePath + try { + # Check for changes + $status = git status --porcelain + $hasUncommitted = $status.Count -gt 0 + + # Check for commits ahead of target + git fetch origin $TargetBranch 2>$null + $commitsAhead = git rev-list --count "origin/$TargetBranch..$Branch" 2>$null + if (-not $commitsAhead) { $commitsAhead = 0 } + + Info "Issue #$IssueNumber in $WorktreePath" + Info " Branch: $Branch" + Info " Uncommitted changes: $hasUncommitted" + Info " Commits ahead of $TargetBranch`: $commitsAhead" + + if (-not $hasUncommitted -and $commitsAhead -eq 0) { + Warn " No changes to submit for issue #$IssueNumber" + return @{ IssueNumber = $IssueNumber; Status = 'NoChanges' } + } + + # Step 1: Commit if there are uncommitted changes + if ($hasUncommitted -and -not $SkipCommit) { + Info " Generating commit title..." + + if ($DryRun) { + Info " [DRY RUN] Would generate commit title and commit changes" + } else { + $commitTitle = Get-AIGeneratedCommitTitle -WorktreePath $WorktreePath -CLIType $CLIType + + if (-not $commitTitle) { + throw "Failed to generate commit title" + } + + Info " Commit title: $commitTitle" + + # Stage all changes and commit + git add -A + git commit -m $commitTitle + + if ($LASTEXITCODE -ne 0) { + throw "Git commit failed" + } + + Success " ✓ Changes committed" + } + } + + # Step 2: Push to remote + if (-not $SkipPush) { + if ($DryRun) { + Info " [DRY RUN] Would push branch $Branch to origin" + } else { + Info " Pushing to origin..." + git push -u origin $Branch 2>&1 | Out-Null + + if ($LASTEXITCODE -ne 0) { + # Try force push if normal push fails (branch might have been reset) + Warn " Normal push failed, trying force push..." 
+ git push -u origin $Branch --force-with-lease 2>&1 | Out-Null + if ($LASTEXITCODE -ne 0) { + throw "Git push failed" + } + } + + Success " ✓ Pushed to origin" + } + } + + # Step 3: Create PR + Info " Generating PR summary..." + + if ($DryRun) { + Info " [DRY RUN] Would generate PR summary and create PR" + Info " [DRY RUN] PR would link to issue #$IssueNumber" + return @{ IssueNumber = $IssueNumber; Status = 'DryRun' } + } + + # Check if PR already exists + $existingPR = gh pr list --head $Branch --json number,url 2>$null | ConvertFrom-Json + if ($existingPR -and $existingPR.Count -gt 0) { + Warn " PR already exists: $($existingPR[0].url)" + return @{ IssueNumber = $IssueNumber; Status = 'PRExists'; PRUrl = $existingPR[0].url } + } + + $prContent = Get-AIGeneratedPRSummary -WorktreePath $WorktreePath -IssueNumber $IssueNumber -TargetBranch $TargetBranch -CLIType $CLIType + $parsed = Parse-PRContent -Content $prContent -IssueNumber $IssueNumber + + if (-not $parsed.Title) { + throw "Failed to generate PR title" + } + + Info " PR Title: $($parsed.Title)" + + # Create PR using gh CLI + $ghArgs = @( + 'pr', 'create', + '--base', $TargetBranch, + '--head', $Branch, + '--title', $parsed.Title, + '--body', $parsed.Body + ) + + if ($Draft) { + $ghArgs += '--draft' + } + + $prResult = & gh @ghArgs 2>&1 + + if ($LASTEXITCODE -ne 0) { + throw "Failed to create PR: $prResult" + } + + # Extract PR URL from result + $prUrl = $prResult | Select-String -Pattern 'https://github.com/[^\s]+' | ForEach-Object { $_.Matches[0].Value } + + Success " ✓ PR created: $prUrl" + + return @{ + IssueNumber = $IssueNumber + Status = 'Success' + PRUrl = $prUrl + CommitTitle = $commitTitle + PRTitle = $parsed.Title + } + } + catch { + Err " ✗ Failed: $($_.Exception.Message)" + return @{ + IssueNumber = $IssueNumber + Status = 'Failed' + Error = $_.Exception.Message + } + } + finally { + Pop-Location + } +} + +#region Main Script +try { + Info "Repository root: $repoRoot" + Info "Target branch: 
$TargetBranch" + Info "CLI type: $CLIType" + + # Get all issue worktrees + $allWorktrees = Get-WorktreeEntries | Where-Object { $_.Branch -like 'issue/*' } + + if ($allWorktrees.Count -eq 0) { + Warn "No issue worktrees found. Run Start-IssueAutoFix.ps1 first." + return + } + + # Filter to specified issues if provided + $worktreesToProcess = @() + + if ($IssueNumbers -and $IssueNumbers.Count -gt 0) { + foreach ($issueNum in $IssueNumbers) { + $wt = $allWorktrees | Where-Object { $_.Branch -match "issue/$issueNum\b" } + if ($wt) { + $worktreesToProcess += $wt + } else { + Warn "No worktree found for issue #$issueNum" + } + } + } else { + $worktreesToProcess = $allWorktrees + } + + if ($worktreesToProcess.Count -eq 0) { + Warn "No worktrees to process." + return + } + + # Display worktrees to process + Info "`nWorktrees to submit:" + Info ("-" * 80) + foreach ($wt in $worktreesToProcess) { + # Extract issue number from branch name + if ($wt.Branch -match 'issue/(\d+)') { + $issueNum = $Matches[1] + Info " #$issueNum -> $($wt.Path) [$($wt.Branch)]" + } + } + Info ("-" * 80) + + if ($DryRun) { + Warn "`nDry run mode - no changes will be made." + } + + # Confirm before proceeding + if (-not $Force -and -not $DryRun) { + $confirm = Read-Host "`nProceed with submitting $($worktreesToProcess.Count) fixes? (y/N)" + if ($confirm -notmatch '^[yY]') { + Info "Cancelled." 
+ return + } + } + + # Process each worktree + $results = @{ + Success = @() + Failed = @() + NoChanges = @() + PRExists = @() + DryRun = @() + } + + foreach ($wt in $worktreesToProcess) { + if ($wt.Branch -match 'issue/(\d+)') { + $issueNum = [int]$Matches[1] + + Info ("`n" + ("=" * 60)) + Info "SUBMITTING ISSUE #$issueNum" + Info ("=" * 60) + + $result = Submit-IssueFix ` + -IssueNumber $issueNum ` + -WorktreePath $wt.Path ` + -Branch $wt.Branch ` + -TargetBranch $TargetBranch ` + -CLIType $CLIType ` + -DryRun:$DryRun ` + -SkipCommit:$SkipCommit ` + -SkipPush:$SkipPush ` + -Draft:$Draft + + switch ($result.Status) { + 'Success' { $results.Success += $result } + 'Failed' { $results.Failed += $result } + 'NoChanges' { $results.NoChanges += $result } + 'PRExists' { $results.PRExists += $result } + 'DryRun' { $results.DryRun += $result } + } + } + } + + # Summary + Info ("`n" + ("=" * 80)) + Info "SUBMISSION COMPLETE" + Info ("=" * 80) + Info "Total worktrees: $($worktreesToProcess.Count)" + + if ($results.Success.Count -gt 0) { + Success "PRs created: $($results.Success.Count)" + foreach ($r in $results.Success) { + Success " #$($r.IssueNumber): $($r.PRUrl)" + } + } + + if ($results.PRExists.Count -gt 0) { + Warn "PRs already exist: $($results.PRExists.Count)" + foreach ($r in $results.PRExists) { + Warn " #$($r.IssueNumber): $($r.PRUrl)" + } + } + + if ($results.NoChanges.Count -gt 0) { + Warn "No changes: $($results.NoChanges.Count)" + Warn " Issues: $($results.NoChanges.IssueNumber -join ', ')" + } + + if ($results.Failed.Count -gt 0) { + Err "Failed: $($results.Failed.Count)" + foreach ($r in $results.Failed) { + Err " #$($r.IssueNumber): $($r.Error)" + } + } + + if ($results.DryRun.Count -gt 0) { + Info "Dry run: $($results.DryRun.Count)" + } + + Info ("=" * 80) + + return $results +} +catch { + Err "Error: $($_.Exception.Message)" + exit 1 +} +#endregion diff --git a/.github/skills/issue-review-review/LICENSE.txt b/.github/skills/issue-review-review/LICENSE.txt
new file mode 100644 index 000000000000..22aed37e650b --- /dev/null +++ b/.github/skills/issue-review-review/LICENSE.txt @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) Microsoft Corporation. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/.github/skills/issue-review-review/SKILL.md b/.github/skills/issue-review-review/SKILL.md new file mode 100644 index 000000000000..078a51e302a8 --- /dev/null +++ b/.github/skills/issue-review-review/SKILL.md @@ -0,0 +1,184 @@ +--- +name: issue-review-review +description: Meta-review of issue-review outputs to validate scoring accuracy and implementation plan quality. Use when asked to verify an issue review, validate review scores, check if implementation plan is sound, audit issue analysis quality, second-opinion on issue feasibility, or ensure review consistency. Outputs a quality score (0-100) and corrective feedback that feeds back into issue-review for re-analysis. 
+license: Complete terms in LICENSE.txt +--- + +# Issue Review Review Skill + +Validate the quality of `issue-review` outputs by cross-checking scores against evidence, verifying implementation plan correctness, and producing actionable feedback. When the quality score is below 90, the feedback is fed back into `issue-review` to re-run the analysis with corrections. + +## Skill Contents + +This skill is **self-contained** with all required resources: + +``` +.github/skills/issue-review-review/ +├── SKILL.md # This file +├── LICENSE.txt # MIT License +├── scripts/ +│ ├── Start-IssueReviewReview.ps1 # Main review-review script +│ ├── Start-IssueReviewReviewParallel.ps1 # Parallel runner +│ └── IssueReviewLib.ps1 # Shared library functions +└── references/ + ├── review-the-review.prompt.md # AI prompt for meta-review + └── mcp-config.json # MCP configuration +``` + +## Output Directory + +All generated artifacts are placed under `Generated Files/issueReviewReview/<issueNumber>/` at the repository root (gitignored). + +``` +Generated Files/issueReviewReview/ +└── <issueNumber>/ + ├── reviewTheReview.md # Meta-review with quality score and feedback + ├── .signal # Completion signal for orchestrator + └── iteration-<N>/ # Previous iteration outputs (if looped) + └── reviewTheReview.md +``` + +## Signal File + +On completion, a `.signal` file is created for orchestrator coordination: + +```json +{ + "status": "success", + "issueNumber": 45363, + "timestamp": "2026-02-04T10:05:23Z", + "qualityScore": 85, + "iteration": 1, + "outputs": ["reviewTheReview.md"], + "needsReReview": true +} +``` + +Status values: `success`, `failure` + +Key fields: +- `qualityScore` (0-100): Overall quality of the original review +- `iteration`: Which review-review pass this is (1, 2, 3...)
+- `needsReReview`: `true` if score < 90, meaning `issue-review` should re-run with feedback + +## When to Use This Skill + +- Validate that an issue review's scores match the evidence +- Check if an implementation plan is technically sound +- Verify that short-term and long-term fix strategies are correct +- Audit review quality before sending issues to `issue-fix` +- Second-opinion on feasibility and clarity assessments +- Quality gate in the issue-to-PR cycle automation + +## Prerequisites + +- GitHub CLI (`gh`) installed and authenticated +- PowerShell 7+ for running scripts +- Issue must be reviewed first (use `issue-review` skill) +- Copilot CLI or Claude CLI installed + +## Required Variables + +⚠️ **Before starting**, confirm `{{IssueNumber}}` with the user. If not provided, **ASK**: "What issue number should I review-review?" + +| Variable | Description | Example | +|----------|-------------|---------| +| `{{IssueNumber}}` | GitHub issue number whose review to validate | `44044` | + +## Workflow + +### Step 1: Ensure Issue Is Reviewed + +The issue must already have `Generated Files/issueReview/{{IssueNumber}}/overview.md` and `implementation-plan.md`. If not, run `issue-review` first. + +### Step 2: Run Review-Review + +```powershell +# From repo root +.github/skills/issue-review-review/scripts/Start-IssueReviewReview.ps1 -IssueNumber {{IssueNumber}} +``` + +This will: +1. Read the original issue from GitHub +2. Read the existing `overview.md` and `implementation-plan.md` +3. Cross-check scores against evidence in the issue +4. Validate implementation plan against codebase +5. 
Generate `reviewTheReview.md` with quality score and feedback + +### Step 3: Check Quality Score + +Read the signal file at `Generated Files/issueReviewReview/{{IssueNumber}}/.signal`: + +| Quality Score | Action | +|---------------|--------| +| 90-100 | ✅ Review is high quality — proceed to `issue-fix` | +| 70-89 | ⚠️ Review needs improvement — re-run `issue-review` with feedback | +| 50-69 | 🔶 Review has significant issues — re-run with feedback, may need 2 iterations | +| 0-49 | 🔴 Review is poor — re-run with feedback, consider manual review | + +### Step 4: Feed Back to Issue-Review (if score < 90) + +If `needsReReview` is `true`, re-run issue-review with the feedback file: + +```powershell +# Re-run issue-review with feedback from review-review +.github/skills/issue-review/scripts/Start-BulkIssueReview.ps1 -IssueNumber {{IssueNumber}} -FeedbackFile "Generated Files/issueReviewReview/{{IssueNumber}}/reviewTheReview.md" -Force +``` + +Then re-run the review-review to check if quality improved: + +```powershell +.github/skills/issue-review-review/scripts/Start-IssueReviewReview.ps1 -IssueNumber {{IssueNumber}} -Force +``` + +### Step 5: Loop Until Quality ≥ 90 + +The orchestrator (`issue-to-pr-cycle`) will loop Steps 2-4 until either: +- Quality score ≥ 90, OR +- Maximum iterations reached (default: 3) + +## Batch Review-Review + +To review-review multiple issues at once: + +```powershell +.github/skills/issue-review-review/scripts/Start-IssueReviewReviewParallel.ps1 -IssueNumbers 44044,32950,45029 -ThrottleLimit 5 -Force +``` + +## CLI Options + +### Start-IssueReviewReview.ps1 + +| Parameter | Description | Default | +|-----------|-------------|---------| +| `-IssueNumber` | Issue number to review-review | (required) | +| `-CLIType` | AI CLI: `copilot` or `claude` | `copilot` | +| `-Model` | Copilot model to use | (auto) | +| `-Force` | Skip confirmation prompts | `$false` | +| `-DryRun` | Show what would be done | `$false` | + +### 
Start-IssueReviewReviewParallel.ps1 + +| Parameter | Description | Default | +|-----------|-------------|---------| +| `-IssueNumbers` | Array of issue numbers | (required) | +| `-ThrottleLimit` | Max parallel tasks | `5` | +| `-CLIType` | AI CLI type | `copilot` | +| `-Model` | Copilot model to use | (auto) | +| `-Force` | Skip confirmation prompts | `$false` | + +## Quality Dimensions Checked + +The meta-review evaluates these dimensions: + +| Dimension | What It Checks | Weight | +|-----------|---------------|--------| +| Score Accuracy | Do scores match the evidence cited? | 30% | +| Implementation Correctness | Are the right files/patterns identified? | 25% | +| Risk Assessment | Are risks properly identified and mitigated? | 15% | +| Completeness | Are all aspects covered (perf, security, a11y, i18n)? | 15% | +| Actionability | Can an AI agent execute the plan as written? | 15% | + +## AI Prompt Reference + +The full prompt template is at [references/review-the-review.prompt.md](./references/review-the-review.prompt.md). 
diff --git a/.github/skills/issue-review-review/references/mcp-config.json b/.github/skills/issue-review-review/references/mcp-config.json new file mode 100644 index 000000000000..5af15d54218c --- /dev/null +++ b/.github/skills/issue-review-review/references/mcp-config.json @@ -0,0 +1,9 @@ +{ + "mcpServers": { + "github-artifacts": { + "command": "cmd", + "args": ["/c", "for /f %i in ('git rev-parse --show-toplevel') do node %i/tools/mcp/github-artifacts/launch.js"], + "tools": ["*"] + } + } +} diff --git a/.github/skills/issue-review-review/references/review-the-review.prompt.md b/.github/skills/issue-review-review/references/review-the-review.prompt.md new file mode 100644 index 000000000000..3d7363ec918b --- /dev/null +++ b/.github/skills/issue-review-review/references/review-the-review.prompt.md @@ -0,0 +1,194 @@ +--- +agent: 'agent' +description: 'Meta-review of issue-review outputs: validate scores, check implementation plan quality, produce feedback' +--- + +# Review the Review — Meta-Analysis of Issue Review Quality + +## Goal +For issue **#{{issue_number}}**, validate the existing `issue-review` outputs and produce: +1) `Generated Files/issueReviewReview/{{issue_number}}/reviewTheReview.md` + +## Inputs + +You MUST have these files available before starting: +- `Generated Files/issueReview/{{issue_number}}/overview.md` — The original review scores and assessment +- `Generated Files/issueReview/{{issue_number}}/implementation-plan.md` — The original implementation plan +- The original GitHub issue data (fetch via `gh issue view {{issue_number}}`) + +If a feedback file from a previous iteration exists, also read it: +- `Generated Files/issueReviewReview/{{issue_number}}/reviewTheReview.md` — Previous meta-review feedback (check if iteration > 1) + +## Process + +### Step 1: Gather Context + +1. 
**Read the original issue**: `gh issue view {{issue_number}} --json number,title,body,author,createdAt,updatedAt,state,labels,milestone,reactions,comments,linkedPullRequests` +2. **Read overview.md**: Parse all scores (Business Importance, Community Excitement, Technical Feasibility, Requirement Clarity, Overall Priority, Effort Estimate) +3. **Read implementation-plan.md**: Parse all sections (Problem Framing, Layers & Files, Pattern Choices, Fundamentals, Task Breakdown) +4. **Examine the actual codebase**: Use `rg`/`git grep`/`find` to verify file paths mentioned in the implementation plan actually exist +5. **Check for similar past fixes**: Search for related PRs and how they were implemented + +### Step 2: Validate Scores + +For EACH score dimension, evaluate whether the score matches the evidence: + +#### A) Business Importance Score Validation +- Does the score align with the issue's labels (priority/security/regression)? +- Is the milestone/roadmap impact correctly assessed? +- Are customer/contract impacts properly weighted? + +#### B) Community Excitement Score Validation +- Count actual 👍/❤️ reactions and compare against the score +- Verify comment count and unique participant count +- Check if recent activity assessment is accurate +- Verify duplicate/related issue count + +#### C) Technical Feasibility Score Validation +- **CRITICAL**: Verify that files mentioned in the plan actually exist in the repo +- Check if the proposed changes follow existing patterns (use `rg` to find similar patterns) +- Assess whether risk factors (perf/security/compat) are properly identified +- Verify testability claims by checking if test infrastructure exists for the affected module + +#### D) Requirement Clarity Score Validation +- Does the issue actually contain clear repro steps? +- Are non-functional requirements (perf/security/i18n/a11y) addressed? +- Are acceptance criteria defined or at least inferable? 
+ +### Step 3: Validate Implementation Plan + +For EACH section of the implementation plan: + +#### Problem Framing +- Is the problem correctly understood? +- Are scope boundaries reasonable? +- Is current vs expected behavior accurately described? + +#### Layers & Files +- **CRITICAL**: Do ALL referenced files/directories exist? Run `test -f <path>` or `ls <path>` for each one +- Are the file paths using correct casing and separators? +- Are all affected layers identified (UI/domain/data/infra/build)? +- Are any files missing that should be modified? + +#### Pattern Choices +- Do the suggested patterns match what the repo actually uses? +- Use `rg` to find 2-3 examples of the suggested pattern in the codebase +- If a new pattern is suggested, is the justification sound? + +#### Fundamentals +- Are performance concerns addressed for the specific module? +- Are security implications properly assessed? +- Is i18n/l10n handled (check for hardcoded strings)? +- Is accessibility considered (keyboard nav, screen readers)? + +#### Task Breakdown +- Can an AI agent actually execute each task as written? +- Are the steps in the right order (dependencies respected)? +- Are test requirements specified for each task? +- Is the human-vs-agent ownership realistic?
+ +### Step 4: Check for Red Flags + +Flag these issues if found: +- 🔴 **Ghost files**: Implementation plan references files that don't exist +- 🔴 **Wrong patterns**: Suggested approach contradicts existing codebase patterns +- 🔴 **Missing tests**: No test plan for behavior changes +- 🔴 **Score inflation**: Scores are ≥20 points higher than evidence supports +- 🔴 **Score deflation**: Scores are ≥20 points lower than evidence supports +- 🟡 **Incomplete coverage**: Missing fundamentals (security, i18n, a11y) +- 🟡 **Vague tasks**: Task breakdown has steps that are too broad to execute +- 🟡 **Missing dependencies**: Task order doesn't respect build/import dependencies + +## Output: reviewTheReview.md + +Generate the following structure: + +```markdown +# Review-Review: Issue #{{issue_number}} + +**Review Quality Score: X/100** +**Iteration: N** +**Verdict: PASS / NEEDS_IMPROVEMENT / FAIL** + +## Executive Summary + +Brief (2-3 sentences) on whether the original review is trustworthy and actionable. + +## Score Validation + +| Dimension | Original Score | Validated Score | Delta | Assessment | +|-----------|---------------|-----------------|-------|------------| +| Business Importance | X/100 | Y/100 | ±Z | ✅ Accurate / ⚠️ Inflated / ⚠️ Deflated | +| Community Excitement | X/100 | Y/100 | ±Z | ✅ / ⚠️ | +| Technical Feasibility | X/100 | Y/100 | ±Z | ✅ / ⚠️ | +| Requirement Clarity | X/100 | Y/100 | ±Z | ✅ / ⚠️ | +| Overall Priority | X/100 | Y/100 | ±Z | ✅ / ⚠️ | + +### Score Details + +For each dimension where delta ≥ 10 points: +- What evidence was missed or misinterpreted +- What the correct assessment should be +- Specific data points supporting the correction + +## Implementation Plan Validation + +### Files Verification + +| File Path | Exists? | Correct? | Notes | +|-----------|---------|----------|-------| +| `src/modules/...` | ✅/❌ | ✅/⚠️ | ... | + +### Pattern Verification + +| Suggested Pattern | Used in Repo? 
| Examples Found | Assessment | +|-------------------|---------------|----------------|------------| +| ... | ✅/❌ | `src/...`, `src/...` | ✅ Correct / ⚠️ Wrong pattern | + +### Task Breakdown Assessment + +| Task # | Executable by Agent? | Issues | Corrective Action | +|--------|---------------------|--------|-------------------| +| 1 | ✅/⚠️/❌ | ... | ... | + +## Red Flags Found + +List any 🔴 or 🟡 flags with evidence. + +## Corrective Feedback for Re-Review + +**IF quality score < 90, provide specific instructions for issue-review to fix:** + +### Scores to Adjust +- Dimension X: Change from Y to Z because [evidence] + +### Implementation Plan Corrections +- File path corrections: [list] +- Missing files to add: [list] +- Pattern corrections: [list] +- Task breakdown fixes: [list] + +### Missing Coverage +- Add section on: [topic] +- Expand analysis of: [topic] + +## Quality Score Breakdown + +| Dimension | Score | Weight | Weighted | +|-----------|-------|--------|----------| +| Score Accuracy | X/100 | 30% | X | +| Implementation Correctness | X/100 | 25% | X | +| Risk Assessment | X/100 | 15% | X | +| Completeness | X/100 | 15% | X | +| Actionability | X/100 | 15% | X | +| **Total** | | | **X/100** | +``` + +## Important Rules + +1. **Be evidence-based**: Every correction must cite specific files, lines, or data +2. **Verify file existence**: ALWAYS run `test -f` or `ls` for paths in the implementation plan +3. **Check patterns**: Use `rg` to find at least 2 examples of any suggested pattern +4. **Don't be a rubber stamp**: If the review looks perfect, still verify the top 3 most impactful claims +5. **Actionable feedback**: Every issue found must include a specific correction, not just "this is wrong" +6. 
**Score honestly**: The quality score should reflect real issues found, not just gut feeling diff --git a/.github/skills/issue-review-review/scripts/IssueReviewLib.ps1 b/.github/skills/issue-review-review/scripts/IssueReviewLib.ps1 new file mode 100644 index 000000000000..ac1dc7754363 --- /dev/null +++ b/.github/skills/issue-review-review/scripts/IssueReviewLib.ps1 @@ -0,0 +1,777 @@ +# IssueReviewLib.ps1 - Shared helpers for bulk issue review automation +# Part of the PowerToys GitHub Copilot/Claude Code issue review system + +# Resolve config directory name (.github or .claude) from this script's location +$_cfgDir = if ($PSScriptRoot -match '[\\/](\.github|\.claude)[\\/]') { $Matches[1] } else { '.github' } + +#region Console Output Helpers +function Info { param([string]$Message) Write-Host $Message -ForegroundColor Cyan } +function Warn { param([string]$Message) Write-Host $Message -ForegroundColor Yellow } +function Err { param([string]$Message) Write-Host $Message -ForegroundColor Red } +function Success { param([string]$Message) Write-Host $Message -ForegroundColor Green } +#endregion + +#region Repository Helpers +function Get-RepoRoot { + $root = git rev-parse --show-toplevel 2>$null + if (-not $root) { throw 'Not inside a git repository.' } + return (Resolve-Path $root).Path +} + +function Get-GeneratedFilesPath { + param([string]$RepoRoot) + return Join-Path $RepoRoot 'Generated Files' +} + +function Get-IssueReviewPath { + param( + [string]$RepoRoot, + [int]$IssueNumber + ) + $genFiles = Get-GeneratedFilesPath -RepoRoot $RepoRoot + return Join-Path $genFiles "issueReview/$IssueNumber" +} + +function Get-IssueTitleFromOverview { + <# + .SYNOPSIS + Extract issue title from existing overview.md file. + .DESCRIPTION + Parses the overview.md to get the issue title without requiring GitHub CLI. 
+ #> + param( + [Parameter(Mandatory)] + [string]$OverviewPath + ) + + if (-not (Test-Path $OverviewPath)) { + return $null + } + + $content = Get-Content $OverviewPath -Raw + + # Try to match title from Summary table: | **Title** | | + if ($content -match '\*\*Title\*\*\s*\|\s*([^|]+)\s*\|') { + return $Matches[1].Trim() + } + + # Try to match from header: # Issue #XXXX: <title> + if ($content -match '# Issue #\d+[:\s]+(.+)$' ) { + return $Matches[1].Trim() + } + + # Try to match: # Issue #XXXX Review: <title> + if ($content -match '# Issue #\d+ Review[:\s]+(.+)$') { + return $Matches[1].Trim() + } + + return $null +} + +function Ensure-DirectoryExists { + param([string]$Path) + if (-not (Test-Path $Path)) { + New-Item -ItemType Directory -Path $Path -Force | Out-Null + } +} +#endregion + +#region GitHub Issue Query Helpers +function Get-GitHubIssues { + <# + .SYNOPSIS + Query GitHub issues by label, state, and sort order. + .PARAMETER Labels + Comma-separated list of labels to filter by (e.g., "bug,help wanted"). + .PARAMETER State + Issue state: open, closed, or all. Default: open. + .PARAMETER Sort + Sort field: created, updated, comments, reactions. Default: created. + .PARAMETER Order + Sort order: asc or desc. Default: desc. + .PARAMETER Limit + Maximum number of issues to return. Default: 100. + .PARAMETER Repository + Repository in owner/repo format. Default: microsoft/PowerToys. 
+ #> + param( + [string]$Labels, + [ValidateSet('open', 'closed', 'all')] + [string]$State = 'open', + [ValidateSet('created', 'updated', 'comments', 'reactions')] + [string]$Sort = 'created', + [ValidateSet('asc', 'desc')] + [string]$Order = 'desc', + [int]$Limit = 100, + [string]$Repository = 'microsoft/PowerToys' + ) + + $ghArgs = @('issue', 'list', '--repo', $Repository, '--state', $State, '--limit', $Limit) + + if ($Labels) { + foreach ($label in ($Labels -split ',')) { + $ghArgs += @('--label', $label.Trim()) + } + } + + # Build JSON fields (use reactionGroups instead of reactions) + $jsonFields = 'number,title,state,labels,createdAt,updatedAt,author,reactionGroups,comments' + $ghArgs += @('--json', $jsonFields) + + Info "Querying issues: gh $($ghArgs -join ' ')" + $result = & gh @ghArgs 2>&1 + + if ($LASTEXITCODE -ne 0) { + throw "Failed to query issues: $result" + } + + $issues = $result | ConvertFrom-Json + + # Sort by reactions if requested (gh CLI doesn't support this natively) + if ($Sort -eq 'reactions') { + $issues = $issues | ForEach-Object { + # reactionGroups is an array of {content, users} - sum up user counts + $totalReactions = ($_.reactionGroups | ForEach-Object { $_.users.totalCount } | Measure-Object -Sum).Sum + if (-not $totalReactions) { $totalReactions = 0 } + $_ | Add-Member -NotePropertyName 'totalReactions' -NotePropertyValue $totalReactions -PassThru + } + if ($Order -eq 'desc') { + $issues = $issues | Sort-Object -Property totalReactions -Descending + } else { + $issues = $issues | Sort-Object -Property totalReactions + } + } + + return $issues +} + +function Get-IssueDetails { + <# + .SYNOPSIS + Get detailed information about a specific issue. 
+ #> + param( + [Parameter(Mandatory)] + [int]$IssueNumber, + [string]$Repository = 'microsoft/PowerToys' + ) + + $jsonFields = 'number,title,body,state,labels,createdAt,updatedAt,author,reactions,comments,linkedPullRequests,milestone' + $result = gh issue view $IssueNumber --repo $Repository --json $jsonFields 2>&1 + + if ($LASTEXITCODE -ne 0) { + throw "Failed to get issue #$IssueNumber`: $result" + } + + return $result | ConvertFrom-Json +} +#endregion + +#region CLI Detection and Execution +function Get-AvailableCLI { + <# + .SYNOPSIS + Detect which AI CLI is available: GitHub Copilot CLI or Claude Code. + .OUTPUTS + Returns object with: Name, Command, PromptArg + #> + + # Check for standalone GitHub Copilot CLI (copilot command) + $copilotCLI = Get-Command 'copilot' -ErrorAction SilentlyContinue + if ($copilotCLI) { + return @{ + Name = 'GitHub Copilot CLI' + Command = 'copilot' + Args = @('-p') # Non-interactive prompt mode + Type = 'copilot' + } + } + + # Check for Claude Code CLI + $claudeCode = Get-Command 'claude' -ErrorAction SilentlyContinue + if ($claudeCode) { + return @{ + Name = 'Claude Code CLI' + Command = 'claude' + Args = @() + Type = 'claude' + } + } + + # Check for GitHub Copilot CLI via gh extension + $ghCopilot = Get-Command 'gh' -ErrorAction SilentlyContinue + if ($ghCopilot) { + $copilotCheck = gh extension list 2>&1 | Select-String -Pattern 'copilot' + if ($copilotCheck) { + return @{ + Name = 'GitHub Copilot CLI (gh extension)' + Command = 'gh' + Args = @('copilot', 'suggest') + Type = 'gh-copilot' + } + } + } + + # Check for VS Code CLI with Copilot + $code = Get-Command 'code' -ErrorAction SilentlyContinue + if ($code) { + return @{ + Name = 'VS Code (Copilot Chat)' + Command = 'code' + Args = @() + Type = 'vscode' + } + } + + return $null +} + +function Invoke-AIReview { + <# + .SYNOPSIS + Invoke AI CLI to review a single issue. + .PARAMETER IssueNumber + The issue number to review. + .PARAMETER RepoRoot + Repository root path. 
+ .PARAMETER CLIType + CLI type: 'claude', 'copilot', 'gh-copilot', or 'vscode'. + .PARAMETER WorkingDirectory + Working directory for the CLI command. + .PARAMETER FeedbackContext + Optional feedback from review-the-review to incorporate into the re-review. + .PARAMETER Model + Optional model override for Copilot CLI (e.g., claude-sonnet-4). + #> + param( + [Parameter(Mandatory)] + [int]$IssueNumber, + [Parameter(Mandatory)] + [string]$RepoRoot, + [ValidateSet('claude', 'copilot', 'gh-copilot', 'vscode')] + [string]$CLIType = 'copilot', + [string]$WorkingDirectory, + [string]$FeedbackContext, + [string]$Model + ) + + if (-not $WorkingDirectory) { + $WorkingDirectory = $RepoRoot + } + + $promptFile = Join-Path $RepoRoot "$_cfgDir/prompts/review-issue.prompt.md" + if (-not (Test-Path $promptFile)) { + throw "Prompt file not found: $promptFile" + } + + # Prepare the prompt with issue number substitution + $promptContent = Get-Content $promptFile -Raw + $promptContent = $promptContent -replace '\{\{issue_number\}\}', $IssueNumber + + # Create temp prompt file + $tempPromptDir = Join-Path $env:TEMP "issue-review-$IssueNumber" + Ensure-DirectoryExists -Path $tempPromptDir + $tempPromptFile = Join-Path $tempPromptDir "prompt.md" + $promptContent | Set-Content -Path $tempPromptFile -Encoding UTF8 + + # Build the prompt text for CLI + $promptText = "Review GitHub issue #$IssueNumber following the template in $_cfgDir/prompts/review-issue.prompt.md. Generate overview.md and implementation-plan.md in 'Generated Files/issueReview/$IssueNumber/'" + + # Inject feedback from review-the-review if available + if ($FeedbackContext) { + $promptText += @" + +IMPORTANT: This is a RE-REVIEW. A previous review was rejected by the quality gate. You MUST address ALL the corrective feedback below. Read the feedback carefully and fix every issue identified. + +=== CORRECTIVE FEEDBACK FROM REVIEW-THE-REVIEW === +$FeedbackContext +=== END FEEDBACK === + +Pay special attention to: +1. 
Score corrections — adjust scores to match the evidence cited in the feedback +2. File path corrections — verify all paths exist before including them +3. Pattern corrections — use the patterns identified as correct in the feedback +4. Missing coverage — add any sections flagged as missing +5. Task breakdown fixes — make tasks specific and executable +"@ + } + + switch ($CLIType) { + 'copilot' { + # GitHub Copilot CLI (standalone copilot command) + # Use --yolo for full permissions (--allow-all-tools --allow-all-paths --allow-all-urls) + # Use -s (silent) for cleaner output in batch mode + # Enable ALL GitHub MCP tools (issues, PRs, repos, etc.) + github-artifacts for images/attachments + # MCP config path relative to repo root for github-artifacts tools + $mcpConfig = "@$_cfgDir/skills/issue-review/references/mcp-config.json" + $args = @( + '--additional-mcp-config', $mcpConfig, # Load github-artifacts MCP for image/attachment analysis + '-p', $promptText, # Non-interactive prompt mode (exits after completion) + '--yolo', # Enable all permissions for automated execution + '-s', # Silent mode - output only agent response + '--enable-all-github-mcp-tools', # Enable ALL GitHub MCP tools (issues, PRs, search, etc.) 
+ '--allow-tool', 'github-artifacts', # Also enable our custom github-artifacts MCP + '--agent', 'ReviewIssue' + ) + if ($Model) { + $args += @('--model', $Model) + } + + return @{ + Command = 'copilot' + Arguments = $args + WorkingDirectory = $WorkingDirectory + IssueNumber = $IssueNumber + } + } + 'claude' { + # Claude Code CLI + $args = @( + '--print', # Non-interactive mode + '--dangerously-skip-permissions', + '--agent', 'ReviewIssue', + '--prompt', $promptText + ) + + return @{ + Command = 'claude' + Arguments = $args + WorkingDirectory = $WorkingDirectory + IssueNumber = $IssueNumber + } + } + 'gh-copilot' { + # GitHub Copilot CLI via gh + $args = @( + 'copilot', 'suggest', + '-t', 'shell', + "Review GitHub issue #$IssueNumber and generate analysis files" + ) + + return @{ + Command = 'gh' + Arguments = $args + WorkingDirectory = $WorkingDirectory + IssueNumber = $IssueNumber + } + } + 'vscode' { + # VS Code with Copilot - open with prompt + $args = @( + '--new-window', + $WorkingDirectory, + '--goto', $tempPromptFile + ) + + return @{ + Command = 'code' + Arguments = $args + WorkingDirectory = $WorkingDirectory + IssueNumber = $IssueNumber + } + } + } +} +#endregion + +#region Parallel Job Management +function Start-ParallelIssueReviews { + <# + .SYNOPSIS + Start parallel issue reviews with throttling. + .PARAMETER Issues + Array of issue objects to review. + .PARAMETER MaxConcurrent + Maximum number of parallel jobs. Default: 20. + .PARAMETER CLIType + CLI type to use for reviews. + .PARAMETER RepoRoot + Repository root path. + .PARAMETER TimeoutMinutes + Timeout per issue in minutes. Default: 30. + .PARAMETER MaxRetryCount + Maximum number of retries for failed issues. Default: 2. + .PARAMETER RetryDelaySeconds + Delay between retries in seconds. Default: 10. 
+ #> + param( + [Parameter(Mandatory)] + [array]$Issues, + [int]$MaxConcurrent = 20, + [ValidateSet('claude', 'copilot', 'gh-copilot', 'vscode')] + [string]$CLIType = 'copilot', + [Parameter(Mandatory)] + [string]$RepoRoot, + [int]$TimeoutMinutes = 30, + [int]$MaxRetryCount = 2, + [int]$RetryDelaySeconds = 10, + [string]$FeedbackContext, + [string]$Model + ) + + $totalIssues = $Issues.Count + $completed = 0 + $failed = @() + $succeeded = @() + $retryQueue = [System.Collections.Queue]::new() + + Info "Starting parallel review of $totalIssues issues (max $MaxConcurrent concurrent, $MaxRetryCount retries)" + + # Use PowerShell jobs for parallelization + $jobs = @() + $issueQueue = [System.Collections.Queue]::new($Issues) + + while ($issueQueue.Count -gt 0 -or $jobs.Count -gt 0 -or $retryQueue.Count -gt 0) { + # Process retry queue when main queue is empty + if ($issueQueue.Count -eq 0 -and $retryQueue.Count -gt 0 -and $jobs.Count -lt $MaxConcurrent) { + $retryItem = $retryQueue.Dequeue() + Warn "🔄 Retrying issue #$($retryItem.IssueNumber) (attempt $($retryItem.Attempt + 1)/$($MaxRetryCount + 1))" + Start-Sleep -Seconds $RetryDelaySeconds + $issueQueue.Enqueue(@{ number = $retryItem.IssueNumber; _retryAttempt = $retryItem.Attempt + 1 }) + } + + # Start new jobs up to MaxParallel + while ($jobs.Count -lt $MaxConcurrent -and $issueQueue.Count -gt 0) { + $issue = $issueQueue.Dequeue() + $issueNum = $issue.number + $retryAttempt = if ($issue._retryAttempt) { $issue._retryAttempt } else { 0 } + + $attemptInfo = if ($retryAttempt -gt 0) { " (retry $retryAttempt)" } else { "" } + Info "Starting review for issue #$issueNum$attemptInfo ($($totalIssues - $issueQueue.Count)/$totalIssues)" + + $job = Start-Job -Name "Issue-$issueNum" -ScriptBlock { + param($IssueNumber, $RepoRoot, $CLIType, $FeedbackCtx, $ModelOverride) + + Set-Location $RepoRoot + + # Import the library in the job context + . 
"$RepoRoot/.github/review-tools/IssueReviewLib.ps1" + + try { + $reviewParams = @{ + IssueNumber = $IssueNumber + RepoRoot = $RepoRoot + CLIType = $CLIType + } + if ($FeedbackCtx) { + $reviewParams.FeedbackContext = $FeedbackCtx + } + if ($ModelOverride) { + $reviewParams.Model = $ModelOverride + } + $reviewCmd = Invoke-AIReview @reviewParams + + # Execute the command using invocation operator (works for .ps1 scripts and executables) + Set-Location $reviewCmd.WorkingDirectory + $argList = $reviewCmd.Arguments + + # Capture both stdout and stderr for better error reporting + $output = & $reviewCmd.Command @argList 2>&1 + $exitCode = $LASTEXITCODE + + # Get last 20 lines of output for error context + $outputLines = $output | Out-String + $lastLines = ($outputLines -split "`n" | Select-Object -Last 20) -join "`n" + + # Check if output files were created (success indicator) + $overviewPath = Join-Path $RepoRoot "Generated Files/issueReview/$IssueNumber/overview.md" + $implPlanPath = Join-Path $RepoRoot "Generated Files/issueReview/$IssueNumber/implementation-plan.md" + $filesCreated = (Test-Path $overviewPath) -and (Test-Path $implPlanPath) + + return @{ + IssueNumber = $IssueNumber + Success = ($exitCode -eq 0) -or $filesCreated + ExitCode = $exitCode + FilesCreated = $filesCreated + Output = $lastLines + Error = if ($exitCode -ne 0 -and -not $filesCreated) { "Exit code: $exitCode`n$lastLines" } else { $null } + } + } + catch { + return @{ + IssueNumber = $IssueNumber + Success = $false + ExitCode = -1 + FilesCreated = $false + Output = $null + Error = $_.Exception.Message + } + } + } -ArgumentList $issueNum, $RepoRoot, $CLIType, $FeedbackContext, $Model + + $jobs += @{ + Job = $job + IssueNumber = $issueNum + StartTime = Get-Date + RetryAttempt = $retryAttempt + } + } + + # Check for completed jobs + $completedJobs = @() + foreach ($jobInfo in $jobs) { + $job = $jobInfo.Job + $issueNum = $jobInfo.IssueNumber + $startTime = $jobInfo.StartTime + $retryAttempt = 
$jobInfo.RetryAttempt + + if ($job.State -eq 'Completed') { + $result = Receive-Job -Job $job + Remove-Job -Job $job -Force + + if ($result.Success) { + Success "✓ Issue #$issueNum completed (files created: $($result.FilesCreated))" + $succeeded += $issueNum + $completed++ + } else { + # Check if we should retry + if ($retryAttempt -lt $MaxRetryCount) { + $errorPreview = if ($result.Error) { ($result.Error -split "`n" | Select-Object -First 3) -join " | " } else { "Unknown error" } + Warn "⚠ Issue #$issueNum failed (will retry): $errorPreview" + $retryQueue.Enqueue(@{ IssueNumber = $issueNum; Attempt = $retryAttempt; LastError = $result.Error }) + } else { + $errorMsg = if ($result.Error) { $result.Error } else { "Exit code: $($result.ExitCode)" } + Err "✗ Issue #$issueNum failed after $($retryAttempt + 1) attempts:" + Err " Error: $errorMsg" + $failed += @{ IssueNumber = $issueNum; Error = $errorMsg; Attempts = $retryAttempt + 1 } + $completed++ + } + } + $completedJobs += $jobInfo + } + elseif ($job.State -eq 'Failed') { + $jobError = $job.ChildJobs[0].JobStateInfo.Reason.Message + Remove-Job -Job $job -Force + + if ($retryAttempt -lt $MaxRetryCount) { + Warn "⚠ Issue #$issueNum job crashed (will retry): $jobError" + $retryQueue.Enqueue(@{ IssueNumber = $issueNum; Attempt = $retryAttempt; LastError = $jobError }) + } else { + Err "✗ Issue #$issueNum job failed after $($retryAttempt + 1) attempts: $jobError" + $failed += @{ IssueNumber = $issueNum; Error = $jobError; Attempts = $retryAttempt + 1 } + $completed++ + } + $completedJobs += $jobInfo + } + elseif ((Get-Date) - $startTime -gt [TimeSpan]::FromMinutes($TimeoutMinutes)) { + Stop-Job -Job $job -ErrorAction SilentlyContinue + Remove-Job -Job $job -Force + + if ($retryAttempt -lt $MaxRetryCount) { + Warn "⏱ Issue #$issueNum timed out after $TimeoutMinutes min (will retry)" + $retryQueue.Enqueue(@{ IssueNumber = $issueNum; Attempt = $retryAttempt; LastError = "Timeout after $TimeoutMinutes minutes" }) + } else 
{ + Err "⏱ Issue #$issueNum timed out after $($retryAttempt + 1) attempts" + $failed += @{ IssueNumber = $issueNum; Error = "Timeout after $TimeoutMinutes minutes"; Attempts = $retryAttempt + 1 } + $completed++ + } + $completedJobs += $jobInfo + } + } + + # Remove completed jobs from active list + $jobs = $jobs | Where-Object { $_ -notin $completedJobs } + + # Brief pause to avoid tight loop + if ($jobs.Count -gt 0) { + Start-Sleep -Seconds 2 + } + } + + # Extract just issue numbers for the failed list + $failedNumbers = $failed | ForEach-Object { $_.IssueNumber } + + return @{ + Total = $totalIssues + Succeeded = $succeeded + Failed = $failedNumbers + FailedDetails = $failed + } +} +#endregion + +#region Issue Review Results Helpers +function Get-IssueReviewResult { + <# + .SYNOPSIS + Check if an issue has been reviewed and get its results. + #> + param( + [Parameter(Mandatory)] + [int]$IssueNumber, + [Parameter(Mandatory)] + [string]$RepoRoot + ) + + $reviewPath = Get-IssueReviewPath -RepoRoot $RepoRoot -IssueNumber $IssueNumber + + $result = @{ + IssueNumber = $IssueNumber + Path = $reviewPath + HasOverview = $false + HasImplementationPlan = $false + OverviewPath = $null + ImplementationPlanPath = $null + } + + $overviewPath = Join-Path $reviewPath 'overview.md' + $implPlanPath = Join-Path $reviewPath 'implementation-plan.md' + + if (Test-Path $overviewPath) { + $result.HasOverview = $true + $result.OverviewPath = $overviewPath + } + + if (Test-Path $implPlanPath) { + $result.HasImplementationPlan = $true + $result.ImplementationPlanPath = $implPlanPath + } + + return $result +} + +function Get-HighConfidenceIssues { + <# + .SYNOPSIS + Find issues with high confidence for auto-fix based on review results. + .PARAMETER RepoRoot + Repository root path. + .PARAMETER MinFeasibilityScore + Minimum Technical Feasibility score (0-100). Default: 70. + .PARAMETER MinClarityScore + Minimum Requirement Clarity score (0-100). Default: 60. 
+ .PARAMETER MaxEffortDays + Maximum effort estimate in days. Default: 2 (S = Small). + .PARAMETER FilterIssueNumbers + Optional array of issue numbers to filter to. If specified, only these issues are considered. + #> + param( + [Parameter(Mandatory)] + [string]$RepoRoot, + [int]$MinFeasibilityScore = 70, + [int]$MinClarityScore = 60, + [int]$MaxEffortDays = 2, + [int[]]$FilterIssueNumbers = @() + ) + + $genFiles = Get-GeneratedFilesPath -RepoRoot $RepoRoot + $reviewDir = Join-Path $genFiles 'issueReview' + + if (-not (Test-Path $reviewDir)) { + return @() + } + + $highConfidence = @() + + Get-ChildItem -Path $reviewDir -Directory | ForEach-Object { + $issueNum = [int]$_.Name + + # Skip if filter is specified and this issue is not in the filter list + if ($FilterIssueNumbers.Count -gt 0 -and $issueNum -notin $FilterIssueNumbers) { + return + } + + $overviewPath = Join-Path $_.FullName 'overview.md' + $implPlanPath = Join-Path $_.FullName 'implementation-plan.md' + + if (-not (Test-Path $overviewPath) -or -not (Test-Path $implPlanPath)) { + return + } + + # Parse overview.md to extract scores + $overview = Get-Content $overviewPath -Raw + + # Extract scores using regex (looking for score table or inline scores) + $feasibility = 0 + $clarity = 0 + $effortDays = 999 + + # Try to extract from At-a-Glance Score Table + if ($overview -match 'Technical Feasibility[^\d]*(\d+)/100') { + $feasibility = [int]$Matches[1] + } + if ($overview -match 'Requirement Clarity[^\d]*(\d+)/100') { + $clarity = [int]$Matches[1] + } + # Match effort formats like "0.5-1 day", "1-2 days", "2-3 days" - extract the upper bound + if ($overview -match 'Effort Estimate[^|]*\|\s*[\d.]+(?:-(\d+))?\s*days?') { + if ($Matches[1]) { + $effortDays = [int]$Matches[1] + } elseif ($overview -match 'Effort Estimate[^|]*\|\s*(\d+)\s*days?') { + $effortDays = [int]$Matches[1] + } + } + # Also check for XS/S sizing in the table (e.g., "| XS |" or "| S |" or "(XS)" or "(S)") + if ($overview -match 'Effort 
Estimate[^|]*\|[^|]*\|\s*(XS|S)\b') { + # XS = 1 day, S = 2 days + if ($Matches[1] -eq 'XS') { + $effortDays = 1 + } else { + $effortDays = 2 + } + } elseif ($overview -match 'Effort Estimate[^|]*\|[^|]*\(XS\)') { + $effortDays = 1 + } elseif ($overview -match 'Effort Estimate[^|]*\|[^|]*\(S\)') { + $effortDays = 2 + } + + if ($feasibility -ge $MinFeasibilityScore -and + $clarity -ge $MinClarityScore -and + $effortDays -le $MaxEffortDays) { + + $highConfidence += @{ + IssueNumber = $issueNum + FeasibilityScore = $feasibility + ClarityScore = $clarity + EffortDays = $effortDays + OverviewPath = $overviewPath + ImplementationPlanPath = $implPlanPath + } + } + } + + return $highConfidence | Sort-Object -Property FeasibilityScore -Descending +} +#endregion + +#region Worktree Integration +function Copy-IssueReviewToWorktree { + <# + .SYNOPSIS + Copy the Generated Files for an issue to a worktree. + .PARAMETER IssueNumber + The issue number. + .PARAMETER SourceRepoRoot + Source repository root (main repo). + .PARAMETER WorktreePath + Destination worktree path. + #> + param( + [Parameter(Mandatory)] + [int]$IssueNumber, + [Parameter(Mandatory)] + [string]$SourceRepoRoot, + [Parameter(Mandatory)] + [string]$WorktreePath + ) + + $sourceReviewPath = Get-IssueReviewPath -RepoRoot $SourceRepoRoot -IssueNumber $IssueNumber + $destReviewPath = Get-IssueReviewPath -RepoRoot $WorktreePath -IssueNumber $IssueNumber + + if (-not (Test-Path $sourceReviewPath)) { + throw "Issue review files not found at: $sourceReviewPath" + } + + Ensure-DirectoryExists -Path $destReviewPath + + # Copy all files from the issue review folder + Copy-Item -Path "$sourceReviewPath\*" -Destination $destReviewPath -Recurse -Force + + Info "Copied issue review files to: $destReviewPath" + + return $destReviewPath +} +#endregion + +# Note: This script is dot-sourced, not imported as a module. +# All functions above are available after: . 
"path/to/IssueReviewLib.ps1" diff --git a/.github/skills/issue-review-review/scripts/Start-FeedbackLoop.ps1 b/.github/skills/issue-review-review/scripts/Start-FeedbackLoop.ps1 new file mode 100644 index 000000000000..84aaf2106cb8 --- /dev/null +++ b/.github/skills/issue-review-review/scripts/Start-FeedbackLoop.ps1 @@ -0,0 +1,298 @@ +<# +.SYNOPSIS + Orchestrate the feedback loop: re-run issue-review with corrections, then re-review. + +.DESCRIPTION + For each issue whose review-review score is below the threshold: + 1. Re-run issue-review with the corrective feedback from reviewTheReview.md + 2. Re-run review-review on the updated review files + 3. Repeat up to MaxIterations times or until the score passes + +.PARAMETER ThrottleLimit + Maximum parallel tasks. Default: 3. + +.PARAMETER QualityThreshold + Score threshold for PASS. Default: 90. + +.PARAMETER MaxIterations + Maximum feedback loop iterations per issue. Default: 3. + +.PARAMETER CLIType + AI CLI type (copilot/claude). Default: copilot. + +.PARAMETER Model + Copilot CLI model override (e.g., claude-sonnet-4). + +.PARAMETER IssueNumbers + Optional: specific issue numbers to process. If omitted, processes all issues with needsReReview=true. + +.PARAMETER Force + Skip confirmation prompts. 
+ +.EXAMPLE + ./Start-FeedbackLoop.ps1 -CLIType copilot -Model claude-sonnet-4 -ThrottleLimit 3 -Force + +.EXAMPLE + # Process specific issues only + ./Start-FeedbackLoop.ps1 -IssueNumbers @(1929, 1934) -CLIType copilot -Model claude-sonnet-4 -Force +#> +[CmdletBinding()] +param( + [int]$ThrottleLimit = 3, + + [int]$QualityThreshold = 90, + + [int]$MaxIterations = 3, + + [ValidateSet('copilot', 'claude')] + [string]$CLIType = 'copilot', + + [string]$Model, + + [int[]]$IssueNumbers, + + [switch]$Force +) + +$ErrorActionPreference = 'Continue' + +$repoRoot = Resolve-Path (Join-Path $PSScriptRoot '..\..\..\..') + +# Resolve config directory name (.github or .claude) from script location +$_cfgDir = if ($PSScriptRoot -match '[\\/](\.github|\.claude)[\\/]') { $Matches[1] } else { '.github' } +$genFiles = Join-Path $repoRoot 'Generated Files' +$reviewReviewDir = Join-Path $genFiles 'issueReviewReview' +$issueReviewDir = Join-Path $genFiles 'issueReview' + +$bulkReviewScript = Join-Path $repoRoot "$_cfgDir\skills\issue-review\scripts\Start-BulkIssueReview.ps1" +$reviewReviewScript = Join-Path $repoRoot "$_cfgDir\skills\issue-review-review\scripts\Start-IssueReviewReview.ps1" + +Write-Host "=== FEEDBACK LOOP ORCHESTRATOR ===" -ForegroundColor Cyan +Write-Host "Repository root: $repoRoot" +Write-Host "Quality threshold: $QualityThreshold" +Write-Host "Max iterations: $MaxIterations" +Write-Host "Throttle limit: $ThrottleLimit" +Write-Host "CLI: $CLIType $(if ($Model) { "(model: $Model)" })" +Write-Host "" + +# ------------------------------------------------------------------ +# Step 1: Identify issues that need re-review +# ------------------------------------------------------------------ +if ($IssueNumbers -and $IssueNumbers.Count -gt 0) { + # Use explicit list + $needsWork = $IssueNumbers | ForEach-Object { + $signalPath = Join-Path $reviewReviewDir "$_\.signal" + if (Test-Path $signalPath) { + $signal = Get-Content $signalPath -Raw | ConvertFrom-Json + 
[PSCustomObject]@{ + IssueNumber = $_ + CurrentScore = [int]$signal.qualityScore + Iteration = [int]$signal.iteration + FeedbackFile = Join-Path $reviewReviewDir "$_\reviewTheReview.md" + } + } + else { + Write-Host " Warning: No signal for issue #$_ — skipping" -ForegroundColor Yellow + } + } | Where-Object { $_ } +} +else { + # Auto-discover from signals with needsReReview = true + $needsWork = Get-ChildItem $reviewReviewDir -Directory -ErrorAction SilentlyContinue | + Where-Object { Test-Path (Join-Path $_.FullName '.signal') } | + ForEach-Object { + $signal = Get-Content (Join-Path $_.FullName '.signal') -Raw | ConvertFrom-Json + if ($signal.needsReReview -eq $true -and [int]$signal.iteration -lt $MaxIterations) { + [PSCustomObject]@{ + IssueNumber = [int]$signal.issueNumber + CurrentScore = [int]$signal.qualityScore + Iteration = [int]$signal.iteration + FeedbackFile = Join-Path $_.FullName 'reviewTheReview.md' + } + } + } | Sort-Object IssueNumber +} + +if (-not $needsWork -or $needsWork.Count -eq 0) { + Write-Host "No issues need re-review. All passed or reached max iterations." -ForegroundColor Green + return +} + +Write-Host "Issues needing feedback loop: $($needsWork.Count)" -ForegroundColor Yellow +Write-Host ("-" * 70) +$needsWork | Format-Table IssueNumber, CurrentScore, Iteration -AutoSize | Out-String | Write-Host +Write-Host ("-" * 70) + +if (-not $Force) { + $confirm = Read-Host "Proceed with feedback loop for $($needsWork.Count) issues? (y/N)" + if ($confirm -notmatch '^[yY]') { + Write-Host "Cancelled." 
+ return + } +} + +# ------------------------------------------------------------------ +# Step 2: Run feedback loop in parallel +# ------------------------------------------------------------------ +$startTime = Get-Date + +$results = $needsWork | ForEach-Object -Parallel { + $item = $PSItem + $repoRoot = $using:repoRoot + $bulkScript = $using:bulkReviewScript + $reviewScript = $using:reviewReviewScript + $cliType = $using:CLIType + $model = $using:Model + $qualityThreshold = $using:QualityThreshold + $maxIter = $using:MaxIterations + + Set-Location $repoRoot + + $issueNum = $item.IssueNumber + $currentScore = $item.CurrentScore + $currentIter = $item.Iteration + $feedbackFile = $item.FeedbackFile + + Write-Host "[#$issueNum] Starting feedback loop (current score: $currentScore, iteration: $currentIter)" -ForegroundColor Cyan + + # Phase A: Re-run issue-review with corrective feedback + Write-Host "[#$issueNum] Phase A: Re-running issue-review with feedback..." -ForegroundColor Yellow + $bulkParams = @{ + IssueNumber = $issueNum + CLIType = $cliType + Force = $true + } + if ($model) { $bulkParams.Model = $model } + if (Test-Path $feedbackFile) { + $bulkParams.FeedbackFile = $feedbackFile + } + + try { + & $bulkScript @bulkParams 2>&1 | ForEach-Object { Write-Host "[#$issueNum] $_" } + } + catch { + Write-Host "[#$issueNum] Phase A error: $($_.Exception.Message)" -ForegroundColor Red + return [PSCustomObject]@{ + IssueNumber = $issueNum + OldScore = $currentScore + NewScore = 0 + Iteration = $currentIter + Status = 'FAILED_REVIEW' + Error = $_.Exception.Message + } + } + + # Phase B: Re-run review-review on the updated files + Write-Host "[#$issueNum] Phase B: Re-running review-review..." 
-ForegroundColor Yellow + $rrParams = @{ + IssueNumber = $issueNum + CLIType = $cliType + Force = $true + } + if ($model) { $rrParams.Model = $model } + + try { + & $reviewScript @rrParams 2>&1 | ForEach-Object { Write-Host "[#$issueNum] $_" } + } + catch { + Write-Host "[#$issueNum] Phase B error: $($_.Exception.Message)" -ForegroundColor Red + return [PSCustomObject]@{ + IssueNumber = $issueNum + OldScore = $currentScore + NewScore = 0 + Iteration = $currentIter + 1 + Status = 'FAILED_REVIEW_REVIEW' + Error = $_.Exception.Message + } + } + + # Read updated signal + $signalPath = Join-Path $using:reviewReviewDir "$issueNum\.signal" + if (Test-Path $signalPath) { + $newSignal = Get-Content $signalPath -Raw | ConvertFrom-Json + $newScore = [int]$newSignal.qualityScore + $newIter = [int]$newSignal.iteration + $verdict = $newSignal.verdict + + $status = if ($newScore -ge $qualityThreshold) { 'IMPROVED_TO_PASS' } + elseif ($newScore -gt $currentScore) { 'IMPROVED' } + elseif ($newScore -eq $currentScore) { 'NO_CHANGE' } + else { 'REGRESSED' } + + Write-Host "[#$issueNum] Done: $currentScore → $newScore ($status)" -ForegroundColor $( + if ($status -eq 'IMPROVED_TO_PASS') { 'Green' } + elseif ($status -eq 'IMPROVED') { 'Yellow' } + else { 'Red' } + ) + + [PSCustomObject]@{ + IssueNumber = $issueNum + OldScore = $currentScore + NewScore = $newScore + Iteration = $newIter + Status = $status + Verdict = $verdict + } + } + else { + [PSCustomObject]@{ + IssueNumber = $issueNum + OldScore = $currentScore + NewScore = 0 + Iteration = $currentIter + 1 + Status = 'NO_SIGNAL' + Error = 'No signal file after review-review' + } + } +} -ThrottleLimit $ThrottleLimit + +$duration = (Get-Date) - $startTime + +# ------------------------------------------------------------------ +# Step 3: Summary +# ------------------------------------------------------------------ +Write-Host "" +Write-Host ("=" * 70) -ForegroundColor Cyan +Write-Host " FEEDBACK LOOP SUMMARY" -ForegroundColor Cyan 
+Write-Host ("=" * 70) -ForegroundColor Cyan + +$improved = @($results | Where-Object Status -eq 'IMPROVED_TO_PASS') +$partial = @($results | Where-Object Status -eq 'IMPROVED') +$noChange = @($results | Where-Object Status -eq 'NO_CHANGE') +$regressed = @($results | Where-Object Status -eq 'REGRESSED') +$errors = @($results | Where-Object { $_.Status -like 'FAILED*' -or $_.Status -eq 'NO_SIGNAL' }) + +Write-Host "Total processed: $($results.Count)" +Write-Host "Improved to PASS: $($improved.Count)" -ForegroundColor Green +Write-Host "Improved (below): $($partial.Count)" -ForegroundColor Yellow +Write-Host "No change: $($noChange.Count)" -ForegroundColor DarkYellow +Write-Host "Regressed: $($regressed.Count)" -ForegroundColor Red +Write-Host "Errors: $($errors.Count)" -ForegroundColor Red +Write-Host "Duration: $($duration.ToString('hh\:mm\:ss'))" +Write-Host ("=" * 70) -ForegroundColor Cyan + +# Show details +if ($results.Count -gt 0) { + Write-Host "" + Write-Host "Details:" -ForegroundColor White + $results | Sort-Object NewScore -Descending | Format-Table IssueNumber, OldScore, NewScore, Status, Iteration -AutoSize | Out-String | Write-Host +} + +# Count remaining issues that still need work +$stillNeedsWork = Get-ChildItem $reviewReviewDir -Directory -ErrorAction SilentlyContinue | + Where-Object { Test-Path (Join-Path $_.FullName '.signal') } | + ForEach-Object { + $signal = Get-Content (Join-Path $_.FullName '.signal') -Raw | ConvertFrom-Json + if ($signal.needsReReview -eq $true -and [int]$signal.iteration -lt $MaxIterations) { $signal } + } + +if ($stillNeedsWork.Count -gt 0) { + Write-Host "`nStill needs improvement: $($stillNeedsWork.Count) issues" -ForegroundColor Yellow + Write-Host "Run this script again for another iteration." -ForegroundColor Yellow +} +else { + Write-Host "`nAll issues have either passed or reached max iterations!" 
-ForegroundColor Green +} + +# Return results for pipeline +return $results diff --git a/.github/skills/issue-review-review/scripts/Start-IssueReviewReview.ps1 b/.github/skills/issue-review-review/scripts/Start-IssueReviewReview.ps1 new file mode 100644 index 000000000000..9182e3230afe --- /dev/null +++ b/.github/skills/issue-review-review/scripts/Start-IssueReviewReview.ps1 @@ -0,0 +1,327 @@ +<# +.SYNOPSIS + Meta-review of issue-review outputs to validate scoring and implementation plan quality. + +.DESCRIPTION + Reads the existing overview.md and implementation-plan.md from issue-review, + cross-checks scores against evidence, validates file paths and patterns, + and produces a reviewTheReview.md with a quality score (0-100). + + If the quality score is < 90, the signal file indicates that issue-review + should re-run with the feedback. + +.PARAMETER IssueNumber + GitHub issue number whose review to validate. + +.PARAMETER CLIType + AI CLI to use: copilot or claude. Default: copilot. + +.PARAMETER Model + Copilot CLI model to use (e.g., gpt-5.2-codex). + +.PARAMETER Force + Skip confirmation prompts. + +.PARAMETER DryRun + Show what would be done without executing. + +.EXAMPLE + ./Start-IssueReviewReview.ps1 -IssueNumber 44044 + +.EXAMPLE + ./Start-IssueReviewReview.ps1 -IssueNumber 44044 -CLIType copilot -Model gpt-5.2-codex -Force +#> + +[CmdletBinding()] +param( + [Parameter(Mandatory)] + [int]$IssueNumber, + + [ValidateSet('copilot', 'claude')] + [string]$CLIType = 'copilot', + + [string]$Model, + + [switch]$Force, + + [switch]$DryRun, + + [switch]$Help +) + +$scriptDir = Split-Path -Parent $MyInvocation.MyCommand.Path +. 
(Join-Path $scriptDir 'IssueReviewLib.ps1') + +if ($Help) { + Get-Help $MyInvocation.MyCommand.Path -Full + return +} + +#region Main +try { + $repoRoot = Get-RepoRoot + $genFiles = Get-GeneratedFilesPath -RepoRoot $repoRoot + Info "Repository root: $repoRoot" + + #region Validate prerequisites + $reviewDir = Join-Path $genFiles "issueReview/$IssueNumber" + $overviewPath = Join-Path $reviewDir 'overview.md' + $implPlanPath = Join-Path $reviewDir 'implementation-plan.md' + + if (-not (Test-Path $overviewPath)) { + throw "overview.md not found for issue #$IssueNumber at: $overviewPath. Run issue-review first." + } + if (-not (Test-Path $implPlanPath)) { + throw "implementation-plan.md not found for issue #$IssueNumber at: $implPlanPath. Run issue-review first." + } + + Info "Found review files for issue #$IssueNumber" + Info " Overview: $overviewPath" + Info " Implementation plan: $implPlanPath" + #endregion + + #region Determine iteration + $outputDir = Join-Path $genFiles "issueReviewReview/$IssueNumber" + Ensure-DirectoryExists -Path $outputDir + + $existingSignalPath = Join-Path $outputDir '.signal' + $iteration = 1 + if (Test-Path $existingSignalPath) { + try { + $existingSignal = Get-Content $existingSignalPath -Raw | ConvertFrom-Json + $iteration = ([int]$existingSignal.iteration) + 1 + Info "Previous review-review found (iteration $($existingSignal.iteration), score: $($existingSignal.qualityScore))" + + # Archive previous output + $archiveDir = Join-Path $outputDir "iteration-$($existingSignal.iteration)" + Ensure-DirectoryExists -Path $archiveDir + $prevReviewPath = Join-Path $outputDir 'reviewTheReview.md' + if (Test-Path $prevReviewPath) { + Copy-Item $prevReviewPath (Join-Path $archiveDir 'reviewTheReview.md') -Force + Info "Archived previous review to: $archiveDir" + } + } + catch { + Warn "Could not parse existing signal, starting fresh" + } + } + + Info "Starting review-review iteration $iteration for issue #$IssueNumber" + #endregion + + if ($DryRun) 
{ + Warn "Dry run mode - would review-review issue #$IssueNumber (iteration $iteration)" + return + } + + if (-not $Force) { + $confirm = Read-Host "Proceed with review-review for issue #$IssueNumber? (y/N)" + if ($confirm -notmatch '^[yY]') { + Info "Cancelled." + return + } + } + + #region Build and run AI prompt + $promptText = @" +TASK: Write a meta-review file to 'Generated Files/issueReviewReview/$IssueNumber/reviewTheReview.md'. + +You MUST create this file before finishing. This is your primary deliverable. + +Issue number: $IssueNumber +Iteration: $iteration + +STEP 1 - Read these inputs: +- Run: gh issue view $IssueNumber --json number,title,body,state,labels,comments +- Read file: Generated Files/issueReview/$IssueNumber/overview.md +- Read file: Generated Files/issueReview/$IssueNumber/implementation-plan.md +$(if ($iteration -gt 1) { "- Read file: Generated Files/issueReviewReview/$IssueNumber/iteration-$($iteration - 1)/reviewTheReview.md" }) + +STEP 2 - Verify file paths from the implementation plan exist using test -f or ls. +STEP 3 - Verify code patterns from the implementation plan using rg. + +STEP 4 - Write the file 'Generated Files/issueReviewReview/$IssueNumber/reviewTheReview.md' with this structure: + +# Meta-Review: Issue #$IssueNumber + +## Score Validation +| Dimension | Original Score | Verified Score | Evidence | +|-----------|---------------|----------------|----------| +(validate each score dimension from overview.md against actual codebase evidence) + +## Implementation Plan Verification +- File paths: which exist, which don't +- Patterns: which are correct, which are wrong +- Task breakdown: are tasks specific and executable? 
+ +## Quality Score Breakdown +| Dimension | Weight | Score | Weighted | +|-----------|--------|-------|----------| +| Score Accuracy | 30% | X/100 | X | +| Implementation Correctness | 25% | X/100 | X | +| Risk Assessment | 15% | X/100 | X | +| Completeness | 15% | X/100 | X | +| Actionability | 15% | X/100 | X | +| **Total** | | | **X/100** | + +## Review Quality Score: X/100 + +## Verdict: PASS/NEEDS_IMPROVEMENT/FAIL + +## Corrective Feedback +(specific items the review should fix, if any) + +CRITICAL: You MUST write the output file. Do NOT just describe what you would do. Actually create the file. +"@ + + $mcpConfig = "@$_cfgDir/skills/issue-review-review/references/mcp-config.json" + + switch ($CLIType) { + 'copilot' { + $cliArgs = @( + '--additional-mcp-config', $mcpConfig, + '-p', $promptText, + '--yolo', + '-s', + '--enable-all-github-mcp-tools', + '--allow-tool', 'github-artifacts', + '--agent', 'ReviewTheReview' + ) + if ($Model) { + $cliArgs += @('--model', $Model) + } + + Info "Running Copilot CLI for review-review..." + & copilot @cliArgs 2>&1 | Out-Default + $exitCode = $LASTEXITCODE + } + 'claude' { + $cliArgs = @( + '--print', + '--dangerously-skip-permissions', + '--agent', 'ReviewTheReview', + '--prompt', $promptText + ) + + Info "Running Claude CLI for review-review..." 
+ & claude @cliArgs 2>&1 | Out-Default + $exitCode = $LASTEXITCODE + } + } + #endregion + + #region Parse result and write signal + $reviewTheReviewPath = Join-Path $outputDir 'reviewTheReview.md' + + if (-not (Test-Path $reviewTheReviewPath)) { + # CLI may have failed + Err "reviewTheReview.md was not generated for issue #$IssueNumber" + + @{ + status = 'failure' + issueNumber = $IssueNumber + timestamp = (Get-Date).ToString('o') + qualityScore = 0 + iteration = $iteration + outputs = @() + needsReReview = $true + error = "Output file not generated (exit code: $exitCode)" + } | ConvertTo-Json | Set-Content $existingSignalPath -Force + + return @{ + IssueNumber = $IssueNumber + Status = 'failure' + QualityScore = 0 + Iteration = $iteration + NeedsReReview = $true + Error = "Output file not generated" + } + } + + # Parse quality score from the generated reviewTheReview.md + $content = Get-Content $reviewTheReviewPath -Raw + $qualityScore = 0 + + # Try to extract "Review Quality Score: X/100" + if ($content -match 'Review Quality Score:\s*(\d+)/100') { + $qualityScore = [int]$Matches[1] + } + # Also try total from breakdown table: "| **Total** | | | **X/100** |" + elseif ($content -match '\*\*Total\*\*[^|]*\|[^|]*\|[^|]*\|\s*\*\*(\d+)/100\*\*') { + $qualityScore = [int]$Matches[1] + } + # Fallback: any line with "Quality Score" and a number + elseif ($content -match 'Quality Score[^\d]*(\d+)') { + $qualityScore = [int]$Matches[1] + } + + $needsReReview = $qualityScore -lt 90 + + # Determine verdict + $verdict = if ($qualityScore -ge 90) { 'PASS' } + elseif ($qualityScore -ge 50) { 'NEEDS_IMPROVEMENT' } + else { 'FAIL' } + + # Write signal + $signal = @{ + status = 'success' + issueNumber = $IssueNumber + timestamp = (Get-Date).ToString('o') + qualityScore = $qualityScore + iteration = $iteration + verdict = $verdict + outputs = @('reviewTheReview.md') + needsReReview = $needsReReview + } + $signal | ConvertTo-Json | Set-Content $existingSignalPath -Force + + if 
($needsReReview) { + Warn "Review-review score: $qualityScore/100 (iteration $iteration) — NEEDS RE-REVIEW" + Warn "Feedback written to: $reviewTheReviewPath" + Warn "Re-run issue-review with: -FeedbackFile `"$reviewTheReviewPath`"" + } + else { + Success "Review-review score: $qualityScore/100 (iteration $iteration) — PASS" + Success "Review quality is sufficient. Proceed to issue-fix." + } + + Info "Signal: $existingSignalPath" + #endregion + + return @{ + IssueNumber = $IssueNumber + Status = 'success' + QualityScore = $qualityScore + Iteration = $iteration + Verdict = $verdict + NeedsReReview = $needsReReview + } +} +catch { + Err "Error: $($_.Exception.Message)" + + # Write failure signal + $outputDir = Join-Path (Get-GeneratedFilesPath -RepoRoot (Get-RepoRoot)) "issueReviewReview/$IssueNumber" + Ensure-DirectoryExists -Path $outputDir + $signalPath = Join-Path $outputDir '.signal' + @{ + status = 'failure' + issueNumber = $IssueNumber + timestamp = (Get-Date).ToString('o') + qualityScore = 0 + iteration = 1 + outputs = @() + needsReReview = $true + error = $_.Exception.Message + } | ConvertTo-Json | Set-Content $signalPath -Force + + return @{ + IssueNumber = $IssueNumber + Status = 'failure' + QualityScore = 0 + Iteration = 1 + NeedsReReview = $true + Error = $_.Exception.Message + } +} +#endregion diff --git a/.github/skills/issue-review-review/scripts/Start-IssueReviewReviewParallel.ps1 b/.github/skills/issue-review-review/scripts/Start-IssueReviewReviewParallel.ps1 new file mode 100644 index 000000000000..3748d1ef755b --- /dev/null +++ b/.github/skills/issue-review-review/scripts/Start-IssueReviewReviewParallel.ps1 @@ -0,0 +1,111 @@ +<# +.SYNOPSIS + Run issue-review-review in parallel from a single terminal. + +.PARAMETER IssueNumbers + Issue numbers to review-review. + +.PARAMETER ThrottleLimit + Maximum parallel tasks. + +.PARAMETER CLIType + AI CLI type (copilot/claude). + +.PARAMETER Model + Copilot CLI model to use (e.g., gpt-5.2-codex). 
+ +.PARAMETER Force + Skip confirmation prompts. +#> +[CmdletBinding()] +param( + [Parameter(Mandatory)] + [int[]]$IssueNumbers, + + [int]$ThrottleLimit = 5, + + [ValidateSet('copilot', 'claude')] + [string]$CLIType = 'copilot', + + [string]$Model, + + [switch]$Force +) + +$repoRoot = Resolve-Path (Join-Path $PSScriptRoot '..\..\..\..') + +# Resolve config directory name (.github or .claude) from script location +$_cfgDir = if ($PSScriptRoot -match '[\\/](\.github|\.claude)[\\/]') { $Matches[1] } else { '.github' } +$scriptPath = Join-Path $repoRoot "$_cfgDir\skills\issue-review-review\scripts\Start-IssueReviewReview.ps1" + +$results = $IssueNumbers | ForEach-Object -Parallel { + $issue = $PSItem + $repoRoot = $using:repoRoot + $scriptPath = $using:scriptPath + $cliType = $using:CLIType + $model = $using:Model + $force = $using:Force + + Set-Location $repoRoot + + if (-not $issue) { + return [pscustomobject]@{ + IssueNumber = $issue + ExitCode = 1 + QualityScore = 0 + Error = 'Issue number is empty.' 
+ } + } + + $params = @{ + IssueNumber = [int]$issue + CLIType = $cliType + } + if ($model) { + $params.Model = $model + } + if ($force) { + $params.Force = $true + } + + try { + $result = & $scriptPath @params + [pscustomobject]@{ + IssueNumber = $issue + ExitCode = $LASTEXITCODE + QualityScore = $result.QualityScore + NeedsReReview = $result.NeedsReReview + Iteration = $result.Iteration + Verdict = $result.Verdict + } + } + catch { + [pscustomobject]@{ + IssueNumber = $issue + ExitCode = 1 + QualityScore = 0 + NeedsReReview = $true + Error = $_.Exception.Message + } + } +} -ThrottleLimit $ThrottleLimit + +# Summary +$passed = @($results | Where-Object { $_.QualityScore -ge 90 }) +$needsWork = @($results | Where-Object { $_.QualityScore -gt 0 -and $_.QualityScore -lt 90 }) +$failed = @($results | Where-Object { $_.QualityScore -eq 0 -or $_.Error }) + +Write-Host "`n=== REVIEW-REVIEW SUMMARY ===" -ForegroundColor Cyan +Write-Host "Total: $($results.Count)" +Write-Host "Passed (>=90): $($passed.Count)" -ForegroundColor Green +Write-Host "Needs work: $($needsWork.Count)" -ForegroundColor Yellow +Write-Host "Failed: $($failed.Count)" -ForegroundColor Red + +if ($needsWork.Count -gt 0) { + Write-Host "`nIssues needing re-review:" -ForegroundColor Yellow + foreach ($r in $needsWork) { + Write-Host " #$($r.IssueNumber) — score: $($r.QualityScore)/100 (iteration $($r.Iteration))" + } +} + +$results diff --git a/.github/skills/issue-review/LICENSE.txt b/.github/skills/issue-review/LICENSE.txt new file mode 100644 index 000000000000..22aed37e650b --- /dev/null +++ b/.github/skills/issue-review/LICENSE.txt @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) Microsoft Corporation. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/.github/skills/issue-review/SKILL.md b/.github/skills/issue-review/SKILL.md new file mode 100644 index 000000000000..89be4b41703d --- /dev/null +++ b/.github/skills/issue-review/SKILL.md @@ -0,0 +1,148 @@ +--- +name: issue-review +description: Analyze GitHub issues for feasibility and implementation planning. Use when asked to review an issue, analyze if an issue is fixable, evaluate issue complexity, create implementation plan for an issue, triage issues, assess technical feasibility, or estimate effort for an issue. Outputs structured analysis including feasibility score, clarity score, effort estimate, and detailed implementation plan. +license: Complete terms in LICENSE.txt +--- + +# Issue Review Skill + +Analyze GitHub issues to determine technical feasibility, requirement clarity, and create detailed implementation plans for PowerToys. 
+ +## Skill Contents + +This skill is **self-contained** with all required resources: + +``` +.github/skills/issue-review/ +├── SKILL.md # This file +├── LICENSE.txt # MIT License +├── scripts/ +│ ├── IssueReviewLib.ps1 # Shared library functions +│ └── Start-BulkIssueReview.ps1 # Main review script +└── references/ + └── review-issue.prompt.md # Full AI prompt template +``` + +## Output Directory + +All generated artifacts are placed under `Generated Files/issueReview/<issue-number>/` at the repository root (gitignored). + +``` +Generated Files/issueReview/ +└── <issue-number>/ + ├── overview.md # High-level assessment with scores + ├── implementation-plan.md # Detailed step-by-step fix plan + ├── _raw-issue.json # Cached issue data from GitHub + └── .signal # Completion signal for orchestrator +``` + +## Signal File + +On completion, a `.signal` file is created for orchestrator coordination: + +```json +{ + "status": "success", + "issueNumber": 45363, + "timestamp": "2026-02-04T10:05:23Z", + "outputs": ["overview.md", "implementation-plan.md"] +} +``` + +Status values: `success`, `failure` + +## When to Use This Skill + +- Review a specific GitHub issue for feasibility +- Analyze whether an issue can be fixed by AI +- Create an implementation plan for an issue +- Triage issues by complexity and clarity +- Estimate effort for fixing an issue +- Evaluate technical requirements of an issue + +## Prerequisites + +- GitHub CLI (`gh`) installed and authenticated +- PowerShell 7+ for running scripts + +## Required Variables + +⚠️ **Before starting**, confirm `{{IssueNumber}}` with the user. If not provided, **ASK**: "What issue number should I review?" 
+ +| Variable | Description | Example | +|----------|-------------|---------| +| `{{IssueNumber}}` | GitHub issue number to analyze | `44044` | + +## Workflow + +### Step 1: Run Issue Review + +Execute the review script (use paths relative to this skill folder): + +```powershell +# From repo root +.github/skills/issue-review/scripts/Start-BulkIssueReview.ps1 -IssueNumber {{IssueNumber}} +``` + +This will: +1. Fetch issue details from GitHub +2. Analyze the codebase for relevant files +3. Generate `overview.md` with feasibility assessment +4. Generate `implementation-plan.md` with detailed steps + +### Step 2: Review Output + +Check the generated files at `Generated Files/issueReview/{{IssueNumber}}/`: + +| File | Contains | +|------|----------| +| `overview.md` | Feasibility score (0-100), Clarity score (0-100), Effort estimate, Risk assessment | +| `implementation-plan.md` | Step-by-step implementation with file paths, code snippets, test requirements | + +### Step 3: Interpret Scores + +| Score Range | Interpretation | +|-------------|----------------| +| 80-100 | High confidence - straightforward fix | +| 60-79 | Medium confidence - some complexity | +| 40-59 | Low confidence - significant challenges | +| 0-39 | Very low - may need human intervention | + +## Batch Review + +To review multiple issues at once: + +```powershell +.github/skills/issue-review/scripts/Start-BulkIssueReview.ps1 -IssueNumbers 44044, 32950, 45029 +``` + +## AI Prompt Reference + +For manual AI invocation, the full prompt is at: +- `references/review-issue.prompt.md` (relative to this skill folder) + +## Re-Review with Feedback + +When the `issue-review-review` skill identifies quality issues, re-run with feedback: + +```powershell +.github/skills/issue-review/scripts/Start-BulkIssueReview.ps1 -IssueNumber {{IssueNumber}} -FeedbackFile "Generated Files/issueReviewReview/{{IssueNumber}}/reviewTheReview.md" -Force +``` + +The `-FeedbackFile` parameter injects corrective feedback into the AI 
prompt so the review addresses specific issues found by the meta-review. + +## Troubleshooting + +| Problem | Solution | +|---------|----------| +| Issue not found | Verify issue number exists: `gh issue view {{IssueNumber}}` | +| No implementation plan | Issue may be unclear - check `overview.md` for clarity score | +| Script errors | Ensure you're in the PowerToys repo root | + +## Related Skills + +| Skill | Purpose | +|-------|---------| +| `issue-review-review` | Validate review quality, loop until score ≥ 90 | +| `issue-fix` | Fix issues after review, create PRs | +| `issue-to-pr-cycle` | Full orchestration (review → fix → PR → review loop) | diff --git a/.github/skills/issue-review/references/mcp-config.json b/.github/skills/issue-review/references/mcp-config.json new file mode 100644 index 000000000000..5af15d54218c --- /dev/null +++ b/.github/skills/issue-review/references/mcp-config.json @@ -0,0 +1,9 @@ +{ + "mcpServers": { + "github-artifacts": { + "command": "cmd", + "args": ["/c", "for /f %i in ('git rev-parse --show-toplevel') do node %i/tools/mcp/github-artifacts/launch.js"], + "tools": ["*"] + } + } +} diff --git a/.github/skills/issue-review/references/review-issue.prompt.md b/.github/skills/issue-review/references/review-issue.prompt.md new file mode 100644 index 000000000000..2ed4b9ef1f49 --- /dev/null +++ b/.github/skills/issue-review/references/review-issue.prompt.md @@ -0,0 +1,165 @@ +--- +agent: 'agent' +description: 'Review a GitHub issue, score it (0-100), and generate an implementation plan' +--- + +# Review GitHub Issue + +## Goal +For **#{{issue_number}}** produce: +1) `Generated Files/issueReview/{{issue_number}}/overview.md` +2) `Generated Files/issueReview/{{issue_number}}/implementation-plan.md` + +## Inputs +Figure out required inputs {{issue_number}} from the invocation context; if anything is missing, ask for the value or note it as a gap. 
+ +# CONTEXT (brief) +Ground evidence using `gh issue view {{issue_number}} --json number,title,body,author,createdAt,updatedAt,state,labels,milestone,reactions,comments,linkedPullRequests`, download images via MCP `github_issue_images` to better understand the issue context. Finally, use MCP `github_issue_attachments` to download logs with parameter `extractFolder` as `Generated Files/issueReview/{{issue_number}}/logs`, and analyze the downloaded logs if available to identify relevant issues. Locate the source code in the current workspace (use `rg`/`git grep` as needed). Link related issues and PRs. + +## When to call MCP tools +If the following MCP "github-artifacts" tools are available in the environment, use them: +- `github_issue_images`: use when the issue/PR likely contains screenshots or other visual evidence (UI bugs, glitches, design problems). +- `github_issue_attachments`: use when the issue/PR mentions attached ZIPs (PowerToysReport_*.zip, logs.zip, debug.zip) or asks to analyze logs/diagnostics. Always provide `extractFolder` as `Generated Files/issueReview/{{issue_number}}/logs` + +If these tools are not available (not listed by the runtime), start the MCP server "github-artifacts" first. + +# OVERVIEW.MD +## Summary +Issue, state, milestone, labels. **Signals**: 👍/❤️/👎, comment count, last activity, linked PRs. 
+ +## At-a-Glance Score Table +Present all ratings in a compact table for quick scanning: + +| Dimension | Score | Assessment | Key Drivers | +|-----------|-------|------------|-------------| +| **A) Business Importance** | X/100 | Low/Medium/High | Top 2 factors with scores | +| **B) Community Excitement** | X/100 | Low/Medium/High | Top 2 factors with scores | +| **C) Technical Feasibility** | X/100 | Low/Medium/High | Top 2 factors with scores | +| **D) Requirement Clarity** | X/100 | Low/Medium/High | Top 2 factors with scores | +| **Overall Priority** | X/100 | Low/Medium/High/Critical | Average or weighted summary | +| **Effort Estimate** | X days (T-shirt) | XS/S/M/L/XL/XXL/Epic | Type: bug/feature/chore | +| **Similar Issues Found** | X open, Y closed | — | Quick reference to related work | +| **Potential Assignees** | @username, @username | — | Top contributors to module | + +**Assessment bands**: 0-25 Low, 26-50 Medium, 51-75 High, 76-100 Critical + +## Ratings (0–100) — add evidence & short rationale +### A) Business Importance +- Labels (priority/security/regression): **≤35** +- Milestone/roadmap: **≤25** +- Customer/contract impact: **≤20** +- Unblocks/platform leverage: **≤20** +### B) Community Excitement +- 👍+❤️ normalized: **≤45** +- Comment volume & unique participants: **≤25** +- Recent activity (≤30d): **≤15** +- Duplicates/related issues: **≤15** +### C) Technical Feasibility +- Contained surface/clear seams: **≤30** +- Existing patterns/utilities: **≤25** +- Risk (perf/sec/compat) manageable: **≤25** +- Testability & CI support: **≤20** +### D) Requirement Clarity +- Behavior/repro/constraints: **≤60** +- Non-functionals (perf/sec/i18n/a11y): **≤25** +- Decision owners/acceptance signals: **≤15** + +## Effort +Days + **T-shirt** (XS 0.5–1d, S 1–2, M 2–4, L 4–7, XL 7–14, XXL 14–30, Epic >30). +Type/level: bug/feature/chore/docs/refactor/test-only; severity/value tier. 
+ +## Suggested Actions +Provide actionable recommendations for issue triage and assignment: + +### A) Requirement Clarification (if Clarity score <50) +**When Requirement Clarity (Dimension D) is Medium or Low:** +- Identify specific gaps in issue description: missing repro steps, unclear expected behavior, undefined acceptance criteria, missing non-functional requirements +- Draft 3-5 clarifying questions to post as issue comment +- Suggest additional information needed: screenshots, logs, environment details, OS version, PowerToys version, error messages +- If behavior is ambiguous, propose 2-3 interpretation scenarios and ask reporter to confirm +- Example questions: + - "Can you provide exact steps to reproduce this issue?" + - "What is the expected behavior vs. what you're actually seeing?" + - "Does this happen on Windows 10, 11, or both?" + - "Can you attach a screenshot or screen recording?" + +### B) Correct Label Suggestions +- Analyze issue type, module, and severity to suggest missing or incorrect labels +- Recommend labels from: `Issue-Bug`, `Issue-Feature`, `Issue-Docs`, `Issue-Task`, `Priority-High`, `Priority-Medium`, `Priority-Low`, `Needs-Triage`, `Needs-Author-Feedback`, `Product-<ModuleName>`, etc. 
+- If Requirement Clarity is low (<50), add `Needs-Author-Feedback` label +- If current labels are incorrect or incomplete, provide specific label changes with rationale + +### C) Find Similar Issues & Past Fixes +- Search for similar issues using `gh issue list --search "keywords" --state all --json number,title,state,closedAt` +- Identify patterns: duplicate issues, related bugs, or similar feature requests +- For closed issues, find linked PRs that fixed them: check `linkedPullRequests` in issue data +- Provide 3-5 examples of similar issues with format: `#<number> - <title> (closed by PR #<pr>)` or `(still open)` + +### D) Identify Subject Matter Experts +- Use git blame/log to find who fixed similar issues in the past +- Search for PR authors who touched relevant files: `git log --all --format='%aN' -- <file_paths> | sort | uniq -c | sort -rn | head -5` +- Check issue/PR history for frequent contributors to the affected module +- Suggest 2-3 potential assignees with context: `@<username> - <reason>` (e.g., "fixed similar rendering bug in #12345", "maintains FancyZones module") + +### E) Semantic Search for Related Work +- Use semantic_search tool to find similar issues, code patterns, or past discussions +- Search queries should include: issue keywords, module names, error messages, feature descriptions +- Cross-reference semantic results with GitHub issue search for comprehensive coverage + +**Output format for Suggested Actions section in overview.md:** +```markdown +## Suggested Actions + +### Clarifying Questions (if Clarity <50) +Post these questions as issue comment to gather missing information: +1. <question> +2. <question> +3. <question> + +**Recommended label**: `Needs-Author-Feedback` + +### Label Recommendations +- Add: `<label>` - <reason> +- Remove: `<label>` - <reason> +- Current labels are appropriate ✓ + +### Similar Issues Found +1. #<number> - <title> (<state>, closed by PR #<pr> on <date>) +2. #<number> - <title> (<state>) +... 
+ +### Potential Assignees +- @<username> - <reason> +- @<username> - <reason> + +### Related Code/Discussions +- <semantic search findings> +``` + +# IMPLEMENTATION-PLAN.MD +1) **Problem Framing** — restate problem; current vs expected; scope boundaries. +2) **Layers & Files** — layers (UI/domain/data/infra/build). For each, list **files/dirs to modify** and **new files** (exact paths + why). Prefer repo patterns; cite examples/PRs. +3) **Pattern Choices** — reuse existing; if new, justify trade-offs & transition. +4) **Fundamentals** (brief plan or N/A + reason): +- Performance (hot paths, allocs, caching/streaming) +- Security (validation, authN/Z, secrets, SSRF/XSS/CSRF) +- G11N/L10N (resources, number/date, pluralization) +- Compatibility (public APIs, formats, OS/runtime/toolchain) +- Extensibility (DI seams, options/flags, plugin points) +- Accessibility (roles, labels, focus, keyboard, contrast) +- SOLID & repo conventions (naming, folders, dependency direction) +5) **Logging & Exception Handling** +- Where to log; levels; structured fields; correlation/traces. +- What to catch vs rethrow; retries/backoff; user-visible errors. +- **Privacy**: never log secrets/PII; redaction policy. +6) **Telemetry (optional — business metrics only)** +- Events/metrics (name, when, props); success signal; privacy/sampling; dashboards/alerts. +7) **Risks & Mitigations** — flags/canary/shadow-write/config guards. +8) **Task Breakdown (agent-ready)** — table (leave a blank line before the header so Markdown renders correctly): + +| Task | Intent | Files/Areas | Steps | Tests (brief) | Owner (Agent/Human) | Human interaction needed? (why) | +|---|---|---|---|---|---|---| + +9) **Tests to Add (only)** +- **Unit**: targets, cases (success/edge/error), mocks/fixtures, path, notes. +- **UI** (if applicable): flows, locator strategy, env/data/flags, path, flake mitigation. 
\ No newline at end of file diff --git a/.github/skills/issue-review/scripts/IssueReviewLib.ps1 b/.github/skills/issue-review/scripts/IssueReviewLib.ps1 new file mode 100644 index 000000000000..ac1dc7754363 --- /dev/null +++ b/.github/skills/issue-review/scripts/IssueReviewLib.ps1 @@ -0,0 +1,777 @@ +# IssueReviewLib.ps1 - Shared helpers for bulk issue review automation +# Part of the PowerToys GitHub Copilot/Claude Code issue review system + +# Resolve config directory name (.github or .claude) from this script's location +$_cfgDir = if ($PSScriptRoot -match '[\\/](\.github|\.claude)[\\/]') { $Matches[1] } else { '.github' } + +#region Console Output Helpers +function Info { param([string]$Message) Write-Host $Message -ForegroundColor Cyan } +function Warn { param([string]$Message) Write-Host $Message -ForegroundColor Yellow } +function Err { param([string]$Message) Write-Host $Message -ForegroundColor Red } +function Success { param([string]$Message) Write-Host $Message -ForegroundColor Green } +#endregion + +#region Repository Helpers +function Get-RepoRoot { + $root = git rev-parse --show-toplevel 2>$null + if (-not $root) { throw 'Not inside a git repository.' } + return (Resolve-Path $root).Path +} + +function Get-GeneratedFilesPath { + param([string]$RepoRoot) + return Join-Path $RepoRoot 'Generated Files' +} + +function Get-IssueReviewPath { + param( + [string]$RepoRoot, + [int]$IssueNumber + ) + $genFiles = Get-GeneratedFilesPath -RepoRoot $RepoRoot + return Join-Path $genFiles "issueReview/$IssueNumber" +} + +function Get-IssueTitleFromOverview { + <# + .SYNOPSIS + Extract issue title from existing overview.md file. + .DESCRIPTION + Parses the overview.md to get the issue title without requiring GitHub CLI. 
+    #>
+    param(
+        [Parameter(Mandatory)]
+        [string]$OverviewPath
+    )
+
+    if (-not (Test-Path $OverviewPath)) {
+        return $null
+    }
+
+    $content = Get-Content $OverviewPath -Raw
+
+    # Try to match title from Summary table: | **Title** | <title> |
+    if ($content -match '\*\*Title\*\*\s*\|\s*([^|]+)\s*\|') {
+        return $Matches[1].Trim()
+    }
+
+    # Try to match: # Issue #XXXX Review: <title> (checked before the generic header so "Review:" is not captured as part of the title; (?m) anchors ^/$ per line since $content is the whole file)
+    if ($content -match '(?m)^# Issue #\d+ Review[:\s]+(.+)$') {
+        return $Matches[1].Trim()
+    }
+
+    # Try to match from header: # Issue #XXXX: <title>
+    if ($content -match '(?m)^# Issue #\d+[:\s]+(.+)$') {
+        return $Matches[1].Trim()
+    }
+
+    return $null
+}
+
+function Ensure-DirectoryExists {
+    param([string]$Path)
+    if (-not (Test-Path $Path)) {
+        New-Item -ItemType Directory -Path $Path -Force | Out-Null
+    }
+}
+#endregion
+
+#region GitHub Issue Query Helpers
+function Get-GitHubIssues {
+    <#
+    .SYNOPSIS
+        Query GitHub issues by label, state, and sort order.
+    .PARAMETER Labels
+        Comma-separated list of labels to filter by (e.g., "bug,help wanted").
+    .PARAMETER State
+        Issue state: open, closed, or all. Default: open.
+    .PARAMETER Sort
+        Sort field: created, updated, comments, reactions. Default: created.
+    .PARAMETER Order
+        Sort order: asc or desc. Default: desc.
+    .PARAMETER Limit
+        Maximum number of issues to return. Default: 100.
+    .PARAMETER Repository
+        Repository in owner/repo format. Default: microsoft/PowerToys.
+ #> + param( + [string]$Labels, + [ValidateSet('open', 'closed', 'all')] + [string]$State = 'open', + [ValidateSet('created', 'updated', 'comments', 'reactions')] + [string]$Sort = 'created', + [ValidateSet('asc', 'desc')] + [string]$Order = 'desc', + [int]$Limit = 100, + [string]$Repository = 'microsoft/PowerToys' + ) + + $ghArgs = @('issue', 'list', '--repo', $Repository, '--state', $State, '--limit', $Limit) + + if ($Labels) { + foreach ($label in ($Labels -split ',')) { + $ghArgs += @('--label', $label.Trim()) + } + } + + # Build JSON fields (use reactionGroups instead of reactions) + $jsonFields = 'number,title,state,labels,createdAt,updatedAt,author,reactionGroups,comments' + $ghArgs += @('--json', $jsonFields) + + Info "Querying issues: gh $($ghArgs -join ' ')" + $result = & gh @ghArgs 2>&1 + + if ($LASTEXITCODE -ne 0) { + throw "Failed to query issues: $result" + } + + $issues = $result | ConvertFrom-Json + + # Sort by reactions if requested (gh CLI doesn't support this natively) + if ($Sort -eq 'reactions') { + $issues = $issues | ForEach-Object { + # reactionGroups is an array of {content, users} - sum up user counts + $totalReactions = ($_.reactionGroups | ForEach-Object { $_.users.totalCount } | Measure-Object -Sum).Sum + if (-not $totalReactions) { $totalReactions = 0 } + $_ | Add-Member -NotePropertyName 'totalReactions' -NotePropertyValue $totalReactions -PassThru + } + if ($Order -eq 'desc') { + $issues = $issues | Sort-Object -Property totalReactions -Descending + } else { + $issues = $issues | Sort-Object -Property totalReactions + } + } + + return $issues +} + +function Get-IssueDetails { + <# + .SYNOPSIS + Get detailed information about a specific issue. 
+ #> + param( + [Parameter(Mandatory)] + [int]$IssueNumber, + [string]$Repository = 'microsoft/PowerToys' + ) + + $jsonFields = 'number,title,body,state,labels,createdAt,updatedAt,author,reactions,comments,linkedPullRequests,milestone' + $result = gh issue view $IssueNumber --repo $Repository --json $jsonFields 2>&1 + + if ($LASTEXITCODE -ne 0) { + throw "Failed to get issue #$IssueNumber`: $result" + } + + return $result | ConvertFrom-Json +} +#endregion + +#region CLI Detection and Execution +function Get-AvailableCLI { + <# + .SYNOPSIS + Detect which AI CLI is available: GitHub Copilot CLI or Claude Code. + .OUTPUTS + Returns object with: Name, Command, PromptArg + #> + + # Check for standalone GitHub Copilot CLI (copilot command) + $copilotCLI = Get-Command 'copilot' -ErrorAction SilentlyContinue + if ($copilotCLI) { + return @{ + Name = 'GitHub Copilot CLI' + Command = 'copilot' + Args = @('-p') # Non-interactive prompt mode + Type = 'copilot' + } + } + + # Check for Claude Code CLI + $claudeCode = Get-Command 'claude' -ErrorAction SilentlyContinue + if ($claudeCode) { + return @{ + Name = 'Claude Code CLI' + Command = 'claude' + Args = @() + Type = 'claude' + } + } + + # Check for GitHub Copilot CLI via gh extension + $ghCopilot = Get-Command 'gh' -ErrorAction SilentlyContinue + if ($ghCopilot) { + $copilotCheck = gh extension list 2>&1 | Select-String -Pattern 'copilot' + if ($copilotCheck) { + return @{ + Name = 'GitHub Copilot CLI (gh extension)' + Command = 'gh' + Args = @('copilot', 'suggest') + Type = 'gh-copilot' + } + } + } + + # Check for VS Code CLI with Copilot + $code = Get-Command 'code' -ErrorAction SilentlyContinue + if ($code) { + return @{ + Name = 'VS Code (Copilot Chat)' + Command = 'code' + Args = @() + Type = 'vscode' + } + } + + return $null +} + +function Invoke-AIReview { + <# + .SYNOPSIS + Invoke AI CLI to review a single issue. + .PARAMETER IssueNumber + The issue number to review. + .PARAMETER RepoRoot + Repository root path. 
+    .PARAMETER CLIType
+        CLI type: 'claude', 'copilot', 'gh-copilot', or 'vscode'.
+    .PARAMETER WorkingDirectory
+        Working directory for the CLI command.
+    .PARAMETER FeedbackContext
+        Optional feedback from review-the-review to incorporate into the re-review.
+    .PARAMETER Model
+        Optional model override for Copilot CLI (e.g., claude-sonnet-4).
+    #>
+    param(
+        [Parameter(Mandatory)]
+        [int]$IssueNumber,
+        [Parameter(Mandatory)]
+        [string]$RepoRoot,
+        [ValidateSet('claude', 'copilot', 'gh-copilot', 'vscode')]
+        [string]$CLIType = 'copilot',
+        [string]$WorkingDirectory,
+        [string]$FeedbackContext,
+        [string]$Model
+    )
+
+    if (-not $WorkingDirectory) {
+        $WorkingDirectory = $RepoRoot
+    }
+
+    $promptFile = Join-Path $RepoRoot "$_cfgDir/skills/issue-review/references/review-issue.prompt.md"
+    if (-not (Test-Path $promptFile)) {
+        throw "Prompt file not found: $promptFile"
+    }
+
+    # Prepare the prompt with issue number substitution
+    $promptContent = Get-Content $promptFile -Raw
+    $promptContent = $promptContent -replace '\{\{issue_number\}\}', $IssueNumber
+
+    # Create temp prompt file
+    $tempPromptDir = Join-Path $env:TEMP "issue-review-$IssueNumber"
+    Ensure-DirectoryExists -Path $tempPromptDir
+    $tempPromptFile = Join-Path $tempPromptDir "prompt.md"
+    $promptContent | Set-Content -Path $tempPromptFile -Encoding UTF8
+
+    # Build the prompt text for CLI
+    $promptText = "Review GitHub issue #$IssueNumber following the template in $_cfgDir/skills/issue-review/references/review-issue.prompt.md. Generate overview.md and implementation-plan.md in 'Generated Files/issueReview/$IssueNumber/'"
+
+    # Inject feedback from review-the-review if available
+    if ($FeedbackContext) {
+        $promptText += @"
+
+IMPORTANT: This is a RE-REVIEW. A previous review was rejected by the quality gate. You MUST address ALL the corrective feedback below. Read the feedback carefully and fix every issue identified.
+
+=== CORRECTIVE FEEDBACK FROM REVIEW-THE-REVIEW ===
+$FeedbackContext
+=== END FEEDBACK ===
+
+Pay special attention to:
+1. 
Score corrections — adjust scores to match the evidence cited in the feedback +2. File path corrections — verify all paths exist before including them +3. Pattern corrections — use the patterns identified as correct in the feedback +4. Missing coverage — add any sections flagged as missing +5. Task breakdown fixes — make tasks specific and executable +"@ + } + + switch ($CLIType) { + 'copilot' { + # GitHub Copilot CLI (standalone copilot command) + # Use --yolo for full permissions (--allow-all-tools --allow-all-paths --allow-all-urls) + # Use -s (silent) for cleaner output in batch mode + # Enable ALL GitHub MCP tools (issues, PRs, repos, etc.) + github-artifacts for images/attachments + # MCP config path relative to repo root for github-artifacts tools + $mcpConfig = "@$_cfgDir/skills/issue-review/references/mcp-config.json" + $args = @( + '--additional-mcp-config', $mcpConfig, # Load github-artifacts MCP for image/attachment analysis + '-p', $promptText, # Non-interactive prompt mode (exits after completion) + '--yolo', # Enable all permissions for automated execution + '-s', # Silent mode - output only agent response + '--enable-all-github-mcp-tools', # Enable ALL GitHub MCP tools (issues, PRs, search, etc.) 
+ '--allow-tool', 'github-artifacts', # Also enable our custom github-artifacts MCP + '--agent', 'ReviewIssue' + ) + if ($Model) { + $args += @('--model', $Model) + } + + return @{ + Command = 'copilot' + Arguments = $args + WorkingDirectory = $WorkingDirectory + IssueNumber = $IssueNumber + } + } + 'claude' { + # Claude Code CLI + $args = @( + '--print', # Non-interactive mode + '--dangerously-skip-permissions', + '--agent', 'ReviewIssue', + '--prompt', $promptText + ) + + return @{ + Command = 'claude' + Arguments = $args + WorkingDirectory = $WorkingDirectory + IssueNumber = $IssueNumber + } + } + 'gh-copilot' { + # GitHub Copilot CLI via gh + $args = @( + 'copilot', 'suggest', + '-t', 'shell', + "Review GitHub issue #$IssueNumber and generate analysis files" + ) + + return @{ + Command = 'gh' + Arguments = $args + WorkingDirectory = $WorkingDirectory + IssueNumber = $IssueNumber + } + } + 'vscode' { + # VS Code with Copilot - open with prompt + $args = @( + '--new-window', + $WorkingDirectory, + '--goto', $tempPromptFile + ) + + return @{ + Command = 'code' + Arguments = $args + WorkingDirectory = $WorkingDirectory + IssueNumber = $IssueNumber + } + } + } +} +#endregion + +#region Parallel Job Management +function Start-ParallelIssueReviews { + <# + .SYNOPSIS + Start parallel issue reviews with throttling. + .PARAMETER Issues + Array of issue objects to review. + .PARAMETER MaxConcurrent + Maximum number of parallel jobs. Default: 20. + .PARAMETER CLIType + CLI type to use for reviews. + .PARAMETER RepoRoot + Repository root path. + .PARAMETER TimeoutMinutes + Timeout per issue in minutes. Default: 30. + .PARAMETER MaxRetryCount + Maximum number of retries for failed issues. Default: 2. + .PARAMETER RetryDelaySeconds + Delay between retries in seconds. Default: 10. 
+ #> + param( + [Parameter(Mandatory)] + [array]$Issues, + [int]$MaxConcurrent = 20, + [ValidateSet('claude', 'copilot', 'gh-copilot', 'vscode')] + [string]$CLIType = 'copilot', + [Parameter(Mandatory)] + [string]$RepoRoot, + [int]$TimeoutMinutes = 30, + [int]$MaxRetryCount = 2, + [int]$RetryDelaySeconds = 10, + [string]$FeedbackContext, + [string]$Model + ) + + $totalIssues = $Issues.Count + $completed = 0 + $failed = @() + $succeeded = @() + $retryQueue = [System.Collections.Queue]::new() + + Info "Starting parallel review of $totalIssues issues (max $MaxConcurrent concurrent, $MaxRetryCount retries)" + + # Use PowerShell jobs for parallelization + $jobs = @() + $issueQueue = [System.Collections.Queue]::new($Issues) + + while ($issueQueue.Count -gt 0 -or $jobs.Count -gt 0 -or $retryQueue.Count -gt 0) { + # Process retry queue when main queue is empty + if ($issueQueue.Count -eq 0 -and $retryQueue.Count -gt 0 -and $jobs.Count -lt $MaxConcurrent) { + $retryItem = $retryQueue.Dequeue() + Warn "🔄 Retrying issue #$($retryItem.IssueNumber) (attempt $($retryItem.Attempt + 1)/$($MaxRetryCount + 1))" + Start-Sleep -Seconds $RetryDelaySeconds + $issueQueue.Enqueue(@{ number = $retryItem.IssueNumber; _retryAttempt = $retryItem.Attempt + 1 }) + } + + # Start new jobs up to MaxParallel + while ($jobs.Count -lt $MaxConcurrent -and $issueQueue.Count -gt 0) { + $issue = $issueQueue.Dequeue() + $issueNum = $issue.number + $retryAttempt = if ($issue._retryAttempt) { $issue._retryAttempt } else { 0 } + + $attemptInfo = if ($retryAttempt -gt 0) { " (retry $retryAttempt)" } else { "" } + Info "Starting review for issue #$issueNum$attemptInfo ($($totalIssues - $issueQueue.Count)/$totalIssues)" + + $job = Start-Job -Name "Issue-$issueNum" -ScriptBlock { + param($IssueNumber, $RepoRoot, $CLIType, $FeedbackCtx, $ModelOverride) + + Set-Location $RepoRoot + + # Import the library in the job context + . 
"$RepoRoot/.github/review-tools/IssueReviewLib.ps1" + + try { + $reviewParams = @{ + IssueNumber = $IssueNumber + RepoRoot = $RepoRoot + CLIType = $CLIType + } + if ($FeedbackCtx) { + $reviewParams.FeedbackContext = $FeedbackCtx + } + if ($ModelOverride) { + $reviewParams.Model = $ModelOverride + } + $reviewCmd = Invoke-AIReview @reviewParams + + # Execute the command using invocation operator (works for .ps1 scripts and executables) + Set-Location $reviewCmd.WorkingDirectory + $argList = $reviewCmd.Arguments + + # Capture both stdout and stderr for better error reporting + $output = & $reviewCmd.Command @argList 2>&1 + $exitCode = $LASTEXITCODE + + # Get last 20 lines of output for error context + $outputLines = $output | Out-String + $lastLines = ($outputLines -split "`n" | Select-Object -Last 20) -join "`n" + + # Check if output files were created (success indicator) + $overviewPath = Join-Path $RepoRoot "Generated Files/issueReview/$IssueNumber/overview.md" + $implPlanPath = Join-Path $RepoRoot "Generated Files/issueReview/$IssueNumber/implementation-plan.md" + $filesCreated = (Test-Path $overviewPath) -and (Test-Path $implPlanPath) + + return @{ + IssueNumber = $IssueNumber + Success = ($exitCode -eq 0) -or $filesCreated + ExitCode = $exitCode + FilesCreated = $filesCreated + Output = $lastLines + Error = if ($exitCode -ne 0 -and -not $filesCreated) { "Exit code: $exitCode`n$lastLines" } else { $null } + } + } + catch { + return @{ + IssueNumber = $IssueNumber + Success = $false + ExitCode = -1 + FilesCreated = $false + Output = $null + Error = $_.Exception.Message + } + } + } -ArgumentList $issueNum, $RepoRoot, $CLIType, $FeedbackContext, $Model + + $jobs += @{ + Job = $job + IssueNumber = $issueNum + StartTime = Get-Date + RetryAttempt = $retryAttempt + } + } + + # Check for completed jobs + $completedJobs = @() + foreach ($jobInfo in $jobs) { + $job = $jobInfo.Job + $issueNum = $jobInfo.IssueNumber + $startTime = $jobInfo.StartTime + $retryAttempt = 
$jobInfo.RetryAttempt + + if ($job.State -eq 'Completed') { + $result = Receive-Job -Job $job + Remove-Job -Job $job -Force + + if ($result.Success) { + Success "✓ Issue #$issueNum completed (files created: $($result.FilesCreated))" + $succeeded += $issueNum + $completed++ + } else { + # Check if we should retry + if ($retryAttempt -lt $MaxRetryCount) { + $errorPreview = if ($result.Error) { ($result.Error -split "`n" | Select-Object -First 3) -join " | " } else { "Unknown error" } + Warn "⚠ Issue #$issueNum failed (will retry): $errorPreview" + $retryQueue.Enqueue(@{ IssueNumber = $issueNum; Attempt = $retryAttempt; LastError = $result.Error }) + } else { + $errorMsg = if ($result.Error) { $result.Error } else { "Exit code: $($result.ExitCode)" } + Err "✗ Issue #$issueNum failed after $($retryAttempt + 1) attempts:" + Err " Error: $errorMsg" + $failed += @{ IssueNumber = $issueNum; Error = $errorMsg; Attempts = $retryAttempt + 1 } + $completed++ + } + } + $completedJobs += $jobInfo + } + elseif ($job.State -eq 'Failed') { + $jobError = $job.ChildJobs[0].JobStateInfo.Reason.Message + Remove-Job -Job $job -Force + + if ($retryAttempt -lt $MaxRetryCount) { + Warn "⚠ Issue #$issueNum job crashed (will retry): $jobError" + $retryQueue.Enqueue(@{ IssueNumber = $issueNum; Attempt = $retryAttempt; LastError = $jobError }) + } else { + Err "✗ Issue #$issueNum job failed after $($retryAttempt + 1) attempts: $jobError" + $failed += @{ IssueNumber = $issueNum; Error = $jobError; Attempts = $retryAttempt + 1 } + $completed++ + } + $completedJobs += $jobInfo + } + elseif ((Get-Date) - $startTime -gt [TimeSpan]::FromMinutes($TimeoutMinutes)) { + Stop-Job -Job $job -ErrorAction SilentlyContinue + Remove-Job -Job $job -Force + + if ($retryAttempt -lt $MaxRetryCount) { + Warn "⏱ Issue #$issueNum timed out after $TimeoutMinutes min (will retry)" + $retryQueue.Enqueue(@{ IssueNumber = $issueNum; Attempt = $retryAttempt; LastError = "Timeout after $TimeoutMinutes minutes" }) + } else 
{ + Err "⏱ Issue #$issueNum timed out after $($retryAttempt + 1) attempts" + $failed += @{ IssueNumber = $issueNum; Error = "Timeout after $TimeoutMinutes minutes"; Attempts = $retryAttempt + 1 } + $completed++ + } + $completedJobs += $jobInfo + } + } + + # Remove completed jobs from active list + $jobs = $jobs | Where-Object { $_ -notin $completedJobs } + + # Brief pause to avoid tight loop + if ($jobs.Count -gt 0) { + Start-Sleep -Seconds 2 + } + } + + # Extract just issue numbers for the failed list + $failedNumbers = $failed | ForEach-Object { $_.IssueNumber } + + return @{ + Total = $totalIssues + Succeeded = $succeeded + Failed = $failedNumbers + FailedDetails = $failed + } +} +#endregion + +#region Issue Review Results Helpers +function Get-IssueReviewResult { + <# + .SYNOPSIS + Check if an issue has been reviewed and get its results. + #> + param( + [Parameter(Mandatory)] + [int]$IssueNumber, + [Parameter(Mandatory)] + [string]$RepoRoot + ) + + $reviewPath = Get-IssueReviewPath -RepoRoot $RepoRoot -IssueNumber $IssueNumber + + $result = @{ + IssueNumber = $IssueNumber + Path = $reviewPath + HasOverview = $false + HasImplementationPlan = $false + OverviewPath = $null + ImplementationPlanPath = $null + } + + $overviewPath = Join-Path $reviewPath 'overview.md' + $implPlanPath = Join-Path $reviewPath 'implementation-plan.md' + + if (Test-Path $overviewPath) { + $result.HasOverview = $true + $result.OverviewPath = $overviewPath + } + + if (Test-Path $implPlanPath) { + $result.HasImplementationPlan = $true + $result.ImplementationPlanPath = $implPlanPath + } + + return $result +} + +function Get-HighConfidenceIssues { + <# + .SYNOPSIS + Find issues with high confidence for auto-fix based on review results. + .PARAMETER RepoRoot + Repository root path. + .PARAMETER MinFeasibilityScore + Minimum Technical Feasibility score (0-100). Default: 70. + .PARAMETER MinClarityScore + Minimum Requirement Clarity score (0-100). Default: 60. 
+ .PARAMETER MaxEffortDays + Maximum effort estimate in days. Default: 2 (S = Small). + .PARAMETER FilterIssueNumbers + Optional array of issue numbers to filter to. If specified, only these issues are considered. + #> + param( + [Parameter(Mandatory)] + [string]$RepoRoot, + [int]$MinFeasibilityScore = 70, + [int]$MinClarityScore = 60, + [int]$MaxEffortDays = 2, + [int[]]$FilterIssueNumbers = @() + ) + + $genFiles = Get-GeneratedFilesPath -RepoRoot $RepoRoot + $reviewDir = Join-Path $genFiles 'issueReview' + + if (-not (Test-Path $reviewDir)) { + return @() + } + + $highConfidence = @() + + Get-ChildItem -Path $reviewDir -Directory | ForEach-Object { + $issueNum = [int]$_.Name + + # Skip if filter is specified and this issue is not in the filter list + if ($FilterIssueNumbers.Count -gt 0 -and $issueNum -notin $FilterIssueNumbers) { + return + } + + $overviewPath = Join-Path $_.FullName 'overview.md' + $implPlanPath = Join-Path $_.FullName 'implementation-plan.md' + + if (-not (Test-Path $overviewPath) -or -not (Test-Path $implPlanPath)) { + return + } + + # Parse overview.md to extract scores + $overview = Get-Content $overviewPath -Raw + + # Extract scores using regex (looking for score table or inline scores) + $feasibility = 0 + $clarity = 0 + $effortDays = 999 + + # Try to extract from At-a-Glance Score Table + if ($overview -match 'Technical Feasibility[^\d]*(\d+)/100') { + $feasibility = [int]$Matches[1] + } + if ($overview -match 'Requirement Clarity[^\d]*(\d+)/100') { + $clarity = [int]$Matches[1] + } + # Match effort formats like "0.5-1 day", "1-2 days", "2-3 days" - extract the upper bound + if ($overview -match 'Effort Estimate[^|]*\|\s*[\d.]+(?:-(\d+))?\s*days?') { + if ($Matches[1]) { + $effortDays = [int]$Matches[1] + } elseif ($overview -match 'Effort Estimate[^|]*\|\s*(\d+)\s*days?') { + $effortDays = [int]$Matches[1] + } + } + # Also check for XS/S sizing in the table (e.g., "| XS |" or "| S |" or "(XS)" or "(S)") + if ($overview -match 'Effort 
Estimate[^|]*\|[^|]*\|\s*(XS|S)\b') { + # XS = 1 day, S = 2 days + if ($Matches[1] -eq 'XS') { + $effortDays = 1 + } else { + $effortDays = 2 + } + } elseif ($overview -match 'Effort Estimate[^|]*\|[^|]*\(XS\)') { + $effortDays = 1 + } elseif ($overview -match 'Effort Estimate[^|]*\|[^|]*\(S\)') { + $effortDays = 2 + } + + if ($feasibility -ge $MinFeasibilityScore -and + $clarity -ge $MinClarityScore -and + $effortDays -le $MaxEffortDays) { + + $highConfidence += @{ + IssueNumber = $issueNum + FeasibilityScore = $feasibility + ClarityScore = $clarity + EffortDays = $effortDays + OverviewPath = $overviewPath + ImplementationPlanPath = $implPlanPath + } + } + } + + return $highConfidence | Sort-Object -Property FeasibilityScore -Descending +} +#endregion + +#region Worktree Integration +function Copy-IssueReviewToWorktree { + <# + .SYNOPSIS + Copy the Generated Files for an issue to a worktree. + .PARAMETER IssueNumber + The issue number. + .PARAMETER SourceRepoRoot + Source repository root (main repo). + .PARAMETER WorktreePath + Destination worktree path. + #> + param( + [Parameter(Mandatory)] + [int]$IssueNumber, + [Parameter(Mandatory)] + [string]$SourceRepoRoot, + [Parameter(Mandatory)] + [string]$WorktreePath + ) + + $sourceReviewPath = Get-IssueReviewPath -RepoRoot $SourceRepoRoot -IssueNumber $IssueNumber + $destReviewPath = Get-IssueReviewPath -RepoRoot $WorktreePath -IssueNumber $IssueNumber + + if (-not (Test-Path $sourceReviewPath)) { + throw "Issue review files not found at: $sourceReviewPath" + } + + Ensure-DirectoryExists -Path $destReviewPath + + # Copy all files from the issue review folder + Copy-Item -Path "$sourceReviewPath\*" -Destination $destReviewPath -Recurse -Force + + Info "Copied issue review files to: $destReviewPath" + + return $destReviewPath +} +#endregion + +# Note: This script is dot-sourced, not imported as a module. +# All functions above are available after: . 
"path/to/IssueReviewLib.ps1" diff --git a/.github/skills/issue-review/scripts/Start-BulkIssueReview.ps1 b/.github/skills/issue-review/scripts/Start-BulkIssueReview.ps1 new file mode 100644 index 000000000000..ab98266fe0e3 --- /dev/null +++ b/.github/skills/issue-review/scripts/Start-BulkIssueReview.ps1 @@ -0,0 +1,291 @@ +<#! +.SYNOPSIS + Bulk review GitHub issues using AI CLI (Claude Code or GitHub Copilot). + +.DESCRIPTION + Queries GitHub issues by labels, state, and sort order, then kicks off parallel + AI-powered reviews for each issue. Results are stored in Generated Files/issueReview/<number>/. + +.PARAMETER Labels + Comma-separated list of labels to filter issues (e.g., "bug,help wanted"). + +.PARAMETER State + Issue state: open, closed, or all. Default: open. + +.PARAMETER Sort + Sort field: created, updated, comments, reactions. Default: created. + +.PARAMETER Order + Sort order: asc or desc. Default: desc. + +.PARAMETER Limit + Maximum number of issues to process. Default: 100. + +.PARAMETER MaxConcurrent + Maximum parallel review jobs. Default: 20. + +.PARAMETER CLIType + AI CLI to use: claude, gh-copilot, or vscode. Auto-detected if not specified. + +.PARAMETER DryRun + List issues without starting reviews. + +.PARAMETER SkipExisting + Skip issues that already have review files. + +.PARAMETER Repository + Repository in owner/repo format. Default: microsoft/PowerToys. + +.PARAMETER TimeoutMinutes + Timeout per issue review in minutes. Default: 30. 
+ +.EXAMPLE + # Review all open bugs sorted by reactions + ./Start-BulkIssueReview.ps1 -Labels "bug" -Sort reactions -Order desc + +.EXAMPLE + # Dry run to see which issues would be reviewed + ./Start-BulkIssueReview.ps1 -Labels "help wanted" -DryRun + +.EXAMPLE + # Review top 50 issues with Claude Code, max 10 parallel + ./Start-BulkIssueReview.ps1 -Labels "Issue-Bug" -Limit 50 -MaxConcurrent 10 -CLIType claude + +.EXAMPLE + # Skip already-reviewed issues + ./Start-BulkIssueReview.ps1 -Labels "Issue-Feature" -SkipExisting + +.NOTES + Requires: GitHub CLI (gh) authenticated, and either Claude Code CLI or VS Code with Copilot. + Results: Generated Files/issueReview/<issue_number>/overview.md and implementation-plan.md +#> + +[CmdletBinding()] +param( + [Parameter(Position = 0)] + [string]$Labels, + + [ValidateSet('open', 'closed', 'all')] + [string]$State = 'open', + + [ValidateSet('created', 'updated', 'comments', 'reactions')] + [string]$Sort = 'created', + + [ValidateSet('asc', 'desc')] + [string]$Order = 'desc', + + [int]$Limit = 1000, + + [int]$MaxConcurrent = 20, + + [ValidateSet('claude', 'copilot', 'gh-copilot', 'vscode', 'auto')] + [string]$CLIType = 'auto', + + [switch]$DryRun, + + [switch]$SkipExisting, + + [string]$Repository = 'microsoft/PowerToys', + + [int]$TimeoutMinutes = 30, + + [int]$MaxRetryCount = 2, + + [int]$RetryDelaySeconds = 10, + + [switch]$Force, + + [int]$IssueNumber, + + [int[]]$IssueNumbers, + + [string]$FeedbackFile, + + [string]$Model, + + [switch]$Help +) + +# Load library +$scriptDir = Split-Path -Parent $MyInvocation.MyCommand.Path +. "$scriptDir/IssueReviewLib.ps1" + +# Show help +if ($Help) { + Get-Help $MyInvocation.MyCommand.Path -Full + return +} + +#region Main Script +try { + # Get repo root + $repoRoot = Get-RepoRoot + Info "Repository root: $repoRoot" + + # Detect or validate CLI + if ($CLIType -eq 'auto') { + $cli = Get-AvailableCLI + if (-not $cli) { + throw "No AI CLI found. 
Please install Claude Code CLI or GitHub Copilot CLI extension." + } + $CLIType = $cli.Type + Info "Auto-detected CLI: $($cli.Name)" + } + + # Load feedback context if provided + $feedbackContext = $null + if ($FeedbackFile -and (Test-Path $FeedbackFile)) { + $feedbackContext = Get-Content $FeedbackFile -Raw + Info "Loaded feedback from: $FeedbackFile" + } + elseif ($FeedbackFile) { + Warn "Feedback file not found: $FeedbackFile (proceeding without feedback)" + } + + # Determine issue list: explicit IssueNumber(s) take priority over label query + if ($IssueNumber -gt 0) { + Info "`nUsing single issue: #$IssueNumber" + $issues = @(@{ number = $IssueNumber }) + } + elseif ($IssueNumbers -and $IssueNumbers.Count -gt 0) { + Info "`nUsing explicit issue list: $($IssueNumbers -join ', ')" + $issues = $IssueNumbers | ForEach-Object { @{ number = $_ } } + } + else { + # Query issues from GitHub + Info "`nQuerying issues with filters:" + Info " Labels: $(if ($Labels) { $Labels } else { '(none)' })" + Info " State: $State" + Info " Sort: $Sort $Order" + Info " Limit: $Limit" + + $issues = Get-GitHubIssues -Labels $Labels -State $State -Sort $Sort -Order $Order -Limit $Limit -Repository $Repository + } + + if ($issues.Count -eq 0) { + Warn "No issues found matching the criteria." + return + } + + Info "`nFound $($issues.Count) issues" + + # Filter out existing reviews if requested + if ($SkipExisting) { + $originalCount = $issues.Count + $issues = $issues | Where-Object { + $result = Get-IssueReviewResult -IssueNumber $_.number -RepoRoot $repoRoot + -not ($result.HasOverview -and $result.HasImplementationPlan) + } + $skipped = $originalCount - $issues.Count + if ($skipped -gt 0) { + Info "Skipping $skipped issues with existing reviews" + } + } + + if ($issues.Count -eq 0) { + Warn "All issues already have reviews. Nothing to do." 
+ return + } + + # Display issue list + Info "`nIssues to review:" + Info ("-" * 80) + foreach ($issue in $issues) { + $labels = ($issue.labels | ForEach-Object { $_.name }) -join ', ' + $reactions = if ($issue.reactions) { $issue.reactions.totalCount } else { 0 } + Info ("#{0,-6} {1,-50} [👍{2}] [{3}]" -f $issue.number, ($issue.title.Substring(0, [Math]::Min(50, $issue.title.Length))), $reactions, $labels) + } + Info ("-" * 80) + + if ($DryRun) { + Warn "`nDry run mode - no reviews started." + Info "Would review $($issues.Count) issues with CLI: $CLIType" + return + } + + # Confirm before proceeding (skip if -Force) + if (-not $Force) { + $confirm = Read-Host "`nProceed with reviewing $($issues.Count) issues using $CLIType? (y/N)" + if ($confirm -notmatch '^[yY]') { + Info "Cancelled." + return + } + } else { + Info "`nProceeding with $($issues.Count) issues (Force mode)" + } + + # Create output directory + $genFiles = Get-GeneratedFilesPath -RepoRoot $repoRoot + Ensure-DirectoryExists -Path (Join-Path $genFiles 'issueReview') + + # Start parallel reviews + Info "`nStarting bulk review..." 
+ Info " Max retries: $MaxRetryCount (delay: ${RetryDelaySeconds}s)" + $startTime = Get-Date + + $results = Start-ParallelIssueReviews ` + -Issues $issues ` + -MaxConcurrent $MaxConcurrent ` + -CLIType $CLIType ` + -RepoRoot $repoRoot ` + -TimeoutMinutes $TimeoutMinutes ` + -MaxRetryCount $MaxRetryCount ` + -RetryDelaySeconds $RetryDelaySeconds ` + -FeedbackContext $feedbackContext ` + -Model $Model + + $duration = (Get-Date) - $startTime + + # Summary + Info "`n" + ("=" * 80) + Info "BULK REVIEW COMPLETE" + Info ("=" * 80) + Info "Total issues: $($results.Total)" + Success "Succeeded: $($results.Succeeded.Count)" + if ($results.Failed.Count -gt 0) { + Err "Failed: $($results.Failed.Count)" + Err "Failed issues: $($results.Failed -join ', ')" + Info "" + Info "Failed Issue Details:" + Info ("-" * 40) + foreach ($failedItem in $results.FailedDetails) { + Err " #$($failedItem.IssueNumber) (attempts: $($failedItem.Attempts)):" + $errorLines = ($failedItem.Error -split "`n" | Select-Object -First 5) -join "`n " + Err " $errorLines" + } + Info ("-" * 40) + } + Info "Duration: $($duration.ToString('hh\:mm\:ss'))" + Info "Output: $genFiles/issueReview/" + Info ("=" * 80) + + # Write signal file for each issue processed + foreach ($issueNum in $results.Succeeded) { + $signalPath = Join-Path $genFiles "issueReview/$issueNum/.signal" + @{ + status = "success" + issueNumber = $issueNum + timestamp = (Get-Date).ToString("o") + outputs = @("overview.md", "implementation-plan.md") + } | ConvertTo-Json | Set-Content $signalPath -Force + Info "Signal: $signalPath" + } + foreach ($issueNum in $results.Failed) { + $signalPath = Join-Path $genFiles "issueReview/$issueNum/.signal" + $failDetail = $results.FailedDetails | Where-Object { $_.IssueNumber -eq $issueNum } + @{ + status = "failure" + issueNumber = $issueNum + timestamp = (Get-Date).ToString("o") + error = $failDetail.Error + } | ConvertTo-Json | Set-Content $signalPath -Force + } + + # Return results for pipeline + return 
$results +} +catch { + Err "Error: $($_.Exception.Message)" + exit 1 +} +#endregion diff --git a/.github/skills/issue-to-pr-cycle/LICENSE.txt b/.github/skills/issue-to-pr-cycle/LICENSE.txt new file mode 100644 index 000000000000..22aed37e650b --- /dev/null +++ b/.github/skills/issue-to-pr-cycle/LICENSE.txt @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) Microsoft Corporation. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/.github/skills/issue-to-pr-cycle/SKILL.md b/.github/skills/issue-to-pr-cycle/SKILL.md new file mode 100644 index 000000000000..0dba82968ad6 --- /dev/null +++ b/.github/skills/issue-to-pr-cycle/SKILL.md @@ -0,0 +1,287 @@ +--- +name: issue-to-pr-cycle +description: End-to-end orchestration from issue analysis to PR creation and review. This skill is the ORCHESTRATION BRAIN that invokes other skills via CLI and performs VS Code MCP operations directly. 
+license: Complete terms in LICENSE.txt +--- + +# Issue-to-PR Full Cycle Skill + +**ORCHESTRATION BRAIN** - coordinates other skills and performs VS Code MCP operations. + +## Skill Contents + +``` +.github/skills/issue-to-pr-cycle/ +├── SKILL.md # This file (orchestration brain) +├── LICENSE.txt # MIT License +└── scripts/ + ├── Get-CycleStatus.ps1 # Check status of issues/PRs + ├── IssueReviewLib.ps1 # Shared helpers + └── Start-FullIssueCycle.ps1 # Legacy script (phases A-C) +``` + +**Orchestrates these skills:** +| Skill | Purpose | +|-------|---------| +| `issue-review` | Analyze issues, generate implementation plans | +| `issue-review-review` | Validate review quality, loop until score ≥ 90 | +| `issue-fix` | Create worktrees, apply fixes, create PRs | +| `pr-review` | Comprehensive PR review (13 steps) | +| `pr-fix` | Fix review comments, resolve threads | + +## Prerequisites + +- GitHub CLI (`gh`) installed and authenticated +- Copilot CLI or Claude CLI installed +- PowerShell 7+ +- VS Code with MCP tools (for write operations) + +## Required Variables + +| Variable | Description | Example | +|----------|-------------|---------| +| `{{IssueNumbers}}` | Issue numbers to process | `45363, 45364` | +| (or) `{{PRNumbers}}` | PR numbers for review/fix loop | `45365, 45366` | + +## How This Skill Works + +The orchestrator: +1. **Invokes skills via CLI** - kicks off `copilot` CLI (not `gh copilot`) to run each skill +2. **Runs in parallel** - use PowerShell 7 `ForEach-Object -Parallel` in SINGLE terminal +3. **Waits for signals** - polls for `.signal` files indicating completion +4. **Performs VS Code MCP directly** - for operations that require write access (request reviewer, resolve threads) + +## Quality Gates (CRITICAL) + +**Every PR must pass these quality checks before creation:** + +1. **Real Implementation** - NO placeholder/stub code + - Files must contain actual working code + - Empty classes like `class FixXXX { }` are FORBIDDEN + +2. 
**Proper PR Title** - Follow Conventional Commits + - Use `.github/prompts/create-commit-title.prompt.md` + - Format: `feat(module): description` or `fix(module): description` + - NEVER use generic titles like "fix: address issue #12345" + +3. **Full PR Description** - Based on actual diff + - Use `.github/prompts/create-pr-summary.prompt.md` + - Run `git diff main...HEAD` to analyze changes + - Fill PR template with real information + +4. **Build Verification** - Code must compile + - Run `tools/build/build.cmd` in worktree + - Exit code 0 = success + +### Checking Worktree Quality + +```powershell +# Check if worktree has real implementation (not stubs) +$files = git diff main --name-only +foreach ($file in $files) { + if ($file -match "src/common/fixes/Fix\d+\.cs") { + Write-Error "STUB FILE DETECTED: $file - Need real implementation" + } +} +``` + +## Signal Files + +Each skill produces a `.signal` file when complete: + +| Skill | Signal Location | Status Values | +|-------|-----------------|---------------| +| `issue-review` | `Generated Files/issueReview/<issue>/.signal` | `success`, `failure` | +| `issue-review-review` | `Generated Files/issueReviewReview/<issue>/.signal` | `success`, `failure` | +| `issue-fix` | `Generated Files/issueFix/<issue>/.signal` | `success`, `failure` | +| `pr-review` | `Generated Files/prReview/<pr>/.signal` | `success`, `failure` | +| `pr-fix` | `Generated Files/prFix/<pr>/.signal` | `success`, `partial`, `failure` | + +Signal format: +```json +{ + "status": "success", + "issueNumber": 45363, + "timestamp": "2026-02-04T10:05:23Z" +} +``` + +## Architecture + +``` +┌─────────────────────────────────────────────────────────────────────────────┐ +│ ORCHESTRATOR (this skill, VS Code agent) │ +│ │ +│ ┌─────────────┐ ┌──────────────────┐ ┌─────────────┐ │ +│ │ issue-review│◄─┤issue-review- │ │ issue-fix │ │ +│ │ (CLI) │ │review (CLI) │ │ (CLI) │ │ +│ └──────┬──────┘ │ loop until ≥90 │ └──────┬──────┘ │ +│ │ └────────┬─────────┘ │ │ +│ 
└────────►─────────┘ │ │ +│ │ │ +│ ┌─────────────┐ ┌─────────────┐ │ │ +│ │ pr-review │ │ pr-fix │ │ │ +│ │ (CLI) │ │ (CLI) │ │ │ +│ └──────┬──────┘ └──────┬──────┘ │ │ +│ │ │ │ │ +│ ▼ ▼ ▼ │ +│ ┌─────────────────────────────────────────────────────────────────────┐ │ +│ │ Signal Files (Generated Files/*/.signal) │ │ +│ └─────────────────────────────────────────────────────────────────────┘ │ +│ │ +│ VS Code MCP Operations (orchestrator executes directly): │ +│ - mcp_github_request_copilot_review │ +│ - gh api graphql (resolve threads) │ +│ - Post review comments │ +└─────────────────────────────────────────────────────────────────────────────┘ +``` + +## Workflow + +### Phase A: Issue Review + +Use the orchestration script instead of inline commands: + +```powershell +.github/skills/issue-to-pr-cycle/scripts/Start-FullIssueCycle.ps1 -IssueNumbers 45363,45364 +``` + +### Phase A2: Review-Review Loop (Quality Gate) + +After issue-review completes, validate the review quality. Loop until quality score ≥ 90 or max iterations reached. 
+
+**A2.1: Run review-review**
+```powershell
+.github/skills/issue-review-review/scripts/Start-IssueReviewReviewParallel.ps1 -IssueNumbers 45363,45364 -CLIType copilot -ThrottleLimit 5 -Force
+```
+
+**A2.2: Check signals**
+```powershell
+# For each issue, check the review-review signal
+$signal = Get-Content "Generated Files/issueReviewReview/45363/.signal" | ConvertFrom-Json
+# If signal.needsReReview is true (qualityScore < 90), re-run issue-review with feedback
+```
+
+**A2.3: Re-run issue-review with feedback (if needed)**
+```powershell
+# Re-run issue-review, passing the reviewTheReview.md feedback file
+.github/skills/issue-review/scripts/Start-BulkIssueReview.ps1 -IssueNumber 45363 -FeedbackFile "Generated Files/issueReviewReview/45363/reviewTheReview.md" -Force
+```
+
+**A2.4: Loop** — Go back to A2.1 until:
+- All issues have quality score ≥ 90, OR
+- Maximum 3 iterations reached per issue
+
+### Phase B: Issue Fix
+
+Use the parallel runner script:
+
+```powershell
+.github/skills/issue-fix/scripts/Start-IssueFixParallel.ps1 -IssueNumbers 45363,45364 -CLIType copilot -ThrottleLimit 5 -Force
+```
+
+### Phase C: PR Review
+
+Use the pr-review script for each PR, or run the full cycle script to orchestrate:
+
+```powershell
+.github/skills/pr-review/scripts/Start-PRReviewWorkflow.ps1 -PRNumber 45392
+```
+
+### Phase D: Review/Fix Loop (VS Code Agent Orchestrated)
+
+This phase requires the VS Code agent to:
+
+**D1: Request Copilot review (VS Code MCP)**
+```
+mcp_github_request_copilot_review:
+  owner: microsoft
+  repo: PowerToys
+  pullNumber: {{PRNumber}}
+```
+
+**D2: Invoke pr-review skill (CLI, parallel)**
+```powershell
+copilot -p "Run skill pr-review for PR #{{PRNumber}}"
+# Wait for: Generated Files/prReview/{{PRNumber}}/.signal
+```
+
+**D3: Check results**
+- Read `Generated Files/prReview/{{PRNumber}}/00-OVERVIEW.md`
+- Query unresolved threads via GraphQL
+
+**D4: Post comments (VS Code MCP) - if medium+ severity**
+
+**D5: Invoke
pr-fix skill in WORKTREE (CLI)**
+```powershell
+# Find worktree for this PR's branch
+$branch = (gh pr view {{PRNumber}} --json headRefName -q .headRefName)
+$worktree = git worktree list --porcelain | Select-String "worktree.*$branch" | ...
+
+# Run fix in worktree
+cd $worktreePath
+copilot -p "Run skill pr-fix for PR #{{PRNumber}}"
+# Wait for: Generated Files/prFix/{{PRNumber}}/.signal
+```
+
+**D6: Resolve threads (VS Code MCP)**
+```powershell
+# Get thread IDs
+gh api graphql -f query='query { repository(owner:"microsoft",name:"PowerToys") {
+  pullRequest(number:{{PRNumber}}) { reviewThreads(first:50) { nodes { id isResolved } } }
+} }'
+
+# Resolve each (VS Code agent executes this)
+gh api graphql -f query='mutation { resolveReviewThread(input:{threadId:"{{ID}}"}) { thread { isResolved } } }'
+```
+
+**D7: Loop**
+- If unresolved issues remain → go to D2
+- If all clear → done
+
+## Timeout Handling
+
+Default timeout: 10 minutes per skill invocation.
+
+If no signal file appears within timeout:
+1. Check if the skill process is still running
+2. If hung, terminate and mark as `timeout`
+3.
Log failure and continue with other items + +## Parallel Execution (CRITICAL) + +**DO NOT spawn separate terminals for each operation.** Use the dedicated scripts to run parallel work from a single terminal: + +```powershell +# Issue fixes in parallel +.github/skills/issue-fix/scripts/Start-IssueFixParallel.ps1 -IssueNumbers 28726,13336,27507,3054,37800 -CLIType copilot -Model gpt-5.2-codex -ThrottleLimit 5 -Force + +# PR fixes in parallel +.github/skills/pr-fix/scripts/Start-PRFixParallel.ps1 -PRNumbers 45256,45257,45285,45286 -CLIType copilot -Model gpt-5.2-codex -ThrottleLimit 3 -Force +``` + +## Worktree Mapping + +The orchestrator must track which worktree belongs to which issue/PR: + +```powershell +# Get all worktrees +$worktrees = git worktree list --porcelain | Select-String "worktree|branch" | + ForEach-Object { $_.Line } + +# Parse into mapping +# Q:\PowerToys-ab12 → issue/44044 +# Q:\PowerToys-cd34 → issue/32950 + +# Find worktree for issue +$issueNum = 45363 +$worktreeLine = git worktree list | Select-String "issue/$issueNum" +$worktreePath = ($worktreeLine -split '\s+')[0] +``` + +## When to Use This Skill + +- Process multiple issues end-to-end +- Automate the full issue → PR → review → fix cycle +- Batch process high-confidence issues +- Run continuous review/fix loops until clean diff --git a/.github/skills/issue-to-pr-cycle/scripts/Get-CycleStatus.ps1 b/.github/skills/issue-to-pr-cycle/scripts/Get-CycleStatus.ps1 new file mode 100644 index 000000000000..d228c6f5f1a9 --- /dev/null +++ b/.github/skills/issue-to-pr-cycle/scripts/Get-CycleStatus.ps1 @@ -0,0 +1,370 @@ +<# +.SYNOPSIS + Get the current status of issues/PRs in the issue-to-PR cycle. 
<#
.SYNOPSIS
    Get the current status of issues/PRs in the issue-to-PR cycle.

.DESCRIPTION
    Checks the status of:
    - Issue review completion (has overview.md + implementation-plan.md)
    - Issue fix completion (has worktree + commits)
    - PR creation status (has open PR)
    - PR review status (has review files)
    - PR active comments count

.PARAMETER IssueNumbers
    Array of issue numbers to check status for.

.PARAMETER PRNumbers
    Array of PR numbers to check status for.

.PARAMETER CheckAll
    Check all issues with review data and all open PRs with issue/* branches.

.PARAMETER JsonOutput
    Emit the collected status object as JSON instead of formatted tables.

.EXAMPLE
    ./Get-CycleStatus.ps1 -IssueNumbers 44044, 32950

.EXAMPLE
    ./Get-CycleStatus.ps1 -PRNumbers 45234, 45235

.EXAMPLE
    ./Get-CycleStatus.ps1 -CheckAll
#>

[CmdletBinding()]
param(
    [int[]]$IssueNumbers = @(),
    [int[]]$PRNumbers = @(),
    [switch]$CheckAll,
    [switch]$JsonOutput
)

$scriptDir = Split-Path -Parent $MyInvocation.MyCommand.Path
. (Join-Path $scriptDir 'IssueReviewLib.ps1')

$repoRoot = Get-RepoRoot
$genFiles = Get-GeneratedFilesPath -RepoRoot $repoRoot

# WorktreeLib is optional: worktree-related checks degrade gracefully when it
# is missing (see Test-WorktreeSupport below).
$worktreeLib = Join-Path $repoRoot 'tools/build/WorktreeLib.ps1'
if (Test-Path $worktreeLib) {
    . $worktreeLib
}

function Test-WorktreeSupport {
    # True when WorktreeLib.ps1 was dot-sourced and Get-WorktreeEntries exists.
    # The original called Get-WorktreeEntries unconditionally, which fails with
    # a command-not-found error when the library is absent.
    return [bool](Get-Command Get-WorktreeEntries -ErrorAction SilentlyContinue)
}

function Get-IssueStatus {
    <#
    .SYNOPSIS
        Collect review/fix/PR state for a single issue into a hashtable.
    #>
    param([int]$IssueNumber)

    $status = @{
        IssueNumber = $IssueNumber
        HasReview = $false
        HasImplementationPlan = $false
        FeasibilityScore = 0
        ClarityScore = 0
        EffortDays = 0
        HasWorktree = $false
        WorktreePath = $null
        HasCommits = $false
        CommitCount = 0
        HasPR = $false
        PRNumber = 0
        PRState = $null
        PRUrl = $null
        ReviewSignalStatus = $null
        ReviewSignalTimestamp = $null
        ReviewReviewSignalStatus = $null
        ReviewReviewQualityScore = 0
        ReviewReviewIteration = 0
        ReviewReviewNeedsReReview = $false
        FixSignalStatus = $null
        FixSignalTimestamp = $null
    }

    # --- Review artifacts (overview + implementation plan) ---
    $reviewDir = Join-Path $genFiles "issueReview/$IssueNumber"
    $overviewPath = Join-Path $reviewDir 'overview.md'
    $implPlanPath = Join-Path $reviewDir 'implementation-plan.md'

    if (Test-Path $overviewPath) {
        $status.HasReview = $true
        $overview = Get-Content $overviewPath -Raw

        if ($overview -match 'Technical Feasibility[^\d]*(\d+)/100') {
            $status.FeasibilityScore = [int]$Matches[1]
        }
        if ($overview -match 'Requirement Clarity[^\d]*(\d+)/100') {
            $status.ClarityScore = [int]$Matches[1]
        }
        # "0.5-1 day" style ranges: capture the upper bound; lone values fall back to 1.
        if ($overview -match 'Effort Estimate[^|]*\|\s*[\d.]+(?:-(\d+))?\s*days?') {
            $status.EffortDays = if ($Matches[1]) { [int]$Matches[1] } else { 1 }
        }
    }

    if (Test-Path $implPlanPath) {
        $status.HasImplementationPlan = $true
    }

    # --- Review signal file ---
    $reviewSignalPath = Join-Path $reviewDir '.signal'
    if (Test-Path $reviewSignalPath) {
        try {
            $reviewSignal = Get-Content $reviewSignalPath -Raw | ConvertFrom-Json
            $status.ReviewSignalStatus = $reviewSignal.status
            $status.ReviewSignalTimestamp = $reviewSignal.timestamp
        }
        catch {}  # malformed signal files are treated as "no signal"
    }

    # --- Review-review signal file ---
    $reviewReviewSignalPath = Join-Path $genFiles "issueReviewReview/$IssueNumber/.signal"
    if (Test-Path $reviewReviewSignalPath) {
        try {
            $rrSignal = Get-Content $reviewReviewSignalPath -Raw | ConvertFrom-Json
            $status.ReviewReviewSignalStatus = $rrSignal.status
            $status.ReviewReviewQualityScore = [int]$rrSignal.qualityScore
            $status.ReviewReviewIteration = [int]$rrSignal.iteration
            $status.ReviewReviewNeedsReReview = [bool]$rrSignal.needsReReview
        }
        catch {}
    }

    # --- Worktree + commit status (only when WorktreeLib is available) ---
    if (Test-WorktreeSupport) {
        $worktrees = @(Get-WorktreeEntries | Where-Object { $_.Branch -like "issue/$IssueNumber*" })
        if ($worktrees.Count -gt 0) {
            $status.HasWorktree = $true
            $status.WorktreePath = $worktrees[0].Path

            Push-Location $status.WorktreePath
            try {
                $commits = git log --oneline "main..HEAD" 2>$null
                if ($commits) {
                    $status.HasCommits = $true
                    $status.CommitCount = @($commits).Count
                }
            }
            finally {
                Pop-Location
            }
        }
    }

    # --- Fix signal file ---
    $fixSignalPath = Join-Path $genFiles "issueFix/$IssueNumber/.signal"
    if (Test-Path $fixSignalPath) {
        try {
            $fixSignal = Get-Content $fixSignalPath -Raw | ConvertFrom-Json
            $status.FixSignalStatus = $fixSignal.status
            $status.FixSignalTimestamp = $fixSignal.timestamp
        }
        catch {}
    }

    # --- PR lookup: branch convention first, then issue-reference search ---
    # @() wrapping: ConvertFrom-Json yields a lone PSCustomObject (no .Count on
    # Windows PowerShell 5.1) when exactly one PR matches.
    $prs = @(gh pr list --head "issue/$IssueNumber" --state all --json number,url,state 2>$null | ConvertFrom-Json)
    if ($prs.Count -eq 0) {
        $prs = @(gh pr list --search "fixes #$IssueNumber OR closes #$IssueNumber" --state all --json number,url,state --limit 1 2>$null | ConvertFrom-Json)
    }
    if ($prs.Count -gt 0) {
        $status.HasPR = $true
        $status.PRNumber = $prs[0].number
        $status.PRState = $prs[0].state
        $status.PRUrl = $prs[0].url
    }

    return $status
}

function Get-PRStatus {
    <#
    .SYNOPSIS
        Collect review/fix/comment state for a single PR into a hashtable.
    #>
    param([int]$PRNumber)

    $status = @{
        PRNumber = $PRNumber
        State = $null
        IssueNumber = 0
        Branch = $null
        HasReviewFiles = $false
        ReviewStepCount = 0
        HighSeverityCount = 0
        MediumSeverityCount = 0
        ActiveCommentCount = 0
        UnresolvedThreadCount = 0
        CopilotReviewRequested = $false
        ReviewSignalStatus = $null
        ReviewSignalTimestamp = $null
        FixSignalStatus = $null
        FixSignalTimestamp = $null
    }

    $prInfo = gh pr view $PRNumber --json state,headRefName,number 2>$null | ConvertFrom-Json
    if (-not $prInfo) {
        # PR not found / gh failed: return the zeroed skeleton so callers can
        # still render a row for it.
        return $status
    }

    $status.State = $prInfo.state
    $status.Branch = $prInfo.headRefName

    # Branch convention "issue/<number>[-suffix]" links a PR back to its issue.
    if ($status.Branch -match 'issue/(\d+)') {
        $status.IssueNumber = [int]$Matches[1]
    }

    # --- Local review files (NN-*.md step files) ---
    $reviewDir = Join-Path $genFiles "prReview/$PRNumber"
    if (Test-Path $reviewDir) {
        $status.HasReviewFiles = $true
        $stepFiles = @(Get-ChildItem -Path $reviewDir -Filter "*.md" -ErrorAction SilentlyContinue |
            Where-Object { $_.Name -match '^\d{2}-' })
        $status.ReviewStepCount = $stepFiles.Count

        # Count both textual ("**Severity: high**") and emoji ("🔴 High") markers.
        foreach ($stepFile in $stepFiles) {
            $content = Get-Content $stepFile.FullName -Raw -ErrorAction SilentlyContinue
            if ($content) {
                $status.HighSeverityCount += ([regex]::Matches($content, '\*\*Severity:\s*high\*\*', 'IgnoreCase')).Count
                $status.HighSeverityCount += ([regex]::Matches($content, '🔴\s*High', 'IgnoreCase')).Count
                $status.MediumSeverityCount += ([regex]::Matches($content, '\*\*Severity:\s*medium\*\*', 'IgnoreCase')).Count
                $status.MediumSeverityCount += ([regex]::Matches($content, '🟡\s*Medium', 'IgnoreCase')).Count
            }
        }
    }

    # --- Review signal file ---
    $reviewSignalPath = Join-Path $reviewDir '.signal'
    if (Test-Path $reviewSignalPath) {
        try {
            $reviewSignal = Get-Content $reviewSignalPath -Raw | ConvertFrom-Json
            $status.ReviewSignalStatus = $reviewSignal.status
            $status.ReviewSignalTimestamp = $reviewSignal.timestamp
        }
        catch {}
    }

    # --- Fix signal file ---
    $fixSignalPath = Join-Path $genFiles "prFix/$PRNumber/.signal"
    if (Test-Path $fixSignalPath) {
        try {
            $fixSignal = Get-Content $fixSignalPath -Raw | ConvertFrom-Json
            $status.FixSignalStatus = $fixSignal.status
            $status.FixSignalTimestamp = $fixSignal.timestamp
        }
        catch {}
    }

    # --- Active (top-level, non-reply) review comments ---
    try {
        $commentCount = gh api "repos/microsoft/PowerToys/pulls/$PRNumber/comments" --jq '[.[] | select(.in_reply_to_id == null)] | length' 2>$null
        $status.ActiveCommentCount = if ($commentCount) { [int]$commentCount } else { 0 }
    }
    catch {
        $status.ActiveCommentCount = 0
    }

    # --- Unresolved review threads (GraphQL; REST has no resolution state) ---
    try {
        $threads = gh api graphql -f query="query { repository(owner: `"microsoft`", name: `"PowerToys`") { pullRequest(number: $PRNumber) { reviewThreads(first: 100) { nodes { isResolved } } } } }" --jq '.data.repository.pullRequest.reviewThreads.nodes | map(select(.isResolved == false)) | length' 2>$null
        $status.UnresolvedThreadCount = if ($threads) { [int]$threads } else { 0 }
    }
    catch {
        $status.UnresolvedThreadCount = 0
    }

    # --- Was a Copilot review requested? ---
    try {
        $reviewers = gh pr view $PRNumber --json reviewRequests --jq '.reviewRequests[].login' 2>$null
        if ($reviewers -contains 'copilot' -or $reviewers -contains 'github-copilot') {
            $status.CopilotReviewRequested = $true
        }
    }
    catch {}

    return $status
}

# ---------------------------------------------------------------------------
# Main execution
# ---------------------------------------------------------------------------
$results = @{
    Issues = @()
    PRs = @()
    Timestamp = Get-Date -Format 'yyyy-MM-dd HH:mm:ss'
}

$issuesToCheck = @()
$prsToCheck = @()

if ($CheckAll) {
    # All issues that have review output directories (numeric names only).
    $reviewDir = Join-Path $genFiles 'issueReview'
    if (Test-Path $reviewDir) {
        $issuesToCheck = Get-ChildItem -Path $reviewDir -Directory |
            Where-Object { $_.Name -match '^\d+$' } |
            ForEach-Object { [int]$_.Name }
    }

    # All open PRs that follow the issue/* branch convention.
    $openPRs = @(gh pr list --state open --json number,headRefName 2>$null | ConvertFrom-Json |
        Where-Object { $_.headRefName -like 'issue/*' })
    $prsToCheck = @($openPRs | ForEach-Object { $_.number })
}
else {
    $issuesToCheck = $IssueNumbers
    $prsToCheck = $PRNumbers
}

foreach ($issueNum in $issuesToCheck) {
    $results.Issues += Get-IssueStatus -IssueNumber $issueNum
}

foreach ($prNum in $prsToCheck) {
    $results.PRs += Get-PRStatus -PRNumber $prNum
}

# ---------------------------------------------------------------------------
# Output
# ---------------------------------------------------------------------------
if ($JsonOutput) {
    $results | ConvertTo-Json -Depth 5
    return
}
else {
    if ($results.Issues.Count -gt 0) {
        Write-Host "`n=== ISSUE STATUS ===" -ForegroundColor Cyan
        Write-Host ("-" * 120)
        Write-Host ("{0,-8} {1,-8} {2,-8} {3,-5} {4,-5} {5,-8} {6,-8} {7,-8} {8,-8} {9,-8} {10,-8}" -f "Issue", "Review", "Plan", "Feas", "Clar", "RR Scr", "Worktree", "PR", "RevSig", "RRSig", "FixSig")
        Write-Host ("-" * 120)
        foreach ($issue in $results.Issues | Sort-Object IssueNumber) {
            $reviewMark = if ($issue.HasReview) { "✓" } else { "-" }
            $planMark = if ($issue.HasImplementationPlan) { "✓" } else { "-" }
            $wtMark = if ($issue.HasWorktree) { "✓" } else { "-" }
            $prMark = if ($issue.HasPR) { "#$($issue.PRNumber) ($($issue.PRState))" } else { "-" }
            $reviewSignalMark = if ($issue.ReviewSignalStatus) { $issue.ReviewSignalStatus } else { "-" }
            $fixSignalMark = if ($issue.FixSignalStatus) { $issue.FixSignalStatus } else { "-" }
            $rrScoreMark = if ($issue.ReviewReviewSignalStatus) { "$($issue.ReviewReviewQualityScore)" } else { "-" }
            $rrSignalMark = if ($issue.ReviewReviewSignalStatus) {
                if ($issue.ReviewReviewNeedsReReview) { "redo" } else { "pass" }
            } else { "-" }

            Write-Host ("{0,-8} {1,-8} {2,-8} {3,-5} {4,-5} {5,-8} {6,-8} {7,-8} {8,-8} {9,-8} {10,-8}" -f
                "#$($issue.IssueNumber)", $reviewMark, $planMark, $issue.FeasibilityScore, $issue.ClarityScore, $rrScoreMark, $wtMark, $prMark, $reviewSignalMark, $rrSignalMark, $fixSignalMark)
        }
    }

    if ($results.PRs.Count -gt 0) {
        Write-Host "`n=== PR STATUS ===" -ForegroundColor Cyan
        Write-Host ("-" * 120)
        Write-Host ("{0,-8} {1,-10} {2,-10} {3,-8} {4,-8} {5,-10} {6,-12} {7,-10} {8,-8} {9,-8}" -f "PR", "State", "Issue", "Reviews", "High", "Medium", "Comments", "Unresolved", "RevSig", "FixSig")
        Write-Host ("-" * 120)
        foreach ($pr in $results.PRs | Sort-Object PRNumber) {
            $reviewMark = if ($pr.HasReviewFiles) { "$($pr.ReviewStepCount) steps" } else { "-" }
            $issueMark = if ($pr.IssueNumber -gt 0) { "#$($pr.IssueNumber)" } else { "-" }
            $reviewSignalMark = if ($pr.ReviewSignalStatus) { $pr.ReviewSignalStatus } else { "-" }
            $fixSignalMark = if ($pr.FixSignalStatus) { $pr.FixSignalStatus } else { "-" }

            Write-Host ("{0,-8} {1,-10} {2,-10} {3,-8} {4,-8} {5,-10} {6,-12} {7,-10} {8,-8} {9,-8}" -f
                "#$($pr.PRNumber)", $pr.State, $issueMark, $reviewMark, $pr.HighSeverityCount, $pr.MediumSeverityCount, $pr.ActiveCommentCount, $pr.UnresolvedThreadCount, $reviewSignalMark, $fixSignalMark)
        }
    }

    Write-Host "`nTimestamp: $($results.Timestamp)" -ForegroundColor Gray
}

return $results
} + return (Resolve-Path $root).Path +} + +function Get-GeneratedFilesPath { + param([string]$RepoRoot) + return Join-Path $RepoRoot 'Generated Files' +} +#endregion + +#region Issue Review Results Helpers +function Get-HighConfidenceIssues { + <# + .SYNOPSIS + Find issues with high confidence for auto-fix based on review results. + .PARAMETER RepoRoot + Repository root path. + .PARAMETER MinFeasibilityScore + Minimum Technical Feasibility score (0-100). Default: 70. + .PARAMETER MinClarityScore + Minimum Requirement Clarity score (0-100). Default: 60. + .PARAMETER MaxEffortDays + Maximum effort estimate in days. Default: 2 (S = Small). + .PARAMETER FilterIssueNumbers + Optional array of issue numbers to filter to. If specified, only these issues are considered. + #> + param( + [Parameter(Mandatory)] + [string]$RepoRoot, + [int]$MinFeasibilityScore = 70, + [int]$MinClarityScore = 60, + [int]$MaxEffortDays = 2, + [int[]]$FilterIssueNumbers = @() + ) + + $genFiles = Get-GeneratedFilesPath -RepoRoot $RepoRoot + $reviewDir = Join-Path $genFiles 'issueReview' + + if (-not (Test-Path $reviewDir)) { + return @() + } + + $highConfidence = @() + + Get-ChildItem -Path $reviewDir -Directory | ForEach-Object { + $issueNum = [int]$_.Name + + # Skip if filter is specified and this issue is not in the filter list + if ($FilterIssueNumbers.Count -gt 0 -and $issueNum -notin $FilterIssueNumbers) { + return + } + + $overviewPath = Join-Path $_.FullName 'overview.md' + $implPlanPath = Join-Path $_.FullName 'implementation-plan.md' + + if (-not (Test-Path $overviewPath) -or -not (Test-Path $implPlanPath)) { + return + } + + # Parse overview.md to extract scores + $overview = Get-Content $overviewPath -Raw + + # Extract scores using regex (looking for score table or inline scores) + $feasibility = 0 + $clarity = 0 + $effortDays = 999 + + # Try to extract from At-a-Glance Score Table + if ($overview -match 'Technical Feasibility[^\d]*(\d+)/100') { + $feasibility = [int]$Matches[1] + } + 
if ($overview -match 'Requirement Clarity[^\d]*(\d+)/100') { + $clarity = [int]$Matches[1] + } + # Match effort formats like "0.5-1 day", "1-2 days", "2-3 days" - extract the upper bound + if ($overview -match 'Effort Estimate[^|]*\|\s*[\d.]+(?:-(\d+))?\s*days?') { + if ($Matches[1]) { + $effortDays = [int]$Matches[1] + } elseif ($overview -match 'Effort Estimate[^|]*\|\s*(\d+)\s*days?') { + $effortDays = [int]$Matches[1] + } + } + # Also check for XS/S sizing in the table + if ($overview -match 'Effort Estimate[^|]*\|[^|]*\|\s*(XS|S)\b') { + if ($Matches[1] -eq 'XS') { $effortDays = 1 } else { $effortDays = 2 } + } elseif ($overview -match 'Effort Estimate[^|]*\|[^|]*\(XS\)') { + $effortDays = 1 + } elseif ($overview -match 'Effort Estimate[^|]*\|[^|]*\(S\)') { + $effortDays = 2 + } + + if ($feasibility -ge $MinFeasibilityScore -and + $clarity -ge $MinClarityScore -and + $effortDays -le $MaxEffortDays) { + + $highConfidence += @{ + IssueNumber = $issueNum + FeasibilityScore = $feasibility + ClarityScore = $clarity + EffortDays = $effortDays + OverviewPath = $overviewPath + ImplementationPlanPath = $implPlanPath + } + } + } + + return $highConfidence | Sort-Object -Property FeasibilityScore -Descending +} +#endregion diff --git a/.github/skills/issue-to-pr-cycle/scripts/Start-FullIssueCycle.ps1 b/.github/skills/issue-to-pr-cycle/scripts/Start-FullIssueCycle.ps1 new file mode 100644 index 000000000000..4c50b4812694 --- /dev/null +++ b/.github/skills/issue-to-pr-cycle/scripts/Start-FullIssueCycle.ps1 @@ -0,0 +1,679 @@ +<#! +.SYNOPSIS + Run the complete issue-to-PR cycle: fix issues, create PRs, review, and fix comments. + +.DESCRIPTION + Orchestrates the full workflow: + 1. Find high-confidence issues matching criteria + 2. Create worktrees and run auto-fix for each issue + 3. Commit changes and create PRs + 4. Run PR review workflow in a loop until no issues remain: + a. Review PR and post comments + b. Fix PR comments + c. 
<#!
.SYNOPSIS
    Run the complete issue-to-PR cycle: fix issues, create PRs, review, and fix comments.

.DESCRIPTION
    Orchestrates the full workflow:
    1. Find high-confidence issues matching criteria
    2. Create worktrees and run auto-fix for each issue
    3. Commit changes and create PRs
    4. Run PR review workflow in a loop until no issues remain:
       a. Review PR and post comments
       b. Fix PR comments
       c. Re-review to check for remaining issues
       d. Repeat until clean or max iterations reached

.PARAMETER MinFeasibilityScore
    Minimum Technical Feasibility score. Default: 70.

.PARAMETER MinClarityScore
    Minimum Requirement Clarity score. Default: 70.

.PARAMETER MaxEffortDays
    Maximum effort in days. Default: 10.

.PARAMETER MaxReviewIterations
    Maximum review/fix iterations per PR before giving up. Default: 3.

.PARAMETER ExcludeIssues
    Array of issue numbers to exclude (already processed).

.PARAMETER CLIType
    AI CLI to use: copilot or claude. Default: copilot.

.PARAMETER DryRun
    Show what would be done without executing.

.PARAMETER SkipExisting
    Skip issues that already have worktrees or PRs.

.EXAMPLE
    ./Start-FullIssueCycle.ps1 -MinFeasibilityScore 70 -MinClarityScore 70 -MaxEffortDays 10

.EXAMPLE
    ./Start-FullIssueCycle.ps1 -ExcludeIssues 44044,45029,32950,35703,44480 -DryRun
#>

[CmdletBinding()]
param(
    [string]$Labels = '',
    [int]$Limit = 500, # GitHub API max is 1000, default to 500 to get most issues
    [int]$MinFeasibilityScore = 70,
    [int]$MinClarityScore = 70,
    [int]$MaxEffortDays = 10,
    [int]$MaxReviewIterations = 3,
    [int[]]$ExcludeIssues = @(),
    [ValidateSet('copilot', 'claude')]
    [string]$CLIType = 'copilot',
    [int]$FixThrottleLimit = 5,
    [int]$PRThrottleLimit = 5,
    [int]$ReviewMaxConcurrent = 3,
    [ValidateSet('high', 'medium', 'low', 'info')]
    [string]$MinSeverityForLoop = 'medium',
    [switch]$DryRun,
    [switch]$SkipExisting,
    [switch]$SkipReview,
    [switch]$Force,
    [switch]$Help
)

# Handle -Help before any environment-dependent setup; the original dot-sourced
# libraries and called Get-RepoRoot first, so -Help failed outside a git repo.
if ($Help) {
    Get-Help $MyInvocation.MyCommand.Path -Full
    return
}

$scriptDir = Split-Path -Parent $MyInvocation.MyCommand.Path
$skillsDir = Split-Path -Parent (Split-Path -Parent $scriptDir) # <configRoot>/skills (e.g. .github/skills or .claude/skills)
. (Join-Path $scriptDir 'IssueReviewLib.ps1')

# Paths to other skills' scripts
$issueFixScript = Join-Path $skillsDir 'issue-fix/scripts/Start-IssueAutoFix.ps1'
$submitPRScript = Join-Path $skillsDir 'issue-fix/scripts/Submit-IssueFix.ps1'
$prReviewScript = Join-Path $skillsDir 'pr-review/scripts/Start-PRReviewWorkflow.ps1'
$prFixScript = Join-Path $skillsDir 'pr-fix/scripts/Start-PRFix.ps1'

$repoRoot = Get-RepoRoot
$worktreeLib = Join-Path $repoRoot 'tools/build/WorktreeLib.ps1'
if (Test-Path $worktreeLib) {
    . $worktreeLib
}

#region Helper Functions
function Get-ExistingIssuePRs {
    <#
    .SYNOPSIS
        Get ALL issues that already have PRs (open, closed, or merged) - checking GitHub directly.
    .OUTPUTS
        Hashtable keyed by issue number with PRNumber/PRUrl/Branch/State values.
    #>
    param(
        [int[]]$IssueNumbers
    )

    $existingPRs = @{}

    foreach ($issueNum in $IssueNumbers) {
        # First: PRs whose body references the issue (any state).
        # @() wrapping guards against single-object ConvertFrom-Json results.
        $prs = @(gh pr list --search "fixes #$issueNum OR closes #$issueNum OR resolves #$issueNum" --state all --json number,url,headRefName,state 2>$null | ConvertFrom-Json)
        if ($prs.Count -gt 0) {
            $existingPRs[$issueNum] = @{
                PRNumber = $prs[0].number
                PRUrl = $prs[0].url
                Branch = $prs[0].headRefName
                State = $prs[0].state
            }
            continue
        }

        # Second: PRs following the issue/<number>* branch convention (any state).
        $branchPrs = @(gh pr list --head "issue/$issueNum" --state all --json number,url,headRefName,state 2>$null | ConvertFrom-Json)
        if ($branchPrs.Count -eq 0) {
            # --head requires an exact match; fall back to a wildcard scan of all PRs.
            $branchPrs = @(gh pr list --state all --json number,url,headRefName,state 2>$null | ConvertFrom-Json | Where-Object { $_.headRefName -like "issue/$issueNum*" })
        }
        if ($branchPrs.Count -gt 0) {
            $existingPRs[$issueNum] = @{
                PRNumber = $branchPrs[0].number
                PRUrl = $branchPrs[0].url
                Branch = $branchPrs[0].headRefName
                State = $branchPrs[0].state
            }
        }
    }

    return $existingPRs
}

function Get-ExistingWorktrees {
    <#
    .SYNOPSIS
        Get issues that already have worktrees.
    .OUTPUTS
        Hashtable mapping issue number -> worktree path. Empty when WorktreeLib
        is unavailable (the original called Get-WorktreeEntries unconditionally).
    #>
    $existingWorktrees = @{}
    if (-not (Get-Command Get-WorktreeEntries -ErrorAction SilentlyContinue)) {
        return $existingWorktrees
    }

    $worktrees = Get-WorktreeEntries | Where-Object { $_.Branch -like 'issue/*' }
    foreach ($wt in $worktrees) {
        if ($wt.Branch -match 'issue/(\d+)') {
            $existingWorktrees[[int]$Matches[1]] = $wt.Path
        }
    }

    return $existingWorktrees
}

function Get-PRReviewIssueCount {
    <#
    .SYNOPSIS
        Count high/medium severity issues from the review overview file.
    .OUTPUTS
        -1 when no review output exists yet; otherwise the issue count at or
        above the requested severity. Callers MUST treat -1 as "unknown",
        not as "clean".
    #>
    param(
        [int]$PRNumber,
        [string]$MinSeverity = 'medium'
    )

    $overviewPath = Join-Path $repoRoot "Generated Files/prReview/$PRNumber/00-OVERVIEW.md"

    if (-not (Test-Path $overviewPath)) {
        return -1 # No review yet
    }

    $content = Get-Content $overviewPath -Raw

    $highCount = 0
    $mediumCount = 0

    # Overview carries an explicit high-severity total.
    if ($content -match 'High severity issues:\s*(\d+)') {
        $highCount = [int]$Matches[1]
    }

    # Medium severity is only recorded per step file; count the markers.
    $stepFiles = @(Get-ChildItem -Path (Split-Path $overviewPath) -Filter "*.md" | Where-Object { $_.Name -match '^\d{2}-' })
    foreach ($stepFile in $stepFiles) {
        $stepContent = Get-Content $stepFile.FullName -Raw
        $mediumCount += ([regex]::Matches($stepContent, '\*\*Severity:\s*medium\*\*', 'IgnoreCase')).Count
        $mediumCount += ([regex]::Matches($stepContent, '🟡\s*Medium', 'IgnoreCase')).Count
    }

    switch ($MinSeverity) {
        'high' { return $highCount }
        'medium' { return $highCount + $mediumCount }
        default { return $highCount + $mediumCount }
    }
}

function Get-PRActiveCommentCount {
    <#
    .SYNOPSIS
        Count active (top-level, non-reply) review comments on a PR.
    #>
    param(
        [int]$PRNumber
    )

    try {
        $comments = gh api "repos/microsoft/PowerToys/pulls/$PRNumber/comments" --jq '[.[] | select(.in_reply_to_id == null)] | length' 2>$null
        if ($comments) {
            return [int]$comments
        }
        return 0
    }
    catch {
        return 0
    }
}

function Clear-PRReviewCache {
    <#
    .SYNOPSIS
        Clear the review cache to force a fresh review.
    #>
    param(
        [int]$PRNumber
    )

    $reviewPath = Join-Path $repoRoot "Generated Files/prReview/$PRNumber"
    if (Test-Path $reviewPath) {
        # Keep logs but remove review markdown files.
        Get-ChildItem $reviewPath -Filter "*.md" | Remove-Item -Force
    }
}

function Invoke-PRReviewFixLoop {
    <#
    .SYNOPSIS
        Run the review/fix loop until no issues remain or max iterations reached.
    .OUTPUTS
        Hashtable: PRNumber, IssueNumber, Iterations, IssuesRemaining, FinalIssueCount.
    #>
    param(
        [int]$PRNumber,
        [int]$IssueNumber,
        [string]$WorktreePath,
        [string]$CLIType = 'copilot',
        [string]$MinSeverity = 'medium',
        [int]$MaxIterations = 3
    )

    $iteration = 0
    $issuesRemaining = $true

    while ($issuesRemaining -and $iteration -lt $MaxIterations) {
        $iteration++
        Info "  [PR #$PRNumber] Review/Fix iteration $iteration of $MaxIterations"

        # Step 1: Run PR review (assign Copilot, review, post comments)
        Info "  [PR #$PRNumber] Running review..."
        try {
            # Clear previous review to force fresh analysis on re-runs.
            if ($iteration -gt 1) {
                Clear-PRReviewCache -PRNumber $PRNumber
            }

            & $prReviewScript -PRNumbers $PRNumber -CLIType $CLIType -Force 2>&1 | Out-Null
        }
        catch {
            Warn "  [PR #$PRNumber] Review failed: $($_.Exception.Message)"
            break
        }

        # Step 2: Check results. Get-PRReviewIssueCount returns -1 when the
        # review produced no overview; the original treated -1 (<= 0) as
        # "clean" and could declare an unreviewed PR issue-free.
        $issueCount = Get-PRReviewIssueCount -PRNumber $PRNumber -MinSeverity $MinSeverity
        $activeComments = Get-PRActiveCommentCount -PRNumber $PRNumber

        Info "  [PR #$PRNumber] Found $issueCount issues (severity >= $MinSeverity), $activeComments active comments"

        if ($issueCount -lt 0) {
            Warn "  [PR #$PRNumber] No review output found; cannot confirm PR is clean"
        }
        elseif ($issueCount -eq 0 -and $activeComments -le 0) {
            Info "  [PR #$PRNumber] ✓ No issues remaining!"
            $issuesRemaining = $false
            break
        }

        # Step 3: Run fix for active comments / found issues.
        if ($activeComments -gt 0 -or $issueCount -gt 0) {
            Info "  [PR #$PRNumber] Fixing $activeComments active comments..."
            try {
                # Run fix via pr-fix skill (review script only does reviews).
                & $prFixScript -PRNumber $PRNumber -CLIType $CLIType -Force 2>&1 | Out-Null
            }
            catch {
                Warn "  [PR #$PRNumber] Fix failed: $($_.Exception.Message)"
            }
        }

        # Brief pause to let GitHub sync.
        Start-Sleep -Seconds 2
    }

    if ($issuesRemaining) {
        Warn "  [PR #$PRNumber] Max iterations reached, some issues may remain"
    }

    return @{
        PRNumber = $PRNumber
        IssueNumber = $IssueNumber
        Iterations = $iteration
        IssuesRemaining = $issuesRemaining
        FinalIssueCount = (Get-PRReviewIssueCount -PRNumber $PRNumber -MinSeverity $MinSeverity)
    }
}
#endregion
+ } + } + + # Step 1: Find high-confidence issues + Info "`n" + ("=" * 60) + Info "STEP 1: Finding high-confidence issues" + Info ("=" * 60) + + # If labels specified, get the list of issue numbers with that label first + # This ensures we ONLY look at issues with the specified label, not all reviewed issues + $filterIssueNumbers = @() + if ($Labels) { + Info "Fetching issues with label '$Labels' from GitHub..." + $labeledIssues = gh issue list --repo microsoft/PowerToys --label "$Labels" --state open --limit $Limit --json number 2>$null | ConvertFrom-Json + $filterIssueNumbers = @($labeledIssues | ForEach-Object { $_.number }) + Info "Found $($filterIssueNumbers.Count) issues with label '$Labels'" + } + + $highConfidence = Get-HighConfidenceIssues ` + -RepoRoot $repoRoot ` + -MinFeasibilityScore $MinFeasibilityScore ` + -MinClarityScore $MinClarityScore ` + -MaxEffortDays $MaxEffortDays ` + -FilterIssueNumbers $filterIssueNumbers + + Info "Found $($highConfidence.Count) high-confidence issues matching criteria" + + if ($highConfidence.Count -eq 0) { + Warn "No issues found matching criteria." + return + } + + # Get issue numbers for checking + $issueNumbers = $highConfidence | ForEach-Object { $_.IssueNumber } + + # Get existing PRs to skip (check GitHub directly) + Info "Checking for existing PRs..." 
+ $existingPRs = Get-ExistingIssuePRs -IssueNumbers $issueNumbers + Info "Found $($existingPRs.Count) issues with existing PRs" + + # Filter out excluded issues and those with existing PRs + $issuesToProcess = $highConfidence | Where-Object { + $issueNum = $_.IssueNumber + $excluded = $issueNum -in $ExcludeIssues + $hasPR = $existingPRs.ContainsKey($issueNum) + + if ($excluded) { + Info " Excluding #$issueNum (in exclude list)" + } + if ($hasPR -and $SkipExisting) { + $prState = $existingPRs[$issueNum].State + Info " Skipping #$issueNum (has $prState PR #$($existingPRs[$issueNum].PRNumber))" + } + + -not $excluded -and (-not $hasPR -or -not $SkipExisting) + } + + if ($issuesToProcess.Count -eq 0) { + Warn "No new issues to process after filtering." + return + } + + Info "`nIssues to process: $($issuesToProcess.Count)" + Info ("-" * 80) + foreach ($issue in $issuesToProcess) { + $prInfo = if ($existingPRs.ContainsKey($issue.IssueNumber)) { + $state = $existingPRs[$issue.IssueNumber].State + " [has $state PR #$($existingPRs[$issue.IssueNumber].PRNumber)]" + } else { "" } + Info ("#{0,-6} [F:{1}, C:{2}, E:{3}d]{4}" -f $issue.IssueNumber, $issue.FeasibilityScore, $issue.ClarityScore, $issue.EffortDays, $prInfo) + } + Info ("-" * 80) + + if ($DryRun) { + Warn "`nDry run mode - showing what would be done:" + Info " 1. Create worktrees for $($issuesToProcess.Count) issues (parallel)" + Info " 2. Run Copilot auto-fix in each worktree (parallel)" + Info " 3. Commit and create PRs (parallel)" + Info " 4. Run PR review/fix loop (up to $MaxReviewIterations iterations per PR)" + Info " - Review PR and post comments (severity >= $MinSeverityForLoop)" + Info " - Fix active comments" + Info " - Repeat until clean or max iterations" + return + } + + # Confirm + if (-not $Force) { + $confirm = Read-Host "`nProceed with full cycle for $($issuesToProcess.Count) issues? (y/N)" + if ($confirm -notmatch '^[yY]') { + Info "Cancelled." 
+ return + } + } + + # Track results + $results = @{ + FixSucceeded = [System.Collections.Concurrent.ConcurrentBag[object]]::new() + FixFailed = [System.Collections.Concurrent.ConcurrentBag[object]]::new() + PRCreated = [System.Collections.Concurrent.ConcurrentBag[object]]::new() + PRFailed = [System.Collections.Concurrent.ConcurrentBag[object]]::new() + PRSkipped = [System.Collections.Concurrent.ConcurrentBag[object]]::new() + ReviewSucceeded = [System.Collections.Concurrent.ConcurrentBag[object]]::new() + ReviewFailed = [System.Collections.Concurrent.ConcurrentBag[object]]::new() + } + + # ======================================== + # PHASE 1: Create worktrees and fix issues (PARALLEL) + # ======================================== + Info "`n" + ("=" * 60) + Info "PHASE 1: Auto-Fix Issues (Parallel)" + Info ("=" * 60) + + $issuesNeedingFix = $issuesToProcess | Where-Object { -not $existingPRs.ContainsKey($_.IssueNumber) } + $issuesWithPR = $issuesToProcess | Where-Object { $existingPRs.ContainsKey($_.IssueNumber) } + + Info "Issues needing fix: $($issuesNeedingFix.Count)" + Info "Issues with existing PR (skip to review): $($issuesWithPR.Count)" + + if ($issuesNeedingFix.Count -gt 0) { + $issuesNeedingFix | ForEach-Object -ThrottleLimit $FixThrottleLimit -Parallel { + $issue = $_ + $issueNum = $issue.IssueNumber + $issueFixScript = $using:issueFixScript + $CLIType = $using:CLIType + $results = $using:results + + try { + Write-Host "[Issue #$issueNum] Starting auto-fix..." 
-ForegroundColor Cyan + & $issueFixScript -IssueNumber $issueNum -CLIType $CLIType -Force 2>&1 | Out-Null + $results.FixSucceeded.Add($issueNum) + Write-Host "[Issue #$issueNum] ✓ Fix completed" -ForegroundColor Green + } + catch { + $results.FixFailed.Add(@{ IssueNumber = $issueNum; Error = $_.Exception.Message }) + Write-Host "[Issue #$issueNum] ✗ Fix failed: $($_.Exception.Message)" -ForegroundColor Red + } + } + } + + Info "`nPhase 1 complete: $($results.FixSucceeded.Count) succeeded, $($results.FixFailed.Count) failed" + + # ======================================== + # PHASE 2: Commit and create PRs (PARALLEL) + # ======================================== + Info "`n" + ("=" * 60) + Info "PHASE 2: Submit PRs (Parallel)" + Info ("=" * 60) + + $fixedIssues = $results.FixSucceeded.ToArray() + + if ($fixedIssues.Count -gt 0) { + $fixedIssues | ForEach-Object -ThrottleLimit $PRThrottleLimit -Parallel { + $issueNum = $_ + $submitPRScript = $using:submitPRScript + $CLIType = $using:CLIType + $results = $using:results + + try { + Write-Host "[Issue #$issueNum] Creating PR..." 
-ForegroundColor Cyan + $submitResult = & $submitPRScript -IssueNumbers $issueNum -CLIType $CLIType -Force 2>&1 + + # Parse output to find PR URL + $prUrl = $null + $prNum = 0 + + if ($submitResult -match 'https://github.com/[^/]+/[^/]+/pull/(\d+)') { + $prUrl = $Matches[0] + $prNum = [int]$Matches[1] + } + + if ($prNum -gt 0) { + $results.PRCreated.Add(@{ IssueNumber = $issueNum; PRNumber = $prNum; PRUrl = $prUrl }) + Write-Host "[Issue #$issueNum] ✓ PR #$prNum created" -ForegroundColor Green + } else { + # Check if PR was already created + $existingPr = gh pr list --head "issue/$issueNum" --state open --json number,url 2>$null | ConvertFrom-Json + if ($existingPr -and $existingPr.Count -gt 0) { + $results.PRSkipped.Add(@{ IssueNumber = $issueNum; PRNumber = $existingPr[0].number; PRUrl = $existingPr[0].url; Reason = "Already exists" }) + Write-Host "[Issue #$issueNum] PR already exists: #$($existingPr[0].number)" -ForegroundColor Yellow + } else { + $results.PRFailed.Add(@{ IssueNumber = $issueNum; Error = "No PR created" }) + Write-Host "[Issue #$issueNum] ✗ PR creation failed" -ForegroundColor Red + } + } + } + catch { + $results.PRFailed.Add(@{ IssueNumber = $issueNum; Error = $_.Exception.Message }) + Write-Host "[Issue #$issueNum] ✗ PR failed: $($_.Exception.Message)" -ForegroundColor Red + } + } + } + + Info "`nPhase 2 complete: $($results.PRCreated.Count) created, $($results.PRSkipped.Count) skipped, $($results.PRFailed.Count) failed" + + # ======================================== + # PHASE 3: Review and Fix PRs (ITERATIVE LOOP) + # ======================================== + Info "`n" + ("=" * 60) + Info "PHASE 3: Review & Fix PRs (Iterative Loop)" + Info ("=" * 60) + Info "Max iterations per PR: $MaxReviewIterations" + Info "Min severity to fix: $MinSeverityForLoop" + + # Collect all PRs to review (newly created + existing) + $prsToReview = @() + + foreach ($pr in $results.PRCreated.ToArray()) { + $prsToReview += @{ IssueNumber = $pr.IssueNumber; PRNumber 
= $pr.PRNumber } + } + foreach ($pr in $results.PRSkipped.ToArray()) { + $prsToReview += @{ IssueNumber = $pr.IssueNumber; PRNumber = $pr.PRNumber } + } + foreach ($issue in $issuesWithPR) { + $prInfo = $existingPRs[$issue.IssueNumber] + # Only include open PRs + if ($prInfo.State -eq 'OPEN') { + $prsToReview += @{ IssueNumber = $issue.IssueNumber; PRNumber = $prInfo.PRNumber } + } + } + + Info "PRs to review: $($prsToReview.Count)" + + # Track review loop results + $reviewLoopResults = [System.Collections.Concurrent.ConcurrentBag[object]]::new() + + if ($prsToReview.Count -gt 0) { + # Process sequentially to avoid overwhelming the AI CLI + foreach ($pr in $prsToReview) { + $issueNum = $pr.IssueNumber + $prNum = $pr.PRNumber + + Info "`n [PR #$prNum for Issue #$issueNum] Starting review/fix loop..." + + try { + $loopResult = Invoke-PRReviewFixLoop ` + -PRNumber $prNum ` + -IssueNumber $issueNum ` + -CLIType $CLIType ` + -MinSeverity $MinSeverityForLoop ` + -MaxIterations $MaxReviewIterations + + $reviewLoopResults.Add($loopResult) + + if (-not $loopResult.IssuesRemaining) { + $results.ReviewSucceeded.Add(@{ IssueNumber = $issueNum; PRNumber = $prNum; Iterations = $loopResult.Iterations }) + Success " [PR #$prNum] ✓ Clean after $($loopResult.Iterations) iteration(s)" + } else { + $results.ReviewFailed.Add(@{ IssueNumber = $issueNum; PRNumber = $prNum; Iterations = $loopResult.Iterations; RemainingIssues = $loopResult.FinalIssueCount }) + Warn " [PR #$prNum] ⚠ $($loopResult.FinalIssueCount) issues remain after $($loopResult.Iterations) iterations" + } + } + catch { + $results.ReviewFailed.Add(@{ IssueNumber = $issueNum; PRNumber = $prNum; Error = $_.Exception.Message }) + Err " [PR #$prNum] ✗ Review loop failed: $($_.Exception.Message)" + } + } + } + + Info "`nPhase 3 complete: $($results.ReviewSucceeded.Count) clean, $($results.ReviewFailed.Count) with remaining issues" + + # Final Summary + $duration = (Get-Date) - $startTime + + Info "`n" + ("=" * 80) + Info "FULL 
CYCLE COMPLETE" + Info ("=" * 80) + Info "Duration: $($duration.ToString('hh\:mm\:ss'))" + Info "" + Info "Issues processed: $($issuesToProcess.Count)" + Success "Fixes succeeded: $($results.FixSucceeded.Count)" + if ($results.FixFailed.Count -gt 0) { + Err "Fixes failed: $($results.FixFailed.Count)" + } + Success "PRs created: $($results.PRCreated.Count)" + if ($results.PRSkipped.Count -gt 0) { + Warn "PRs skipped: $($results.PRSkipped.Count) (already existed)" + } + if ($results.PRFailed.Count -gt 0) { + Err "PRs failed: $($results.PRFailed.Count)" + } + Success "PRs clean (no issues): $($results.ReviewSucceeded.Count)" + if ($results.ReviewFailed.Count -gt 0) { + Warn "PRs with remaining issues: $($results.ReviewFailed.Count)" + } + + Info "" + Info "Summary by issue:" + foreach ($issue in $issuesToProcess) { + $issueNum = $issue.IssueNumber + $prInfo = $results.PRCreated.ToArray() | Where-Object { $_.IssueNumber -eq $issueNum } | Select-Object -First 1 + if (-not $prInfo) { + $prInfo = $results.PRSkipped.ToArray() | Where-Object { $_.IssueNumber -eq $issueNum } | Select-Object -First 1 + } + if (-not $prInfo -and $existingPRs.ContainsKey($issueNum)) { + $prInfo = @{ PRNumber = $existingPRs[$issueNum].PRNumber } + } + + $prNum = if ($prInfo) { "PR #$($prInfo.PRNumber)" } else { "No PR" } + $fixStatus = if ($results.FixSucceeded.ToArray() -contains $issueNum) { "✓" } elseif ($results.FixFailed.ToArray().IssueNumber -contains $issueNum) { "✗" } else { "-" } + + # Check review status with iteration count + $reviewResult = $results.ReviewSucceeded.ToArray() | Where-Object { $_.IssueNumber -eq $issueNum -or $_.PRNumber -eq $prInfo.PRNumber } | Select-Object -First 1 + $reviewFailResult = $results.ReviewFailed.ToArray() | Where-Object { $_.IssueNumber -eq $issueNum -or $_.PRNumber -eq $prInfo.PRNumber } | Select-Object -First 1 + + if ($reviewResult) { + $reviewStatus = "✓($($reviewResult.Iterations))" + } elseif ($reviewFailResult) { + $reviewStatus = 
"⚠($($reviewFailResult.RemainingIssues) left)" + } else { + $reviewStatus = "-" + } + + Info (" Issue #{0,-6} [{1}Fix] [{2}Review] -> {3}" -f $issueNum, $fixStatus, $reviewStatus, $prNum) + } + + Info ("=" * 80) + + return @{ + FixSucceeded = $results.FixSucceeded.ToArray() + FixFailed = $results.FixFailed.ToArray() + PRCreated = $results.PRCreated.ToArray() + PRSkipped = $results.PRSkipped.ToArray() + PRFailed = $results.PRFailed.ToArray() + ReviewSucceeded = $results.ReviewSucceeded.ToArray() + ReviewFailed = $results.ReviewFailed.ToArray() + } +} +catch { + Err "Error: $($_.Exception.Message)" + exit 1 +} +#endregion diff --git a/.github/skills/parallel-job-orchestrator/LICENSE.txt b/.github/skills/parallel-job-orchestrator/LICENSE.txt new file mode 100644 index 000000000000..22aed37e650b --- /dev/null +++ b/.github/skills/parallel-job-orchestrator/LICENSE.txt @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) Microsoft Corporation. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/.github/skills/parallel-job-orchestrator/SKILL.md b/.github/skills/parallel-job-orchestrator/SKILL.md new file mode 100644 index 000000000000..c7f6fbedbdc8 --- /dev/null +++ b/.github/skills/parallel-job-orchestrator/SKILL.md @@ -0,0 +1,151 @@ +--- +name: parallel-job-orchestrator +description: Generic parallel job orchestrator for running copilot, claude, or any CLI tool concurrently with queuing, monitoring, retry, and cleanup. Use when asked to run multiple jobs in parallel, batch process PRs or issues with copilot/claude, orchestrate concurrent CLI executions, run parallel reviews, run parallel triage, or execute any batch of shell commands concurrently. ALL skills that need parallel execution MUST use this orchestrator — do NOT use Start-Job, ForEach-Object -Parallel, or Start-Process directly. +license: Complete terms in LICENSE.txt +--- + +# Parallel Job Orchestrator + +The **single, canonical way** to run multiple jobs concurrently in this repository. Every skill that needs to run copilot, claude, or any CLI tool in parallel **MUST** use this orchestrator. Do NOT use `Start-Job`, `ForEach-Object -Parallel`, or `Start-Process` directly — those approaches have known PowerShell 7 crash bugs that took 48 hours to diagnose and fix. + +## When to Use This Skill + +- Running copilot or claude CLI on multiple PRs/issues simultaneously +- Any batch processing that spawns multiple CLI processes +- Parallel review, triage, fix, or rework workflows +- Any skill that needs concurrent execution with retry and monitoring + +## Why This Orchestrator Exists + +PowerShell 7 has **silent host-process crash bugs** triggered by: + +1. `[CmdletBinding()]`, `[Parameter(Mandatory)]`, `[ValidateSet()]` attributes propagating `ErrorActionPreference='Stop'` through child scopes +2. `Start-Job` called from within functions inside `while` loops — crashes after ~10-15 jobs +3. Accumulated completed `Job` objects consuming runspace resources +4. 
`ForEach-Object -Parallel` swallowing errors and losing context + +This orchestrator avoids all of these by: + +- **No advanced-function attributes** on the script itself +- **Inlined** all `Start-Job`/`Stop-Job`/`Remove-Job` calls (never in functions) +- **Immediately** `Receive-Job` + `Remove-Job` on completion +- **`$ErrorActionPreference = 'Continue'`** in the monitoring loop +- **Write-Host on every iteration** (PS7 kills the host if no output for ~8s in child-script loops) + +## Quick Start + +### Step 1: Build Job Definitions + +Each job is a hashtable with this exact structure: + +```powershell +$jobDef = @{ + Label = 'copilot-pr-12345' # unique human-readable label + ExecutionParameters = @{ + JobName = 'copilot-pr-12345' # PS job name + Command = 'copilot' # executable to run + Arguments = @('-p', 'Review PR #12345', '--yolo') # argument array + WorkingDir = 'C:\repo' # working directory + OutputDir = 'C:\repo\output\copilot\12345' # output directory (auto-created) + LogPath = 'C:\repo\output\copilot\12345\review.log' # stdout+stderr log + } + MonitorFiles = @('C:\repo\output\copilot\12345\review.log') # files to watch for activity + CleanupTask = $null # optional scriptblock: { param($Tracker) ... 
} +} +``` + +### Step 2: Call the Orchestrator + +```powershell +# CRITICAL: Set ErrorActionPreference to Continue before calling +$savedEAP = $ErrorActionPreference +$ErrorActionPreference = 'Continue' + +$results = & '.github/skills/parallel-job-orchestrator/scripts/Invoke-SimpleJobOrchestrator.ps1' ` + -JobDefinitions $jobDefs ` + -MaxConcurrent 4 ` + -InactivityTimeoutSeconds 60 ` + -MaxRetryCount 3 ` + -PollIntervalSeconds 5 ` + -LogDir 'C:\repo\output' + +$ErrorActionPreference = $savedEAP +``` + +### Step 3: Process Results + +The orchestrator returns an array of result objects: + +```powershell +$results | Format-Table Label, Status, JobState, ExitCode, RetryCount -AutoSize +``` + +| Property | Type | Description | +|----------|------|-------------| +| `Label` | string | Job label from definition | +| `JobId` | int | Last PowerShell job ID | +| `Status` | string | `Completed`, `Failed`, `Abandoned` | +| `JobState` | string | PowerShell job state | +| `ExitCode` | int | Process exit code | +| `RetryCount` | int | Number of retries performed | +| `OutputDir` | string | Output directory path | +| `LogPath` | string | Log file path | + +## Parameters + +| Parameter | Type | Default | Description | +|-----------|------|---------|-------------| +| `-JobDefinitions` | hashtable[] | **(required)** | Array of job definition hashtables | +| `-MaxConcurrent` | int | 4 | Maximum simultaneous jobs | +| `-InactivityTimeoutSeconds` | int | 60 | Seconds of zero log-file growth before stale | +| `-MaxRetryCount` | int | 3 | Retry attempts before abandoning | +| `-PollIntervalSeconds` | int | 5 | Health-check interval | +| `-LogDir` | string | `$env:TEMP` | Directory for orchestrator's own log | + +## Job Definition Schema + +See [references/job-definition-schema.md](./references/job-definition-schema.md) for the complete schema, copilot/claude examples, and the CleanupTask API. + +## Critical Rules for Callers + +1. 
**Set `$ErrorActionPreference = 'Continue'`** before calling the orchestrator +2. **Do NOT** wrap the orchestrator call in a `try/catch` that re-throws +3. **Do NOT** use `[CmdletBinding()]` or `[Parameter(Mandatory)]` on your runner script +4. **Do NOT** use `Start-Job`, `ForEach-Object -Parallel`, or `Start-Process` for parallel work — use this orchestrator +5. **Do** use manual validation (`if (-not $param) { Write-Error ...; return }`) instead of parameter attributes + +## Scripts + +| Script | Purpose | +|--------|---------| +| [Invoke-SimpleJobOrchestrator.ps1](./scripts/Invoke-SimpleJobOrchestrator.ps1) | The orchestrator — the ONLY parallel execution engine | +| [Test-OrchestratorEdgeCases.ps1](./scripts/Test-OrchestratorEdgeCases.ps1) | 28-scenario stress test suite | + +## Execution & Monitoring Rules + +The orchestrator is a long-running poll loop. The agent calling it MUST: + +1. **Never exit early** — monitor the orchestrator log until it prints "All N jobs finished." +2. **For VS Code terminal usage**, launch the parent script as a detached process (`Start-Process -WindowStyle Hidden`) with `Tee-Object` to a log file. VS Code kills idle background terminals after ~60s. +3. **Poll the log every 30–120 seconds** and report concise progress (done/total, running jobs, retries). +4. **On unexpected termination**, check the orchestrator log's last entries, diagnose the failure, and relaunch. +5. **Only report done** after the orchestrator returns results and all downstream processing is complete. + +## Post-Execution Review + +After using the orchestrator: + +1. Check the orchestrator log in `$LogDir/orchestrator-*.log` for errors +2. Verify all expected jobs show `Completed` status in results +3. Check `RetryCount` — high retries may indicate CLI instability +4. 
Review `Abandoned` jobs — these hit `MaxRetryCount` and need manual attention + +## Troubleshooting + +| Symptom | Cause | Fix | +|---------|-------|-----| +| PS7 crashes silently | Advanced-function attributes on caller | Remove `[CmdletBinding()]`, `[Parameter()]` from runner script | +| PS7 crashes after ~10 jobs | `Start-Job` inside functions in while loops | Already fixed in orchestrator; don't re-introduce functions | +| Jobs stuck as "Running" | `InactivityTimeoutSeconds` too high | Lower timeout or check CLI isn't hanging | +| All jobs `Abandoned` | CLI tool not installed or auth expired | Test CLI manually: `copilot -p "hello" --yolo` | +| Orchestrator itself crashes at iter ~9 | Too many VS Code terminals open | Kill all terminals, restart VS Code, run in single terminal | diff --git a/.github/skills/parallel-job-orchestrator/references/job-definition-schema.md b/.github/skills/parallel-job-orchestrator/references/job-definition-schema.md new file mode 100644 index 000000000000..cdc68dbd8891 --- /dev/null +++ b/.github/skills/parallel-job-orchestrator/references/job-definition-schema.md @@ -0,0 +1,166 @@ +# Job Definition Schema + +This document defines the exact hashtable structure required by the +`Invoke-SimpleJobOrchestrator.ps1` script. Every skill that needs parallel +execution builds an array of these hashtables and passes them to the +orchestrator. + +## Schema + +```powershell +@{ + Label = [string] # REQUIRED: unique human-readable label (e.g. 'copilot-pr-12345') + ExecutionParameters = @{ + JobName = [string] # REQUIRED: PowerShell background job name + Command = [string] # REQUIRED: executable to run (e.g. 
'copilot', 'claude', 'gh') + Arguments = [string[]] # REQUIRED: argument array splatted to Command + WorkingDir = [string] # REQUIRED: working directory for the job + OutputDir = [string] # REQUIRED: output directory (auto-created by orchestrator) + LogPath = [string] # REQUIRED: path for stdout+stderr capture + } + MonitorFiles = [string[]] # REQUIRED: files to watch for activity (typically LogPath or a debug log) + CleanupTask = [scriptblock] # OPTIONAL: runs after job finishes or is abandoned +} +``` + +## Field Details + +### Label +A unique string identifying the job in logs and results. Convention: +`{cli-type}-{skill}-{id}` — e.g. `copilot-pr-45601`, `claude-issue-1234`. + +### ExecutionParameters + +| Field | Description | +|-------|-------------| +| `JobName` | Name for `Start-Job -Name`. Should match `Label`. | +| `Command` | The executable. Must be in `$PATH` or an absolute path. | +| `Arguments` | Array of arguments. Splatted via `@ArgList`. | +| `WorkingDir` | The job runs `Set-Location` to this directory before invoking `Command`. | +| `OutputDir` | The orchestrator creates this directory automatically. | +| `LogPath` | All stdout+stderr is redirected here via `*> $LogFile`. | + +### MonitorFiles +Array of file paths the orchestrator watches for activity. If none of these +files change size within `InactivityTimeoutSeconds`, the job is considered stale and +retried. + +**For copilot CLI**: Monitor the `LogPath` (stdout/stderr). +**For claude CLI**: Monitor the debug log (`--debug-file` path) — claude +writes progress there more frequently than to stdout. + +### CleanupTask +Optional scriptblock that receives the tracker hashtable as its single +parameter. Runs after the job completes, fails, or is abandoned. Use for +cleaning up large temporary files.
+ +```powershell +CleanupTask = { + param($Tracker) + $debugLog = Join-Path $Tracker.ExecutionParameters.OutputDir '_debug.log' + if (Test-Path $debugLog) { Remove-Item $debugLog -Force } +} +``` + +## Examples + +### Copilot CLI Job + +```powershell +@{ + Label = 'copilot-pr-45601' + ExecutionParameters = @{ + JobName = 'copilot-pr-45601' + Command = 'copilot' + Arguments = @('-p', 'Review PR #45601 in microsoft/PowerToys...', '--yolo') + WorkingDir = 'C:\s\PowerToys' + OutputDir = 'C:\s\PowerToys\output\copilot\45601' + LogPath = 'C:\s\PowerToys\output\copilot\45601\_copilot-review.log' + } + MonitorFiles = @('C:\s\PowerToys\output\copilot\45601\_copilot-review.log') + CleanupTask = $null +} +``` + +### Claude CLI Job + +```powershell +@{ + Label = 'claude-pr-45601' + ExecutionParameters = @{ + JobName = 'claude-pr-45601' + Command = 'claude' + Arguments = @('-p', 'Review PR #45601 in microsoft/PowerToys...', + '--dangerously-skip-permissions', + '--debug', 'all', '--debug-file', 'C:\output\claude\45601\_claude-debug.log') + WorkingDir = 'C:\s\PowerToys' + OutputDir = 'C:\s\PowerToys\output\claude\45601' + LogPath = 'C:\s\PowerToys\output\claude\45601\_claude-review.log' + } + MonitorFiles = @('C:\s\PowerToys\output\claude\45601\_claude-debug.log') + CleanupTask = { + param($Tracker) + $dbg = Join-Path $Tracker.ExecutionParameters.OutputDir '_claude-debug.log' + if (Test-Path $dbg) { + $fi = [System.IO.FileInfo]::new($dbg) + if ($fi.Length -gt 0) { + $sizeMB = [math]::Round($fi.Length / 1MB, 1) + Remove-Item $dbg -Force + Write-Host "[$($Tracker.Label)] Cleaned debug log (${sizeMB} MB)" + } + } + } +} +``` + +### Generic Shell Command Job + +```powershell +@{ + Label = 'lint-module-fancyzones' + ExecutionParameters = @{ + JobName = 'lint-fancyzones' + Command = 'dotnet' + Arguments = @('build', '--no-restore', '-warnaserror') + WorkingDir = 'C:\s\PowerToys\src\modules\fancyzones' + OutputDir = 'C:\s\PowerToys\output\lint\fancyzones' + LogPath = 
'C:\s\PowerToys\output\lint\fancyzones\build.log' + } + MonitorFiles = @('C:\s\PowerToys\output\lint\fancyzones\build.log') + CleanupTask = $null +} +``` + +## Caller Template + +Every skill that builds job definitions and calls the orchestrator should +follow this pattern: + +```powershell +# Build definitions +$jobDefs = @(foreach ($item in $items) { + @{ + Label = "myskill-$($item.Id)" + ExecutionParameters = @{ ... } + MonitorFiles = @(...) + CleanupTask = $null + } +}) + +# Resolve orchestrator path +$orchestratorPath = Join-Path $PSScriptRoot '..\..\parallel-job-orchestrator\scripts\Invoke-SimpleJobOrchestrator.ps1' + +# CRITICAL: Lower ErrorActionPreference before calling +$savedEAP = $ErrorActionPreference +$ErrorActionPreference = 'Continue' + +$results = & $orchestratorPath ` + -JobDefinitions $jobDefs ` + -MaxConcurrent 4 ` + -LogDir $outputPath + +$ErrorActionPreference = $savedEAP + +# Process results +$results | Format-Table Label, Status, ExitCode, RetryCount -AutoSize +``` diff --git a/.github/skills/parallel-job-orchestrator/scripts/Invoke-SimpleJobOrchestrator.ps1 b/.github/skills/parallel-job-orchestrator/scripts/Invoke-SimpleJobOrchestrator.ps1 new file mode 100644 index 000000000000..3eb08e4a75d4 --- /dev/null +++ b/.github/skills/parallel-job-orchestrator/scripts/Invoke-SimpleJobOrchestrator.ps1 @@ -0,0 +1,358 @@ +<# +.SYNOPSIS + Generic job orchestrator: queues, starts, monitors, retries, and cleans up + PowerShell background jobs with configurable concurrency. + +.DESCRIPTION + Accepts an array of job definitions (created via New-JobDefinition), queues + them in memory, and runs up to MaxConcurrent at a time. Jobs are retried + up to MaxRetryCount times when they: + - Exit with a non-zero exit code + - Finish with a Failed or NotFound job state + - Stall (log-file inactivity exceeds InactivityTimeoutSeconds) + When a job finishes or is abandoned, its optional CleanupTask scriptblock + runs. 
+ + Returns an array of result objects with final state, exit code, retry count, + and output directory for every definition. + + This is the CANONICAL parallel execution engine for this repository. + ALL skills that need to run copilot, claude, or any CLI tool in parallel + MUST use this orchestrator. Do NOT use Start-Job, ForEach-Object -Parallel, + or Start-Process directly — those approaches have known PowerShell 7 crash + bugs. + + Part of the parallel-job-orchestrator skill: + <configRoot>/skills/parallel-job-orchestrator/SKILL.md + +.PARAMETER JobDefinitions + Array of job-definition hashtables created by New-JobDefinition. + +.PARAMETER MaxConcurrent + Maximum number of jobs running simultaneously. Default 4. + +.PARAMETER InactivityTimeoutSeconds + Seconds of zero log-file growth before a job is considered stale. Default 60. + +.PARAMETER MaxRetryCount + How many times to restart a stale job before giving up. Default 3. + +.PARAMETER PollIntervalSeconds + How often (seconds) to check job health. Default 5. + +.PARAMETER LogDir + Directory for the orchestrator's own progress log. Default: TEMP. +#> +# NOTE: Do NOT use [CmdletBinding()] here. When a caller sets +# $ErrorActionPreference='Stop', CmdletBinding propagates that as the implicit +# -ErrorAction common parameter, overriding any local assignment. A monitoring +# loop must be resilient, so we intentionally stay as a simple script. +# IMPORTANT: Do not use [Parameter()], [ValidateSet()] or any attribute on params +# either — those ALSO implicitly enable advanced-script behaviour. +param( + [hashtable[]]$JobDefinitions, + + [int]$MaxConcurrent = 4, + + [int]$InactivityTimeoutSeconds = 60, + + [int]$MaxRetryCount = 3, + + [int]$PollIntervalSeconds = 5, + + [string]$LogDir +) + +# Manual mandatory check (replacing [Parameter(Mandatory)] which makes this +# an advanced script and re-enables ErrorActionPreference propagation). 
+if (-not $JobDefinitions -or $JobDefinitions.Count -eq 0) { + Write-Error 'Invoke-SimpleJobOrchestrator: -JobDefinitions is required and must not be empty.' + return @() +} + +# Orchestrator must be resilient — individual operations handle their own errors. +$ErrorActionPreference = 'Continue' + +# ── logging ────────────────────────────────────────────────────────────── +# Verbose progress goes to a log file to avoid terminal-output issues that +# can silently terminate the script when run inside VS Code / IDE terminals. +# Only summary-level messages go to Write-Host (console). + +if (-not $LogDir) { $LogDir = $env:TEMP } +New-Item -ItemType Directory -Path $LogDir -Force -ErrorAction SilentlyContinue | Out-Null +$script:_orchestratorLog = Join-Path $LogDir "orchestrator-$(Get-Date -Format 'yyyyMMdd-HHmmss').log" + +function Write-Log { + param([string]$Message) + $ts = Get-Date -Format 'HH:mm:ss' + $line = "[$ts] $Message" + try { Add-Content -Path $script:_orchestratorLog -Value $line -ErrorAction SilentlyContinue } + catch { } +} + +function Write-ProgressMessage { + <# Write to both console and log file. Use sparingly. #> + param([string]$Message) + Write-Log $Message + Write-Host $Message +} + +# ── helpers ────────────────────────────────────────────────────────────── + +# IMPORTANT: Start-TrackedJob is deliberately NOT a function. PowerShell 7 +# silently crashes the host process when Start-Job is called from within a +# function that is invoked inside a while loop in a .ps1 script file (~10-15 +# jobs triggers it). Inline the Start-Job call at every call site instead. + +# Shared scriptblock for all tracked jobs (defined once, reused). 
+$_jobScriptBlock = { + param($Cmd, $ArgList, $WorkDir, $LogFile) + Set-Location $WorkDir + if (Test-Path $LogFile) { Remove-Item $LogFile -Force } + & $Cmd @ArgList *> $LogFile + [PSCustomObject]@{ + Command = $Cmd + ExitCode = $LASTEXITCODE + LogPath = $LogFile + } +} + +# NOTE: Test-MonitorFilesActive, Stop-TrackedJob, and Invoke-CleanupTask +# are deliberately NOT functions. PowerShell 7 silently crashes the host when +# certain cmdlets (Stop-Job, Remove-Job, Get-Job, Get-Item) are called from +# within a function in a while loop inside a .ps1 script. Their logic is +# inlined at every call site below. + +function Get-TrackerResult { + param([hashtable]$Tracker) + + # Job output was collected and stored in _ReceivedOutput / _FinalJobState + # at completion time (before Remove-Job). Fall back to live query only if + # the tracker somehow missed the collection step. + $received = $Tracker._ReceivedOutput + $state = $Tracker._FinalJobState + + if (-not $state) { + $jobObj = Get-Job -Id $Tracker.JobId -ErrorAction SilentlyContinue + $received = if ($jobObj) { + Receive-Job -Id $Tracker.JobId -Keep -ErrorAction SilentlyContinue + } + else { $null } + $state = if ($jobObj) { $jobObj.State } else { 'Removed' } + } + + [PSCustomObject]@{ + Label = $Tracker.Label + JobId = $Tracker.JobId + Status = $Tracker.Status + JobState = $state + ExitCode = if ($received) { $received.ExitCode } else { $null } + RetryCount = $Tracker.RetryCount + OutputDir = $Tracker.ExecutionParameters.OutputDir + LogPath = $Tracker.ExecutionParameters.LogPath + } +} + +# ── build tracker list ─────────────────────────────────────────────────── + +$queue = [System.Collections.Generic.Queue[hashtable]]::new() +$running = [System.Collections.Generic.List[hashtable]]::new() +$finished = [System.Collections.Generic.List[hashtable]]::new() + +foreach ($def in $JobDefinitions) { + $tracker = @{ + Label = $def.Label + ExecutionParameters = $def.ExecutionParameters + MonitorFiles = $def.MonitorFiles + 
CleanupTask = $def.CleanupTask + Status = 'Queued' + JobId = $null + RetryCount = 0 + LastFileSizes = @{} + LastChangeTime = [DateTime]::UtcNow + } + $queue.Enqueue($tracker) +} + +Write-ProgressMessage "Orchestrator: $($queue.Count) jobs queued, max $MaxConcurrent concurrent. Log: $script:_orchestratorLog" + +# ── main loop ──────────────────────────────────────────────────────────── + +$loopIteration = 0 + +while ($queue.Count -gt 0 -or $running.Count -gt 0) { + $loopIteration++ + + try { + $ErrorActionPreference = 'Continue' + + # fill slots from queue + while ($running.Count -lt $MaxConcurrent -and $queue.Count -gt 0) { + $t = $queue.Dequeue() + Write-Log "Dequeued $($t.Label); about to start job (running=$($running.Count), queue=$($queue.Count))" + try { + # ── inline Start-TrackedJob (see note above about PS7 crash) ── + $ep = $t.ExecutionParameters + New-Item -ItemType Directory -Path $ep.OutputDir -Force | Out-Null + $job = Start-Job -Name $ep.JobName -ScriptBlock $_jobScriptBlock ` + -ArgumentList $ep.Command, $ep.Arguments, $ep.WorkingDir, $ep.LogPath + $t.JobId = $job.Id + $t.Status = 'Running' + $t.LastFileSizes = @{} + $t.LastChangeTime = [DateTime]::UtcNow + foreach ($f in $t.MonitorFiles) { $t.LastFileSizes[$f] = 0L } + Write-Log "[$($t.Label)] Started job $($job.Id)" + } + catch { + Write-Log "Start job FAILED for $($t.Label): $_" + $t.Status = 'Failed' + $finished.Add($t) + continue + } + $running.Add($t) + } + + Write-Log "Sleeping ${PollIntervalSeconds}s..." + + Start-Sleep -Seconds $PollIntervalSeconds + + # evaluate every running tracker + $toRemove = [System.Collections.Generic.List[hashtable]]::new() + + foreach ($t in $running) { + $jobObj = Get-Job -Id $t.JobId -ErrorAction SilentlyContinue + $jobState = if ($jobObj) { $jobObj.State } else { 'NotFound' } + + # ── finished naturally ──────────────────────────────────── + if ($jobState -in 'Completed', 'Failed', 'NotFound') { + # Collect job output before deciding whether to retry. 
+ $received = $null + if ($jobObj) { + $received = Receive-Job -Id $t.JobId -ErrorAction SilentlyContinue + } + Remove-Job -Id $t.JobId -Force -ErrorAction SilentlyContinue + + $exitCode = if ($received) { $received.ExitCode } else { $null } + $isFailedExit = ($jobState -in 'Failed', 'NotFound') -or + ($null -ne $exitCode -and $exitCode -ne 0) + + # ── retry if the process exited with failure ────────── + if ($isFailedExit -and $t.RetryCount -lt $MaxRetryCount) { + $t.RetryCount++ + Write-Log "[$($t.Label)] Exited with failure (state=$jobState, exit=$exitCode) — retry $($t.RetryCount)/$MaxRetryCount" + # ── inline cleanup before retry (no function — see PS7 crash note) ── + if ($t.CleanupTask) { + try { & $t.CleanupTask $t } + catch { Write-Log "[$($t.Label)] Cleanup failed: $_" } + } + # ── inline Start-TrackedJob for retry (see note about PS7 crash) ── + $ep = $t.ExecutionParameters + New-Item -ItemType Directory -Path $ep.OutputDir -Force | Out-Null + $job = Start-Job -Name $ep.JobName -ScriptBlock $_jobScriptBlock ` + -ArgumentList $ep.Command, $ep.Arguments, $ep.WorkingDir, $ep.LogPath + $t.JobId = $job.Id + $t.Status = 'Running' + $t.LastFileSizes = @{} + $t.LastChangeTime = [DateTime]::UtcNow + foreach ($f in $t.MonitorFiles) { $t.LastFileSizes[$f] = 0L } + Write-Log "[$($t.Label)] Retry started job $($job.Id)" + continue + } + + $t.Status = if ($isFailedExit) { 'Failed' } else { $jobState } + Write-Log "[$($t.Label)] Finished (state=$jobState, exit=$exitCode) after $($t.RetryCount) retries." 
+ + $t._ReceivedOutput = $received + $t._FinalJobState = $jobState + + # ── inline cleanup (no function — see PS7 crash note) ── + if ($t.CleanupTask) { + try { & $t.CleanupTask $t } + catch { Write-Log "[$($t.Label)] Cleanup failed: $_" } + } + $toRemove.Add($t) + continue + } + + # ── still running — check monitor files ────────────────── + $active = $false + try { + # ── inline file-activity check (no function — see PS7 crash note) ── + $_anyGrew = $false + foreach ($_f in $t.MonitorFiles) { + $_sz = 0L + if (Test-Path $_f) { $_sz = ([System.IO.FileInfo]::new($_f)).Length } + if ($_sz -ne $t.LastFileSizes[$_f]) { + $t.LastFileSizes[$_f] = $_sz + $_anyGrew = $true + } + } + $active = $_anyGrew + } + catch { $active = $true } + + if ($active) { + $t.LastChangeTime = [DateTime]::UtcNow + continue + } + + $staleSecs = [math]::Round(([DateTime]::UtcNow - $t.LastChangeTime).TotalSeconds) + if ($staleSecs -lt $InactivityTimeoutSeconds) { continue } + + # ── stale — retry or give up ───────────────────────────── + if ($t.RetryCount -ge $MaxRetryCount) { + Write-Log "[$($t.Label)] Max retries ($MaxRetryCount) after ${staleSecs}s stale. Giving up." 
+ $t.Status = 'Abandoned' + # ── inline stop + cleanup (no function — see PS7 crash note) ── + Stop-Job -Id $t.JobId -ErrorAction SilentlyContinue + Remove-Job -Id $t.JobId -Force -ErrorAction SilentlyContinue + if ($t.CleanupTask) { + try { & $t.CleanupTask $t } + catch { Write-Log "[$($t.Label)] Cleanup failed: $_" } + } + $toRemove.Add($t) + continue + } + + $t.RetryCount++ + Write-Log "[$($t.Label)] Stale ${staleSecs}s — retry $($t.RetryCount)/$MaxRetryCount" + # ── inline stop (no function — see PS7 crash note) ── + Stop-Job -Id $t.JobId -ErrorAction SilentlyContinue + Remove-Job -Id $t.JobId -Force -ErrorAction SilentlyContinue + # ── inline Start-TrackedJob for retry (see note about PS7 crash) ── + $ep = $t.ExecutionParameters + New-Item -ItemType Directory -Path $ep.OutputDir -Force | Out-Null + $job = Start-Job -Name $ep.JobName -ScriptBlock $_jobScriptBlock ` + -ArgumentList $ep.Command, $ep.Arguments, $ep.WorkingDir, $ep.LogPath + $t.JobId = $job.Id + $t.Status = 'Running' + $t.LastFileSizes = @{} + $t.LastChangeTime = [DateTime]::UtcNow + foreach ($f in $t.MonitorFiles) { $t.LastFileSizes[$f] = 0L } + Write-Log "[$($t.Label)] Retry started job $($job.Id)" + } + + foreach ($r in $toRemove) { + $running.Remove($r) | Out-Null + $finished.Add($r) + } + } + catch { + Write-Log "Loop error (iter $loopIteration): $_ | $($_.Exception.GetType().FullName)" + } + + # log every iteration; console progress every iteration (REQUIRED: + # PowerShell 7 silently kills the host process when a child-script + # while loop produces no Write-Host output for ~8+ seconds). 
+ $qc = $queue.Count; $rc = $running.Count; $fc = $finished.Count + Write-Log "queue=$qc running=$rc done=$fc (iter=$loopIteration)" + $runLabels = ($running | ForEach-Object { $_.Label }) -join ', ' + Write-Host " [$((Get-Date).ToString('HH:mm:ss'))] queue=$qc running=$rc done=$fc (iter=$loopIteration) [$runLabels]" +} + +# ── results ────────────────────────────────────────────────────────────── + +Write-ProgressMessage "All $($finished.Count) jobs finished. Log: $script:_orchestratorLog" + +$results = foreach ($t in $finished) { Get-TrackerResult $t } +return $results diff --git a/.github/skills/parallel-job-orchestrator/scripts/Test-OrchestratorEdgeCases.ps1 b/.github/skills/parallel-job-orchestrator/scripts/Test-OrchestratorEdgeCases.ps1 new file mode 100644 index 000000000000..b63b32741c8b --- /dev/null +++ b/.github/skills/parallel-job-orchestrator/scripts/Test-OrchestratorEdgeCases.ps1 @@ -0,0 +1,352 @@ +<# +.SYNOPSIS + Stress-tests Invoke-SimpleJobOrchestrator.ps1 with edge-case scenarios. + +.DESCRIPTION + Creates job definitions that simulate various failure modes: + 1. Happy-path jobs (should complete normally) + 2. Jobs that throw exceptions (should be detected as Failed) + 3. Jobs that hang with no log output (stale → retry → abandon) + 4. Jobs that write to the log once then hang (stale after initial burst) + 5. Jobs with a cleanup task (verify cleanup runs on completion) + 6. Jobs with a cleanup task that itself throws + 7. Concurrency pressure: many fast jobs queued beyond MaxConcurrent + 8. Mixed bag: all of the above in one run + + Each scenario prints PASS / FAIL and the script exits with the total + failure count so CI can gate on it. + +.PARAMETER Scenario + Which scenario to run. Default 'All' runs every scenario sequentially. + +.PARAMETER OutputRoot + Base directory for test artefacts. Cleaned before each scenario. +#> +# NOTE: Do NOT use [CmdletBinding()] or parameter attributes such as +# [ValidateSet()] / [Parameter()] here. 
Any of those make this an "advanced +# script", which propagates the caller's ErrorActionPreference via the implicit +# -ErrorAction common parameter — silently terminating the entire script when +# stray non-terminating errors bubble up from Stop-Job, Remove-Job, or file +# locks between scenarios. +param( + [string]$Scenario = 'All', + [string]$OutputRoot = 'Generated Files/orch-stress-test' +) + +# Manual validation instead of [ValidateSet()] to keep this a simple script. +$validScenarios = @('All', 'HappyPath', 'ThrowException', 'StaleNoLog', + 'StaleThenHang', 'CleanupRuns', 'CleanupThrows', 'ConcurrencyPressure', 'MixedBag') +if ($Scenario -notin $validScenarios) { + Write-Error "Invalid -Scenario '$Scenario'. Valid values: $($validScenarios -join ', ')" + return +} + +# Test scripts use 'Continue' globally. Individual assertions use try/catch. +# Using 'Stop' causes stray non-terminating errors from completed-job cleanup, +# file locks, Start-Job, etc. to silently terminate the whole script. 
+$ErrorActionPreference = 'Continue' +$repoRoot = (Resolve-Path (Join-Path $PSScriptRoot '..\..\..\..')).Path +$orchPath = Join-Path $PSScriptRoot 'Invoke-SimpleJobOrchestrator.ps1' + +if (-not [System.IO.Path]::IsPathRooted($OutputRoot)) { + $OutputRoot = Join-Path $repoRoot $OutputRoot +} + +# ── helper: build a single synthetic job definition ────────────────────── + +function New-TestJob { + param( + [string]$Label, + [string]$InlineScript, # PowerShell code to run inside the job + [string]$OutDir, + [scriptblock]$CleanupTask = $null + ) + + $logPath = Join-Path $OutDir "$Label.log" + + return @{ + Label = $Label + ExecutionParameters = @{ + JobName = $Label + Command = 'powershell' + Arguments = @('-NoProfile', '-Command', $InlineScript) + WorkingDir = $repoRoot + OutputDir = $OutDir + LogPath = $logPath + } + MonitorFiles = @($logPath) + CleanupTask = $CleanupTask + } +} + +# ── helper: run one scenario ───────────────────────────────────────────── + +$script:passCount = 0 +$script:failCount = 0 + +function Invoke-Scenario { + param( + [string]$Name, + [hashtable[]]$Defs, + [int]$MaxConcurrent = 10, + [int]$InactivityTimeout = 8, + [int]$MaxRetry = 1, + [int]$PollInterval = 2, + [scriptblock]$Assertions # receives $results array + ) + + Write-Host "`n╔══════════════════════════════════════════════════════╗" -ForegroundColor Yellow + Write-Host "║ Scenario: $Name" -ForegroundColor Yellow + Write-Host "╚══════════════════════════════════════════════════════╝" -ForegroundColor Yellow + + $scenarioDir = Join-Path $OutputRoot $Name + + # ── aggressive cleanup ─────────────────────────────────────────────── + # Previous stale-job scenarios may leave background processes with file + # locks. Stop ALL jobs (not just the current scenario's) and wait a + # moment for handles to release before wiping the directory. 
+ Get-Job | Stop-Job -ErrorAction SilentlyContinue + Get-Job | Remove-Job -Force -ErrorAction SilentlyContinue + + if (Test-Path $scenarioDir) { + Start-Sleep -Milliseconds 500 + Remove-Item $scenarioDir -Recurse -Force -ErrorAction SilentlyContinue + # Retry once if first attempt failed (file lock race) + if (Test-Path $scenarioDir) { + Start-Sleep -Seconds 1 + Remove-Item $scenarioDir -Recurse -Force -ErrorAction SilentlyContinue + } + } + + $results = & $orchPath ` + -JobDefinitions $Defs ` + -MaxConcurrent $MaxConcurrent ` + -InactivityTimeoutSeconds $InactivityTimeout ` + -MaxRetryCount $MaxRetry ` + -PollIntervalSeconds $PollInterval ` + -LogDir $scenarioDir + + # run caller assertions + try { + & $Assertions $results + } + catch { + Write-Host " FAIL (assertion error): $_" -ForegroundColor Red + $script:failCount++ + } +} + +function Assert-True { + param([bool]$Condition, [string]$Message) + if ($Condition) { + Write-Host " PASS: $Message" -ForegroundColor Green + $script:passCount++ + } + else { + Write-Host " FAIL: $Message" -ForegroundColor Red + $script:failCount++ + } +} + +# ── scenario definitions ───────────────────────────────────────────────── + +$scenarios = @{} + +# 1. Happy path — 3 jobs that complete quickly +$scenarios['HappyPath'] = { + $dir = Join-Path $OutputRoot 'HappyPath' + $defs = @(1..3 | ForEach-Object { + New-TestJob -Label "happy-$_" -OutDir $dir ` + -InlineScript "Write-Output 'hello from $_'; Start-Sleep -Milliseconds 500; Write-Output 'done $_'" + }) + + Invoke-Scenario -Name 'HappyPath' -Defs $defs -Assertions { + param($r) + Assert-True ($r.Count -eq 3) 'Got 3 results' + Assert-True (($r | Where-Object Status -eq 'Completed').Count -eq 3) 'All 3 completed' + Assert-True (($r | Where-Object RetryCount -eq 0).Count -eq 3) 'Zero retries' + } +} + +# 2. 
Throw exception — the command errors out immediately +$scenarios['ThrowException'] = { + $dir = Join-Path $OutputRoot 'ThrowException' + $defs = @( + (New-TestJob -Label 'throw-1' -OutDir $dir ` + -InlineScript "throw 'Simulated fatal error'"), + (New-TestJob -Label 'good-1' -OutDir $dir ` + -InlineScript "Write-Output 'I am fine'; Start-Sleep -Milliseconds 300") + ) + + Invoke-Scenario -Name 'ThrowException' -Defs $defs -Assertions { + param($r) + Assert-True ($r.Count -eq 2) 'Got 2 results' + Assert-True (($r | Where-Object Label -eq 'throw-1').Status -in 'Completed','Failed') 'Throw job detected as finished' + Assert-True (($r | Where-Object Label -eq 'good-1').Status -eq 'Completed') 'Good job completed' + } +} + +# 3. Stale — no log output, job sleeps forever (beyond timeout) +$scenarios['StaleNoLog'] = { + $dir = Join-Path $OutputRoot 'StaleNoLog' + $defs = @( + (New-TestJob -Label 'stale-nolog' -OutDir $dir ` + -InlineScript "Start-Sleep -Seconds 120") + ) + + # Timeout 8 s, poll 2 s, max retry 1 → should retry once then abandon + Invoke-Scenario -Name 'StaleNoLog' -Defs $defs ` + -InactivityTimeout 8 -MaxRetry 1 -PollInterval 2 ` + -Assertions { + param($r) + Assert-True ($r.Count -eq 1) 'Got 1 result' + Assert-True ($r[0].Status -eq 'Abandoned') 'Marked as Abandoned' + Assert-True ($r[0].RetryCount -eq 1) 'Retried once before giving up' + } +} + +# 4. Writes once then hangs — log grows initially then stops +$scenarios['StaleThenHang'] = { + $dir = Join-Path $OutputRoot 'StaleThenHang' + $defs = @( + (New-TestJob -Label 'burst-hang' -OutDir $dir ` + -InlineScript "Write-Output 'initial burst'; Start-Sleep -Seconds 120") + ) + + Invoke-Scenario -Name 'StaleThenHang' -Defs $defs ` + -InactivityTimeout 8 -MaxRetry 1 -PollInterval 2 ` + -Assertions { + param($r) + Assert-True ($r.Count -eq 1) 'Got 1 result' + Assert-True ($r[0].Status -eq 'Abandoned') 'Marked as Abandoned' + Assert-True ($r[0].RetryCount -ge 1) 'Retried at least once' + } +} + +# 5. 
Cleanup task runs on completion +$scenarios['CleanupRuns'] = { + $dir = Join-Path $OutputRoot 'CleanupRuns' + $marker = Join-Path $dir 'cleanup-ran.marker' + + $cleanupBlock = [scriptblock]::Create( + "param(`$Tracker); New-Item -ItemType File -Path '$($marker -replace "'","''")' -Force | Out-Null" + ) + + $defs = @( + (New-TestJob -Label 'cleanup-ok' -OutDir $dir ` + -InlineScript "Write-Output 'will be cleaned'" ` + -CleanupTask $cleanupBlock) + ) + + Invoke-Scenario -Name 'CleanupRuns' -Defs $defs -Assertions { + param($r) + Assert-True ($r.Count -eq 1) 'Got 1 result' + Assert-True ($r[0].Status -eq 'Completed') 'Job completed' + Assert-True (Test-Path $marker) 'Cleanup marker file exists' + } +} + +# 6. Cleanup task that itself throws — should not crash the orchestrator +$scenarios['CleanupThrows'] = { + $dir = Join-Path $OutputRoot 'CleanupThrows' + + $badCleanup = { param($Tracker); throw 'Cleanup explosion!' } + + $defs = @( + (New-TestJob -Label 'cleanup-boom' -OutDir $dir ` + -InlineScript "Write-Output 'boom prep'" ` + -CleanupTask $badCleanup), + (New-TestJob -Label 'after-boom' -OutDir $dir ` + -InlineScript "Write-Output 'I should still finish'") + ) + + Invoke-Scenario -Name 'CleanupThrows' -Defs $defs -Assertions { + param($r) + Assert-True ($r.Count -eq 2) 'Got 2 results' + Assert-True (($r | Where-Object Label -eq 'cleanup-boom').Status -eq 'Completed') 'Boom job completed despite bad cleanup' + Assert-True (($r | Where-Object Label -eq 'after-boom').Status -eq 'Completed') 'Next job also completed' + } +} + +# 7. 
Concurrency pressure — 20 fast jobs, MaxConcurrent=5 +$scenarios['ConcurrencyPressure'] = { + $dir = Join-Path $OutputRoot 'ConcurrencyPressure' + $defs = @(1..20 | ForEach-Object { + New-TestJob -Label "conc-$_" -OutDir $dir ` + -InlineScript "Write-Output 'job $_ at $(Get-Date -f s)'; Start-Sleep -Milliseconds $(Get-Random -Min 200 -Max 1500)" + }) + + Invoke-Scenario -Name 'ConcurrencyPressure' -Defs $defs ` + -MaxConcurrent 5 -InactivityTimeout 15 -PollInterval 2 ` + -Assertions { + param($r) + Assert-True ($r.Count -eq 20) 'Got 20 results' + Assert-True (($r | Where-Object Status -eq 'Completed').Count -eq 20) 'All 20 completed' + # Verify logs have content + $withContent = ($r | Where-Object { + (Test-Path $_.LogPath) -and (Get-Item $_.LogPath).Length -gt 0 + }).Count + Assert-True ($withContent -eq 20) 'All 20 logs have content' + } +} + +# 8. Mixed bag — happy + throw + stale + cleanup in one run +$scenarios['MixedBag'] = { + $dir = Join-Path $OutputRoot 'MixedBag' + $marker = Join-Path $dir 'mixed-cleanup.marker' + + $cleanupOk = [scriptblock]::Create( + "param(`$Tracker); New-Item -ItemType File -Path '$($marker -replace "'","''")' -Force | Out-Null" + ) + + $defs = @( + (New-TestJob -Label 'mix-happy' -OutDir $dir -InlineScript "Write-Output 'happy'; Start-Sleep -Milliseconds 500"), + (New-TestJob -Label 'mix-throw' -OutDir $dir -InlineScript "throw 'kaboom'"), + (New-TestJob -Label 'mix-stale' -OutDir $dir -InlineScript "Start-Sleep -Seconds 120"), + (New-TestJob -Label 'mix-cleanup' -OutDir $dir -InlineScript "Write-Output 'with cleanup'" -CleanupTask $cleanupOk) + ) + + Invoke-Scenario -Name 'MixedBag' -Defs $defs ` + -MaxConcurrent 10 -InactivityTimeout 8 -MaxRetry 1 -PollInterval 2 ` + -Assertions { + param($r) + Assert-True ($r.Count -eq 4) 'Got 4 results' + Assert-True (($r | Where-Object Label -eq 'mix-happy').Status -eq 'Completed') 'Happy completed' + Assert-True (($r | Where-Object Label -eq 'mix-throw').Status -in 'Completed','Failed') 'Throw 
detected' + Assert-True (($r | Where-Object Label -eq 'mix-stale').Status -eq 'Abandoned') 'Stale abandoned' + Assert-True (($r | Where-Object Label -eq 'mix-stale').RetryCount -ge 1) 'Stale retried' + Assert-True (($r | Where-Object Label -eq 'mix-cleanup').Status -eq 'Completed') 'Cleanup job completed' + Assert-True (Test-Path $marker) 'Mixed cleanup marker exists' + } +} + +# ── run selected scenarios ─────────────────────────────────────────────── + +$toRun = if ($Scenario -eq 'All') { $scenarios.Keys | Sort-Object } else { @($Scenario) } + +$sw = [System.Diagnostics.Stopwatch]::StartNew() + +foreach ($name in $toRun) { + & $scenarios[$name] + + # ── inter-scenario cleanup ───────────────────────────────── + # Kill any leftover jobs (especially long-running stale-sim sleeps), + # force garbage collection, and pause briefly so handles release. + Get-Job | Stop-Job -ErrorAction SilentlyContinue + Get-Job | Remove-Job -Force -ErrorAction SilentlyContinue + [System.GC]::Collect() + Start-Sleep -Seconds 2 +} + +$sw.Stop() + +# ── summary ────────────────────────────────────────────────────────────── + +Write-Host "`n════════════════════════════════════════════════════════" -ForegroundColor Cyan +Write-Host " RESULTS: $($script:passCount) passed, $($script:failCount) failed ($([math]::Round($sw.Elapsed.TotalSeconds, 1))s)" -ForegroundColor Cyan +Write-Host "════════════════════════════════════════════════════════" -ForegroundColor Cyan + +# clean up jobs +Get-Job | Remove-Job -Force -ErrorAction SilentlyContinue + +exit $script:failCount diff --git a/.github/skills/pr-fix/LICENSE.txt b/.github/skills/pr-fix/LICENSE.txt new file mode 100644 index 000000000000..22aed37e650b --- /dev/null +++ b/.github/skills/pr-fix/LICENSE.txt @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) Microsoft Corporation. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/.github/skills/pr-fix/SKILL.md b/.github/skills/pr-fix/SKILL.md new file mode 100644 index 000000000000..ff7ccd4c932d --- /dev/null +++ b/.github/skills/pr-fix/SKILL.md @@ -0,0 +1,238 @@ +--- +name: pr-fix +description: Fix active PR review comments and resolve threads. Use when asked to fix PR comments, address review feedback, resolve review threads, implement PR fixes, or handle review iterations. Works with VS Code MCP tools to resolve GitHub threads after fixes are applied. +license: Complete terms in LICENSE.txt +--- + +# PR Fix Skill + +Fix active pull request review comments and resolve threads. This skill handles the **fix** part of the PR review cycle, separate from the review itself. 
+ +## ⚠️ Critical Architecture + +This skill requires **both** CLI scripts AND VS Code MCP tools: + +| Operation | Execution Method | +|-----------|------------------| +| Apply code fixes | Copilot/Claude CLI via script | +| Resolve review threads | **VS Code Agent** via `gh api graphql` | +| Check status | Script (read-only) | + +**WHY**: Copilot CLI's MCP is **read-only**. Only VS Code can resolve threads. + +## Skill Contents + +``` +.github/skills/pr-fix/ +├── SKILL.md # This file +├── LICENSE.txt # MIT License +├── references/ +│ ├── fix-pr-comments.prompt.md # AI prompt for fixing comments +│ └── mcp-config.json # MCP configuration +└── scripts/ + ├── Start-PRFix.ps1 # Main fix script + ├── Start-PRFixParallel.ps1 # Parallel runner (single terminal) + ├── Resolve-PRThreads.ps1 # Resolve threads helper + ├── Get-UnresolvedThreads.ps1 # Get threads needing resolution + └── IssueReviewLib.ps1 # Shared helpers +``` + +## Output + +- **Code changes**: Applied in the PR's worktree +- **Signal file**: `Generated Files/prFix/<pr>/.signal` + +## Signal File + +On completion, a `.signal` file is created for orchestrator coordination: + +```json +{ + "status": "success", + "prNumber": 45365, + "timestamp": "2026-02-04T10:05:23Z", + "unresolvedBefore": 3, + "unresolvedAfter": 0 +} +``` + +Status values: `success`, `partial` (some threads remain), `failure` + +## When to Use This Skill + +- Fix active review comments on a PR +- Address reviewer feedback +- Resolve review threads after fixing +- Run the fix portion of review/fix loop +- Implement changes requested in PR reviews + +## Prerequisites + +- GitHub CLI (`gh`) installed and authenticated +- Copilot CLI or Claude CLI installed +- PowerShell 7+ +- PR has active review comments to fix + +## Required Variables + +| Variable | Description | Example | +|----------|-------------|---------| +| `{{PRNumber}}` | Pull request number to fix | `45286` | + +## Workflow + +### Step 1: Check Unresolved Threads + +```powershell 
+# See what needs to be fixed +.github/skills/pr-fix/scripts/Get-UnresolvedThreads.ps1 -PRNumber {{PRNumber}} +``` + +### Step 2: Run Fix (CLI Script) + +```powershell +# Apply AI-generated fixes to address comments +.github/skills/pr-fix/scripts/Start-PRFix.ps1 -PRNumber {{PRNumber}} -CLIType copilot -Force +``` + +### Step 3: Resolve Threads (VS Code Agent) + +After fixes are pushed, **you (the VS Code agent) must resolve threads**: + +```powershell +# Get unresolved thread IDs +gh api graphql -f query=' + query { + repository(owner: "microsoft", name: "PowerToys") { + pullRequest(number: {{PRNumber}}) { + reviewThreads(first: 50) { + nodes { id isResolved path line } + } + } + } + } +' --jq '.data.repository.pullRequest.reviewThreads.nodes[] | select(.isResolved == false)' +``` + +```powershell +# Resolve each thread +gh api graphql -f query=' + mutation { + resolveReviewThread(input: {threadId: "{{threadId}}"}) { + thread { isResolved } + } + } +' +``` + +### Step 4: Verify All Resolved + +```powershell +# Confirm no unresolved threads remain +.github/skills/pr-fix/scripts/Get-UnresolvedThreads.ps1 -PRNumber {{PRNumber}} +``` + +## CLI Options + +| Parameter | Description | Default | +|-----------|-------------|---------| +| `-PRNumber` | PR number to fix | Required | +| `-CLIType` | AI CLI: `copilot` or `claude` | `copilot` | +| `-Model` | Copilot model (e.g., `gpt-5.2-codex`) | (optional) | +| `-Force` | Skip confirmation prompts | `false` | +| `-DryRun` | Show what would be done | `false` | + +## Review/Fix Loop Integration + +This skill is typically used with `pr-review` in a loop: + +``` +┌─────────────────┐ +│ pr-review │ ← Generate review, post comments +└────────┬────────┘ + │ + ▼ +┌─────────────────┐ +│ pr-fix │ ← Fix comments, resolve threads +└────────┬────────┘ + │ + ▼ +┌─────────────────┐ +│ Check status │ ← Any threads unresolved? 
+└────────┬────────┘ + │ + ┌────┴────┐ + │ YES │ NO + ▼ ▼ + (loop) ✓ Done +``` + +## VS Code Agent Operations + +These operations **must** be done by the VS Code agent (not scripts): + +| Operation | Method | +|-----------|--------| +| Resolve thread | `gh api graphql` with `resolveReviewThread` mutation | +| Unresolve thread | `gh api graphql` with `unresolveReviewThread` mutation | + +### Batch Resolve All Threads + +```powershell +# Get all unresolved thread IDs and resolve them +$threads = gh api graphql -f query=' + query { + repository(owner: "microsoft", name: "PowerToys") { + pullRequest(number: {{PRNumber}}) { + reviewThreads(first: 100) { + nodes { id isResolved } + } + } + } + } +' --jq '.data.repository.pullRequest.reviewThreads.nodes[] | select(.isResolved == false) | .id' + +foreach ($threadId in $threads) { + gh api graphql -f query="mutation { resolveReviewThread(input: {threadId: `"$threadId`"}) { thread { isResolved } } }" +} +``` + +## Troubleshooting + +| Problem | Solution | +|---------|----------| +| "Cannot resolve thread" | Use VS Code agent, not Copilot CLI | +| Fix not applied | Check worktree is on correct branch | +| Thread ID not found | Re-fetch threads, ID may have changed | +| Fix pushed but thread unresolved | Must explicitly resolve via GraphQL | + +## Batch Processing Multiple PRs (CRITICAL) + +**DO NOT spawn separate terminals for each PR.** Use the dedicated scripts: + +```powershell +# Run fixes in parallel via orchestrator (single terminal) +.github/skills/pr-fix/scripts/Start-PRFixParallel.ps1 -PRNumbers 45256,45257,45285,45286 -CLIType copilot -MaxConcurrent 3 -Force + +# Resolve threads (VS Code agent) +.github/skills/pr-fix/scripts/Resolve-PRThreads.ps1 -PRNumber 45256 +``` + +## Dependencies + +| Skill | Used For | +|-------|----------| +| `parallel-job-orchestrator` | Parallel execution of fix jobs across multiple PRs | + +`Start-PRFixParallel.ps1` delegates all parallel execution to the shared orchestrator. 
+Do NOT introduce custom `ForEach-Object -Parallel`, `Start-Job`, or `Start-Process` +patterns — use the orchestrator instead. + +## Related Skills + +| Skill | Purpose | +|-------|--------| +| `pr-review` | Review PR, generate findings, post comments | +| `parallel-job-orchestrator` | Shared parallel execution engine | +| `issue-fix` | Fix issues and create PRs | +| `issue-to-pr-cycle` | Full orchestration | diff --git a/.github/skills/pr-fix/references/fix-pr-comments.prompt.md b/.github/skills/pr-fix/references/fix-pr-comments.prompt.md new file mode 100644 index 000000000000..4d7c67d98628 --- /dev/null +++ b/.github/skills/pr-fix/references/fix-pr-comments.prompt.md @@ -0,0 +1,70 @@ +--- +description: 'Fix active pull request comments with scoped changes' +name: 'fix-pr-active-comments' +agent: 'agent' +argument-hint: 'PR number or active PR URL' +--- + +# Fix Active PR Comments + +## Mission +Resolve active pull request comments by applying only simple fixes. For complex refactors, write a plan instead of changing code. + +## Scope & Preconditions +- You must have an active pull request context or a provided PR number. +- Only implement simple changes. Do not implement large refactors. +- If required context is missing, request it and stop. + +## Inputs +- Required: ${input:pr_number:PR number or URL} +- Optional: ${input:comment_scope:files or areas to focus on} +- Optional: ${input:fixing_guidelines:additional fixing guidelines from the user} + +## Workflow +1. Locate all active (unresolved) PR review comments for the given PR. +2. For each comment, classify the change scope: + - Simple change: limited edits, localized fix, low risk, no broad redesign. + - Large refactor: multi-file redesign, architecture change, or risky behavior change. +3. For each large refactor request: + - Do not modify code. + - Write a planning document to Generated Files/prReview/${input:pr_number}/fixPlan/. +4. 
For each simple change request: + - Implement the fix with minimal edits. + - Run quick checks if needed. + - Commit and push the change. +5. For comments that seem invalid, unclear, or not applicable (even if simple): + - Do not change code. + - Add the item to a summary table in Generated Files/prReview/${input:pr_number}/fixPlan/overview.md. + - Consult back to the end user in a friendly, polite tone. +6. Respond to each comment that you fixed: + - Reply in the active conversation. + - Use a polite or friendly tone. + - Keep the response under 200 words. + - Resolve the comment after replying. + +## Output Expectations +- Simple fixes: code changes committed and pushed. +- Large refactors: a plan file saved to Generated Files/prReview/${input:pr_number}/fixPlan/. +- Invalid or unclear comments: captured in Generated Files/prReview/${input:pr_number}/fixPlan/overview.md. +- Each fixed comment has a reply under 200 words and is resolved. + +## Plan File Template +Use this template for each large refactor item: + +# Fix Plan: <short title> + +## Context +- Comment link: +- Impacted areas: + +## Overview Table Template +Use this table in Generated Files/prReview/${input:pr_number}/fixPlan/overview.md: + +| Comment link | Summary | Reason not applied | Suggested follow-up | +| --- | --- | --- | --- | +| | | | | + +## Quality Assurance +- Verify plan file path exists. +- Ensure no code changes were made for large refactor items. +- Confirm replies are under 200 words and comments are resolved. 
diff --git a/.github/skills/pr-fix/references/mcp-config.json b/.github/skills/pr-fix/references/mcp-config.json new file mode 100644 index 000000000000..5af15d54218c --- /dev/null +++ b/.github/skills/pr-fix/references/mcp-config.json @@ -0,0 +1,9 @@ +{ + "mcpServers": { + "github-artifacts": { + "command": "cmd", + "args": ["/c", "for /f %i in ('git rev-parse --show-toplevel') do node %i/tools/mcp/github-artifacts/launch.js"], + "tools": ["*"] + } + } +} diff --git a/.github/skills/pr-fix/scripts/Get-UnresolvedThreads.ps1 b/.github/skills/pr-fix/scripts/Get-UnresolvedThreads.ps1 new file mode 100644 index 000000000000..53e7f868607f --- /dev/null +++ b/.github/skills/pr-fix/scripts/Get-UnresolvedThreads.ps1 @@ -0,0 +1,112 @@ +<# +.SYNOPSIS + Get unresolved review threads on a PR. + +.DESCRIPTION + Lists all unresolved review threads with their IDs, paths, and comment bodies. + This information is needed to resolve threads via GraphQL. + +.PARAMETER PRNumber + PR number to check. + +.PARAMETER JsonOutput + Output as JSON for programmatic use. + +.EXAMPLE + ./Get-UnresolvedThreads.ps1 -PRNumber 45286 + +.EXAMPLE + ./Get-UnresolvedThreads.ps1 -PRNumber 45286 -JsonOutput +#> + +[CmdletBinding()] +param( + [Parameter(Mandatory)] + [int]$PRNumber, + + [switch]$JsonOutput +) + +$scriptDir = Split-Path -Parent $MyInvocation.MyCommand.Path +. 
(Join-Path $scriptDir 'IssueReviewLib.ps1') + +try { + $query = @" +query { + repository(owner: "microsoft", name: "PowerToys") { + pullRequest(number: $PRNumber) { + reviewThreads(first: 100) { + nodes { + id + isResolved + path + line + comments(first: 1) { + nodes { + body + author { login } + createdAt + } + } + } + } + } + } +} +"@ + + $result = gh api graphql -f query=$query 2>$null | ConvertFrom-Json + + if (-not $result -or -not $result.data) { + throw "Failed to fetch PR threads" + } + + $threads = $result.data.repository.pullRequest.reviewThreads.nodes + $unresolvedThreads = $threads | Where-Object { -not $_.isResolved } + + if ($JsonOutput) { + $unresolvedThreads | ConvertTo-Json -Depth 5 + return + } + + if ($unresolvedThreads.Count -eq 0) { + Write-Host "✓ No unresolved threads on PR #$PRNumber" -ForegroundColor Green + return + } + + Write-Host "" + Write-Host "=== UNRESOLVED THREADS ON PR #$PRNumber ===" -ForegroundColor Cyan + Write-Host ("-" * 80) + + foreach ($thread in $unresolvedThreads) { + $comment = $thread.comments.nodes[0] + $preview = if ($comment.body.Length -gt 100) { + $comment.body.Substring(0, 100) + "..." 
+ } else { + $comment.body + } + + Write-Host "" + Write-Host "Thread ID: " -NoNewline -ForegroundColor Yellow + Write-Host $thread.id + Write-Host "File: " -NoNewline -ForegroundColor Gray + Write-Host "$($thread.path):$($thread.line)" + Write-Host "Author: " -NoNewline -ForegroundColor Gray + Write-Host $comment.author.login + Write-Host "Comment: " -ForegroundColor Gray + Write-Host " $preview" + } + + Write-Host "" + Write-Host ("-" * 80) + Write-Host "Total unresolved: $($unresolvedThreads.Count)" -ForegroundColor Yellow + Write-Host "" + Write-Host "To resolve a thread:" -ForegroundColor Cyan + Write-Host ' gh api graphql -f query=''mutation { resolveReviewThread(input: {threadId: "THREAD_ID"}) { thread { isResolved } } }''' + + return $unresolvedThreads +} +catch { + Write-Host "Error: $($_.Exception.Message)" -ForegroundColor Red + exit 1 +} diff --git a/.github/skills/pr-fix/scripts/IssueReviewLib.ps1 b/.github/skills/pr-fix/scripts/IssueReviewLib.ps1 new file mode 100644 index 000000000000..b5fb8b9c754b --- /dev/null +++ b/.github/skills/pr-fix/scripts/IssueReviewLib.ps1 @@ -0,0 +1,18 @@ +# IssueReviewLib.ps1 - Minimal helpers for PR review workflow +# Part of the PowerToys GitHub Copilot/Claude Code issue review system +# This is a trimmed version - pr-review only needs console helpers and repo root + +#region Console Output Helpers +function Info { param([string]$Message) Write-Host $Message -ForegroundColor Cyan } +function Warn { param([string]$Message) Write-Host $Message -ForegroundColor Yellow } +function Err { param([string]$Message) Write-Host $Message -ForegroundColor Red } +function Success { param([string]$Message) Write-Host $Message -ForegroundColor Green } +#endregion + +#region Repository Helpers +function Get-RepoRoot { + $root = git rev-parse --show-toplevel 2>$null + if (-not $root) { throw 'Not inside a git repository.' 
} + return (Resolve-Path $root).Path +} +#endregion diff --git a/.github/skills/pr-fix/scripts/Resolve-PRThreads.ps1 b/.github/skills/pr-fix/scripts/Resolve-PRThreads.ps1 new file mode 100644 index 000000000000..84226b2e0c15 --- /dev/null +++ b/.github/skills/pr-fix/scripts/Resolve-PRThreads.ps1 @@ -0,0 +1,31 @@ +<# +.SYNOPSIS + Resolve all unresolved review threads for a PR. + +.PARAMETER PRNumber + PR number to resolve. +#> +[CmdletBinding()] +param( + [Parameter(Mandatory)] + [int]$PRNumber +) + +$repoRoot = Resolve-Path (Join-Path $PSScriptRoot '..\..\..\..') +Set-Location $repoRoot + +$query = 'query { repository(owner:"microsoft", name:"PowerToys") { pullRequest(number:' + $PRNumber + ') { reviewThreads(first:100) { nodes { id isResolved } } } } }' +$threads = gh api graphql -f query=$query --jq '.data.repository.pullRequest.reviewThreads.nodes[] | select(.isResolved==false) | .id' + +foreach ($threadId in $threads) { + $mutation = 'mutation { resolveReviewThread(input:{threadId:"' + $threadId + '"}) { thread { isResolved } } }' + gh api graphql -f query=$mutation | Out-Null +} + +$threadsAfter = gh api graphql -f query=$query --jq '.data.repository.pullRequest.reviewThreads.nodes[] | select(.isResolved==false) | .id' + +if ($threadsAfter) { + Write-Warning "Unresolved threads remain for PR #$PRNumber" +} else { + Write-Host "All threads resolved for PR #$PRNumber" +} \ No newline at end of file diff --git a/.github/skills/pr-fix/scripts/Start-PRFix.ps1 b/.github/skills/pr-fix/scripts/Start-PRFix.ps1 new file mode 100644 index 000000000000..0def6d27a031 --- /dev/null +++ b/.github/skills/pr-fix/scripts/Start-PRFix.ps1 @@ -0,0 +1,349 @@ +<# +.SYNOPSIS + Fix active PR review comments using AI CLI. + +.DESCRIPTION + Kicks off Copilot/Claude CLI to address active review comments on a PR. + Does NOT resolve threads - that must be done by VS Code agent via GraphQL. + +.PARAMETER PRNumber + PR number to fix. + +.PARAMETER CLIType + AI CLI to use: copilot or claude. 
Default: copilot.
+
+.PARAMETER Model
+    Copilot CLI model to use (e.g., gpt-5.2-codex).
+
+.PARAMETER WorktreePath
+    Path to the worktree containing the PR branch. Auto-detected if not specified.
+
+.PARAMETER DryRun
+    Show what would be done without executing.
+
+.PARAMETER Force
+    Skip confirmation prompts.
+
+.EXAMPLE
+    ./Start-PRFix.ps1 -PRNumber 45286 -CLIType copilot -Force
+
+.NOTES
+    After this script completes, use VS Code agent to resolve threads via GraphQL.
+#>
+
+# NOTE: Do NOT use [CmdletBinding()], [Parameter(Mandatory)], or [ValidateSet()]
+# here. These make the script "advanced" which propagates ErrorActionPreference
+# through PS7's plumbing and can silently crash the orchestrator's monitoring loop.
+param(
+    [int]$PRNumber,
+
+    [string]$CLIType = 'copilot',
+
+    [string]$Model,
+
+    [string]$WorktreePath,
+
+    [switch]$DryRun,
+
+    [switch]$Force,
+
+    [switch]$Help
+)
+
+$ErrorActionPreference = 'Stop'
+
+# Help must run before mandatory-parameter validation; otherwise `-Help` alone
+# trips the "-PRNumber is required" check and the help text is unreachable.
+if ($Help) {
+    Get-Help $MyInvocation.MyCommand.Path -Full
+    return
+}
+
+# Manual validation
+if (-not $PRNumber -or $PRNumber -eq 0) {
+    Write-Error 'Start-PRFix: -PRNumber is required.'
+    return
+}
+if ($CLIType -notin 'copilot', 'claude') {
+    Write-Error "Start-PRFix: Invalid -CLIType '$CLIType'. Must be 'copilot' or 'claude'."
+    return
+}
+
+$scriptDir = Split-Path -Parent $MyInvocation.MyCommand.Path
+. (Join-Path $scriptDir 'IssueReviewLib.ps1')
+
+$repoRoot = Get-RepoRoot
+
+# Resolve config directory name (.github or .claude) from script location
+$_cfgDir = if ($PSScriptRoot -match '[\\/](\.github|\.claude)[\\/]') { $Matches[1] } else { '.github' }
+$worktreeLib = Join-Path $repoRoot 'tools/build/WorktreeLib.ps1'
+if (Test-Path $worktreeLib) {
+    . $worktreeLib
+}
+
+function Get-PRBranch {
+    param([int]$PRNumber)
+
+    $prInfo = gh pr view $PRNumber --json headRefName 2>$null | ConvertFrom-Json
+    if ($prInfo) {
+        return $prInfo.headRefName
+    }
+    return $null
+}
+
+function Find-WorktreeForPR {
+    param([int]$PRNumber)
+
+    $branch = Get-PRBranch -PRNumber $PRNumber
+    if (-not $branch) {
+        return $null
+    }
+
+    $worktrees = Get-WorktreeEntries
+    $wt = $worktrees | Where-Object { $_.Branch -eq $branch } | Select-Object -First 1
+
+    if ($wt) {
+        return $wt.Path
+    }
+
+    # If no dedicated worktree, check if we're on that branch in main repo
+    Push-Location $repoRoot
+    try {
+        $currentBranch = git branch --show-current 2>$null
+        if ($currentBranch -eq $branch) {
+            return $repoRoot
+        }
+    }
+    finally {
+        Pop-Location
+    }
+
+    return $null
+}
+
+function Get-ActiveComments {
+    param([int]$PRNumber)
+
+    try {
+        $comments = gh api "repos/microsoft/PowerToys/pulls/$PRNumber/comments" 2>$null | ConvertFrom-Json
+        # Filter to root comments (not replies)
+        $rootComments = $comments | Where-Object { $null -eq $_.in_reply_to_id }
+        return $rootComments
+    }
+    catch {
+        return @()
+    }
+}
+
+function Get-UnresolvedThreadCount {
+    param([int]$PRNumber)
+
+    try {
+        $result = gh api graphql -f query="query { repository(owner: `"microsoft`", name: `"PowerToys`") { pullRequest(number: $PRNumber) { reviewThreads(first: 100) { nodes { isResolved } } } } }" 2>$null | ConvertFrom-Json
+        $threads = $result.data.repository.pullRequest.reviewThreads.nodes
+        $unresolved = $threads | Where-Object { -not $_.isResolved }
+        return @($unresolved).Count
+    }
+    catch {
+        return 0
+    }
+}
+
+#region Main
+try {
+    # Parenthesize so `"=" * 60` is evaluated in expression mode; without the
+    # parens, argument mode passes "=", "*", "60" as three separate arguments.
+    Info ("=" * 60)
+    Info "PR FIX - PR #$PRNumber"
+    Info ("=" * 60)
+
+    # Get PR info
+    $prInfo = gh pr view $PRNumber --json state,headRefName,url 2>$null | ConvertFrom-Json
+    if (-not $prInfo) {
+        throw "PR #$PRNumber not found"
+    }
+
+    if ($prInfo.state -ne 'OPEN') {
+        Warn "PR #$PRNumber is $($prInfo.state), not OPEN"
+        return
+    }
+
+    Info "PR URL: $($prInfo.url)"
+    Info "Branch: $($prInfo.headRefName)"
+    Info "CLI: $CLIType"
+
+    # Find worktree
+    if (-not $WorktreePath) {
+        $WorktreePath = Find-WorktreeForPR -PRNumber $PRNumber
+    }
+
+    if (-not $WorktreePath -or -not (Test-Path $WorktreePath)) {
+        Warn "No worktree found for PR #$PRNumber"
+        Warn "Using main repo root. Make sure the PR branch is checked out."
+        $WorktreePath = $repoRoot
+    }
+
+    Info "Working directory: $WorktreePath"
+
+    # Check for active comments
+    $comments = Get-ActiveComments -PRNumber $PRNumber
+    $unresolvedCount = Get-UnresolvedThreadCount -PRNumber $PRNumber
+
+    Info ""
+    Info "Active review comments: $($comments.Count)"
+    Info "Unresolved threads: $unresolvedCount"
+
+    if ($comments.Count -eq 0 -and $unresolvedCount -eq 0) {
+        Success "No active comments or unresolved threads to fix!"
+        return @{ PRNumber = $PRNumber; Status = 'NothingToFix' }
+    }
+
+    if ($DryRun) {
+        Info ""
+        Warn "[DRY RUN] Would run AI CLI to fix comments"
+        Info "Comments to address:"
+        foreach ($c in $comments | Select-Object -First 5) {
+            Info " - $($c.path):$($c.line) - $($c.body.Substring(0, [Math]::Min(80, $c.body.Length)))..."
+        }
+        return @{ PRNumber = $PRNumber; Status = 'DryRun' }
+    }
+
+    # Confirm
+    if (-not $Force) {
+        $confirm = Read-Host "Fix $($comments.Count) comments on PR #$PRNumber? (y/N)"
+        if ($confirm -notmatch '^[yY]') {
+            Info "Cancelled."
+            return
+        }
+    }
+
+    # Build prompt
+    $prompt = @"
+You are fixing review comments on PR #$PRNumber.
+
+Read the active review comments using GitHub tools and address each one:
+1. Fetch the PR review comments
+2. For each comment, understand what change is requested
+3. Make the code changes to address the feedback
+4. Build and verify your changes work
+
+Focus on the reviewer's feedback and make targeted fixes.
+"@
+
+    # Ensure config dirs exist in worktree (agents, skills, instructions, prompts, top-level md)
+    # These aren't on the PR branch so the CLI can't find them without this.
+    if ($WorktreePath -ne $repoRoot) {
+        $sourceCfg = Join-Path $repoRoot $_cfgDir
+        $destCfg = Join-Path $WorktreePath $_cfgDir
+        if (Test-Path $sourceCfg) {
+            if (-not (Test-Path $destCfg)) {
+                New-Item -ItemType Directory -Path $destCfg -Force | Out-Null
+            }
+            foreach ($sub in @('agents', 'skills', 'instructions', 'prompts')) {
+                $src = Join-Path $sourceCfg $sub
+                $dst = Join-Path $destCfg $sub
+                if ((Test-Path $src) -and -not (Test-Path $dst)) {
+                    Copy-Item -Path $src -Destination $dst -Recurse -Force
+                    Info "Copied $_cfgDir/$sub to worktree"
+                }
+            }
+            foreach ($mdFile in @('copilot-instructions.md', 'CLAUDE.md')) {
+                $src = Join-Path $sourceCfg $mdFile
+                $dst = Join-Path $destCfg $mdFile
+                if ((Test-Path $src) -and -not (Test-Path $dst)) {
+                    Copy-Item -Path $src -Destination $dst -Force
+                    Info "Copied $_cfgDir/$mdFile to worktree"
+                }
+            }
+        }
+    }
+
+    # MCP config
+    $mcpConfig = "@$_cfgDir/skills/pr-fix/references/mcp-config.json"
+
+    Info ""
+    Info "Starting AI fix..."
+
+    Push-Location $WorktreePath
+    try {
+        switch ($CLIType) {
+            'copilot' {
+                $copilotArgs = @('--additional-mcp-config', $mcpConfig, '-p', $prompt, '--yolo', '--agent', 'FixPR')
+                if ($Model) {
+                    $copilotArgs += @('--model', $Model)
+                }
+                $output = & copilot @copilotArgs 2>&1
+                # Log output
+                $logPath = Join-Path $repoRoot "Generated Files/prReview/$PRNumber"
+                if (-not (Test-Path $logPath)) {
+                    New-Item -ItemType Directory -Path $logPath -Force | Out-Null
+                }
+                $output | Out-File -FilePath (Join-Path $logPath "_fix.log") -Force
+            }
+            'claude' {
+                $output = & claude --print --dangerously-skip-permissions --agent FixPR --prompt $prompt 2>&1
+                $logPath = Join-Path $repoRoot "Generated Files/prReview/$PRNumber"
+                if (-not (Test-Path $logPath)) {
+                    New-Item -ItemType Directory -Path $logPath -Force | Out-Null
+                }
+                $output | Out-File -FilePath (Join-Path $logPath "_fix.log") -Force
+            }
+        }
+    }
+    finally {
+        Pop-Location
+    }
+
+    # Check results
+    $newUnresolvedCount = Get-UnresolvedThreadCount -PRNumber $PRNumber
+
+    Info ""
+    Info "Fix complete."
+    Info "Unresolved threads before: $unresolvedCount"
+    Info "Unresolved threads after: $newUnresolvedCount"
+
+    if ($newUnresolvedCount -gt 0) {
+        Warn ""
+        Warn "⚠️ $newUnresolvedCount threads still unresolved."
+        Warn "Use VS Code agent to resolve them via GraphQL:"
+        Warn "  gh api graphql -f query='mutation { resolveReviewThread(input: {threadId: \"THREAD_ID\"}) { thread { isResolved } } }'"
+    }
+    else {
+        Success "✓ All threads resolved!"
+    }
+
+    # Write signal file
+    $signalDir = Join-Path $repoRoot "Generated Files/prFix/$PRNumber"
+    if (-not (Test-Path $signalDir)) { New-Item -ItemType Directory -Path $signalDir -Force | Out-Null }
+    @{
+        status = if ($newUnresolvedCount -eq 0) { "success" } else { "partial" }
+        prNumber = $PRNumber
+        timestamp = (Get-Date).ToString("o")
+        unresolvedBefore = $unresolvedCount
+        unresolvedAfter = $newUnresolvedCount
+    } | ConvertTo-Json | Set-Content "$signalDir/.signal" -Force
+
+    return @{
+        PRNumber = $PRNumber
+        Status = 'FixApplied'
+        UnresolvedBefore = $unresolvedCount
+        UnresolvedAfter = $newUnresolvedCount
+    }
+}
+catch {
+    Err "Error: $($_.Exception.Message)"
+
+    # Write failure signal
+    $signalDir = Join-Path $repoRoot "Generated Files/prFix/$PRNumber"
+    if (-not (Test-Path $signalDir)) { New-Item -ItemType Directory -Path $signalDir -Force | Out-Null }
+    @{
+        status = "failure"
+        prNumber = $PRNumber
+        timestamp = (Get-Date).ToString("o")
+        error = $_.Exception.Message
+    } | ConvertTo-Json | Set-Content "$signalDir/.signal" -Force
+
+    return @{
+        PRNumber = $PRNumber
+        Status = 'FixFailed'
+        Error = $_.Exception.Message
+    }
+}
+#endregion
+
diff --git a/.github/skills/pr-fix/scripts/Start-PRFixParallel.ps1 b/.github/skills/pr-fix/scripts/Start-PRFixParallel.ps1
new file mode 100644
index 000000000000..30f0aa0267b9
--- /dev/null
+++ b/.github/skills/pr-fix/scripts/Start-PRFixParallel.ps1
@@ -0,0 +1,165 @@
+<#
+.SYNOPSIS
+    Run pr-fix in parallel via the parallel-job-orchestrator skill.
+
+.DESCRIPTION
+    Builds one job definition per PR and delegates to the shared
+    parallel-job-orchestrator. Each job invokes Start-PRFix.ps1 for a
+    single PR in its worktree.
+
+    DO NOT add [CmdletBinding()], [Parameter(Mandatory)], or [ValidateSet()]
+    here — those attributes make the script "advanced" which propagates
+    ErrorActionPreference and can crash the orchestrator's monitoring loop.
+
+.PARAMETER PRNumbers
+    PR numbers to fix (required).
+
+.PARAMETER MaxConcurrent
+    Maximum parallel fix jobs. Default: 3.
+
+.PARAMETER CLIType
+    AI CLI type: copilot or claude. Default: copilot.
+
+.PARAMETER Model
+    Copilot CLI model to use (e.g., gpt-5.2-codex).
+
+.PARAMETER InactivityTimeoutSeconds
+    Kill job if log doesn't grow for this many seconds. Default: 120.
+
+.PARAMETER MaxRetryCount
+    Retry attempts after inactivity kill. Default: 2.
+
+.PARAMETER Force
+    Skip confirmation prompts in Start-PRFix.ps1.
+
+.EXAMPLE
+    ./Start-PRFixParallel.ps1 -PRNumbers 45286, 45287, 45288 -MaxConcurrent 4
+#>
+param(
+    [int[]]$PRNumbers,
+
+    [int]$MaxConcurrent = 3,
+
+    [string]$CLIType = 'copilot',
+
+    [string]$Model,
+
+    [int]$InactivityTimeoutSeconds = 120,
+
+    [int]$MaxRetryCount = 2,
+
+    [switch]$Force
+)
+
+$ErrorActionPreference = 'Stop'
+
+# Manual validation
+if (-not $PRNumbers -or $PRNumbers.Count -eq 0) {
+    Write-Error 'Start-PRFixParallel: -PRNumbers is required.'
+    return
+}
+if ($CLIType -notin 'copilot', 'claude') {
+    Write-Error "Start-PRFixParallel: Invalid -CLIType '$CLIType'. Must be 'copilot' or 'claude'."
+    return
+}
+
+$scriptDir = Split-Path -Parent $MyInvocation.MyCommand.Path
+$repoRoot = Resolve-Path (Join-Path $scriptDir '..\..\..\..')
+$fixScript = Join-Path $scriptDir 'Start-PRFix.ps1'
+$orchPath = Join-Path $scriptDir '..\..\parallel-job-orchestrator\scripts\Invoke-SimpleJobOrchestrator.ps1'
+
+if (-not (Test-Path $fixScript)) {
+    Write-Error "Start-PRFix.ps1 not found: $fixScript"
+    return
+}
+if (-not (Test-Path $orchPath)) {
+    Write-Error "Orchestrator not found: $orchPath"
+    return
+}
+
+# Output root for logs
+$outputRoot = Join-Path $repoRoot 'Generated Files' 'prFix'
+if (-not (Test-Path $outputRoot)) {
+    New-Item -ItemType Directory -Path $outputRoot -Force | Out-Null
+}
+
+# Build job definitions
+$jobDefs = @(foreach ($pr in $PRNumbers) {
+    # Resolve worktree for this PR
+    $branch = $null
+    try { $branch = (gh pr view $pr --json headRefName -q .headRefName 2>$null) } catch { }
+
+    $worktree = $null
+    if ($branch) {
+        $wtLine = git worktree list 2>$null | Select-String $branch | Select-Object -First 1
+        if ($wtLine) { $worktree = ($wtLine -split '\s+')[0] }
+    }
+
+    if (-not $worktree) {
+        Write-Host "[pr-$pr] No worktree found for branch '$branch' — using repo root" -ForegroundColor Yellow
+        $worktree = $repoRoot
+    }
+
+    $prOutputDir = Join-Path $outputRoot "$pr"
+    New-Item -ItemType Directory -Path $prOutputDir -Force | Out-Null
+    $logFile = Join-Path $prOutputDir "_fix.log"
+
+    # Build the command arguments for Start-PRFix.ps1
+    $fixArgs = @(
+        '-File', $fixScript,
+        '-PRNumber', $pr,
+        '-CLIType', $CLIType,
+        '-WorktreePath', $worktree,
+        '-Force'
+    )
+    if ($Model) { $fixArgs += @('-Model', $Model) }
+
+    @{
+        Label = "fix-pr-$pr"
+        ExecutionParameters = @{
+            JobName = "fix-pr-$pr"
+            Command = 'pwsh'
+            Arguments = $fixArgs
+            WorkingDir = [string]$worktree
+            OutputDir = $prOutputDir
+            LogPath = $logFile
+        }
+        MonitorFiles = @($logFile)
+        CleanupTask = $null
+    }
+})
+
+Write-Host "`nBuilt $($jobDefs.Count) fix job(s):" -ForegroundColor Cyan
+$jobDefs | ForEach-Object { Write-Host "  $($_.Label)" -ForegroundColor Gray }
+
+# Run via orchestrator
+$savedEAP = $ErrorActionPreference
+$ErrorActionPreference = 'Continue'
+
+$results = & $orchPath `
+    -JobDefinitions $jobDefs `
+    -MaxConcurrent $MaxConcurrent `
+    -InactivityTimeoutSeconds $InactivityTimeoutSeconds `
+    -MaxRetryCount $MaxRetryCount `
+    -PollIntervalSeconds 5 `
+    -LogDir $outputRoot
+
+$ErrorActionPreference = $savedEAP
+
+# Summary
+$succeeded = @($results | Where-Object { $_.Status -eq 'Completed' })
+$failed = @($results | Where-Object { $_.Status -ne 'Completed' })
+
+Write-Host "`n$("=" * 60)" -ForegroundColor Cyan
+Write-Host "PR FIX PARALLEL COMPLETE" -ForegroundColor Cyan
+Write-Host ("=" * 60) -ForegroundColor Cyan
+Write-Host "Total: $($results.Count)"
+Write-Host "Succeeded: $($succeeded.Count)" -ForegroundColor Green
+if ($failed.Count -gt 0) {
+    Write-Host "Failed: $($failed.Count)" -ForegroundColor Red
+    foreach ($r in $failed) { Write-Host "  $($r.Label) — $($r.Status)" -ForegroundColor Red }
+}
+
+$results | Format-Table Label, Status, JobState, ExitCode, RetryCount -AutoSize
+
+return $results
diff --git a/.github/skills/pr-review/LICENSE.txt b/.github/skills/pr-review/LICENSE.txt
new file mode 100644
index 000000000000..22aed37e650b
--- /dev/null
+++ b/.github/skills/pr-review/LICENSE.txt
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) Microsoft Corporation.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/.github/skills/pr-review/SKILL.md b/.github/skills/pr-review/SKILL.md new file mode 100644 index 000000000000..fe40b987c3d2 --- /dev/null +++ b/.github/skills/pr-review/SKILL.md @@ -0,0 +1,114 @@ +--- +name: pr-review +description: Comprehensive pull request review with multi-step analysis and comment posting. Use when asked to review a PR, analyze pull request changes, check PR for issues, post review comments, validate PR quality, run code review on a PR, or audit pull request. Generates 13 review step files covering functionality, security, performance, accessibility, and more. For FIXING PR comments, use the pr-fix skill instead. +license: Complete terms in LICENSE.txt +--- + +# PR Review Skill + +**Review** PRs only. To **fix** review comments, use `pr-fix`. + +## What to Do + +Run the review script with the PR number(s): + +```powershell +.github/skills/pr-review/scripts/Start-PRReviewWorkflow.ps1 -PRNumbers <N> +``` + +The script spawns Copilot CLI, which follows [review-pr.prompt.md](./references/review-pr.prompt.md) to execute 13 review steps and write results to `Generated Files/prReview/<N>/`. 
+ +### Options + +| Parameter | Description | Default | +|-----------|-------------|---------| +| `-PRNumbers` | PR number(s) **(required)** | — | +| `-CLIType` | `copilot` or `claude` | `copilot` | +| `-Model` | Model override | (default) | +| `-MinSeverity` | Min severity to post: `high` / `medium` / `low` / `info` | `medium` | +| `-MaxConcurrent` | Max parallel review jobs (via orchestrator) | `4` | +| `-InactivityTimeoutSeconds` | Kill CLI if log doesn't grow | `60` | +| `-MaxRetryCount` | Retry attempts after inactivity kill | `3` | +| `-OutputRoot` | Review output root folder | `Generated Files/prReview` | +| `-LogPath` | Workflow log file path | `Start-PRReviewWorkflow.log` | +| `-Force` | Re-review PRs that already have output | `false` | +| `-DryRun` | Preview without executing | `false` | + +Completed reviews are auto-skipped. Use `-Force` to redo. + +### If You ARE the Reviewer + +When running inside Copilot CLI (i.e. you were spawned by the script), follow [review-pr.prompt.md](./references/review-pr.prompt.md) directly. It tells you: + +1. Fetch PR data with `gh` +2. Execute each step by loading its prompt file on-demand +3. Write each step's output to `Generated Files/prReview/<N>/XX-name.md` +4. Update `.signal` after every step +5. Generate `00-OVERVIEW.md` after all steps + +Each step prompt also has `## External references (MUST research)` — fetch those URLs and include a `## References consulted` section citing specific violation IDs (WCAG 1.4.3, OWASP A03, etc.). 
+ +### Step Prompts (loaded on-demand) + +| Step | Prompt | Focus | +|------|--------|-------| +| 01 | [Functionality](./references/01-functionality.prompt.md) | Correctness, edge cases | +| 02 | [Compatibility](./references/02-compatibility.prompt.md) | Breaking changes, versioning | +| 03 | [Performance](./references/03-performance.prompt.md) | Perf implications, async | +| 04 | [Accessibility](./references/04-accessibility.prompt.md) | WCAG 2.1, a11y | +| 05 | [Security](./references/05-security.prompt.md) | OWASP, CWE, SDL | +| 06 | [Localization](./references/06-localization.prompt.md) | L10n readiness | +| 07 | [Globalization](./references/07-globalization.prompt.md) | BiDi, ICU, date/time | +| 08 | [Extensibility](./references/08-extensibility.prompt.md) | Plugin API, SemVer | +| 09 | [SOLID Design](./references/09-solid-design.prompt.md) | Design principles | +| 10 | [Repo Patterns](./references/10-repo-patterns.prompt.md) | PowerToys conventions | +| 11 | [Docs & Automation](./references/11-docs-automation.prompt.md) | Documentation | +| 12 | [Code Comments](./references/12-code-comments.prompt.md) | Comment quality | +| 13 | [Copilot Guidance](./references/13-copilot-guidance.prompt.md) | Agent/prompt files | + +## Scripts + +| Script | Purpose | +|--------|---------| +| [Start-PRReviewWorkflow.ps1](./scripts/Start-PRReviewWorkflow.ps1) | Orchestrator — run this | +| [Post-ReviewComments.ps1](./scripts/Post-ReviewComments.ps1) | Post comments to GitHub | +| [Get-GitHubPrFilePatch.ps1](./scripts/Get-GitHubPrFilePatch.ps1) | Fetch PR file diffs | +| [Get-GitHubRawFile.ps1](./scripts/Get-GitHubRawFile.ps1) | Download repo files at a ref | +| [Get-PrIncrementalChanges.ps1](./scripts/Get-PrIncrementalChanges.ps1) | Detect changes since last review | +| [Test-IncrementalReview.ps1](./scripts/Test-IncrementalReview.ps1) | Preview incremental detection | + +## Execution & Monitoring Rules + +Batch reviews take **5–30 minutes** depending on PR count and 
complexity. The agent MUST: + +1. **Launch as a detached process** for batch runs (>2 PRs) — VS Code terminal idle detection kills background processes. Use `Start-Process -WindowStyle Hidden` with `Tee-Object` to a log file. +2. **Poll the orchestrator log every 60–120 seconds** until all jobs report `Completed`, `Failed`, or `Abandoned`. +3. **Do NOT exit or ask the user to check back** — keep monitoring until the orchestrator finishes. +4. **On process death**, check the orchestrator log, clean up partial output, and relaunch automatically. +5. **Report final results** with a table showing per-PR status, exit codes, and retry counts. + +## Post-Execution Review + +After each run, quickly validate quality and update guidance when needed: + +1. Confirm outputs exist under the configured `-OutputRoot` for each PR. +2. Spot-check `00-OVERVIEW.md` and 2-3 step files for correctness and completeness. +3. If repeated gaps are found, refine the relevant prompt in [references](./references). +4. If behavior changed, update this file’s Options/Workflow docs in the same change. +5. Record concrete examples of failures to prevent repeating ambiguous guidance. + +## Dependencies + +This skill depends on the **parallel-job-orchestrator** skill for batch execution. +The runner script (`Invoke-PRReviewSimpleRunner.ps1`) builds job definitions and +delegates to `parallel-job-orchestrator/scripts/Invoke-SimpleJobOrchestrator.ps1` +for queuing, monitoring, retry, and cleanup. Do NOT use `Start-Job`, +`ForEach-Object -Parallel`, or `Start-Process` directly. 
+ +## Related Skills + +| Skill | Purpose | +|-------|---------| +| `parallel-job-orchestrator` | Parallel execution engine (REQUIRED for batch runs) | +| `pr-fix` | Fix review comments after this skill identifies issues | +| `issue-to-pr-cycle` | Full orchestration (review → fix loop) | diff --git a/.github/skills/pr-review/references/01-functionality.prompt.md b/.github/skills/pr-review/references/01-functionality.prompt.md new file mode 100644 index 000000000000..2b663caa4542 --- /dev/null +++ b/.github/skills/pr-review/references/01-functionality.prompt.md @@ -0,0 +1,78 @@ +# Step 01: Functionality Review + +**Goal**: Verify the PR's code changes correctly implement the intended functionality without introducing regressions. + +## Output file +`Generated Files/prReview/{{pr_number}}/01-functionality.md` + +## Checks to execute + +### Core functionality +- [ ] Does the code do what the PR description/linked issue claims? +- [ ] Are all acceptance criteria from the linked issue addressed? +- [ ] Do new features work correctly in both enabled and disabled states? +- [ ] Are feature flags/settings properly respected? + +### Logic correctness +- [ ] Are conditional branches handling all expected cases? +- [ ] Are loops terminating correctly (no infinite loops, off-by-one errors)? +- [ ] Are null/empty checks in place where needed? +- [ ] Are error conditions handled gracefully? +- [ ] Are edge cases considered (empty input, max values, boundary conditions)? + +### State management +- [ ] Is state properly initialized before use? +- [ ] Is state cleaned up appropriately (disposal, event unsubscribe)? +- [ ] Are race conditions possible with shared state? +- [ ] Is state persisted/loaded correctly for settings? + +### Integration points +- [ ] Do changes integrate correctly with existing code paths? +- [ ] Are dependencies properly injected/resolved? +- [ ] Do IPC/inter-process communications work correctly? +- [ ] Are module enable/disable transitions handled? 
+ +### PowerToys-specific checks +- [ ] Does the module interface contract remain intact? +- [ ] Are hotkey registration and unregistration balanced? +- [ ] Does the feature work correctly with Runner lifecycle? +- [ ] Are Settings UI changes reflected in the module behavior? + +## File template +```md +# Functionality Review +**PR:** {{pr_number}} — Base:{{baseRefName}} Head:{{headRefName}} +**Review iteration:** {{iteration}} + +## Iteration history +### Iteration {{iteration}} +- <Key finding 1> +- <Key finding 2> + +## Checks executed +- <List specific checks performed> + +## Findings +(If none, write **None**. Otherwise use mcp-review-comment blocks:) + +```mcp-review-comment +{"file":"path/to/file.cs","start_line":123,"end_line":125,"severity":"high|medium|low|info","tags":["functionality","pr-{{pr_number}}"],"body":"Problem → Why it matters → Concrete fix."} +``` +``` + +## Severity guidelines +- **High**: Code doesn't work as intended, crashes, data loss possible +- **Medium**: Partial functionality, edge cases broken, degraded experience +- **Low**: Minor issues, cosmetic problems, suboptimal but working +- **Info**: Suggestions for improvement, not blocking + +## External references (MUST research) +Before completing this step, fetch and check the PR against these authoritative sources: + +| Reference | URL | Check for | +| --- | --- | --- | +| C# Design Guidelines | https://docs.microsoft.com/en-us/dotnet/csharp/fundamentals/coding-style/coding-conventions | Coding conventions violations | +| .NET API Design | https://docs.microsoft.com/en-us/dotnet/standard/design-guidelines/ | API design issues | +| PowerToys Module Interface | `doc/devdocs/modules/interface.md` | Module contract violations | + +**Enforcement**: In the output file, include a `## References consulted` section listing which guidelines were checked and any violations found. 
diff --git a/.github/skills/pr-review/references/02-compatibility.prompt.md b/.github/skills/pr-review/references/02-compatibility.prompt.md new file mode 100644 index 000000000000..5d9a14a61207 --- /dev/null +++ b/.github/skills/pr-review/references/02-compatibility.prompt.md @@ -0,0 +1,89 @@ +# Step 02: Compatibility Review + +**Goal**: Ensure changes maintain compatibility with supported Windows versions, architectures, and don't introduce breaking changes. + +## Output file +`Generated Files/prReview/{{pr_number}}/02-compatibility.md` + +## Checks to execute + +### Windows version compatibility +- [ ] Are Win32 APIs available on all supported Windows versions (10 1803+)? +- [ ] Are any APIs marked as Windows 11 only used conditionally? +- [ ] Are version checks in place for newer APIs? +- [ ] Are manifest compatibility settings correct? + +### Architecture compatibility +- [ ] Does code work on both x64 and ARM64? +- [ ] Are pointer sizes handled correctly (IntPtr vs int)? +- [ ] Are P/Invoke signatures correct for both architectures? +- [ ] Are any architecture-specific paths handled? + +### .NET compatibility +- [ ] Are target frameworks consistent across projects? +- [ ] Are nullable reference types handled correctly? +- [ ] Are any APIs deprecated in target .NET version? +- [ ] Do AOT-compiled components avoid reflection issues? + +### Breaking changes +- [ ] Are settings schema changes backward compatible? +- [ ] Are IPC message formats versioned/compatible? +- [ ] Are file format changes backward compatible? +- [ ] Are public API signatures preserved? +- [ ] Are GPO policy keys unchanged or properly migrated? + +### Dependency compatibility +- [ ] Are NuGet package versions compatible? +- [ ] Are native DLL dependencies available on all targets? +- [ ] Are any dependencies deprecated or end-of-life? +- [ ] Do WinUI/WPF versions match project requirements? + +### Interoperability +- [ ] Are COM interfaces properly defined? 
+- [ ] Are shell extensions compatible with Explorer versions? +- [ ] Are context menu handlers working on Win10 and Win11? +- [ ] Are clipboard/drag-drop formats standard? + +## PowerToys-specific checks +- [ ] Is the module interface version compatible? +- [ ] Do settings migrations handle all previous versions? +- [ ] Are hotkey codes platform-independent? +- [ ] Does the installer handle upgrades correctly? + +## File template +```md +# Compatibility Review +**PR:** {{pr_number}} — Base:{{baseRefName}} Head:{{headRefName}} +**Review iteration:** {{iteration}} + +## Iteration history +### Iteration {{iteration}} +- <Key finding 1> +- <Key finding 2> + +## Checks executed +- <List specific checks performed> + +## Findings +```mcp-review-comment +{"file":"path/to/file.cs","start_line":123,"end_line":125,"severity":"high|medium|low|info","tags":["compatibility","pr-{{pr_number}}"],"body":"Problem → Why it matters → Concrete fix."} +``` +``` + +## Severity guidelines +- **High**: Breaks on supported Windows version, crashes on ARM64, data migration failure +- **Medium**: Degraded functionality on some platforms, deprecated API usage +- **Low**: Minor compatibility warnings, future deprecation concerns +- **Info**: Suggestions for broader compatibility + +## External references (MUST research) +Before completing this step, **fetch and analyze** these authoritative sources: + +| Reference | URL | Check for | +| --- | --- | --- | +| Windows Version Info | https://docs.microsoft.com/en-us/windows/release-health/supported-versions-windows-client | Supported version requirements | +| .NET Breaking Changes | https://docs.microsoft.com/en-us/dotnet/core/compatibility/ | Breaking change patterns | +| Win32 API Availability | https://docs.microsoft.com/en-us/windows/win32/apiindex/windows-api-list | API version requirements | +| WinAppSDK Release Notes | https://docs.microsoft.com/en-us/windows/apps/windows-app-sdk/stable-channel | SDK compatibility notes | + 
+**Enforcement**: Include `## References consulted` section listing checked guidelines and violations found. diff --git a/.github/skills/pr-review/references/03-performance.prompt.md b/.github/skills/pr-review/references/03-performance.prompt.md new file mode 100644 index 000000000000..d2b5894d5b05 --- /dev/null +++ b/.github/skills/pr-review/references/03-performance.prompt.md @@ -0,0 +1,93 @@ +# Step 03: Performance Review + +**Goal**: Identify performance regressions, inefficiencies, and resource management issues. + +## Output file +`Generated Files/prReview/{{pr_number}}/03-performance.md` + +## Checks to execute + +### CPU efficiency +- [ ] Are there unnecessary loops or repeated calculations? +- [ ] Are LINQ queries efficient (avoiding multiple enumerations)? +- [ ] Are regular expressions compiled if used frequently? +- [ ] Are string operations using StringBuilder for concatenation? +- [ ] Are hot paths optimized (avoid logging, allocations)? + +### Memory management +- [ ] Are IDisposable objects properly disposed? +- [ ] Are event handlers unsubscribed to prevent leaks? +- [ ] Are large objects pooled or reused where appropriate? +- [ ] Are caches bounded to prevent unbounded growth? +- [ ] Are WeakReferences used for optional caches? + +### Async/threading +- [ ] Are async methods truly asynchronous (not blocking)? +- [ ] Is ConfigureAwait(false) used in library code? +- [ ] Are locks held for minimal duration? +- [ ] Are thread-safe collections used for shared data? +- [ ] Are cancellation tokens propagated correctly? + +### I/O efficiency +- [ ] Are file operations buffered appropriately? +- [ ] Are network calls batched where possible? +- [ ] Is file watching efficient (not polling)? +- [ ] Are settings read/written efficiently (not on every keystroke)? + +### UI responsiveness +- [ ] Are long operations off the UI thread? +- [ ] Is virtualization used for large lists? +- [ ] Are images loaded asynchronously? 
+- [ ] Are animations smooth (60fps target)? +- [ ] Is UI updated efficiently (batch updates, not per-item)? + +### Startup performance +- [ ] Are modules lazy-loaded where possible? +- [ ] Is initialization parallelized where safe? +- [ ] Are expensive operations deferred until needed? +- [ ] Is the critical path to first interaction minimized? + +## PowerToys-specific checks +- [ ] Does the module minimize CPU when idle (no busy loops)? +- [ ] Are global hooks efficient (minimal processing in callback)? +- [ ] Are IPC messages batched/throttled appropriately? +- [ ] Does the module release resources when disabled? +- [ ] Are thumbnail/preview generations cached? + +## File template +```md +# Performance Review +**PR:** {{pr_number}} — Base:{{baseRefName}} Head:{{headRefName}} +**Review iteration:** {{iteration}} + +## Iteration history +### Iteration {{iteration}} +- <Key finding 1> +- <Key finding 2> + +## Checks executed +- <List specific checks performed> + +## Findings +```mcp-review-comment +{"file":"path/to/file.cs","start_line":123,"end_line":125,"severity":"high|medium|low|info","tags":["performance","pr-{{pr_number}}"],"body":"Problem → Impact estimate → Concrete fix."} +``` +``` + +## Severity guidelines +- **High**: Significant CPU/memory regression, UI freezes, memory leaks +- **Medium**: Noticeable slowdown, inefficient algorithm, unbounded growth +- **Low**: Minor inefficiency, premature optimization opportunity +- **Info**: Performance improvement suggestions + +## External references (MUST research) +Before completing this step, **fetch and analyze** these authoritative sources: + +| Reference | URL | Check for | +| --- | --- | --- | +| .NET Performance Tips | https://docs.microsoft.com/en-us/dotnet/framework/performance/performance-tips | Anti-pattern violations | +| Async Best Practices | https://docs.microsoft.com/en-us/archive/msdn-magazine/2013/march/async-await-best-practices-in-asynchronous-programming | Async/await issues | +| Memory 
Management | https://docs.microsoft.com/en-us/dotnet/standard/garbage-collection/fundamentals | GC pressure patterns | +| WPF Performance | https://docs.microsoft.com/en-us/dotnet/desktop/wpf/advanced/optimizing-performance | UI virtualization, binding | + +**Enforcement**: Include `## References consulted` section listing checked guidelines and violations found. diff --git a/.github/skills/pr-review/references/04-accessibility.prompt.md b/.github/skills/pr-review/references/04-accessibility.prompt.md new file mode 100644 index 000000000000..4a62dd5f2e53 --- /dev/null +++ b/.github/skills/pr-review/references/04-accessibility.prompt.md @@ -0,0 +1,97 @@ +# Step 04: Accessibility Review + +**Goal**: Ensure UI changes are accessible to users with disabilities, following WCAG guidelines and Windows accessibility standards. + +## Output file +`Generated Files/prReview/{{pr_number}}/04-accessibility.md` + +## Checks to execute + +### Screen reader support +- [ ] Do all interactive elements have accessible names (AutomationProperties.Name)? +- [ ] Are images with meaning given alt text (AutomationProperties.Name)? +- [ ] Are decorative images marked as such (AutomationProperties.AccessibilityView="Raw")? +- [ ] Is live region support used for dynamic content updates? +- [ ] Are landmarks/headings used for navigation structure? + +### Keyboard navigation +- [ ] Can all functionality be accessed via keyboard alone? +- [ ] Is tab order logical and complete? +- [ ] Are custom controls keyboard accessible? +- [ ] Are keyboard shortcuts documented and non-conflicting? +- [ ] Is focus visible and properly managed? +- [ ] Are focus traps avoided (dialogs excepted)? + +### Color and contrast +- [ ] Does text meet minimum contrast ratios (4.5:1 for normal, 3:1 for large)? +- [ ] Is color not the only means of conveying information? +- [ ] Are error states indicated by more than just color? +- [ ] Does the UI work in high contrast mode? 
+- [ ] Are focus indicators visible in all themes? + +### Visual design +- [ ] Can text be resized up to 200% without loss of functionality? +- [ ] Are touch targets at least 44x44 pixels? +- [ ] Is spacing sufficient between interactive elements? +- [ ] Are animations respectful of prefers-reduced-motion? +- [ ] Is content readable without requiring horizontal scrolling? + +### Forms and input +- [ ] Are form fields properly labeled? +- [ ] Are error messages associated with their fields? +- [ ] Are required fields indicated accessibly? +- [ ] Is autocomplete supported where appropriate? +- [ ] Are input instructions provided before fields? + +### Windows-specific +- [ ] Are UIA (UI Automation) patterns correctly implemented? +- [ ] Does the control work with Narrator? +- [ ] Are tooltips accessible (keyboard-activated)? +- [ ] Is the control visible in Accessibility Insights? + +## PowerToys-specific checks +- [ ] Are Settings UI pages fully keyboard navigable? +- [ ] Do overlay UIs (FancyZones editor, ColorPicker) support keyboard? +- [ ] Are hotkey-activated features announced to screen readers? +- [ ] Do preview handlers provide accessible content? +- [ ] Are notification messages accessible? 
+ +## File template +```md +# Accessibility Review +**PR:** {{pr_number}} — Base:{{baseRefName}} Head:{{headRefName}} +**Review iteration:** {{iteration}} + +## Iteration history +### Iteration {{iteration}} +- <Key finding 1> +- <Key finding 2> + +## Checks executed +- <List specific checks performed> + +## Findings +```mcp-review-comment +{"file":"path/to/file.xaml","start_line":45,"end_line":50,"severity":"high|medium|low|info","tags":["accessibility","pr-{{pr_number}}"],"body":"Problem → WCAG criterion affected → Concrete fix."} +``` +``` + +## Severity guidelines +- **High**: Completely inaccessible feature, keyboard trap, missing screen reader support +- **Medium**: Partial accessibility, poor contrast, missing labels +- **Low**: Minor accessibility improvements, enhancement opportunities +- **Info**: Best practice suggestions, proactive improvements + +## External references (MUST research) +Before completing this step, **fetch and analyze** these authoritative sources: + +| Reference | URL | Check for | +| --- | --- | --- | +| WCAG 2.1 Quick Ref | https://www.w3.org/WAI/WCAG21/quickref/ | WCAG Level A/AA violations | +| Windows Accessibility | https://docs.microsoft.com/en-us/windows/apps/design/accessibility/accessibility | Windows-specific patterns | +| UIA Patterns | https://docs.microsoft.com/en-us/windows/win32/winauto/uiauto-controlpatternsoverview | Automation support | +| Contrast Checker | https://webaim.org/resources/contrastchecker/ | Color contrast ratios | + +**Enforcement**: Include `## References consulted` section with: +- WCAG success criteria checked (e.g., 1.4.3 Contrast) +- Any violations with specific guideline IDs diff --git a/.github/skills/pr-review/references/05-security.prompt.md b/.github/skills/pr-review/references/05-security.prompt.md new file mode 100644 index 000000000000..34cae745ee2e --- /dev/null +++ b/.github/skills/pr-review/references/05-security.prompt.md @@ -0,0 +1,107 @@ +# Step 05: Security Review + +**Goal**: 
Identify security vulnerabilities, unsafe practices, and potential attack vectors in the code changes. + +## Output file +`Generated Files/prReview/{{pr_number}}/05-security.md` + +## Checks to execute + +### Input validation +- [ ] Is all user input validated before use? +- [ ] Are file paths validated and canonicalized? +- [ ] Are command-line arguments sanitized? +- [ ] Are URLs validated before navigation? +- [ ] Are numeric inputs bounds-checked? +- [ ] Is input length limited to prevent DoS? + +### Injection vulnerabilities +- [ ] Is SQL/command injection prevented (parameterized queries)? +- [ ] Are shell commands avoided or properly escaped? +- [ ] Is path traversal prevented (no `..` in paths)? +- [ ] Are XAML/JSON inputs validated against injection? +- [ ] Are registry operations using safe APIs? + +### Authentication & authorization +- [ ] Are admin operations protected appropriately? +- [ ] Is elevation (UAC) used only when necessary? +- [ ] Are privileged operations minimized in scope? +- [ ] Are credentials never logged or exposed? +- [ ] Are tokens/secrets stored securely? + +### Data protection +- [ ] Is sensitive data encrypted at rest? +- [ ] Are secure channels used for network communication? +- [ ] Is PII handled according to privacy guidelines? +- [ ] Are temporary files created securely? +- [ ] Is data sanitized before logging? + +### Memory safety +- [ ] Are buffer overflows prevented in native code? +- [ ] Are unsafe blocks minimized and reviewed? +- [ ] Are P/Invoke signatures correct (buffer sizes)? +- [ ] Is memory zeroed before freeing (for secrets)? +- [ ] Are format strings validated? + +### Process security +- [ ] Are child processes started with minimal privileges? +- [ ] Are DLL search paths secured? +- [ ] Is code signing validated for loaded modules? +- [ ] Are named pipes/shared memory secured with ACLs? +- [ ] Are race conditions (TOCTOU) prevented? 
+ +### Cryptography +- [ ] Are modern algorithms used (no MD5/SHA1 for security)? +- [ ] Are random numbers cryptographically secure? +- [ ] Are keys of sufficient length? +- [ ] Is key derivation using proper KDFs? + +## PowerToys-specific checks +- [ ] Do modules with elevated privileges minimize their scope? +- [ ] Are IPC messages validated before processing? +- [ ] Are hook callbacks resistant to malicious input? +- [ ] Are file preview handlers sandboxed appropriately? +- [ ] Are shell extensions checking caller identity? +- [ ] Is the GPO policy path secured? + +## File template +```md +# Security Review +**PR:** {{pr_number}} — Base:{{baseRefName}} Head:{{headRefName}} +**Review iteration:** {{iteration}} + +## Iteration history +### Iteration {{iteration}} +- <Key finding 1> +- <Key finding 2> + +## Checks executed +- <List specific checks performed> + +## Findings +```mcp-review-comment +{"file":"path/to/file.cs","start_line":123,"end_line":125,"severity":"high|medium|low|info","tags":["security","pr-{{pr_number}}"],"body":"Vulnerability → Attack scenario → Concrete fix."} +``` +``` + +## Severity guidelines +- **High**: Remote code execution, privilege escalation, data breach possible +- **Medium**: Local exploit, information disclosure, weak crypto +- **Low**: Defense in depth improvement, hardening opportunity +- **Info**: Security best practice suggestions + +## External references (MUST research) +Before completing this step, **fetch and analyze** these authoritative sources against the PR changes: + +| Reference | URL | Check for | +| --- | --- | --- | +| OWASP Top 10 | https://owasp.org/www-project-top-ten/ | Top 10 vulnerability patterns | +| Microsoft SDL | https://www.microsoft.com/en-us/securityengineering/sdl | SDL practice violations | +| CWE Top 25 | https://cwe.mitre.org/top25/ | Common weakness patterns | +| .NET Security | https://docs.microsoft.com/en-us/dotnet/standard/security/ | .NET security best practices | +| Input Validation | 
https://cheatsheetseries.owasp.org/cheatsheets/Input_Validation_Cheat_Sheet.html | Input validation patterns | + +**Enforcement**: In the output file, include a `## References consulted` section with: +- Which OWASP Top 10 items were checked (by ID: A01-A10) +- Which CWE patterns were verified +- Any violations found with specific CWE/OWASP references diff --git a/.github/skills/pr-review/references/06-localization.prompt.md b/.github/skills/pr-review/references/06-localization.prompt.md new file mode 100644 index 000000000000..16f22e3e71ec --- /dev/null +++ b/.github/skills/pr-review/references/06-localization.prompt.md @@ -0,0 +1,105 @@ +# Step 06: Localization Review + +**Goal**: Ensure all user-facing strings are properly externalized and localizable. + +## Output file +`Generated Files/prReview/{{pr_number}}/06-localization.md` + +## Checks to execute + +### String externalization +- [ ] Are all user-facing strings in resource files (.resx/.resw)? +- [ ] Are no hardcoded strings in code for UI text? +- [ ] Are error messages externalized? +- [ ] Are tooltip texts externalized? +- [ ] Are log messages (user-visible) externalized? + +### Resource file quality +- [ ] Do resource keys follow naming conventions? +- [ ] Are resource comments provided for translator context? +- [ ] Are pluralization rules handled correctly? +- [ ] Are format strings using numbered placeholders ({0}, {1})? +- [ ] Are resource strings free of concatenation that breaks translation? + +### String formatting +- [ ] Are sentences not built by concatenating fragments? +- [ ] Can translated strings accommodate different word orders? +- [ ] Are format placeholders documented for translators? +- [ ] Are gender-neutral alternatives provided where needed? + +### UI layout +- [ ] Can UI accommodate longer translated strings (30-40% expansion)? +- [ ] Are text containers using dynamic sizing? +- [ ] Are truncation/ellipsis handled gracefully? +- [ ] Are fixed-width elements avoided for text? 
+ +### Images and icons +- [ ] Are images with text localized or text-free? +- [ ] Are culturally neutral icons used? +- [ ] Are icon tooltips externalized? + +### Dates, numbers, currencies +- [ ] Are dates formatted using culture-aware formatting? +- [ ] Are numbers formatted using culture settings? +- [ ] Are currencies handled with proper symbols and placement? +- [ ] Are measurement units localizable? + +## PowerToys-specific checks +- [ ] Are new strings added to Resources.resx (C#) or .rc files (C++)? +- [ ] Are module names/descriptions localizable? +- [ ] Are Settings UI strings in the correct resource file? +- [ ] Are context menu strings externalized? +- [ ] Are notification messages localizable? +- [ ] Is the update changelog localizable? + +## Common issues to flag +```csharp +// BAD: Hardcoded string +MessageBox.Show("Operation completed"); + +// GOOD: Resource string +MessageBox.Show(Resources.OperationCompleted); + +// BAD: Concatenated sentence +string msg = "Found " + count + " items in " + folder; + +// GOOD: Format string +string msg = string.Format(Resources.FoundItemsInFolder, count, folder); +``` + +## File template +```md +# Localization Review +**PR:** {{pr_number}} — Base:{{baseRefName}} Head:{{headRefName}} +**Review iteration:** {{iteration}} + +## Iteration history +### Iteration {{iteration}} +- <Key finding 1> +- <Key finding 2> + +## Checks executed +- <List specific checks performed> + +## Findings +```mcp-review-comment +{"file":"path/to/file.cs","start_line":123,"end_line":125,"severity":"high|medium|low|info","tags":["localization","pr-{{pr_number}}"],"body":"Hardcoded string found → Why it matters → Move to resources."} +``` +``` + +## Severity guidelines +- **High**: User-facing hardcoded strings, broken UI due to text length +- **Medium**: Missing translator comments, concatenated sentences +- **Low**: Minor localizability improvements +- **Info**: Best practice suggestions for future localization + +## External references 
(MUST research) +Before completing this step, **fetch and analyze** these authoritative sources: + +| Reference | URL | Check for | +| --- | --- | --- | +| .NET Localization | https://docs.microsoft.com/en-us/dotnet/core/extensions/localization | Resource file best practices | +| Microsoft Style Guide | https://docs.microsoft.com/en-us/style-guide/global-communications/ | Writing for translation | +| Pseudo-localization | https://docs.microsoft.com/en-us/globalization/methodology/pseudolocalization | Testing localizability | + +**Enforcement**: Include `## References consulted` section with guidelines checked and violations found. diff --git a/.github/skills/pr-review/references/07-globalization.prompt.md b/.github/skills/pr-review/references/07-globalization.prompt.md new file mode 100644 index 000000000000..2638ef1411f5 --- /dev/null +++ b/.github/skills/pr-review/references/07-globalization.prompt.md @@ -0,0 +1,117 @@ +# Step 07: Globalization Review + +**Goal**: Ensure the code works correctly across different cultures, locales, and regional settings. + +## Output file +`Generated Files/prReview/{{pr_number}}/07-globalization.md` + +## Checks to execute + +### Text handling +- [ ] Is Unicode fully supported (emojis, CJK, RTL)? +- [ ] Are string comparisons culture-aware where needed? +- [ ] Are string comparisons ordinal where culture doesn't matter? +- [ ] Is text encoding handled correctly (UTF-8 preferred)? +- [ ] Are file paths supporting Unicode characters? + +### Right-to-left (RTL) support +- [ ] Is UI layout RTL-aware (FlowDirection)? +- [ ] Are icons/images mirrored appropriately for RTL? +- [ ] Is text alignment correct for RTL languages? +- [ ] Are bidirectional text scenarios handled? + +### Date and time +- [ ] Is DateTimeOffset used for cross-timezone scenarios? +- [ ] Are time zones handled correctly? +- [ ] Is calendar system (Gregorian vs others) considered? +- [ ] Are 12/24 hour formats culture-dependent? 
+- [ ] Is week start day culture-aware? + +### Numbers and currency +- [ ] Is decimal separator culture-aware (, vs .)? +- [ ] Is thousands separator culture-aware? +- [ ] Is number grouping culture-aware (e.g., 1,000,000 vs 10,00,000 in Indian numbering)? +- [ ] Are currency symbols positioned correctly per culture? +- [ ] Is negative number format culture-aware? + +### Sorting and comparison +- [ ] Is sorting culture-aware where appropriate? +- [ ] Are collation rules respected? +- [ ] Is case conversion culture-aware (Turkish i issue)? +- [ ] Are string equality checks appropriate (ordinal vs culture)? + +### Input methods +- [ ] Does text input work with IME (Input Method Editor)? +- [ ] Are keyboard shortcuts working with non-US layouts? +- [ ] Is clipboard handling encoding-aware? + +### File system +- [ ] Are file paths normalized for cross-platform? +- [ ] Is path separator handled correctly? +- [ ] Are invalid filename characters culture-considered? + +## PowerToys-specific checks +- [ ] Does PowerToys Run work with CJK input? +- [ ] Are hotkeys working with international keyboard layouts? +- [ ] Is file search supporting Unicode filenames? +- [ ] Are preview handlers rendering RTL content correctly? +- [ ] Is the Settings UI RTL-aware? 
+ +## Common issues to flag +```csharp +// BAD: Culture-sensitive comparison for identifiers +if (str.ToLower() == "value") + +// GOOD: Ordinal comparison for identifiers +if (str.Equals("value", StringComparison.OrdinalIgnoreCase)) + +// BAD: Implicit current culture +double.Parse(input) + +// GOOD: Explicit culture for data +double.Parse(input, CultureInfo.InvariantCulture) + +// BAD: Hardcoded date format +DateTime.ParseExact(s, "MM/dd/yyyy", null) + +// GOOD: Culture-aware or ISO format +DateTime.Parse(s, CultureInfo.CurrentCulture) +``` + +## File template +```md +# Globalization Review +**PR:** {{pr_number}} — Base:{{baseRefName}} Head:{{headRefName}} +**Review iteration:** {{iteration}} + +## Iteration history +### Iteration {{iteration}} +- <Key finding 1> +- <Key finding 2> + +## Checks executed +- <List specific checks performed> + +## Findings +```mcp-review-comment +{"file":"path/to/file.cs","start_line":123,"end_line":125,"severity":"high|medium|low|info","tags":["globalization","pr-{{pr_number}}"],"body":"Culture issue → Affected regions → Concrete fix with CultureInfo."} +``` +``` + +## Severity guidelines +- **High**: Crashes/data corruption in non-US locales, RTL completely broken +- **Medium**: Incorrect formatting, sorting issues, IME problems +- **Low**: Minor globalization improvements +- **Info**: Best practice suggestions for international users + +## External references (MUST research) +Before completing this step, **fetch and analyze** these authoritative sources: + +| Reference | URL | Check for | +| --- | --- | --- | +| .NET Globalization | https://docs.microsoft.com/en-us/dotnet/core/extensions/globalization | CultureInfo best practices | +| Unicode Bidirectional | https://unicode.org/reports/tr9/ | RTL text handling | +| ICU Guidelines | https://unicode-org.github.io/icu/userguide/ | International text processing | +| Date/Time Formatting | https://docs.microsoft.com/en-us/dotnet/standard/base-types/standard-date-and-time-format-strings 
| Format string patterns | + +**Enforcement**: Include `## References consulted` section with guidelines checked and violations found. diff --git a/.github/skills/pr-review/references/08-extensibility.prompt.md b/.github/skills/pr-review/references/08-extensibility.prompt.md new file mode 100644 index 000000000000..778e1ab09429 --- /dev/null +++ b/.github/skills/pr-review/references/08-extensibility.prompt.md @@ -0,0 +1,111 @@ +# Step 08: Extensibility Review + +**Goal**: Evaluate whether the code design supports future extension and customization without modification. + +## Output file +`Generated Files/prReview/{{pr_number}}/08-extensibility.md` + +## Checks to execute + +### Plugin/module architecture +- [ ] Are extension points clearly defined? +- [ ] Is the plugin interface stable and versioned? +- [ ] Can plugins be added without recompiling core? +- [ ] Are plugin dependencies properly isolated? +- [ ] Is plugin discovery mechanism robust? + +### Configuration extensibility +- [ ] Are magic numbers externalized to configuration? +- [ ] Are feature behaviors configurable? +- [ ] Can settings schema be extended without breaking changes? +- [ ] Are defaults sensible while allowing customization? + +### Event-driven extensibility +- [ ] Are events exposed for key extension points? +- [ ] Is event subscribe/unsubscribe behavior balanced? +- [ ] Are events strongly-typed (not object-based)? +- [ ] Can event handlers be added externally? + +### Template/strategy patterns +- [ ] Are algorithms pluggable via interfaces? +- [ ] Are formatting rules customizable? +- [ ] Are processing pipelines extensible? +- [ ] Can new types be added without modifying existing code? + +### API design +- [ ] Are public APIs minimal but sufficient? +- [ ] Are extension methods used appropriately? +- [ ] Is internal implementation hidden from extensions? +- [ ] Are breaking changes to public API avoided? + +### Data format extensibility +- [ ] Are data formats versioned? 
+- [ ] Can formats be extended with new fields? +- [ ] Are unknown fields ignored gracefully (forward compatibility)? +- [ ] Is schema validation flexible? + +## PowerToys-specific checks +- [ ] Does the module interface support new capability flags? +- [ ] Can PowerToys Run plugins extend functionality? +- [ ] Are preview handlers pluggable for new file types? +- [ ] Can FancyZones layouts be user-defined? +- [ ] Is the Settings UI extensible for new modules? +- [ ] Can themes/styles be customized? + +## Design patterns to look for +```csharp +// GOOD: Strategy pattern for extensibility +public interface ISearchProvider { ... } +public class FileSearchProvider : ISearchProvider { ... } + +// GOOD: Event-based extension point +public event EventHandler<FileChangedEventArgs> FileChanged; + +// GOOD: Factory pattern for pluggable creation +public interface IPreviewHandlerFactory { ... } + +// BAD: Hard-coded switch on type +switch (fileType) { + case ".txt": ... + case ".pdf": ... + // Adding new type requires modifying this code +} +``` + +## File template +```md +# Extensibility Review +**PR:** {{pr_number}} — Base:{{baseRefName}} Head:{{headRefName}} +**Review iteration:** {{iteration}} + +## Iteration history +### Iteration {{iteration}} +- <Key finding 1> +- <Key finding 2> + +## Checks executed +- <List specific checks performed> + +## Findings +```mcp-review-comment +{"file":"path/to/file.cs","start_line":123,"end_line":125,"severity":"high|medium|low|info","tags":["extensibility","pr-{{pr_number}}"],"body":"Extensibility concern → Impact on future development → Suggested pattern."} +``` +``` + +## Severity guidelines +- **High**: Breaking change to plugin interface, extension point removed +- **Medium**: Missed extension opportunity, tight coupling introduced +- **Low**: Minor extensibility improvements possible +- **Info**: Design suggestions for better extensibility + +## External references (MUST research) +Before completing this step, **fetch and 
analyze** these authoritative sources: + +| Reference | URL | Check for | +| --- | --- | --- | +| Plugin Architecture | https://docs.microsoft.com/en-us/dotnet/core/tutorials/creating-app-with-plugin-support | Plugin loading patterns | +| Semantic Versioning | https://semver.org/ | Breaking change detection | +| PowerToys Module Interface | `doc/devdocs/modules/interface.md` | Contract compliance | +| Run Plugin API | `doc/devdocs/modules/launcher/plugins.md` | Plugin extension points | + +**Enforcement**: Include `## References consulted` section with guidelines checked and violations found. diff --git a/.github/skills/pr-review/references/09-solid-design.prompt.md b/.github/skills/pr-review/references/09-solid-design.prompt.md new file mode 100644 index 000000000000..2b50ab9a0431 --- /dev/null +++ b/.github/skills/pr-review/references/09-solid-design.prompt.md @@ -0,0 +1,128 @@ +# Step 09: SOLID Principles Review + +**Goal**: Evaluate adherence to SOLID design principles for maintainable, testable code. + +## Output file +`Generated Files/prReview/{{pr_number}}/09-solid-design.md` + +## Checks to execute + +### Single Responsibility Principle (SRP) +- [ ] Does each class have one reason to change? +- [ ] Are classes focused on a single concern? +- [ ] Are methods doing one thing well? +- [ ] Are "God classes" avoided (classes doing too much)? +- [ ] Is business logic separated from UI/infrastructure? + +### Open/Closed Principle (OCP) +- [ ] Is code open for extension, closed for modification? +- [ ] Can behavior be extended without changing existing code? +- [ ] Are switch statements on types avoided (use polymorphism)? +- [ ] Are configuration changes preferred over code changes? + +### Liskov Substitution Principle (LSP) +- [ ] Can derived classes substitute base classes without issues? +- [ ] Are virtual method contracts honored? +- [ ] Are preconditions not strengthened in subtypes? +- [ ] Are postconditions not weakened in subtypes? 
+- [ ] Are exceptions not thrown for inherited behaviors? + +### Interface Segregation Principle (ISP) +- [ ] Are interfaces focused and cohesive? +- [ ] Are clients not forced to depend on methods they don't use? +- [ ] Are fat interfaces split into smaller ones? +- [ ] Is "interface pollution" avoided? + +### Dependency Inversion Principle (DIP) +- [ ] Do high-level modules depend on abstractions? +- [ ] Are dependencies injected, not created internally? +- [ ] Are concrete implementations hidden behind interfaces? +- [ ] Is dependency injection container used consistently? + +## Additional design checks + +### Coupling and cohesion +- [ ] Is coupling minimized between modules? +- [ ] Is cohesion maximized within modules? +- [ ] Are circular dependencies avoided? +- [ ] Are package/namespace dependencies sensible? + +### Testability +- [ ] Are classes easily unit-testable? +- [ ] Are external dependencies mockable? +- [ ] Is static state minimized? +- [ ] Are seams available for test doubles? + +### Code organization +- [ ] Is code organized by feature or layer appropriately? +- [ ] Are naming conventions followed? +- [ ] Are access modifiers appropriate (not over-exposing)? +- [ ] Is the public API surface minimal? + +## PowerToys-specific patterns +```csharp +// GOOD: DIP in module interface +public class ColorPickerModule : IModule { + private readonly ISettingsReader _settings; + public ColorPickerModule(ISettingsReader settings) { + _settings = settings; + } +} + +// BAD: Tight coupling to concrete implementation +public class ColorPickerModule : IModule { + private Settings _settings = new Settings(); // Hard to test +} + +// GOOD: SRP - separate concerns +public class HotkeyManager { ... } // Manages hotkeys +public class ColorCapture { ... } // Captures colors +public class ClipboardService { ... } // Clipboard operations + +// BAD: God class doing everything +public class ColorPicker { + void RegisterHotkey() { ... } + void CaptureScreen() { ... 
} + void CopyToClipboard() { ... } + void ShowUI() { ... } + void SaveSettings() { ... } +} +``` + +## File template +```md +# SOLID Design Review +**PR:** {{pr_number}} — Base:{{baseRefName}} Head:{{headRefName}} +**Review iteration:** {{iteration}} + +## Iteration history +### Iteration {{iteration}} +- <Key finding 1> +- <Key finding 2> + +## Checks executed +- <List specific SOLID checks performed> + +## Findings +```mcp-review-comment +{"file":"path/to/file.cs","start_line":123,"end_line":125,"severity":"high|medium|low|info","tags":["solid-design","pr-{{pr_number}}"],"body":"SOLID violation → Principle affected → Refactoring suggestion."} +``` +``` + +## Severity guidelines +- **High**: Major design violation making code unmaintainable/untestable +- **Medium**: Moderate coupling/cohesion issues, testing difficulties +- **Low**: Minor design improvements, polish opportunities +- **Info**: Design pattern suggestions, best practice recommendations + +## External references (MUST research) +Before completing this step, **fetch and analyze** these authoritative sources: + +| Reference | URL | Check for | +| --- | --- | --- | +| SOLID Principles | https://docs.microsoft.com/en-us/dotnet/architecture/modern-web-apps-azure/architectural-principles | SOLID violations | +| Clean Architecture | https://docs.microsoft.com/en-us/dotnet/architecture/modern-web-apps-azure/common-web-application-architectures | Architecture patterns | +| Dependency Injection | https://docs.microsoft.com/en-us/dotnet/core/extensions/dependency-injection | DI best practices | +| Design Patterns | https://refactoring.guru/design-patterns | Pattern applicability | + +**Enforcement**: Include `## References consulted` section with specific SOLID principle checks and violations. 
diff --git a/.github/skills/pr-review/references/10-repo-patterns.prompt.md b/.github/skills/pr-review/references/10-repo-patterns.prompt.md new file mode 100644 index 000000000000..7bfc09faf27d --- /dev/null +++ b/.github/skills/pr-review/references/10-repo-patterns.prompt.md @@ -0,0 +1,124 @@ +# Step 10: Repository Patterns Review + +**Goal**: Ensure changes follow established PowerToys repository conventions and patterns. + +## Output file +`Generated Files/prReview/{{pr_number}}/10-repo-patterns.md` + +## Checks to execute + +### Code style compliance +- [ ] Does C# code follow src/.editorconfig rules? +- [ ] Does C++ code follow src/.clang-format? +- [ ] Is XAML formatted per XamlStyler settings? +- [ ] Are naming conventions followed (PascalCase, camelCase)? + +### Project structure +- [ ] Are new files in the correct project/folder? +- [ ] Is the module structure consistent with existing modules? +- [ ] Are shared utilities in common libraries, not duplicated? +- [ ] Are test projects properly named (*UnitTests, *UITests)? + +### Settings patterns +- [ ] Are settings defined in the module's settings.cs? +- [ ] Is the settings JSON schema following the pattern? +- [ ] Are settings exposed through Settings UI correctly? +- [ ] Is settings versioning/migration handled? + +### Logging patterns +- [ ] Is spdlog used for C++ logging? +- [ ] Is Logger class used for C# logging? +- [ ] Are log levels appropriate (no spam in release)? +- [ ] Are sensitive values not logged? +- [ ] Is logging following repo guidelines? + +### IPC patterns +- [ ] Is named pipe communication using established helpers? +- [ ] Are IPC message formats JSON with proper schema? +- [ ] Are IPC operations async and timeout-protected? + +### Resource patterns +- [ ] Are resources in the correct .resx/.rc files? +- [ ] Is resource naming following conventions? +- [ ] Are PRI files configured correctly for WinUI? + +### Build patterns +- [ ] Are project references used (not DLL references)? 
+- [ ] Are package versions from Directory.Packages.props? +- [ ] Is the project included in the solution correctly? +- [ ] Are build configurations consistent? + +### Error handling patterns +- [ ] Are exceptions caught at appropriate boundaries? +- [ ] Is exception information logged properly? +- [ ] Are user-facing errors localized? +- [ ] Is graceful degradation preferred over crashing? + +## PowerToys-specific patterns +```csharp +// Settings pattern +public class MyModuleSettings : BasePTModuleSettings { + [JsonPropertyName("is_enabled")] + public bool IsEnabled { get; set; } = true; +} + +// Module interface pattern +public class MyModule : IModule { + public string Name => "MyModule"; + public string GetKey() => "MyModule"; + // ... implement interface +} + +// Logging pattern (C#) +Logger.LogInfo("Operation completed"); +Logger.LogError("Failed: {0}", ex.Message); + +// Logging pattern (C++) +Logger::info("Operation completed"); +Logger::error("Failed: {}", errorMsg); +``` + +## Files to reference +- Architecture: `doc/devdocs/core/architecture.md` +- Coding style: `doc/devdocs/development/style.md` +- Logging: `doc/devdocs/development/logging.md` +- Module interface: `doc/devdocs/modules/interface.md` + +## File template +```md +# Repository Patterns Review +**PR:** {{pr_number}} — Base:{{baseRefName}} Head:{{headRefName}} +**Review iteration:** {{iteration}} + +## Iteration history +### Iteration {{iteration}} +- <Key finding 1> +- <Key finding 2> + +## Checks executed +- <List specific pattern checks performed> + +## Findings +```mcp-review-comment +{"file":"path/to/file.cs","start_line":123,"end_line":125,"severity":"high|medium|low|info","tags":["repo-patterns","pr-{{pr_number}}"],"body":"Pattern deviation → Repo convention reference → How to fix."} +``` +``` + +## Severity guidelines +- **High**: Major deviation from required patterns, will cause build/integration issues +- **Medium**: Pattern inconsistency, makes codebase harder to maintain +- 
**Low**: Minor style issues, naming improvements +- **Info**: Suggestions for better alignment with repo conventions + +## External references (MUST research) +Before completing this step, **fetch and analyze** these local documentation files: + +| Reference | Path | Check for | +| --- | --- | --- | +| Architecture | `doc/devdocs/core/architecture.md` | Module structure compliance | +| Coding Style | `doc/devdocs/development/style.md` | Style guide adherence | +| Logging Guidelines | `doc/devdocs/development/logging.md` | Logging pattern compliance | +| Module Interface | `doc/devdocs/modules/interface.md` | Interface contract | +| AGENTS.md | `AGENTS.md` | AI contributor guidelines | + +**Enforcement**: Include `## References consulted` section with repo docs checked and deviations found. diff --git a/.github/skills/pr-review/references/11-docs-automation.prompt.md b/.github/skills/pr-review/references/11-docs-automation.prompt.md new file mode 100644 index 000000000000..b53b6d82abea --- /dev/null +++ b/.github/skills/pr-review/references/11-docs-automation.prompt.md @@ -0,0 +1,102 @@ +# Step 11: Documentation & Automation Review + +**Goal**: Ensure documentation is updated and CI/automation changes are correct. + +## Output file +`Generated Files/prReview/{{pr_number}}/11-docs-automation.md` + +## Checks to execute + +### Code documentation +- [ ] Are public APIs documented with XML comments? +- [ ] Are complex algorithms explained in comments? +- [ ] Are non-obvious implementation decisions documented? +- [ ] Are TODO comments actionable (with issue links)? + +### README and user docs +- [ ] Is README updated for new features? +- [ ] Are user-facing docs updated in /doc? +- [ ] Are screenshots/GIFs updated if UI changed? +- [ ] Are keyboard shortcuts documented? + +### Developer documentation +- [ ] Are architecture changes documented in devdocs? +- [ ] Are new modules documented in doc/devdocs/modules/? +- [ ] Are build instructions updated if needed? 
+- [ ] Are dependencies documented in NOTICE.md if added? + +### API documentation +- [ ] Are breaking changes documented? +- [ ] Are new settings documented? +- [ ] Are GPO policies documented if added? +- [ ] Is DSC configuration documented if applicable? + +### CI/CD changes +- [ ] Are pipeline changes tested and correct? +- [ ] Are build matrix updates appropriate? +- [ ] Are test configurations correct? +- [ ] Are deployment steps accurate? + +### GitHub automation +- [ ] Are issue/PR templates updated if needed? +- [ ] Are labels appropriate for changes? +- [ ] Are workflow triggers correct? +- [ ] Are actions using pinned versions? + +### Release documentation +- [ ] Is CHANGELOG impact clear from PR description? +- [ ] Are migration steps documented for breaking changes? +- [ ] Are known issues documented? + +## PowerToys-specific documentation +- [ ] Is the Settings UI page documented for new features? +- [ ] Are hotkey defaults documented? +- [ ] Is integration with other modules documented? +- [ ] Are troubleshooting steps provided for complex features? 
+ +## File template +```md +# Documentation & Automation Review +**PR:** {{pr_number}} — Base:{{baseRefName}} Head:{{headRefName}} +**Review iteration:** {{iteration}} + +## Iteration history +### Iteration {{iteration}} +- <Key finding 1> +- <Key finding 2> + +## Checks executed +- <List specific documentation checks performed> + +## Documentation coverage +| Area | Status | Notes | +|------|--------|-------| +| Code comments | ✅/⚠️/❌ | | +| README | ✅/⚠️/❌ | | +| User docs | ✅/⚠️/❌ | | +| Dev docs | ✅/⚠️/❌ | | +| CI/CD | ✅/⚠️/❌ | | + +## Findings +```mcp-review-comment +{"file":"path/to/file.cs","start_line":123,"end_line":125,"severity":"high|medium|low|info","tags":["documentation","pr-{{pr_number}}"],"body":"Documentation gap → What's missing → Suggested content."} +``` +``` + +## Severity guidelines +- **High**: Missing critical documentation, broken CI, undocumented breaking changes +- **Medium**: Incomplete documentation, outdated screenshots +- **Low**: Minor documentation improvements, typos +- **Info**: Documentation enhancement suggestions + +## External references (MUST research) +Before completing this step, **fetch and analyze** these authoritative sources: + +| Reference | URL | Check for | +| --- | --- | --- | +| Microsoft Writing Style | https://docs.microsoft.com/en-us/style-guide/ | Writing style compliance | +| XML Documentation | https://docs.microsoft.com/en-us/dotnet/csharp/language-reference/xmldoc/ | XML doc tag usage | +| GitHub Actions | https://docs.github.com/en/actions/learn-github-actions | Workflow best practices | +| Azure Pipelines | https://docs.microsoft.com/en-us/azure/devops/pipelines/ | Pipeline patterns | + +**Enforcement**: Include `## References consulted` section with documentation standards checked. 
diff --git a/.github/skills/pr-review/references/12-code-comments.prompt.md b/.github/skills/pr-review/references/12-code-comments.prompt.md new file mode 100644 index 000000000000..b8fb429f61be --- /dev/null +++ b/.github/skills/pr-review/references/12-code-comments.prompt.md @@ -0,0 +1,133 @@ +# Step 12: Code Comments Review + +**Goal**: Evaluate the quality and appropriateness of code comments. + +## Output file +`Generated Files/prReview/{{pr_number}}/12-code-comments.md` + +## Checks to execute + +### Comment quality +- [ ] Do comments explain "why" not just "what"? +- [ ] Are comments accurate and up-to-date with code? +- [ ] Are comments concise and clear? +- [ ] Do comments add value beyond obvious code? +- [ ] Are comments free of redundant information? + +### XML documentation (C#) +- [ ] Do public members have `<summary>` tags? +- [ ] Are `<param>` tags provided for parameters? +- [ ] Are `<returns>` tags provided for return values? +- [ ] Are `<exception>` tags documenting thrown exceptions? +- [ ] Are `<remarks>` used for additional context? + +### Doxygen/comments (C++) +- [ ] Are public functions documented? +- [ ] Are complex macros documented? +- [ ] Are struct/class members documented? +- [ ] Are file headers present with copyright? + +### TODO/FIXME comments +- [ ] Are TODOs actionable with clear description? +- [ ] Are TODOs linked to issues where appropriate? +- [ ] Are FIXMEs addressed or tracked? +- [ ] Are HACKs explained with justification? + +### Region/section comments +- [ ] Are regions used appropriately (not excessively)? +- [ ] Do region names describe their content? +- [ ] Are large files organized with clear sections? 
+ +### Comment anti-patterns to flag +```csharp +// BAD: Obvious comment +i++; // Increment i + +// BAD: Outdated comment (code does something else) +// Returns the sum of a and b +public int Subtract(int a, int b) => a - b; + +// BAD: Commented-out code +// var oldImplementation = DoOldThing(); +var newImplementation = DoNewThing(); + +// BAD: Vague TODO +// TODO: Fix this + +// GOOD: Explains WHY +// We use a StringBuilder here because profiling showed +// string concatenation was a bottleneck with large file lists +var sb = new StringBuilder(); + +// GOOD: Actionable TODO +// TODO(#12345): Replace with async version when upgrading to .NET 8 + +// GOOD: Documents non-obvious behavior +// Win32 API returns -1 on error, not 0 +if (result == -1) { ... } +``` + +### Special comment patterns +- [ ] Are license headers present where required? +- [ ] Are copyright notices correct? +- [ ] Are suppression comments (pragma) justified? +- [ ] Are platform-specific code blocks clearly marked? + +## PowerToys-specific patterns +```csharp +// GOOD: Explains integration point +// The Runner calls this method when the hotkey is pressed. +// We must respond within 100ms to avoid the "not responding" UI. +public void OnHotkey() { ... } + +// GOOD: Documents settings behavior +// This setting is persisted in JSON and synced with Settings UI. +// Changes require module restart to take effect. 
+[JsonPropertyName("activation_threshold")] +public int ActivationThreshold { get; set; } +``` + +## File template +```md +# Code Comments Review +**PR:** {{pr_number}} — Base:{{baseRefName}} Head:{{headRefName}} +**Review iteration:** {{iteration}} + +## Iteration history +### Iteration {{iteration}} +- <Key finding 1> +- <Key finding 2> + +## Checks executed +- <List specific comment checks performed> + +## Comment quality summary +| Aspect | Assessment | +|--------|------------| +| Accuracy | ✅/⚠️/❌ | +| Completeness | ✅/⚠️/❌ | +| Clarity | ✅/⚠️/❌ | +| XML docs | ✅/⚠️/❌ | + +## Findings +```mcp-review-comment +{"file":"path/to/file.cs","start_line":123,"end_line":125,"severity":"high|medium|low|info","tags":["code-comments","pr-{{pr_number}}"],"body":"Comment issue → Why it matters → Suggested fix."} +``` +``` + +## Severity guidelines +- **High**: Misleading/incorrect comments, missing critical documentation +- **Medium**: Missing XML docs on public API, outdated comments +- **Low**: Minor comment improvements, clarity enhancements +- **Info**: Comment style suggestions + +## External references (MUST research) +Before completing this step, **fetch and analyze** these authoritative sources: + +| Reference | URL | Check for | +| --- | --- | --- | +| XML Documentation | https://docs.microsoft.com/en-us/dotnet/csharp/language-reference/xmldoc/ | XML tag usage | +| Code Comments Guide | https://docs.microsoft.com/en-us/dotnet/csharp/fundamentals/coding-style/coding-conventions#commenting-conventions | Comment conventions | +| Doxygen (C++) | https://www.doxygen.nl/manual/docblocks.html | C++ documentation | + +**Enforcement**: Include `## References consulted` section with comment standards checked. 
diff --git a/.github/skills/pr-review/references/13-copilot-guidance.prompt.md b/.github/skills/pr-review/references/13-copilot-guidance.prompt.md new file mode 100644 index 000000000000..f1c28419851d --- /dev/null +++ b/.github/skills/pr-review/references/13-copilot-guidance.prompt.md @@ -0,0 +1,129 @@ +# Step 13: Copilot Guidance Review (Conditional) + +**Goal**: Review changes to Copilot/AI guidance files and ensure code changes align with existing guidance. + +## When to run this step +- Run **only if** the PR contains changes to: + - `*copilot*.md` files + - `.github/prompts/*.md` files + - `.github/copilot-instructions.md` + - `.github/instructions/*.md` files + - `**/SKILL.md` files + - Agent/prompt configuration files + +## Output file +`Generated Files/prReview/{{pr_number}}/13-copilot-guidance.md` + +## Checks to execute + +### Guidance file quality +- [ ] Is the prompt clear and unambiguous? +- [ ] Are instructions actionable and specific? +- [ ] Is the scope well-defined? +- [ ] Are examples provided where helpful? +- [ ] Is the guidance consistent with repo conventions? + +### Prompt engineering best practices +- [ ] Is the goal stated clearly at the beginning? +- [ ] Are constraints and boundaries specified? +- [ ] Are output formats defined? +- [ ] Are edge cases addressed? +- [ ] Is the prompt tested with sample inputs? + +### Consistency checks +- [ ] Is terminology consistent with other guidance files? +- [ ] Are file paths and references correct? +- [ ] Do applyTo patterns match intended files? +- [ ] Are referenced tools/scripts available? + +### SKILL.md structure (if applicable) +- [ ] Is the skill name and description clear? +- [ ] Are prerequisites documented? +- [ ] Are usage examples provided? +- [ ] Is the expected output described? +- [ ] Are references properly linked? + +### Code alignment with guidance +- [ ] Do code changes follow existing Copilot instructions? +- [ ] Are new patterns documented in guidance? 
+- [ ] Do changes require guidance updates? +- [ ] Are breaking changes to AI workflows documented? + +## PowerToys-specific guidance checks +- [ ] Is guidance aligned with AGENTS.md? +- [ ] Are component-specific instructions referenced? +- [ ] Is build guidance accurate? +- [ ] Are test expectations documented? +- [ ] Is the guidance discoverable (proper location)? + +## Common issues to flag +```markdown +# BAD: Vague instruction +Make sure the code is good. + +# GOOD: Specific instruction +Ensure all public methods have XML documentation comments +including <summary>, <param>, and <returns> tags. + +# BAD: Missing context +Use the Logger class. + +# GOOD: With context +Use the Logger class from `src/common/logger/` for all logging. +Follow the patterns in doc/devdocs/development/logging.md. + +# BAD: Hardcoded paths +Edit the file at C:\Users\dev\PowerToys\src\... + +# GOOD: Relative/generic paths +Edit the file at src/modules/<module>/... +``` + +## File template +```md +# Copilot Guidance Review +**PR:** {{pr_number}} — Base:{{baseRefName}} Head:{{headRefName}} +**Review iteration:** {{iteration}} + +## Iteration history +### Iteration {{iteration}} +- <Key finding 1> +- <Key finding 2> + +## Guidance files changed +| File | Change type | Assessment | +|------|-------------|------------| +| path/to/file.md | Added/Modified | ✅/⚠️/❌ | + +## Checks executed +- <List specific guidance checks performed> + +## Findings +```mcp-review-comment +{"file":"path/to/guidance.md","start_line":10,"end_line":15,"severity":"high|medium|low|info","tags":["copilot-guidance","pr-{{pr_number}}"],"body":"Guidance issue → Impact on AI workflows → Suggested improvement."} +``` +``` + +## Severity guidelines +- **High**: Incorrect guidance leading to wrong AI behavior, broken workflows +- **Medium**: Unclear instructions, missing important context +- **Low**: Minor clarity improvements, formatting +- **Info**: Enhancement suggestions for better AI assistance + +## External 
references (MUST research) +Before completing this step, **fetch and analyze** these authoritative sources: + +| Reference | URL | Check for | +| --- | --- | --- | +| Agent Skills Spec | https://agentskills.io/ | Skill format compliance | +| VS Code Custom Instructions | https://code.visualstudio.com/docs/copilot/customization | Instruction patterns | +| GitHub Copilot Extensions | https://docs.github.com/en/copilot/customizing-copilot | Customization best practices | +| Prompt Engineering | https://platform.openai.com/docs/guides/prompt-engineering | Prompt writing patterns | + +**Enforcement**: Include `## References consulted` section with guidance standards checked. + +## Related files +- `.github/copilot-instructions.md` - Main Copilot guidance +- `AGENTS.md` - AI contributor guide +- `.github/instructions/*.md` - Component-specific instructions +- `.github/prompts/*.md` - Task-specific prompts diff --git a/.github/skills/pr-review/references/mcp-config.json b/.github/skills/pr-review/references/mcp-config.json new file mode 100644 index 000000000000..8f892d9345ff --- /dev/null +++ b/.github/skills/pr-review/references/mcp-config.json @@ -0,0 +1,9 @@ +{ + "mcpServers": { + "github-artifacts": { + "command": "cmd", + "args": ["/q", "/c", "for /f %i in ('git rev-parse --show-toplevel') do node %i/tools/mcp/github-artifacts/launch.js"], + "tools": ["*"] + } + } +} diff --git a/.github/skills/pr-review/references/review-pr.prompt.md b/.github/skills/pr-review/references/review-pr.prompt.md new file mode 100644 index 000000000000..408ee302b0ed --- /dev/null +++ b/.github/skills/pr-review/references/review-pr.prompt.md @@ -0,0 +1,154 @@ +--- +agent: 'agent' +description: 'Review exactly one PR - sequential steps, one output file per step' +--- + +# Review Pull Request + +Review PR `{{pr_number}}`. Read/analyze only. Never modify code. + +Execute the numbered phases below **in order**. Do not skip ahead. 
+ +--- + +## Phase 1 - Fetch PR data + +```bash +gh pr view {{pr_number}} --json number,title,baseRefName,headRefName,baseRefOid,headRefOid,changedFiles,files +gh api repos/:owner/:repo/pulls/{{pr_number}}/files?per_page=250 +``` + +Save `headRefOid` - you will need it in Phase 6. + +## Phase 2 - Determine review mode + +Check if `Generated Files/prReview/{{pr_number}}/00-OVERVIEW.md` exists. + +- **Not found** - This is iteration 1. Full review. Go to Phase 3. +- **Found** - Extract `Last reviewed SHA:` from it, then run: + ```powershell + .github/skills/pr-review/scripts/Get-PrIncrementalChanges.ps1 ` + -PullRequestNumber {{pr_number}} ` + -LastReviewedCommitSha <extracted_sha> + ``` + - `NeedFullReview: true` - Full review of all files. + - `ChangedFiles` non-empty - Incremental review of only those files. + - `ChangedFiles` empty - No changes. Write "No changes" to overview, stop. + +Increment the iteration number from the existing overview (or start at 1). + +## Phase 3 - Decide which steps to run + +Using the changed file list from Phase 1 (full) or Phase 2 (incremental), match against these rules. When in doubt, **include the step**. + +| Changed files match | Steps to run | Steps safe to skip | +|---|---|---| +| `*.cs`, `*.cpp`, `*.h` | 01 02 03 05 09 10 12 | - | +| `*.resx`, `Resources/*.xaml` | 06 07 | 03 04 05 08 09 | +| `*.md` (docs only) | 11 | 03 04 05 06 07 08 09 12 | +| `*copilot*.md`, `.github/prompts/*` | 13 11 | most others | +| `*.csproj`, `*.vcxproj`, `packages.config` | 02 05 10 | 04 06 07 | +| `UI/**`, `*View.xaml` | 04 06 | - | +| Mixed / uncertain | **All steps** | none | + +Steps 01, 02, 10, 11 always run unless the change is trivially irrelevant. + +## Phase 4 - Execute review steps + +For each step that applies, **in order 01 through 13**: + +1. **Read** the step prompt file from this folder (e.g. `01-functionality.prompt.md`) +2. **Analyze** the PR changes against that prompt's checklist +3. 
**Fetch external references** listed in the prompt's `## External references (MUST research)` section. Include a `## References consulted` section citing specific IDs (WCAG 1.4.3, OWASP A03, CWE-79, etc.)
+4. **Write** the step's output file to `Generated Files/prReview/{{pr_number}}/<NN>-<name>.md` (e.g. `01-functionality.md`; see Step table below)
+5. **Update** `.signal`: append step name to `completedSteps`, set `lastStep`, refresh `lastUpdated`. For skipped steps, append to `skippedSteps` instead.
+
+**Do not batch.** Write each file immediately after completing that step before starting the next.
+
+### Step table
+
+| Step | Prompt file | Output file |
+|---|---|---|
+| 01 | `01-functionality.prompt.md` | `01-functionality.md` |
+| 02 | `02-compatibility.prompt.md` | `02-compatibility.md` |
+| 03 | `03-performance.prompt.md` | `03-performance.md` |
+| 04 | `04-accessibility.prompt.md` | `04-accessibility.md` |
+| 05 | `05-security.prompt.md` | `05-security.md` |
+| 06 | `06-localization.prompt.md` | `06-localization.md` |
+| 07 | `07-globalization.prompt.md` | `07-globalization.md` |
+| 08 | `08-extensibility.prompt.md` | `08-extensibility.md` |
+| 09 | `09-solid-design.prompt.md` | `09-solid-design.md` |
+| 10 | `10-repo-patterns.prompt.md` | `10-repo-patterns.md` |
+| 11 | `11-docs-automation.prompt.md` | `11-docs-automation.md` |
+| 12 | `12-code-comments.prompt.md` | `12-code-comments.md` |
+| 13 | `13-copilot-guidance.prompt.md` | `13-copilot-guidance.md` |
+
+### Line mapping for review comments
+
+Map head-side lines from patch hunks: `@@ -a,b +c,d @@` means new lines `c` through `c+d-1`. For cross-file issues, set the primary `"file"` and list others in `"related_files"`.
+ +## Phase 5 - Write overview + +After all step files are written, generate `Generated Files/prReview/{{pr_number}}/00-OVERVIEW.md`: + +```md +# PR Review Overview - PR #{{pr_number}}: <title> +**Review iteration:** <N> +**Changed files:** <count> | **High severity issues:** <count> + +## Review metadata +**Last reviewed SHA:** <headRefOid> +**Last review timestamp:** <ISO8601> +**Review mode:** <Full | Incremental (N files changed since iteration X)> +**Base ref:** <baseRefName> +**Head ref:** <headRefName> + +## Step results +01 Functionality - <OK | Issues | Skipped> (see 01-functionality.md) +02 Compatibility - ... +... through 13. + +## Iteration history +### Iteration <N> +<summary of this review pass> +``` + +For incremental reviews, list the specific files that changed and which commits were added. + +## Phase 6 - Finalize + +1. Update `.signal`: set `status` to `"success"` (or `"failure"`), `lastStep` to `"00-OVERVIEW"`, add `timestamp`. +2. Update `Last reviewed SHA:` in `00-OVERVIEW.md` with `headRefOid` from Phase 1. + +## Phase 7 - Post comments (if MCP available) + +Parse all `mcp-review-comment` fenced blocks across step files and post as PR review comments. If posting is not available, skip. The files are the primary output. + +--- + +## Helper scripts + +Located in `.github/skills/pr-review/scripts/`. Use these instead of raw `gh` commands when they fit: + +| Script | When to use | +|---|---| +| `Get-GitHubRawFile.ps1` | Need to read a file at a specific ref with line numbers | +| `Get-GitHubPrFilePatch.ps1` | Need the unified diff for one file in the PR | +| `Get-PrIncrementalChanges.ps1` | Phase 2: determine if incremental review is needed | + +## .signal file format + +The outer script creates the initial `.signal` and writes the final one. 
Your job is to **update it after each step** in Phase 4: + +```json +{ + "status": "in-progress", + "prNumber": 45234, + "totalSteps": 13, + "completedSteps": ["01-functionality", "02-compatibility"], + "skippedSteps": ["13-copilot-guidance"], + "lastStep": "02-compatibility", + "lastUpdated": "2026-02-04T10:03:12Z", + "startedAt": "2026-02-04T10:00:05Z" +} +``` diff --git a/.github/skills/pr-review/scripts/Get-GitHubPrFilePatch.ps1 b/.github/skills/pr-review/scripts/Get-GitHubPrFilePatch.ps1 new file mode 100644 index 000000000000..1b20ea59f578 --- /dev/null +++ b/.github/skills/pr-review/scripts/Get-GitHubPrFilePatch.ps1 @@ -0,0 +1,79 @@ +<# +.SYNOPSIS + Retrieves the unified diff patch for a specific file in a GitHub pull request. + +.DESCRIPTION + This script fetches the patch content (unified diff format) for a specified file + within a pull request. It uses the GitHub CLI (gh) to query the GitHub API and + retrieve file change information. + +.PARAMETER PullRequestNumber + The pull request number to query. + +.PARAMETER FilePath + The relative path to the file in the repository (e.g., "src/modules/main.cpp"). + +.PARAMETER RepositoryOwner + The GitHub repository owner. Defaults to "microsoft". + +.PARAMETER RepositoryName + The GitHub repository name. Defaults to "PowerToys". + +.EXAMPLE + .\Get-GitHubPrFilePatch.ps1 -PullRequestNumber 42374 -FilePath "src/modules/cmdpal/main.cpp" + Retrieves the patch for main.cpp in PR #42374. + +.EXAMPLE + .\Get-GitHubPrFilePatch.ps1 -PullRequestNumber 42374 -FilePath "README.md" -RepositoryOwner "myorg" -RepositoryName "myrepo" + Retrieves the patch from a different repository. + +.NOTES + Requires GitHub CLI (gh) to be installed and authenticated. + Run 'gh auth login' if not already authenticated. 
+ +.LINK + https://cli.github.com/ +#> + +[CmdletBinding()] +param( + [Parameter(Mandatory = $true, HelpMessage = "Pull request number")] + [int]$PullRequestNumber, + + [Parameter(Mandatory = $true, HelpMessage = "Relative path to the file in the repository")] + [string]$FilePath, + + [Parameter(Mandatory = $false, HelpMessage = "Repository owner")] + [string]$RepositoryOwner = "microsoft", + + [Parameter(Mandatory = $false, HelpMessage = "Repository name")] + [string]$RepositoryName = "PowerToys" +) + +# Construct GitHub API path for pull request files +$apiPath = "repos/$RepositoryOwner/$RepositoryName/pulls/$PullRequestNumber/files?per_page=250" + +# Query GitHub API to get all files in the pull request +try { + $pullRequestFiles = gh api $apiPath | ConvertFrom-Json +} catch { + Write-Error "Failed to query GitHub API for PR #$PullRequestNumber. Ensure gh CLI is authenticated. Details: $_" + exit 1 +} + +# Find the matching file in the pull request +$matchedFile = $pullRequestFiles | Where-Object { $_.filename -eq $FilePath } + +if (-not $matchedFile) { + Write-Error "File '$FilePath' not found in PR #$PullRequestNumber." + exit 1 +} + +# Check if patch content exists +if (-not $matchedFile.patch) { + Write-Warning "File '$FilePath' has no patch content (possibly binary or too large)." + return +} + +# Output the patch content +$matchedFile.patch diff --git a/.github/skills/pr-review/scripts/Get-GitHubRawFile.ps1 b/.github/skills/pr-review/scripts/Get-GitHubRawFile.ps1 new file mode 100644 index 000000000000..d75f51933473 --- /dev/null +++ b/.github/skills/pr-review/scripts/Get-GitHubRawFile.ps1 @@ -0,0 +1,91 @@ +<# +.SYNOPSIS + Downloads and displays the content of a file from a GitHub repository at a specific git reference. + +.DESCRIPTION + This script fetches the raw content of a file from a GitHub repository using GitHub's raw content API. + It can optionally display line numbers and supports any valid git reference (branch, tag, or commit SHA). 
+ +.PARAMETER FilePath + The relative path to the file in the repository (e.g., "src/modules/main.cpp"). + +.PARAMETER GitReference + The git reference (branch name, tag, or commit SHA) to fetch the file from. Defaults to "main". + +.PARAMETER RepositoryOwner + The GitHub repository owner. Defaults to "microsoft". + +.PARAMETER RepositoryName + The GitHub repository name. Defaults to "PowerToys". + +.PARAMETER ShowLineNumbers + When specified, displays line numbers before each line of content. + +.PARAMETER StartLineNumber + The starting line number to use when ShowLineNumbers is enabled. Defaults to 1. + +.EXAMPLE + .\Get-GitHubRawFile.ps1 -FilePath "README.md" -GitReference "main" + Downloads and displays the README.md file from the main branch. + +.EXAMPLE + .\Get-GitHubRawFile.ps1 -FilePath "src/runner/main.cpp" -GitReference "dev/feature-branch" -ShowLineNumbers + Downloads main.cpp from a feature branch and displays it with line numbers. + +.EXAMPLE + .\Get-GitHubRawFile.ps1 -FilePath "LICENSE" -GitReference "abc123def" -ShowLineNumbers -StartLineNumber 10 + Downloads the LICENSE file from a specific commit and displays it with line numbers starting at 10. + +.NOTES + Requires internet connectivity to access GitHub's raw content API. + Does not require GitHub CLI authentication for public repositories. 
+ +.LINK + https://docs.github.com/en/rest/repos/contents +#> + +[CmdletBinding()] +param( + [Parameter(Mandatory = $true, HelpMessage = "Relative path to the file in the repository")] + [string]$FilePath, + + [Parameter(Mandatory = $false, HelpMessage = "Git reference (branch, tag, or commit SHA)")] + [string]$GitReference = "main", + + [Parameter(Mandatory = $false, HelpMessage = "Repository owner")] + [string]$RepositoryOwner = "microsoft", + + [Parameter(Mandatory = $false, HelpMessage = "Repository name")] + [string]$RepositoryName = "PowerToys", + + [Parameter(Mandatory = $false, HelpMessage = "Display line numbers before each line")] + [switch]$ShowLineNumbers, + + [Parameter(Mandatory = $false, HelpMessage = "Starting line number for display")] + [int]$StartLineNumber = 1 +) + +# Construct the raw content URL +$rawContentUrl = "https://raw.githubusercontent.com/$RepositoryOwner/$RepositoryName/$GitReference/$FilePath" + +# Fetch the file content from GitHub +try { + $response = Invoke-WebRequest -UseBasicParsing -Uri $rawContentUrl +} catch { + Write-Error "Failed to fetch file from $rawContentUrl. Details: $_" + exit 1 +} + +# Split content into individual lines +$contentLines = $response.Content -split "`n" + +# Display the content with or without line numbers +if ($ShowLineNumbers) { + $currentLineNumber = $StartLineNumber + foreach ($line in $contentLines) { + Write-Output ("{0:d4}: {1}" -f $currentLineNumber, $line) + $currentLineNumber++ + } +} else { + $contentLines | ForEach-Object { Write-Output $_ } +} diff --git a/.github/skills/pr-review/scripts/Get-PrIncrementalChanges.ps1 b/.github/skills/pr-review/scripts/Get-PrIncrementalChanges.ps1 new file mode 100644 index 000000000000..b9bcf8025e9a --- /dev/null +++ b/.github/skills/pr-review/scripts/Get-PrIncrementalChanges.ps1 @@ -0,0 +1,173 @@ +<# +.SYNOPSIS + Detects changes between the last reviewed commit and current head of a pull request. 
+ +.DESCRIPTION + This script compares a previously reviewed commit SHA with the current head of a pull request + to determine what has changed. It helps enable incremental reviews by identifying new commits + and modified files since the last review iteration. + + The script handles several scenarios: + - First review (no previous SHA provided) + - No changes (current SHA matches last reviewed SHA) + - Force-push detected (last reviewed SHA no longer in history) + - Incremental changes (new commits added since last review) + +.PARAMETER PullRequestNumber + The pull request number to analyze. + +.PARAMETER LastReviewedCommitSha + The commit SHA that was last reviewed. If omitted, this is treated as a first review. + +.PARAMETER RepositoryOwner + The GitHub repository owner. Defaults to "microsoft". + +.PARAMETER RepositoryName + The GitHub repository name. Defaults to "PowerToys". + +.OUTPUTS + JSON object containing: + - PullRequestNumber: The PR number being analyzed + - CurrentHeadSha: The current head commit SHA + - LastReviewedSha: The last reviewed commit SHA (if provided) + - BaseRefName: Base branch name + - HeadRefName: Head branch name + - IsIncremental: Boolean indicating if incremental review is possible + - NeedFullReview: Boolean indicating if a full review is required + - ChangedFiles: Array of files that changed (filename, status, additions, deletions) + - NewCommits: Array of commits added since last review (sha, message, author, date) + - Summary: Human-readable description of changes + +.EXAMPLE + .\Get-PrIncrementalChanges.ps1 -PullRequestNumber 42374 + Analyzes PR #42374 with no previous review (first review scenario). + +.EXAMPLE + .\Get-PrIncrementalChanges.ps1 -PullRequestNumber 42374 -LastReviewedCommitSha "abc123def456" + Compares current PR state against the last reviewed commit to identify incremental changes. 
+ +.EXAMPLE + $changes = .\Get-PrIncrementalChanges.ps1 -PullRequestNumber 42374 -LastReviewedCommitSha "abc123" | ConvertFrom-Json + if ($changes.IsIncremental) { Write-Host "Can perform incremental review" } + Captures the output as a PowerShell object for further processing. + +.NOTES + Requires GitHub CLI (gh) to be installed and authenticated. + Run 'gh auth login' if not already authenticated. + +.LINK + https://cli.github.com/ +#> + +[CmdletBinding()] +param( + [Parameter(Mandatory = $true, HelpMessage = "Pull request number")] + [int]$PullRequestNumber, + + [Parameter(Mandatory = $false, HelpMessage = "Commit SHA that was last reviewed")] + [string]$LastReviewedCommitSha, + + [Parameter(Mandatory = $false, HelpMessage = "Repository owner")] + [string]$RepositoryOwner = "microsoft", + + [Parameter(Mandatory = $false, HelpMessage = "Repository name")] + [string]$RepositoryName = "PowerToys" +) + +# Fetch current pull request state from GitHub +try { + $pullRequestData = gh pr view $PullRequestNumber --json headRefOid,headRefName,baseRefName,baseRefOid | ConvertFrom-Json +} catch { + Write-Error "Failed to fetch PR #$PullRequestNumber details. 
Details: $_" + exit 1 +} + +$currentHeadSha = $pullRequestData.headRefOid +$baseRefName = $pullRequestData.baseRefName +$headRefName = $pullRequestData.headRefName + +# Initialize result object +$analysisResult = @{ + PullRequestNumber = $PullRequestNumber + CurrentHeadSha = $currentHeadSha + BaseRefName = $baseRefName + HeadRefName = $headRefName + LastReviewedSha = $LastReviewedCommitSha + IsIncremental = $false + NeedFullReview = $true + ChangedFiles = @() + NewCommits = @() + Summary = "" +} + +# Scenario 1: First review (no previous SHA provided) +if ([string]::IsNullOrWhiteSpace($LastReviewedCommitSha)) { + $analysisResult.Summary = "Initial review - no previous iteration found" + $analysisResult.NeedFullReview = $true + return $analysisResult | ConvertTo-Json -Depth 10 +} + +# Scenario 2: No changes since last review +if ($currentHeadSha -eq $LastReviewedCommitSha) { + $analysisResult.Summary = "No changes since last review (SHA: $currentHeadSha)" + $analysisResult.NeedFullReview = $false + $analysisResult.IsIncremental = $true + return $analysisResult | ConvertTo-Json -Depth 10 +} + +# Scenario 3: Check for force-push (last reviewed SHA no longer exists in history) +try { + $null = gh api "repos/$RepositoryOwner/$RepositoryName/commits/$LastReviewedCommitSha" 2>&1 + if ($LASTEXITCODE -ne 0) { + # SHA not found - likely force-push or branch rewrite + $analysisResult.Summary = "Force-push detected - last reviewed SHA $LastReviewedCommitSha no longer exists. Full review required." + $analysisResult.NeedFullReview = $true + return $analysisResult | ConvertTo-Json -Depth 10 + } +} catch { + $analysisResult.Summary = "Cannot verify last reviewed SHA $LastReviewedCommitSha - assuming force-push. Full review required." 
+ $analysisResult.NeedFullReview = $true + return $analysisResult | ConvertTo-Json -Depth 10 +} + +# Scenario 4: Get incremental changes between last reviewed SHA and current head +try { + $compareApiPath = "repos/$RepositoryOwner/$RepositoryName/compare/$LastReviewedCommitSha...$currentHeadSha" + $comparisonData = gh api $compareApiPath | ConvertFrom-Json + + # Extract new commits information + $analysisResult.NewCommits = $comparisonData.commits | ForEach-Object { + @{ + Sha = $_.sha.Substring(0, 7) + Message = $_.commit.message.Split("`n")[0] # First line only + Author = $_.commit.author.name + Date = $_.commit.author.date + } + } + + # Extract changed files information + $analysisResult.ChangedFiles = $comparisonData.files | ForEach-Object { + @{ + Filename = $_.filename + Status = $_.status # added, modified, removed, renamed + Additions = $_.additions + Deletions = $_.deletions + Changes = $_.changes + } + } + + $fileCount = $analysisResult.ChangedFiles.Count + $commitCount = $analysisResult.NewCommits.Count + + $analysisResult.IsIncremental = $true + $analysisResult.NeedFullReview = $false + $analysisResult.Summary = "Incremental review: $commitCount new commit(s), $fileCount file(s) changed since SHA $($LastReviewedCommitSha.Substring(0, 7))" + +} catch { + Write-Error "Failed to compare commits. Details: $_" + $analysisResult.Summary = "Error comparing commits - defaulting to full review" + $analysisResult.NeedFullReview = $true +} + +# Return the analysis result as JSON +return $analysisResult | ConvertTo-Json -Depth 10 diff --git a/.github/skills/pr-review/scripts/Invoke-PRReviewSimpleRunner.ps1 b/.github/skills/pr-review/scripts/Invoke-PRReviewSimpleRunner.ps1 new file mode 100644 index 000000000000..a866ee2783df --- /dev/null +++ b/.github/skills/pr-review/scripts/Invoke-PRReviewSimpleRunner.ps1 @@ -0,0 +1,229 @@ +<# +.SYNOPSIS + Kick off copilot/claude PR-review jobs via the generic job orchestrator. 
+ +.DESCRIPTION + Builds one job definition per CLI type, then delegates to + Invoke-SimpleJobOrchestrator.ps1 for queuing, monitoring, retry, and cleanup. +#> +# NOTE: Do NOT use [CmdletBinding()], [Parameter()], [ValidateSet()] or any +# attribute here. These make the script "advanced" which propagates +# ErrorActionPreference through PS7's plumbing and can silently kill the +# orchestrator's monitoring loop in a child scope. +param( + [int[]]$PRNumbers, + + [string[]]$CLITypes = @('copilot', 'claude'), + + [string]$PromptText, + + [string]$OutputRoot = 'Generated Files/simple-runner', + + [int]$MaxConcurrent = 20, + + [int]$InactivityTimeoutSeconds = 120, + + [int]$MaxRetryCount = 3, + + [int]$PollIntervalSeconds = 5, + + [switch]$Wait +) + +$ErrorActionPreference = 'Stop' + +# Manual validation (replacing [Parameter(Mandatory)] and [ValidateSet()]) +if (-not $PRNumbers -or $PRNumbers.Count -eq 0) { + Write-Error 'Invoke-PRReviewSimpleRunner: -PRNumbers is required.' + return +} +foreach ($_cli in $CLITypes) { + if ($_cli -notin 'copilot', 'claude') { + Write-Error "Invoke-PRReviewSimpleRunner: Invalid CLIType '$_cli'. Must be 'copilot' or 'claude'." 
+ return + } +} + +# ── resolve paths ──────────────────────────────────────────────────────── + +$repoRoot = (Resolve-Path (Join-Path $PSScriptRoot '..\..\..\..')).Path + +# Resolve config directory name (.github or .claude) from script location +$_cfgDir = if ($PSScriptRoot -match '[\\/](\.github|\.claude)[\\/]') { $Matches[1] } else { '.github' } +$outputRootPath = if ([System.IO.Path]::IsPathRooted($OutputRoot)) { + $OutputRoot +} +else { + Join-Path $repoRoot $OutputRoot +} +New-Item -ItemType Directory -Path $outputRootPath -Force | Out-Null + +# ── prompt builder ─────────────────────────────────────────────────────── + +function New-ReviewPrompt { + param( + [int]$PR, + [string]$ReviewDir, + [string]$Override + ) + + if (-not [string]::IsNullOrWhiteSpace($Override)) { + return $Override + } + + return @" +Review PR #$PR in the microsoft/PowerToys repo. Read/analyze only. Never modify repo code. + +Phase 1 - Fetch PR data: + gh pr view $PR --json number,title,baseRefName,headRefName,baseRefOid,headRefOid,changedFiles,files + gh api repos/:owner/:repo/pulls/$PR/files?per_page=250 + +Phase 2 - Execute review steps 01 through 13 IN ORDER. For each step: + 1. Read the step prompt file from $_cfgDir/skills/pr-review/references/ (e.g. 01-functionality.prompt.md) + 2. Analyze the PR changes against that prompt's checklist + 3. Write the output to $ReviewDir/<NN>-<name>.md (e.g. $ReviewDir/01-functionality.md) + Do each step sequentially. Write the file immediately after completing the step. + + Steps: 01-functionality, 02-compatibility, 03-performance, 04-accessibility, 05-security, + 06-localization, 07-globalization, 08-extensibility, 09-solid-design, 10-repo-patterns, + 11-docs-automation, 12-code-comments, 13-copilot-guidance + +Phase 3 - Write overview: + Generate $ReviewDir/00-OVERVIEW.md summarizing all step results. + +IMPORTANT: Do NOT invoke any other agent, skill, or nested copilot/claude process. + Do all analysis yourself directly. 
+"@ +} + +# ── job definition builder ─────────────────────────────────────────────── + +function New-CliJobDefinition { + param( + [Parameter(Mandatory)] [string]$CLIType, + [Parameter(Mandatory)] [int]$PR, + [Parameter(Mandatory)] [string]$RootPath, + [Parameter(Mandatory)] [string]$RepoRoot, + [string]$PromptOverride + ) + + $prDir = Join-Path $RootPath "$CLIType/$PR" + $logPath = Join-Path $prDir "_$CLIType-review.log" + $debugPath = Join-Path $prDir "_$CLIType-debug.log" + $prompt = New-ReviewPrompt -PR $PR -ReviewDir ($prDir -replace '\\', '/') -Override $PromptOverride + $flatPrompt = ($prompt -replace "[\r\n]+", ' ').Trim() + + if ($CLIType -eq 'copilot') { + $cmd = 'copilot' + $cliArgs = @('-p', $flatPrompt, '--yolo', '--agent', 'ReviewPR') + $monitorFiles = @($logPath) + $cleanupTask = $null + } + else { + $cmd = 'claude' + $cliArgs = @('-p', $flatPrompt, '--dangerously-skip-permissions', '--agent', 'ReviewPR', + '--debug', 'all', '--debug-file', $debugPath) + $monitorFiles = @($debugPath) + $cleanupTask = { + param($Tracker) + $outDir = $Tracker.ExecutionParameters.OutputDir + $dbg = Join-Path $outDir '_claude-debug.log' + if (Test-Path $dbg) { + $fi = [System.IO.FileInfo]::new($dbg) + if ($fi.Length -gt 0) { + $sizeMB = [math]::Round($fi.Length / 1MB, 1) + Remove-Item $dbg -Force + Write-Host "[$($Tracker.Label)] Cleaned debug log (${sizeMB} MB)" + } + } + # Claude CLI auto-creates a 0-byte 'latest' marker file — remove it. 
+ $latest = Join-Path $outDir 'latest' + if (Test-Path $latest) { Remove-Item $latest -Force } + } + } + + return @{ + Label = "$CLIType-pr-$PR" + ExecutionParameters = @{ + JobName = "$CLIType-pr-$PR" + Command = $cmd + Arguments = $cliArgs + WorkingDir = $RepoRoot + OutputDir = $prDir + LogPath = $logPath + } + MonitorFiles = $monitorFiles + CleanupTask = $cleanupTask + } +} + +# ── build definitions ──────────────────────────────────────────────────── + +$jobDefs = @(foreach ($pr in $PRNumbers) { + foreach ($cli in ($CLITypes | Select-Object -Unique)) { + New-CliJobDefinition -CLIType $cli -PR $pr ` + -RootPath $outputRootPath -RepoRoot $repoRoot ` + -PromptOverride $PromptText + } +}) + +Write-Host "Built $($jobDefs.Count) job definition(s):" +$jobDefs | ForEach-Object { Write-Host " $($_.Label)" } + +if (-not $Wait) { + Write-Host "`nDefinitions ready. Use -Wait to run them." + $jobDefs | ForEach-Object { + [PSCustomObject]@{ + Label = $_.Label + Command = $_.ExecutionParameters.Command + OutputDir = $_.ExecutionParameters.WorkingDir + LogPath = $_.ExecutionParameters.LogPath + MonitorFiles = $_.MonitorFiles -join '; ' + } + } | Format-Table -AutoSize + return +} + +# ── run orchestrator ───────────────────────────────────────────────────── + +$orchestratorPath = Join-Path $PSScriptRoot '..\..\parallel-job-orchestrator\scripts\Invoke-SimpleJobOrchestrator.ps1' + +# The orchestrator must run under 'Continue' so its monitoring loop survives +# transient errors. Temporarily lower the preference for the child scope. +# CRITICAL: The 2>&1 redirect prevents error-stream items from propagating +# to this scope (where EAP might be 'Stop'), which would silently kill the +# orchestrator's long-running monitoring loop. 
+$savedEAP = $ErrorActionPreference +$ErrorActionPreference = 'Continue' + +$results = & $orchestratorPath ` + -JobDefinitions $jobDefs ` + -MaxConcurrent $MaxConcurrent ` + -InactivityTimeoutSeconds $InactivityTimeoutSeconds ` + -MaxRetryCount $MaxRetryCount ` + -PollIntervalSeconds $PollIntervalSeconds ` + -LogDir $outputRootPath + +$ErrorActionPreference = $savedEAP + +# ── display results ────────────────────────────────────────────────────── + +"" +"Job results:" +$results | Format-Table Label, JobId, Status, JobState, ExitCode, RetryCount, LogPath -AutoSize + +"" +"Output files:" +foreach ($r in $results) { + "" + "[$($r.Label)] $($r.OutputDir)" + if (Test-Path $r.OutputDir) { + Get-ChildItem $r.OutputDir -File | + Select-Object Name, Length, LastWriteTime | + Sort-Object Name | + Format-Table -AutoSize + } + else { + ' (missing directory)' + } +} diff --git a/.github/skills/pr-review/scripts/IssueReviewLib.ps1 b/.github/skills/pr-review/scripts/IssueReviewLib.ps1 new file mode 100644 index 000000000000..b5fb8b9c754b --- /dev/null +++ b/.github/skills/pr-review/scripts/IssueReviewLib.ps1 @@ -0,0 +1,18 @@ +# IssueReviewLib.ps1 - Minimal helpers for PR review workflow +# Part of the PowerToys GitHub Copilot/Claude Code issue review system +# This is a trimmed version - pr-review only needs console helpers and repo root + +#region Console Output Helpers +function Info { param([string]$Message) Write-Host $Message -ForegroundColor Cyan } +function Warn { param([string]$Message) Write-Host $Message -ForegroundColor Yellow } +function Err { param([string]$Message) Write-Host $Message -ForegroundColor Red } +function Success { param([string]$Message) Write-Host $Message -ForegroundColor Green } +#endregion + +#region Repository Helpers +function Get-RepoRoot { + $root = git rev-parse --show-toplevel 2>$null + if (-not $root) { throw 'Not inside a git repository.' 
} + return (Resolve-Path $root).Path +} +#endregion diff --git a/.github/skills/pr-review/scripts/Post-ReviewComments.ps1 b/.github/skills/pr-review/scripts/Post-ReviewComments.ps1 new file mode 100644 index 000000000000..48a95226775a --- /dev/null +++ b/.github/skills/pr-review/scripts/Post-ReviewComments.ps1 @@ -0,0 +1,240 @@ +<# +.SYNOPSIS + Post review findings as PR comments via GitHub API. + +.DESCRIPTION + Parses the review output files and posts findings as PR review comments. + This provides a programmatic alternative to GitHub Copilot Code Review. + +.PARAMETER PRNumber + The PR number to post comments on. + +.PARAMETER MinSeverity + Minimum severity to post: high, medium, low, info. Default: medium. + +.PARAMETER DryRun + Show what would be posted without actually posting. + +.EXAMPLE + ./Post-ReviewComments.ps1 -PRNumber 45286 -MinSeverity medium +#> + +[CmdletBinding()] +param( + [Parameter(Mandatory)] + [int]$PRNumber, + + [ValidateSet('high', 'medium', 'low', 'info')] + [string]$MinSeverity = 'medium', + + [switch]$DryRun +) + +$scriptDir = Split-Path -Parent $MyInvocation.MyCommand.Path +. "$scriptDir/IssueReviewLib.ps1" + +$repoRoot = Get-RepoRoot +$reviewPath = Join-Path $repoRoot "Generated Files/prReview/$PRNumber" + +function Get-SeverityLevel { + param([string]$Severity) + switch ($Severity.ToLower()) { + 'high' { return 3 } + 'medium' { return 2 } + 'low' { return 1 } + 'info' { return 0 } + default { return 0 } + } +} + +function Parse-ReviewFindings { + <# + .SYNOPSIS + Parse review markdown files for findings with severity. 
+ #> + param( + [string]$ReviewPath, + [string]$MinSeverity = 'medium' + ) + + $minLevel = Get-SeverityLevel $MinSeverity + $findings = @() + + # Get all step files + $stepFiles = Get-ChildItem -Path $ReviewPath -Filter "*.md" | Where-Object { $_.Name -match '^\d{2}-' } + + foreach ($stepFile in $stepFiles) { + $content = Get-Content $stepFile.FullName -Raw + $stepName = $stepFile.BaseName + + # Parse MCP review comment blocks if present + $mcpPattern = '```mcp-review-comment\s*\n([\s\S]*?)```' + $mcpMatches = [regex]::Matches($content, $mcpPattern) + + foreach ($match in $mcpMatches) { + $blockContent = $match.Groups[1].Value + + # Parse JSON-like structure + if ($blockContent -match '"severity"\s*:\s*"(\w+)"') { + $severity = $Matches[1] + $severityLevel = Get-SeverityLevel $severity + + if ($severityLevel -ge $minLevel) { + $file = if ($blockContent -match '"file"\s*:\s*"([^"]+)"') { $Matches[1] } else { $null } + $line = if ($blockContent -match '"line"\s*:\s*(\d+)') { [int]$Matches[1] } else { $null } + $body = if ($blockContent -match '"body"\s*:\s*"([^"]*(?:\\.[^"]*)*)"') { + $Matches[1] -replace '\\n', "`n" -replace '\\"', '"' + } else { "" } + + $findings += @{ + Step = $stepName + Severity = $severity + File = $file + Line = $line + Body = $body + } + } + } + } + + # Also parse markdown-style findings (### Finding: or **Severity: X**) + $findingPattern = '(?:###\s*Finding[:\s]+([^\n]+)\n|(?:\*\*)?Issue[:\s]+([^\n]+)(?:\*\*)?)\s*(?:\n.*?)?(?:\*\*)?Severity[:\s]+(\w+)(?:\*\*)?' 
+        $mdMatches = [regex]::Matches($content, $findingPattern, 'IgnoreCase')
+
+        foreach ($match in $mdMatches) {
+            $title = if ($match.Groups[1].Success) { $match.Groups[1].Value } else { $match.Groups[2].Value }
+            $severity = $match.Groups[3].Value
+            $severityLevel = Get-SeverityLevel $severity
+
+            if ($severityLevel -ge $minLevel -and $title) {
+                $findings += @{
+                    Step     = $stepName
+                    Severity = $severity
+                    File     = $null
+                    Line     = $null
+                    Body     = "$title (from $stepName review)"
+                }
+            }
+        }
+    }
+
+    return $findings
+}
+
+function Post-PRReviewComment {
+    <#
+    .SYNOPSIS
+        Post a review comment on a PR.
+    .DESCRIPTION
+        Posts via POST /pulls/{n}/reviews. Line-specific findings must be
+        carried in the payload's 'comments' array; the reviews endpoint does
+        not accept top-level path/line keys (they are rejected with a 422).
+    #>
+    param(
+        [int]$PRNumber,
+        [string]$Body,
+        [string]$Path,
+        [int]$Line,
+        [string]$CommitId
+    )
+
+    $payload = @{
+        body  = $Body
+        event = "COMMENT"
+    }
+
+    if ($Path -and $Line -and $CommitId) {
+        # Line-specific comment: the reviews API expects path/line/side inside
+        # the 'comments' array, not as top-level keys of the review payload.
+        $payload["commit_id"] = $CommitId
+        $payload["comments"] = @(
+            @{ path = $Path; line = $Line; side = "RIGHT"; body = $Body }
+        )
+        # Review body stays empty; the text lives on the line comment itself.
+        $payload["body"] = ""
+    }
+
+    # -Depth 5 is required: the default depth (2) would flatten the nested
+    # comments array into strings and break the API request.
+    $json = $payload | ConvertTo-Json -Compress -Depth 5
+    $json | gh api "repos/microsoft/PowerToys/pulls/$PRNumber/reviews" --method POST --input - 2>&1
+}
+
+function Post-PRComment {
+    <#
+    .SYNOPSIS
+        Post a general comment on a PR (not line-specific).
+    #>
+    param(
+        [int]$PRNumber,
+        [string]$Body
+    )
+
+    # Issue comments endpoint handles general PR discussion comments.
+    $payload = @{ body = $Body } | ConvertTo-Json -Compress
+    $payload | gh api "repos/microsoft/PowerToys/issues/$PRNumber/comments" --method POST --input - 2>&1
+}
+
+# Main
+try {
+    if (-not (Test-Path $reviewPath)) {
+        Err "Review not found at: $reviewPath"
+        Err "Run pr-review skill first to generate review files."
+        exit 1
+    }
+
+    Info "Parsing review findings from: $reviewPath"
+    Info "Minimum severity: $MinSeverity"
+
+    $findings = Parse-ReviewFindings -ReviewPath $reviewPath -MinSeverity $MinSeverity
+
+    if ($findings.Count -eq 0) {
+        Success "No findings with severity >= $MinSeverity"
+        return @{ Posted = 0; Findings = @() }
+    }
+
+    Info "Found $($findings.Count) finding(s) to post"
+
+    # Get PR head commit for line comments
+    $prInfo = gh pr view $PRNumber --json headRefOid 2>$null | ConvertFrom-Json
+    $headCommit = $prInfo.headRefOid
+
+    # Group findings for a summary comment
+    $summaryLines = @()
+    $summaryLines += "## 🔍 Automated PR Review Summary"
+    $summaryLines += ""
+    $summaryLines += "| Step | Severity | Finding |"
+    $summaryLines += "|------|----------|---------|"
+
+    foreach ($finding in $findings) {
+        $emoji = switch ($finding.Severity.ToLower()) {
+            'high'   { '🔴' }
+            'medium' { '🟡' }
+            'low'    { '🟢' }
+            default  { 'ℹ️' }
+        }
+        $bodyPreview = if ($finding.Body.Length -gt 80) { $finding.Body.Substring(0, 77) + "..." } else { $finding.Body }
+        # Markdown table cells must be single-line and pipe-free.
+        $bodyPreview = $bodyPreview -replace '\n', ' ' -replace '\|', '/'
+        $summaryLines += "| $($finding.Step) | $emoji $($finding.Severity) | $bodyPreview |"
+    }
+
+    $summaryLines += ""
+    # BUGFIX: inside a double-quoted string a single ` is PowerShell's escape
+    # character and is swallowed, so the markdown code-span backticks never
+    # reached GitHub. `` emits a literal backtick.
+    $summaryLines += "_Review generated by pr-review skill. See ``Generated Files/prReview/$PRNumber/`` for full details._"
+
+    $summaryBody = $summaryLines -join "`n"
+
+    if ($DryRun) {
+        Warn "[DRY RUN] Would post summary comment:"
+        Write-Host $summaryBody
+        return @{ Posted = 0; Findings = $findings; DryRun = $true }
+    }
+
+    # Post summary comment
+    Info "Posting summary comment..."
+ $result = Post-PRComment -PRNumber $PRNumber -Body $summaryBody + + if ($result -match '"id"') { + Success "Posted summary comment with $($findings.Count) finding(s)" + } else { + Warn "Comment posting may have failed: $result" + } + + return @{ + Posted = 1 + Findings = $findings + Summary = $summaryBody + } +} +catch { + Err "Error: $($_.Exception.Message)" + exit 1 +} diff --git a/.github/skills/pr-review/scripts/Start-PRReviewWorkflow.ps1 b/.github/skills/pr-review/scripts/Start-PRReviewWorkflow.ps1 new file mode 100644 index 000000000000..f91d5ec6defe --- /dev/null +++ b/.github/skills/pr-review/scripts/Start-PRReviewWorkflow.ps1 @@ -0,0 +1,465 @@ +<# +.SYNOPSIS + Review PRs using GitHub Copilot CLI or Claude CLI via the parallel job orchestrator. + +.DESCRIPTION + For each specified PR, builds a review prompt and delegates execution to the + parallel-job-orchestrator skill for queuing, monitoring, retry, and cleanup. + Completed reviews are skipped by default (resumable). Use -Force to re-review. + +.PARAMETER PRNumbers + Array of PR numbers to review (required). + +.PARAMETER CLIType + AI CLI to use: copilot or claude. Default: copilot. + +.PARAMETER Model + Copilot CLI model to use (e.g., gpt-5.2-codex). + +.PARAMETER MinSeverity + Minimum severity to post as PR comments: high, medium, low, info. Default: medium. + +.PARAMETER MaxConcurrent + Maximum concurrent review jobs. Default: 4. + +.PARAMETER InactivityTimeoutSeconds + Kill the CLI process if log file doesn't grow for this many seconds. Default: 60. + +.PARAMETER MaxRetryCount + Number of retry attempts after inactivity kill. Default: 3. + +.PARAMETER PromptMode + Prompt style: workflow (full review-pr.prompt.md) or minimal. Default: workflow. + +.PARAMETER Force + Re-review PRs that already have completed reviews (00-OVERVIEW.md). + +.PARAMETER DryRun + Show what would be done without executing. 
+ +.EXAMPLE + # Review a single PR with copilot + ./Start-PRReviewWorkflow.ps1 -PRNumbers 45234 + +.EXAMPLE + # Review multiple PRs in parallel with claude + ./Start-PRReviewWorkflow.ps1 -PRNumbers 45234, 45235, 45236 -CLIType claude -MaxConcurrent 4 + +.EXAMPLE + # Re-review completed PRs + ./Start-PRReviewWorkflow.ps1 -PRNumbers 45234 -Force + +.NOTES + Prerequisites: + - GitHub CLI (gh) authenticated + - Copilot CLI or Claude CLI installed + - Uses parallel-job-orchestrator skill for execution +#> +# NOTE: Do NOT use [CmdletBinding()], [Parameter(Mandatory)], [ValidateSet()] +# or any attribute here. These make the script "advanced" which propagates +# ErrorActionPreference through PS7's plumbing and can silently kill the +# orchestrator's monitoring loop in a child scope. +param( + [int[]]$PRNumbers, + + [string]$CLIType = 'copilot', + + [string]$Model, + + [string]$MinSeverity = 'medium', + + [int]$MaxConcurrent = 20, + + [int]$InactivityTimeoutSeconds = 120, + + [int]$MaxRetryCount = 3, + + [int]$PollIntervalSeconds = 5, + + [string]$PromptMode = 'workflow', + + [switch]$DisableMcpConfig, + + [string]$OutputRoot = 'Generated Files/prReview', + + [string]$LogPath, + + [switch]$DryRun, + + [switch]$Force, + + [switch]$Help +) + +$ErrorActionPreference = 'Stop' + +# Manual validation (replacing [Parameter(Mandatory)] and [ValidateSet()]) +if (-not $PRNumbers -or $PRNumbers.Count -eq 0) { + Write-Error 'Start-PRReviewWorkflow: -PRNumbers is required.' + return +} +if ($CLIType -notin 'copilot', 'claude') { + Write-Error "Start-PRReviewWorkflow: Invalid -CLIType '$CLIType'. Must be 'copilot' or 'claude'." + return +} +if ($MinSeverity -notin 'high', 'medium', 'low', 'info') { + Write-Error "Start-PRReviewWorkflow: Invalid -MinSeverity '$MinSeverity'. Must be 'high', 'medium', 'low', or 'info'." + return +} +if ($PromptMode -notin 'workflow', 'minimal') { + Write-Error "Start-PRReviewWorkflow: Invalid -PromptMode '$PromptMode'. Must be 'workflow' or 'minimal'." 
+ return +} + +# ── logging ────────────────────────────────────────────────────────────── + +if ([string]::IsNullOrWhiteSpace($LogPath)) { + $LogPath = Join-Path (Get-Location) 'Start-PRReviewWorkflow.log' +} +$logDir = Split-Path -Parent $LogPath +if (-not [string]::IsNullOrWhiteSpace($logDir) -and -not (Test-Path $logDir)) { + New-Item -ItemType Directory -Path $logDir -Force | Out-Null +} +"[$(Get-Date -Format o)] Starting Start-PRReviewWorkflow" | Out-File -FilePath $LogPath -Encoding utf8 -Append + +function Write-LogHost { + param( + [Parameter(Position = 0, ValueFromRemainingArguments = $true)] + [object[]]$Object, + [object]$ForegroundColor, + [object]$BackgroundColor, + [switch]$NoNewline, + [Object]$Separator + ) + $message = [string]::Join(' ', ($Object | ForEach-Object { [string]$_ })) + "[$(Get-Date -Format o)] $message" | Out-File -FilePath $LogPath -Encoding utf8 -Append + $invokeParams = @{} + if ($PSBoundParameters.ContainsKey('ForegroundColor') -and -not [string]::IsNullOrWhiteSpace([string]$ForegroundColor)) { $invokeParams.ForegroundColor = $ForegroundColor } + if ($PSBoundParameters.ContainsKey('BackgroundColor') -and -not [string]::IsNullOrWhiteSpace([string]$BackgroundColor)) { $invokeParams.BackgroundColor = $BackgroundColor } + if ($NoNewline) { $invokeParams.NoNewline = $true } + if ($PSBoundParameters.ContainsKey('Separator')) { $invokeParams.Separator = $Separator } + Microsoft.PowerShell.Utility\Write-Host @invokeParams -Object $message +} +Set-Alias -Name Write-Host -Value Write-LogHost -Scope Script -Force + +# Load libraries +$scriptDir = Split-Path -Parent $MyInvocation.MyCommand.Path +. 
"$scriptDir/IssueReviewLib.ps1" + +$repoRoot = Get-RepoRoot + +# Resolve config directory name (.github or .claude) from script location +$_cfgDir = if ($PSScriptRoot -match '[\\/](\.github|\.claude)[\\/]') { $Matches[1] } else { '.github' } + +$reviewRoot = if ([System.IO.Path]::IsPathRooted($OutputRoot)) { + $OutputRoot +} else { + Join-Path $repoRoot $OutputRoot +} +if (-not (Test-Path $reviewRoot)) { + New-Item -ItemType Directory -Path $reviewRoot -Force | Out-Null +} + +if ($Help) { + Get-Help $MyInvocation.MyCommand.Path -Full + return +} + +# ── helpers ────────────────────────────────────────────────────────────── + +function Test-ReviewExists { + param([int]$PRNumber) + $reviewPath = Join-Path $reviewRoot "$PRNumber/00-OVERVIEW.md" + return Test-Path $reviewPath +} + +function Get-CopilotExecutablePath { + $copilotCmd = Get-Command copilot -ErrorAction SilentlyContinue + if (-not $copilotCmd) { return 'copilot' } + if ($copilotCmd.Source -match '\.ps1$') { + $bootstrapDir = Split-Path $copilotCmd.Source -Parent + $savedPath = $env:PATH + $env:PATH = ($env:PATH -split ';' | Where-Object { $_ -ne $bootstrapDir }) -join ';' + try { + $realCmd = Get-Command copilot -ErrorAction SilentlyContinue + if ($realCmd) { return $realCmd.Source } + } + finally { $env:PATH = $savedPath } + } + return $copilotCmd.Source +} + +function New-ReviewPrompt { + param( + [int]$PRNumber, + [string]$ReviewDir, + [string]$MinSev, + [string]$Mode + ) + + $reviewPathForPrompt = ($ReviewDir -replace '\\', '/') + + if ($Mode -eq 'minimal') { + return @" +Review PR #$PRNumber and write outputs directly into $reviewPathForPrompt/. +Create 00-OVERVIEW.md and all step files 01-functionality.md through 13-copilot-guidance.md. +Do not execute unsupported CLI flags. 
+"@ + } + + $reviewPromptPath = Join-Path $repoRoot "$_cfgDir/skills/pr-review/references/review-pr.prompt.md" + if (Test-Path $reviewPromptPath) { + $rawWorkflowPrompt = Get-Content $reviewPromptPath -Raw + $rawWorkflowPrompt = $rawWorkflowPrompt -replace '\{\{pr_number\}\}', [string]$PRNumber + $rawWorkflowPrompt = $rawWorkflowPrompt -replace "Generated Files/prReview/$PRNumber", $reviewPathForPrompt + $rawWorkflowPrompt = $rawWorkflowPrompt -replace 'Generated Files/prReview/\{\{pr_number\}\}', $reviewPathForPrompt + return @" +You are running an automated pull-request review workflow for PR #$PRNumber. +Execute the workflow below exactly and write outputs to $reviewPathForPrompt/. + +$rawWorkflowPrompt +"@ + } + + return @" +Follow exactly what at $_cfgDir/skills/pr-review/references/review-pr.prompt.md to do with PR #$PRNumber. +Post findings with severity >= $MinSev as PR review comments via GitHub MCP. +"@ +} + +# ── build job definitions ──────────────────────────────────────────────── + +Info "Repository root: $repoRoot" +Info "Review output root: $reviewRoot" +Info "CLI type: $CLIType" +Info "Max concurrent: $MaxConcurrent" +Info "Min severity for comments: $MinSeverity" +Info "Inactivity timeout (seconds): $InactivityTimeoutSeconds" +Info "Max retry count: $MaxRetryCount" +Info "Prompt mode: $PromptMode" + +# Build PR list, skip completed reviews +$prsToProcess = @($PRNumbers | Where-Object { $_ } | + ForEach-Object { [int]$_ } | Sort-Object -Unique) + +if (-not $Force -and $prsToProcess.Count -gt 0) { + $beforeCount = $prsToProcess.Count + $prsToProcess = @($prsToProcess | Where-Object { -not (Test-ReviewExists -PRNumber $_) }) + $skippedCount = $beforeCount - $prsToProcess.Count + if ($skippedCount -gt 0) { + Info "Skipped $skippedCount PRs with existing reviews (use -Force to redo)" + } +} + +if ($prsToProcess.Count -eq 0) { + Warn "No PRs to review." 
+ return +} + +Info "`nPRs to review:" +Info ("-" * 80) +foreach ($pr in $prsToProcess) { + Info (" #{0,-6} https://github.com/microsoft/PowerToys/pull/{0}" -f $pr) +} +Info ("-" * 80) + +if ($DryRun) { + Warn "`nDry run mode - no reviews will be executed." + return +} + +# Resolve CLI executable +$copilotExe = if ($CLIType -eq 'copilot') { Get-CopilotExecutablePath } else { $null } +$mcpConfigPath = Join-Path $repoRoot "$_cfgDir/skills/pr-review/references/mcp-config.json" + +$jobDefs = @(foreach ($pr in $prsToProcess) { + $reviewPath = Join-Path $reviewRoot "$pr" + $prompt = New-ReviewPrompt -PRNumber $pr -ReviewDir $reviewPath -MinSev $MinSeverity -Mode $PromptMode + $flatPrompt = ($prompt -replace "[\r\n]+", ' ').Trim() + + # Write initial in-progress .signal + New-Item -ItemType Directory -Path $reviewPath -Force | Out-Null + $now = (Get-Date).ToString("o") + $signalPath = Join-Path $reviewPath '.signal' + [ordered]@{ + status = "in-progress" + prNumber = $pr + totalSteps = 13 + completedSteps = @() + skippedSteps = @() + lastStep = $null + lastUpdated = $now + startedAt = $now + } | ConvertTo-Json -Depth 3 | Set-Content $signalPath -Force + + if ($CLIType -eq 'copilot') { + $cliArgs = @('-p', $flatPrompt, '--yolo', '--no-custom-instructions', '--agent', 'ReviewPR') + if (-not $DisableMcpConfig) { + $cliArgs = @('--additional-mcp-config', "@$mcpConfigPath") + $cliArgs + } + if ($Model) { $cliArgs += @('--model', $Model) } + $logFile = Join-Path $reviewPath "_copilot-review.log" + + @{ + Label = "copilot-pr-$pr" + ExecutionParameters = @{ + JobName = "copilot-pr-$pr" + Command = $copilotExe + Arguments = $cliArgs + WorkingDir = $repoRoot + OutputDir = $reviewPath + LogPath = $logFile + } + MonitorFiles = @($logFile) + CleanupTask = $null + } + } + else { + $debugFile = Join-Path $reviewPath "_claude-debug.log" + $logFile = Join-Path $reviewPath "_claude-review.log" + $cliArgs = @('-p', $flatPrompt, '--dangerously-skip-permissions', '--agent', 'ReviewPR', + 
'--debug', 'all', '--debug-file', $debugFile) + + @{ + Label = "claude-pr-$pr" + ExecutionParameters = @{ + JobName = "claude-pr-$pr" + Command = 'claude' + Arguments = $cliArgs + WorkingDir = $repoRoot + OutputDir = $reviewPath + LogPath = $logFile + } + MonitorFiles = @($debugFile) + CleanupTask = { + param($Tracker) + $outDir = $Tracker.ExecutionParameters.OutputDir + $dbg = Join-Path $outDir '_claude-debug.log' + if (Test-Path $dbg) { + $fi = [System.IO.FileInfo]::new($dbg) + if ($fi.Length -gt 0) { + $sizeMB = [math]::Round($fi.Length / 1MB, 1) + Remove-Item $dbg -Force + Write-Host "[$($Tracker.Label)] Cleaned debug log (${sizeMB} MB)" + } + } + # Claude CLI auto-creates a 0-byte 'latest' marker file — remove it. + $latest = Join-Path $outDir 'latest' + if (Test-Path $latest) { Remove-Item $latest -Force } + } + } + } +}) + +Info "`nBuilt $($jobDefs.Count) job definition(s):" +$jobDefs | ForEach-Object { Info " $($_.Label)" } + +# ── run orchestrator ───────────────────────────────────────────────────── + +$orchestratorPath = Join-Path $scriptDir '..\..\parallel-job-orchestrator\scripts\Invoke-SimpleJobOrchestrator.ps1' + +# CRITICAL: Lower ErrorActionPreference before calling the orchestrator. +# The orchestrator must run under 'Continue' so its monitoring loop survives +# transient errors. +$savedEAP = $ErrorActionPreference +$ErrorActionPreference = 'Continue' + +$results = & $orchestratorPath ` + -JobDefinitions $jobDefs ` + -MaxConcurrent $MaxConcurrent ` + -InactivityTimeoutSeconds $InactivityTimeoutSeconds ` + -MaxRetryCount $MaxRetryCount ` + -PollIntervalSeconds $PollIntervalSeconds ` + -LogDir $reviewRoot + +$ErrorActionPreference = $savedEAP + +# ── process results ────────────────────────────────────────────────────── + +# A job is only truly successful if it completed AND produced review output. +# The orchestrator marks jobs as 'Completed' when the process exits, but +# a fast crash (e.g. model unavailable) also exits cleanly with exit code 1. 
+$succeeded = @($results | Where-Object { + if ($_.Status -ne 'Completed') { return $false } + if ($_.Label -notmatch '(\d+)$') { return $false } + $prDir = Join-Path $reviewRoot $Matches[1] + $overview = Join-Path $prDir '00-OVERVIEW.md' + return (Test-Path $overview) +}) +$failed = @($results | Where-Object { + $_ -notin $succeeded +}) + +# Write final signal files +foreach ($r in $results) { + # Extract PR number from label (e.g. "copilot-pr-45601" → 45601) + if ($r.Label -notmatch '(\d+)$') { continue } + $prNum = [int]$Matches[1] + $prDir = Join-Path $reviewRoot "$prNum" + $signalPath = Join-Path $prDir '.signal' + $isFailed = $r -in $failed + + # Discover completed step files + $stepFiles = @() + $skippedSteps = @() + if (Test-Path $prDir) { + $mdFiles = Get-ChildItem -Path $prDir -Filter '*.md' -ErrorAction SilentlyContinue + $stepFiles = @($mdFiles | Where-Object { $_.Name -match '^\d{2}-' } | + ForEach-Object { $_.BaseName }) + $overviewPath = Join-Path $prDir '00-OVERVIEW.md' + if (Test-Path $overviewPath) { + $overviewText = Get-Content $overviewPath -Raw -ErrorAction SilentlyContinue + $skippedSteps = @([regex]::Matches($overviewText, '(\d{2}-[\w-]+)\s*.*Skipped') | + ForEach-Object { $_.Groups[1].Value }) + } + } + + $now = (Get-Date).ToString("o") + $startedAt = $now + if (Test-Path $signalPath) { + try { + $existing = Get-Content $signalPath -Raw | ConvertFrom-Json + if ($existing.startedAt) { $startedAt = $existing.startedAt } + } catch { } + } + + [ordered]@{ + status = if ($isFailed) { "failure" } else { "success" } + prNumber = $prNum + totalSteps = 13 + completedSteps = $stepFiles + skippedSteps = $skippedSteps + lastStep = if ($stepFiles.Count -gt 0) { $stepFiles[-1] } else { $null } + lastUpdated = $now + startedAt = $startedAt + timestamp = $now + retryCount = $r.RetryCount + } | ConvertTo-Json -Depth 3 | Set-Content $signalPath -Force +} + +# ── summary ────────────────────────────────────────────────────────────── + +Info "`n$("=" * 80)" 
+Info "PR REVIEW COMPLETE" +Info ("=" * 80) +Info "Total jobs: $($results.Count)" + +if ($succeeded.Count -gt 0) { + Success "Succeeded: $($succeeded.Count)" + foreach ($r in $succeeded) { Success " $($r.Label) (retries: $($r.RetryCount))" } +} + +if ($failed.Count -gt 0) { + Err "Had issues: $($failed.Count)" + foreach ($r in $failed) { Err " $($r.Label) — status: $($r.Status), retries: $($r.RetryCount)" } +} + +Info "`nReview files location: $OutputRoot/<PR_NUMBER>/" +Info "Absolute output path: $reviewRoot" + +# Display per-job results table +$results | Format-Table Label, Status, JobState, ExitCode, RetryCount, LogPath -AutoSize + +Info ("=" * 80) + +return $results diff --git a/.github/skills/pr-review/scripts/Test-IncrementalReview.ps1 b/.github/skills/pr-review/scripts/Test-IncrementalReview.ps1 new file mode 100644 index 000000000000..b03bbe7cd59b --- /dev/null +++ b/.github/skills/pr-review/scripts/Test-IncrementalReview.ps1 @@ -0,0 +1,170 @@ +<# +.SYNOPSIS + Tests and previews incremental review detection for a pull request. + +.DESCRIPTION + This helper script validates the incremental review detection logic by analyzing an existing + PR review folder. It reads the last reviewed SHA from the overview file, compares it with + the current PR state, and displays detailed information about what has changed. + + This is useful for: + - Testing the incremental review system before running a full review + - Understanding what changed since the last review iteration + - Verifying that review metadata was properly recorded + +.PARAMETER PullRequestNumber + The pull request number to test incremental review detection for. + +.PARAMETER RepositoryOwner + The GitHub repository owner. Defaults to "microsoft". + +.PARAMETER RepositoryName + The GitHub repository name. Defaults to "PowerToys". 
+ +.OUTPUTS + Colored console output displaying: + - Current and last reviewed commit SHAs + - Whether incremental review is possible + - List of new commits since last review + - List of changed files with status indicators + - Recommended review strategy + +.EXAMPLE + .\Test-IncrementalReview.ps1 -PullRequestNumber 42374 + Tests incremental review detection for PR #42374. + +.EXAMPLE + .\Test-IncrementalReview.ps1 -PullRequestNumber 42374 -RepositoryOwner "myorg" -RepositoryName "myrepo" + Tests incremental review for a PR in a different repository. + +.NOTES + Requires GitHub CLI (gh) to be installed and authenticated. + Run 'gh auth login' if not already authenticated. + + Prerequisites: + - PR review folder must exist at "Generated Files\prReview\{PRNumber}" + - 00-OVERVIEW.md must exist in the review folder + - For incremental detection, overview must contain "Last reviewed SHA" metadata + +.LINK + https://cli.github.com/ +#> + +[CmdletBinding()] +param( + [Parameter(Mandatory = $true, HelpMessage = "Pull request number to test")] + [int]$PullRequestNumber, + + [Parameter(Mandatory = $false, HelpMessage = "Repository owner")] + [string]$RepositoryOwner = "microsoft", + + [Parameter(Mandatory = $false, HelpMessage = "Repository name")] + [string]$RepositoryName = "PowerToys" +) + +# Resolve paths to review folder and overview file +$repositoryRoot = Split-Path (Split-Path $PSScriptRoot -Parent) -Parent +$reviewFolderPath = Join-Path $repositoryRoot "Generated Files\prReview\$PullRequestNumber" +$overviewFilePath = Join-Path $reviewFolderPath "00-OVERVIEW.md" + +Write-Host "=== Testing Incremental Review for PR #$PullRequestNumber ===" -ForegroundColor Cyan +Write-Host "" + +# Check if review folder exists +if (-not (Test-Path $reviewFolderPath)) { + Write-Host "❌ Review folder not found: $reviewFolderPath" -ForegroundColor Red + Write-Host "This appears to be a new review (iteration 1)" -ForegroundColor Yellow + exit 0 +} + +# Check if overview file exists +if 
(-not (Test-Path $overviewFilePath)) { + Write-Host "❌ Overview file not found: $overviewFilePath" -ForegroundColor Red + Write-Host "This appears to be an incomplete review" -ForegroundColor Yellow + exit 0 +} + +# Read overview file and extract last reviewed SHA +Write-Host "📄 Reading overview file..." -ForegroundColor Green +$overviewFileContent = Get-Content $overviewFilePath -Raw + +if ($overviewFileContent -match '\*\*Last reviewed SHA:\*\*\s+(\w+)') { + $lastReviewedSha = $Matches[1] + Write-Host "✅ Found last reviewed SHA: $lastReviewedSha" -ForegroundColor Green +} else { + Write-Host "⚠️ No 'Last reviewed SHA' found in overview - this may be an old format" -ForegroundColor Yellow + Write-Host "Proceeding without incremental detection (full review will be needed)" -ForegroundColor Yellow + exit 0 +} + +Write-Host "" +Write-Host "🔍 Running incremental change detection..." -ForegroundColor Cyan + +# Call the incremental changes detection script +$incrementalChangesScriptPath = Join-Path $PSScriptRoot "Get-PrIncrementalChanges.ps1" +if (-not (Test-Path $incrementalChangesScriptPath)) { + Write-Host "❌ Script not found: $incrementalChangesScriptPath" -ForegroundColor Red + exit 1 +} + +try { + $analysisResult = & $incrementalChangesScriptPath ` + -PullRequestNumber $PullRequestNumber ` + -LastReviewedCommitSha $lastReviewedSha ` + -RepositoryOwner $RepositoryOwner ` + -RepositoryName $RepositoryName | ConvertFrom-Json + + # Display analysis results + Write-Host "" + Write-Host "=== Incremental Review Analysis ===" -ForegroundColor Cyan + Write-Host "Current HEAD SHA: $($analysisResult.CurrentHeadSha)" -ForegroundColor White + Write-Host "Last reviewed SHA: $($analysisResult.LastReviewedSha)" -ForegroundColor White + Write-Host "Base branch: $($analysisResult.BaseRefName)" -ForegroundColor White + Write-Host "Head branch: $($analysisResult.HeadRefName)" -ForegroundColor White + Write-Host "" + Write-Host "Is incremental? 
$($analysisResult.IsIncremental)" -ForegroundColor $(if ($analysisResult.IsIncremental) { "Green" } else { "Yellow" }) + Write-Host "Need full review? $($analysisResult.NeedFullReview)" -ForegroundColor $(if ($analysisResult.NeedFullReview) { "Yellow" } else { "Green" }) + Write-Host "" + Write-Host "Summary: $($analysisResult.Summary)" -ForegroundColor Cyan + Write-Host "" + + # Display new commits if any + if ($analysisResult.NewCommits -and $analysisResult.NewCommits.Count -gt 0) { + Write-Host "📝 New commits ($($analysisResult.NewCommits.Count)):" -ForegroundColor Green + foreach ($commit in $analysisResult.NewCommits) { + Write-Host " - $($commit.Sha): $($commit.Message)" -ForegroundColor Gray + } + Write-Host "" + } + + # Display changed files if any + if ($analysisResult.ChangedFiles -and $analysisResult.ChangedFiles.Count -gt 0) { + Write-Host "📁 Changed files ($($analysisResult.ChangedFiles.Count)):" -ForegroundColor Green + foreach ($file in $analysisResult.ChangedFiles) { + $statusDisplayColor = switch ($file.Status) { + "added" { "Green" } + "removed" { "Red" } + "modified" { "Yellow" } + "renamed" { "Cyan" } + default { "White" } + } + Write-Host " - [$($file.Status)] $($file.Filename) (+$($file.Additions)/-$($file.Deletions))" -ForegroundColor $statusDisplayColor + } + Write-Host "" + } + + # Suggest review strategy based on analysis + Write-Host "=== Recommended Review Strategy ===" -ForegroundColor Cyan + if ($analysisResult.NeedFullReview) { + Write-Host "🔄 Full review recommended" -ForegroundColor Yellow + } elseif ($analysisResult.IsIncremental -and ($analysisResult.ChangedFiles.Count -eq 0)) { + Write-Host "✅ No changes detected - no review needed" -ForegroundColor Green + } elseif ($analysisResult.IsIncremental) { + Write-Host "⚡ Incremental review possible - review only changed files" -ForegroundColor Green + Write-Host "💡 Consider applying smart step filtering based on file types" -ForegroundColor Cyan + } + +} catch { + Write-Host "❌ Error 
running incremental change detection: $_" -ForegroundColor Red + exit 1 +} diff --git a/.github/skills/pr-rework/SKILL.md b/.github/skills/pr-rework/SKILL.md new file mode 100644 index 000000000000..32fc65037c5e --- /dev/null +++ b/.github/skills/pr-rework/SKILL.md @@ -0,0 +1,248 @@ +--- +name: pr-rework +description: Iteratively rework pull requests to production quality using local worktrees. Use when asked to polish a PR, iterate on PR quality, rework a PR locally, fix and re-review a PR until clean, prepare PR for merge, loop review-fix on a PR, or bring multiple PRs to merge-ready state. Creates worktrees, runs pr-review locally (no GitHub posting), applies pr-fix for medium+ issues, builds and runs unit tests, and loops until no actionable findings remain. Supports multiple PRs in parallel with full crash-resume. +license: Complete terms in LICENSE.txt +--- + +# PR Rework Skill + +Iteratively rework pull requests to production quality entirely locally. Creates a worktree per PR, runs review → fix → build/test → re-review loops until the PR is clean, then asks the human to push. + +**Key difference from `pr-review` + `pr-fix`**: This skill keeps everything local — no comments posted, no pushes, no thread resolution. The human decides when to push. + +### Why a separate local-review prompt? + +The standard `pr-review` prompt (`review-pr.prompt.md`) fetches file content and patches from the **GitHub API** (`gh pr view`, `gh api .../pulls/N/files`, `Get-GitHubRawFile.ps1`). This works for remote reviews but **breaks in the rework loop**: after iteration 1 fixes files locally, the remote PR hasn't changed, so pr-review would re-fetch the same stale code and produce identical findings forever. + +`rework-local-review.prompt.md` uses `git diff main` (two-dot) and local file reads instead, so it always sees the latest worktree state including uncommitted fix changes. 
Two-dot diff is critical: three-dot (`main...HEAD`) only shows committed changes and would miss uncommitted fixes from previous iterations. It reuses the same per-step checklists (01-functionality through 13) from pr-review. + +## Skill Contents + +``` +.github/skills/pr-rework/ +├── SKILL.md # This file +├── LICENSE.txt # MIT License +├── references/ +│ ├── mcp-config.json # MCP configuration +│ ├── rework-local-review.prompt.md # AI prompt for LOCAL review (git diff, no GitHub API) +│ └── rework-fix.prompt.md # AI prompt for local fix pass +└── scripts/ + ├── Start-PRRework.ps1 # Main single-PR orchestrator (review→fix→test loop) + ├── Start-PRReworkParallel.ps1 # Multi-PR parallel launcher + ├── Get-PRReworkStatus.ps1 # Check rework state for all PRs + └── IssueReviewLib.ps1 # Shared helpers (copy) +``` + +## Output Directory + +All artifacts are placed under `Generated Files/prRework/<pr-number>/` (gitignored). + +``` +Generated Files/prRework/ +└── <pr-number>/ + ├── .state.json # Resumable state (iteration, phase, worktree path) + ├── worktree-info.json # Worktree path + branch mapping + ├── iteration-1/ + │ ├── review/ # pr-review output (00-OVERVIEW.md, step files) + │ ├── findings.json # Parsed medium+ findings from review + │ ├── fix.log # Copilot CLI fix output + │ ├── build.log # Build output + │ └── test.log # Unit test output + ├── iteration-2/ + │ └── ... 
# Same structure per iteration + ├── summary.md # Final human-readable summary of all changes + └── .signal # Completion signal for orchestrator +``` + +## Signal File + +```json +{ + "status": "success", + "prNumber": 45365, + "timestamp": "2026-02-10T10:05:23Z", + "iterations": 3, + "finalFindingsCount": 0, + "worktreePath": "Q:/PowerToys-ab12" +} +``` + +Status values: `success` (no findings remain), `max-iterations` (hit limit but improved), `failure` + +## State File (`.state.json`) — Crash Resume + +```json +{ + "prNumber": 45365, + "branch": "feature/my-pr", + "worktreePath": "Q:/PowerToys-ab12", + "currentIteration": 2, + "currentPhase": "fix", + "maxIterations": 5, + "phaseHistory": [ + { "iteration": 1, "phase": "review", "status": "done", "timestamp": "..." }, + { "iteration": 1, "phase": "fix", "status": "done", "findingsFixed": 4, "timestamp": "..." }, + { "iteration": 1, "phase": "build", "status": "done", "exitCode": 0, "timestamp": "..." }, + { "iteration": 1, "phase": "test", "status": "done", "passed": 42, "failed": 0, "timestamp": "..." }, + { "iteration": 2, "phase": "review", "status": "done", "timestamp": "..." }, + { "iteration": 2, "phase": "fix", "status": "in-progress", "timestamp": "..." } + ], + "startedAt": "2026-02-10T10:00:00Z", + "lastUpdatedAt": "2026-02-10T10:15:00Z" +} +``` + +On resume, the script reads `.state.json`, finds the last `in-progress` phase, and restarts from there. 
+ +## When to Use This Skill + +- Polish a PR before requesting human review +- Iterate review/fix cycles locally without posting to GitHub +- Bring multiple PRs to merge-ready quality in parallel +- Prepare PRs for final human sign-off +- Run quality gate loop: review → fix → build → test → repeat + +## Prerequisites + +- GitHub CLI (`gh`) installed and authenticated +- Copilot CLI or Claude CLI installed +- PowerShell 7+ +- PR must be open (not draft) +- `tools/build/WorktreeLib.ps1` available (for worktree management) + +## Required Variables + +| Variable | Description | Example | +|----------|-------------|---------| +| `{{PRNumbers}}` | One or more PR numbers to rework | `45365, 45366` | + +## Workflow + +### Single PR + +```powershell +# Rework a single PR with default settings +.github/skills/pr-rework/scripts/Start-PRRework.ps1 -PRNumber 45365 -CLIType copilot -Force + +# With model override and custom max iterations +.github/skills/pr-rework/scripts/Start-PRRework.ps1 -PRNumber 45365 -CLIType copilot -Model claude-opus-4.6 -MaxIterations 5 -Force +``` + +### Multiple PRs in Parallel + +```powershell +# Rework 3 PRs with throttle limit +.github/skills/pr-rework/scripts/Start-PRReworkParallel.ps1 -PRNumbers 45365,45366,45367 -CLIType copilot -Model claude-opus-4.6 -ThrottleLimit 2 -Force +``` + +### Check Status + +```powershell +# See rework state for all PRs +.github/skills/pr-rework/scripts/Get-PRReworkStatus.ps1 +``` + +### Resume After Crash + +The same command resumes from where it left off (reads `.state.json`): + +```powershell +# Automatically resumes from last checkpoint +.github/skills/pr-rework/scripts/Start-PRRework.ps1 -PRNumber 45365 -CLIType copilot -Force +``` + +Use `-Fresh` to discard previous state and start over: + +```powershell +.github/skills/pr-rework/scripts/Start-PRRework.ps1 -PRNumber 45365 -CLIType copilot -Fresh -Force +``` + +## Loop Logic + +``` +Phase 0 — BUILD ESSENTIALS: One-time tools/build/build-essentials.cmd (NuGet 
restore) + +for each iteration (1..MaxIterations): + Phase 1 — REVIEW: Run pr-review locally (git diff main, no GitHub API) + Phase 2 — PARSE: Extract medium+ severity findings → findings.json + Phase 3 — CHECK: If 0 actionable findings → DONE (success) + Phase 4 — FIX: Run Copilot CLI with rework-fix.prompt.md in worktree + Phase 5 — BUILD: Run tools/build/build.cmd for ALL changed projects + Phase 6 — TEST: Discover & run related unit tests + → next iteration (build/test failures fed as context to next fix) + +FINAL VERIFICATION — One extra review-only pass after last iteration: + If 0 findings → DONE (success) + Otherwise → status max-iterations with remaining count +``` + +Key details: +- **Two-dot diff** (`git diff main`) used everywhere — includes uncommitted fix changes +- **Multi-project build** — ALL changed `.csproj`/`.vcxproj` directories are built, not just the first +- **Cross-iteration error feedback** — build/test failures from iteration N are fed to iteration N+1's fix prompt +- **Final verification** — prevents silent regressions from the last fix pass + +## CLI Options + +### Start-PRRework.ps1 + +| Parameter | Description | Default | +|-----------|-------------|---------| +| `-PRNumber` | PR number to rework | (required) | +| `-CLIType` | `copilot` or `claude` | `copilot` | +| `-Model` | Copilot CLI model override | `claude-opus-4.6` | +| `-MaxIterations` | Max review/fix loops | `5` | +| `-MinSeverity` | Minimum severity to fix: `high`, `medium`, `low` | `medium` | +| `-ReviewTimeoutMin` | Timeout for review CLI call (minutes) | `10` | +| `-FixTimeoutMin` | Timeout for fix CLI call (minutes) | `15` | +| `-Force` | Skip confirmation prompts | `false` | +| `-Fresh` | Discard previous state, start over | `false` | +| `-SkipTests` | Skip unit test phase | `false` | + +### Start-PRReworkParallel.ps1 + +| Parameter | Description | Default | +|-----------|-------------|---------| +| `-PRNumbers` | Array of PR numbers | (required) | +| `-ThrottleLimit` 
| Max concurrent rework jobs | `2` | +| `-CLIType` | `copilot` or `claude` | `copilot` | +| `-Model` | Copilot CLI model override | `claude-opus-4.6` | +| `-MaxIterations` | Max loops per PR | `5` | +| `-MinSeverity` | Minimum severity to fix | `medium` | +| `-ReviewTimeoutMin` | Timeout for review CLI call (minutes) | `10` | +| `-FixTimeoutMin` | Timeout for fix CLI call (minutes) | `15` | +| `-Force` | Skip confirmation | `false` | +| `-Fresh` | Start all PRs fresh | `false` | +| `-SkipTests` | Skip unit test phase | `false` | + +## Timeout Handling + +Each Copilot CLI invocation has a process-level timeout (configurable, default 10 min for review, 15 min for fix). If the CLI hangs: +1. The process is killed after timeout +2. The phase is marked `timeout` in `.state.json` +3. On resume, the timed-out phase is retried + +## Integration with Other Skills + +| Skill | Integration | +|-------|-------------| +| `pr-review` | Review prompt files are reused; output goes to local iteration folder instead of `Generated Files/prReview/` | +| `pr-fix` | Fix prompt is adapted for local-only operation (no thread resolution, no push) | +| `issue-to-pr-cycle` | Can invoke `pr-rework` as a post-fix quality gate | + +## Quality Gate: Build + Test + +After every fix pass, the script: + +1. **Builds** the changed projects: + - Detects changed `.csproj`/`.vcxproj` files from `git diff` + - Runs `tools/build/build.cmd` scoped to those projects + - Checks exit code 0 + +2. **Runs unit tests**: + - Discovers test projects by product code prefix + - Looks for `*UnitTests` sibling folders + - Runs via `dotnet vstest` + - Reports pass/fail count + +If build or tests fail, the failure details are fed back to the next fix iteration. 
diff --git a/.github/skills/pr-rework/references/mcp-config.json b/.github/skills/pr-rework/references/mcp-config.json new file mode 100644 index 000000000000..5af15d54218c --- /dev/null +++ b/.github/skills/pr-rework/references/mcp-config.json @@ -0,0 +1,9 @@ +{ + "mcpServers": { + "github-artifacts": { + "command": "cmd", + "args": ["/c", "for /f %i in ('git rev-parse --show-toplevel') do node %i/tools/mcp/github-artifacts/launch.js"], + "tools": ["*"] + } + } +} diff --git a/.github/skills/pr-rework/references/rework-fix.prompt.md b/.github/skills/pr-rework/references/rework-fix.prompt.md new file mode 100644 index 000000000000..86c1cd450d9a --- /dev/null +++ b/.github/skills/pr-rework/references/rework-fix.prompt.md @@ -0,0 +1,107 @@ +--- +description: 'Fix PR review findings locally without posting or pushing' +name: 'rework-fix' +agent: 'agent' +argument-hint: 'PR number and findings file' +--- + +# Rework Fix — Local PR Quality Pass + +## Mission +Address review findings from a local pr-review pass. Apply targeted code fixes, then build and run unit tests. All changes stay local — no commits, no pushes, no GitHub comment posting. + +## Scope & Preconditions +- You are working in a git worktree checked out to the PR branch. +- A findings file (JSON) lists the issues to address, each with severity, file, line, and description. +- Only implement fixes for findings with severity >= the configured minimum. +- Do NOT commit, push, post comments, or resolve GitHub threads. + +## Inputs +- Required: `${input:pr_number}` — PR number for context +- Required: `${input:findings_file}` — Path to `findings.json` with parsed review findings +- Optional: `${input:build_errors}` — Path to build error log from a previous failed build +- Optional: `${input:test_failures}` — Path to test failure log from a previous failed test run +- Optional: `${input:min_severity}` — Minimum severity to fix (`high`, `medium`, `low`). 
Default: `medium`
+
+## Findings JSON Schema
+```json
+[
+  {
+    "id": "F-001",
+    "step": "01-functionality",
+    "severity": "high",
+    "file": "src/modules/Foo/Bar.cs",
+    "line": 42,
+    "endLine": 50,
+    "title": "Null reference in error path",
+    "description": "The catch block accesses `result.Value` without null check...",
+    "suggestedFix": "Add null guard before accessing .Value"
+  }
+]
+```
+
+## Workflow
+
+### Phase 1: Understand Context
+1. Read the PR diff to understand overall changes: `git diff origin/main --stat`
+   (Use two-dot `git diff origin/main`, NOT three-dot `origin/main...HEAD` — changes may be uncommitted.
+   Always use `origin/main` instead of `main` to avoid stale local refs.)
+2. Read the findings file to understand what needs fixing.
+3. If build errors or test failures are provided, read those too — they take priority.
+
+### Phase 2: Fix Build Errors (if any)
+If `${input:build_errors}` is provided:
+1. Read the build error log.
+2. Fix each compilation error.
+3. These take absolute priority over review findings.
+
+### Phase 3: Fix Test Failures (if any)
+If `${input:test_failures}` is provided:
+1. Read the test failure log.
+2. Fix each failing test — either fix the code under test or update the test if the new behavior is intentional.
+3. Test failures take priority over review findings (except build errors).
+
+### Phase 4: Fix Review Findings
+For each finding in `${input:findings_file}` with severity >= `${input:min_severity}`:
+1. Read the target file and understand the context around the reported line.
+2. Determine the appropriate fix:
+   - **Simple fix**: Apply the code change directly.
+   - **Complex refactor**: Write a brief note explaining why it was deferred (do not change code).
+3. Apply the fix with minimal edits — do not refactor surrounding code.
+4. Track what was fixed and what was deferred.
+
+### Phase 5: Build Verification
+1. Identify changed project files: `git diff origin/main --name-only | Select-String '\.(csproj|vcxproj)$'`
+2. 
If specific projects changed, build them: `tools/build/build.cmd -Path <project-dir>` +3. Otherwise, build from the changed source directories. +4. Check exit code — 0 means success. +5. If build fails, read the error log and fix the issues, then rebuild. +6. Repeat up to 3 build-fix attempts. + +### Phase 6: Unit Test Verification +1. Find test projects related to the changed code: + - Look for sibling or nearby `*UnitTests` or `*Tests` projects. + - Match by product code prefix (e.g., changes to `FancyZones` → look for `FancyZonesUnitTests`). +2. Build the test project if found. +3. Run the tests using `vstest.console.exe` or the test runner available. +4. If tests fail, analyze failures and fix — either the production code or the test expectations. +5. Repeat up to 2 test-fix attempts. + +### Phase 7: Summary +Write a brief summary to stdout listing: +- Findings fixed (with IDs) +- Findings deferred (with reasons) +- Build result (pass/fail) +- Test result (pass/fail/skipped) + +## Output Expectations +- Code changes applied in the worktree (not committed). +- Build passes (exit code 0). +- Unit tests pass (or no test project found). +- No commits, no pushes, no GitHub API calls. + +## Quality Rules +- Follow existing code style (`.editorconfig`, `.clang-format`, XamlStyler). +- Do not introduce new warnings. +- Do not add noisy logging in hot paths. +- Keep changes minimal and targeted to the finding. 
diff --git a/.github/skills/pr-rework/references/rework-local-review.prompt.md b/.github/skills/pr-rework/references/rework-local-review.prompt.md new file mode 100644 index 000000000000..b81d2c645ab5 --- /dev/null +++ b/.github/skills/pr-rework/references/rework-local-review.prompt.md @@ -0,0 +1,174 @@ +--- +description: 'Perform a local-only PR review using git diff (no GitHub API for file content)' +name: 'rework-local-review' +agent: 'agent' +argument-hint: 'PR number, output directory' +--- + +# Local PR Review (Worktree-Based) + +**Goal**: Review code changes in a local worktree using `git diff` as the data source. +Write per-step Markdown files with machine-readable finding blocks. + +> **Key difference from `review-pr.prompt.md`**: This prompt reads code from the +> local worktree via `git diff` and `cat`/`Get-Content`, NOT from GitHub API. +> It does NOT post comments, resolve threads, or call any GitHub API. + +## Inputs +- `${input:pr_number}` — PR number (for labeling only) +- `${input:output_dir}` — Directory to write step files into +- `${input:iteration}` — Iteration number for this review cycle +- `${input:previous_findings}` — (optional) Path to previous iteration's `findings.json` + +## How to get the changed files + +**USE THESE LOCAL COMMANDS — NOT GitHub API:** + +```bash +# Summary of what changed (origin/main vs working tree, includes uncommitted fixes) +git diff origin/main --stat + +# Full diff for review (includes uncommitted changes) +git diff origin/main + +# List only changed file names +git diff origin/main --name-only + +# Diff for a specific file +git diff origin/main -- path/to/file.cs + +# Read the current file content (working tree version, latest) +cat path/to/file.cs # or Get-Content path/to/file.cs + +# Read the base version for comparison +git show origin/main:path/to/file.cs +``` + +> **Why `origin/main` instead of `main`?** +> The local `main` ref may be stale (not fetched recently). 
Using `origin/main` +> ensures we always diff against the latest remote main. + +> **Why two-dot diff and NOT three-dot (`origin/main...HEAD`)?** +> After each fix iteration, changes are left uncommitted in the working tree. +> Three-dot diff only shows committed changes and would miss the fixes. +> Two-dot diff compares origin/main directly against the working tree, which always +> reflects the latest state. + +**NEVER USE:** +- `gh pr view` / `gh api` for fetching file content or patches +- `Get-GitHubRawFile.ps1` or `Get-GitHubPrFilePatch.ps1` +- `Get-PrIncrementalChanges.ps1` +- Any `https://raw.githubusercontent.com/` URLs + +## Output files +Folder: `${input:output_dir}/` + +Write each step file immediately after completing the step. Generate `00-OVERVIEW.md` last. + +## Smart step filtering +Determine which steps to run based on changed file types: + +| File pattern | Required steps | Skippable steps | +| --- | --- | --- | +| `*.cs`, `*.cpp`, `*.h` | 01-Functionality, 02-Compatibility, 03-Performance, 05-Security, 09-SOLID, 10-Repo patterns, 12-Code comments | — | +| `*.resx`, `Resources/*.xaml` | 06-Localization, 07-Globalization | Most others | +| `*.md` (docs only) | 11-Docs & automation | Most others | +| `*copilot*.md`, `.github/prompts/*.md` | 13-Copilot guidance, 11-Docs & automation | Most others | +| `*.csproj`, `*.vcxproj`, `packages.config` | 02-Compatibility, 05-Security, 10-Repo patterns | 06, 07, 04 | +| `UI/**`, `*View.xaml` | 04-Accessibility, 06-Localization | 03 (unless perf-sensitive) | + +Default: run all applicable steps when unsure. + +## Review steps +Use the same checklists from the pr-review skill step prompt files. For each step: + +1. Read the relevant checklist from `.github/skills/pr-review/references/NN-<step>.prompt.md` +2. Analyze the local diff (`git diff origin/main`) against that checklist +3. 
Write findings to `${input:output_dir}/NN-<step>.md` + +| Step | Checklist source | Output file | +| --- | --- | --- | +| 01 | `.github/skills/pr-review/references/01-functionality.prompt.md` | `01-functionality.md` | +| 02 | `.github/skills/pr-review/references/02-compatibility.prompt.md` | `02-compatibility.md` | +| 03 | `.github/skills/pr-review/references/03-performance.prompt.md` | `03-performance.md` | +| 04 | `.github/skills/pr-review/references/04-accessibility.prompt.md` | `04-accessibility.md` | +| 05 | `.github/skills/pr-review/references/05-security.prompt.md` | `05-security.md` | +| 06 | `.github/skills/pr-review/references/06-localization.prompt.md` | `06-localization.md` | +| 07 | `.github/skills/pr-review/references/07-globalization.prompt.md` | `07-globalization.md` | +| 08 | `.github/skills/pr-review/references/08-extensibility.prompt.md` | `08-extensibility.md` | +| 09 | `.github/skills/pr-review/references/09-solid-design.prompt.md` | `09-solid-design.md` | +| 10 | `.github/skills/pr-review/references/10-repo-patterns.prompt.md` | `10-repo-patterns.md` | +| 11 | `.github/skills/pr-review/references/11-docs-automation.prompt.md` | `11-docs-automation.md` | +| 12 | `.github/skills/pr-review/references/12-code-comments.prompt.md` | `12-code-comments.md` | +| 13 | `.github/skills/pr-review/references/13-copilot-guidance.prompt.md` | `13-copilot-guidance.md` | + +## Incremental review (iteration 2+) +When `${input:previous_findings}` is provided: + +1. Read the previous findings JSON to understand what was already flagged. +2. Check if those findings have been fixed in the current code (via `git diff`). +3. Focus review effort on: + - Files that were modified since the last iteration (new uncommitted changes) + - Areas adjacent to previous findings + - Any new issues introduced by fixes +4. In each step file, note which previous findings are now resolved. 
+
+To detect local changes since the last fix pass:
+```bash
+# Show uncommitted changes (what the fix pass modified)
+git diff --name-only
+
+# Show uncommitted changes with stat
+git diff --stat
+```
+
+## Finding format
+Use `mcp-review-comment` blocks for machine-readable findings (same format as pr-review):
+
+````md
+```mcp-review-comment
+{
+  "file": "src/modules/Foo/Bar.cs",
+  "line": 42,
+  "endLine": 50,
+  "severity": "high",
+  "title": "Null reference in error path",
+  "body": "Problem → Why it matters → Concrete fix suggestion.",
+  "suggestedFix": "Add null guard before accessing .Value",
+  "tags": ["functionality", "pr-${input:pr_number}"]
+}
+```
+````
+
+Severity levels:
+- **high**: Code doesn't work, crashes, data loss, security vulnerability
+- **medium**: Edge cases broken, degraded experience, incomplete implementation
+- **low**: Minor issues, suboptimal but working, style concerns
+- **info**: Suggestions, not blocking
+
+## Overview file template (`00-OVERVIEW.md`)
+```md
+# Local Review — PR #${input:pr_number}
+**Review iteration:** ${input:iteration}
+**Changed files:** <count from git diff --name-only origin/main>
+**High severity issues:** <count>
+
+## Review mode
+Local worktree review (no GitHub API)
+
+## Step results
+01 Functionality — <OK|Issues|Skipped>
+02 Compatibility — <OK|Issues|Skipped>
+...through 13
+
+## Findings summary
+| ID | Severity | File | Line | Title |
+|----|----------|------|------|-------|
+| F-001 | high | src/... | 42 | ... 
| +``` + +## Constraints +- **Read-only review** — do NOT modify any code +- **No GitHub API** — all data comes from local git commands +- **No posting** — do NOT post comments to GitHub +- **No MCP comment blocks execution** — write them in files for parsing, but do not execute them diff --git a/.github/skills/pr-rework/scripts/Get-PRReworkStatus.ps1 b/.github/skills/pr-rework/scripts/Get-PRReworkStatus.ps1 new file mode 100644 index 000000000000..a4da64c0b949 --- /dev/null +++ b/.github/skills/pr-rework/scripts/Get-PRReworkStatus.ps1 @@ -0,0 +1,193 @@ +<# +.SYNOPSIS + Display status of all pr-rework sessions from Generated Files. + +.DESCRIPTION + Scans Generated Files/prRework/ directories and reads .state.json and + .signal files to show a table of PR rework progress. + +.PARAMETER PRNumber + Optional: show status for a specific PR only. + +.PARAMETER Detailed + Show full phase history for each PR. + +.EXAMPLE + ./Get-PRReworkStatus.ps1 + +.EXAMPLE + ./Get-PRReworkStatus.ps1 -PRNumber 45365 -Detailed +#> +[CmdletBinding()] +param( + [int]$PRNumber, + + [switch]$Detailed +) + +$scriptDir = Split-Path -Parent $MyInvocation.MyCommand.Path +. (Join-Path $scriptDir 'IssueReviewLib.ps1') +$repoRoot = Get-RepoRoot + +$prReworkRoot = Join-Path $repoRoot 'Generated Files' 'prRework' + +if (-not (Test-Path $prReworkRoot)) { + Write-Host "No pr-rework data found at: $prReworkRoot" -ForegroundColor Yellow + return +} + +$dirs = Get-ChildItem -Path $prReworkRoot -Directory +if ($PRNumber) { + $dirs = $dirs | Where-Object { $_.Name -eq "$PRNumber" } +} + +if ($dirs.Count -eq 0) { + Write-Host "No pr-rework sessions found$(if ($PRNumber) { " for PR #$PRNumber" })." 
-ForegroundColor Yellow + return +} + +$results = foreach ($dir in $dirs) { + $stateFile = Join-Path $dir.FullName '.state.json' + $signalFile = Join-Path $dir.FullName '.signal' + + $prNum = $dir.Name + + # Read state + $state = $null + if (Test-Path $stateFile) { + try { $state = Get-Content $stateFile -Raw | ConvertFrom-Json } catch {} + } + + # Read signal + $signal = $null + if (Test-Path $signalFile) { + try { $signal = Get-Content $signalFile -Raw | ConvertFrom-Json } catch {} + } + + # Determine status + $status = 'unknown' + if ($signal) { + $status = $signal.status + } elseif ($state) { + $status = "iter-$($state.currentIteration)/$($state.maxIterations) ($($state.currentPhase))" + } + + # Count iterations with data + $iterDirs = Get-ChildItem -Path $dir.FullName -Directory -Filter 'iteration-*' -ErrorAction SilentlyContinue + $iterCount = $iterDirs.Count + + # Get latest findings count + $latestFindings = 0 + if ($iterDirs.Count -gt 0) { + $latestIterDir = $iterDirs | Sort-Object Name | Select-Object -Last 1 + $findingsFile = Join-Path $latestIterDir.FullName 'findings.json' + if (Test-Path $findingsFile) { + try { + $findings = Get-Content $findingsFile -Raw | ConvertFrom-Json + $latestFindings = $findings.Count + } catch {} + } + } + + # Worktree path + $worktreePath = '' + if ($state -and $state.worktreePath) { $worktreePath = $state.worktreePath } + + # Build result + [PSCustomObject]@{ + PR = $prNum + Status = $status + Iterations = $iterCount + Phase = if ($state) { $state.currentPhase } else { '-' } + Findings = $latestFindings + Branch = if ($state) { $state.branch } else { '-' } + WorktreePath = $worktreePath + LastUpdated = if ($state) { $state.lastUpdatedAt } else { '-' } + } +} + +Write-Host "" +Write-Host ("=" * 80) -ForegroundColor Cyan +Write-Host " PR REWORK STATUS" -ForegroundColor Cyan +Write-Host ("=" * 80) -ForegroundColor Cyan +Write-Host "" + +$results | Format-Table @( + @{Label = 'PR'; Expression = { $_.PR }; Width = 8} + @{Label 
= 'Status'; Expression = { $_.Status }; Width = 25} + @{Label = 'Iter'; Expression = { $_.Iterations }; Width = 5} + @{Label = 'Phase'; Expression = { $_.Phase }; Width = 10} + @{Label = 'Findings'; Expression = { $_.Findings }; Width = 9} + @{Label = 'Branch'; Expression = { $_.Branch }; Width = 30} + @{Label = 'Last Updated'; Expression = { $_.LastUpdated }; Width = 25} +) -AutoSize + +# Summary +$total = $results.Count +$done = ($results | Where-Object { $_.Status -eq 'success' }).Count +$maxed = ($results | Where-Object { $_.Status -eq 'max-iterations' }).Count +$failed = ($results | Where-Object { $_.Status -eq 'failure' }).Count +$running = $total - $done - $maxed - $failed + +Write-Host "" +Write-Host "Total: $total | Clean: $done | Max-Iter: $maxed | Failed: $failed | Running/Pending: $running" -ForegroundColor $( + if ($failed -gt 0) { 'Yellow' } elseif ($maxed -gt 0) { 'DarkYellow' } else { 'Green' } +) + +# ── Detailed view ────────────────────────────────────────────────────────── +if ($Detailed) { + foreach ($r in $results) { + $stateFile = Join-Path $prReworkRoot $r.PR '.state.json' + if (-not (Test-Path $stateFile)) { continue } + + $state = Get-Content $stateFile -Raw | ConvertFrom-Json + + Write-Host "" + Write-Host ("─" * 60) -ForegroundColor DarkCyan + Write-Host " PR #$($r.PR) — Phase History" -ForegroundColor DarkCyan + Write-Host ("─" * 60) -ForegroundColor DarkCyan + Write-Host "" + + if ($state.phaseHistory -and $state.phaseHistory.Count -gt 0) { + $state.phaseHistory | Format-Table @( + @{Label = 'Iter'; Expression = { $_.iteration }; Width = 5} + @{Label = 'Phase'; Expression = { $_.phase }; Width = 10} + @{Label = 'Status'; Expression = { $_.status }; Width = 12} + @{Label = 'Timestamp'; Expression = { $_.timestamp }; Width = 25} + ) -AutoSize + } else { + Write-Host " No phase history recorded." 
-ForegroundColor DarkGray + } + + # Show worktree path for easy access + if ($r.WorktreePath) { + Write-Host " Worktree: $($r.WorktreePath)" -ForegroundColor DarkGray + } + + # Show latest findings + $latestIterDir = Get-ChildItem -Path (Join-Path $prReworkRoot $r.PR) -Directory -Filter 'iteration-*' -ErrorAction SilentlyContinue | + Sort-Object Name | Select-Object -Last 1 + if ($latestIterDir) { + $findingsFile = Join-Path $latestIterDir.FullName 'findings.json' + if (Test-Path $findingsFile) { + $findings = Get-Content $findingsFile -Raw | ConvertFrom-Json + if ($findings.Count -gt 0) { + Write-Host "" + Write-Host " Latest Findings ($($findings.Count)):" -ForegroundColor DarkYellow + foreach ($f in $findings) { + $sevColor = switch ($f.severity) { + 'high' { 'Red' } + 'medium' { 'Yellow' } + 'low' { 'DarkGray' } + default { 'White' } + } + Write-Host " [$($f.id)] $($f.severity.ToUpper().PadRight(7)) $($f.file):$($f.line) — $($f.title)" -ForegroundColor $sevColor + } + } + } + } + } +} + +Write-Host "" +return $results diff --git a/.github/skills/pr-rework/scripts/IssueReviewLib.ps1 b/.github/skills/pr-rework/scripts/IssueReviewLib.ps1 new file mode 100644 index 000000000000..b5fb8b9c754b --- /dev/null +++ b/.github/skills/pr-rework/scripts/IssueReviewLib.ps1 @@ -0,0 +1,18 @@ +# IssueReviewLib.ps1 - Minimal helpers for PR review workflow +# Part of the PowerToys GitHub Copilot/Claude Code issue review system +# This is a trimmed version - pr-review only needs console helpers and repo root + +#region Console Output Helpers +function Info { param([string]$Message) Write-Host $Message -ForegroundColor Cyan } +function Warn { param([string]$Message) Write-Host $Message -ForegroundColor Yellow } +function Err { param([string]$Message) Write-Host $Message -ForegroundColor Red } +function Success { param([string]$Message) Write-Host $Message -ForegroundColor Green } +#endregion + +#region Repository Helpers +function Get-RepoRoot { + $root = git rev-parse 
--show-toplevel 2>$null + if (-not $root) { throw 'Not inside a git repository.' } + return (Resolve-Path $root).Path +} +#endregion diff --git a/.github/skills/pr-rework/scripts/Start-PRRework.ps1 b/.github/skills/pr-rework/scripts/Start-PRRework.ps1 new file mode 100644 index 000000000000..2a53865cc874 --- /dev/null +++ b/.github/skills/pr-rework/scripts/Start-PRRework.ps1 @@ -0,0 +1,1335 @@ +<# +.SYNOPSIS + Iteratively rework a PR to production quality using local review/fix/build/test loops. + +.DESCRIPTION + For a single PR: + 1. Creates or reuses a git worktree for the PR branch + 2. Runs pr-review locally (no GitHub posting) to find issues + 3. Parses medium+ severity findings into a structured list + 4. If no findings → done (PR is clean) + 5. Runs Copilot/Claude CLI to fix the findings in the worktree + 6. Builds changed projects and runs related unit tests + 7. Loops back to step 2 until clean or max iterations reached + 8. Writes a human-readable summary of all changes + + All changes stay LOCAL — no commits, no pushes, no GitHub posting. + The human reviews the summary and decides whether to push. + + Fully resumable: reads .state.json on restart and picks up from the last phase. + +.PARAMETER PRNumber + PR number to rework. + +.PARAMETER CLIType + AI CLI to use: copilot or claude. Default: copilot. + +.PARAMETER Model + Copilot CLI model override. Default: claude-opus-4.6. + +.PARAMETER MaxIterations + Maximum review/fix loop iterations. Default: 5. + +.PARAMETER MinSeverity + Minimum severity to fix: high, medium, low. Default: medium. + +.PARAMETER ReviewTimeoutMin + Timeout in minutes for the review CLI call. Default: 10. + +.PARAMETER FixTimeoutMin + Timeout in minutes for the fix CLI call. Default: 15. + +.PARAMETER Force + Skip confirmation prompts. + +.PARAMETER Fresh + Discard previous state and start over (keeps worktree). + +.PARAMETER SkipTests + Skip the unit test phase after each fix. 
+ +.EXAMPLE + ./Start-PRRework.ps1 -PRNumber 45365 -CLIType copilot -Model claude-sonnet-4 -Force + +.EXAMPLE + # Resume after crash + ./Start-PRRework.ps1 -PRNumber 45365 -CLIType copilot -Force + +.EXAMPLE + # Fresh start, skip tests + ./Start-PRRework.ps1 -PRNumber 45365 -CLIType copilot -Fresh -SkipTests -Force +#> +[CmdletBinding()] +param( + [Parameter(Mandatory)] + [int]$PRNumber, + + [ValidateSet('copilot', 'claude')] + [string]$CLIType = 'copilot', + + [string]$Model = 'claude-opus-4.6', + + [int]$MaxIterations = 5, + + [ValidateSet('high', 'medium', 'low')] + [string]$MinSeverity = 'medium', + + [int]$ReviewTimeoutMin = 10, + + [int]$FixTimeoutMin = 15, + + [switch]$Force, + + [switch]$Fresh, + + [switch]$SkipTests +) + +$ErrorActionPreference = 'Continue' + +# ── Load libraries ────────────────────────────────────────────────────────── +$scriptDir = Split-Path -Parent $MyInvocation.MyCommand.Path +. (Join-Path $scriptDir 'IssueReviewLib.ps1') + +$repoRoot = Get-RepoRoot + +# Resolve config directory name (.github or .claude) from script location +$_cfgDir = if ($PSScriptRoot -match '[\\/](\.github|\.claude)[\\/]') { $Matches[1] } else { '.github' } +$newWorktreeScript = Join-Path $repoRoot 'tools/build/New-WorktreeFromBranch.ps1' + +$genRoot = Join-Path $repoRoot 'Generated Files' 'prRework' $PRNumber +$stateFile = Join-Path $genRoot '.state.json' +$signalFile = Join-Path $genRoot '.signal' +$worktreeInfoFile = Join-Path $genRoot 'worktree-info.json' +$summaryFile = Join-Path $genRoot 'summary.md' + +# Severity ranking for filtering +$severityRank = @{ 'high' = 3; 'medium' = 2; 'low' = 1; 'info' = 0 } +$minRank = $severityRank[$MinSeverity] + +# ── Helper: State Management ─────────────────────────────────────────────── + +function Read-State { + if ((Test-Path $stateFile) -and -not $Fresh) { + return Get-Content $stateFile -Raw | ConvertFrom-Json + } + return $null +} + +function Save-State { + param($State) + $State.lastUpdatedAt = 
(Get-Date).ToString('o')
    $State | ConvertTo-Json -Depth 10 | Set-Content $stateFile -Force
}

# Build a brand-new state object for a first run (or a -Fresh restart).
# Captures the loop parameters so a resumed run honors the original settings.
function New-State {
    param([string]$Branch, [string]$WorktreePath)
    return [PSCustomObject]@{
        prNumber         = $PRNumber
        branch           = $Branch
        worktreePath     = $WorktreePath
        currentIteration = 1
        currentPhase     = 'review'
        maxIterations    = $MaxIterations
        phaseHistory     = @()
        startedAt        = (Get-Date).ToString('o')
        lastUpdatedAt    = (Get-Date).ToString('o')
    }
}

# Append one phase-transition record to the state's history and persist it.
# $Extra lets callers attach arbitrary diagnostic fields (counts, exit codes).
function Add-PhaseRecord {
    param($State, [string]$Phase, [string]$Status, [hashtable]$Extra = @{})
    $entry = @{
        iteration = $State.currentIteration
        phase     = $Phase
        status    = $Status
        timestamp = (Get-Date).ToString('o')
    }
    foreach ($kv in $Extra.GetEnumerator()) { $entry[$kv.Key] = $kv.Value }
    # phaseHistory may have been rehydrated from JSON as a fixed-size array,
    # so rebuild it as a mutable ArrayList before appending.
    $log = [System.Collections.ArrayList]@($State.phaseHistory)
    $log.Add([PSCustomObject]$entry) | Out-Null
    $State.phaseHistory = $log
    Save-State $State
}

# Return the most recent record for a given phase within one iteration,
# or $null when that phase never ran in that iteration.
function Get-LastPhaseOfType {
    param($State, [string]$Phase, [int]$Iteration)
    $State.phaseHistory | Where-Object {
        $_.iteration -eq $Iteration -and $_.phase -eq $Phase
    } | Select-Object -Last 1
}

# ── Helper: Worktree Management ────────────────────────────────────────────

# Resolve (or create) the git worktree that holds the PR branch.
# Returns the worktree path; falls back to the repo root when the main
# checkout is already on the target branch.
function Get-OrCreateWorktree {
    param([string]$Branch)

    # Fast path: the main repo already has the branch checked out.
    Push-Location $repoRoot
    try {
        $headBranch = git branch --show-current 2>$null
        if ($headBranch -eq $Branch) {
            Info "Main repo is on branch '$Branch' — using repo root as worktree"
            return $repoRoot
        }
    } finally { Pop-Location }

    # Delegate to New-WorktreeFromBranch.ps1 — handles fetch, reuse, submodules.
    # Probe for an existing worktree first so we do not invoke the script
    # (which opens VS Code windows via code --new-window) unnecessarily.
    . (Join-Path $repoRoot 'tools/build/WorktreeLib.ps1')
    $existingEntry = Get-WorktreeEntries | Where-Object { $_.Branch -eq $Branch } | Select-Object -First 1
    if (-not $existingEntry) {
        Info "Creating worktree for branch '$Branch' via New-WorktreeFromBranch.ps1..."
        $null = & $newWorktreeScript -Branch $Branch 2>&1
        if ($LASTEXITCODE -ne 0) {
            throw "New-WorktreeFromBranch.ps1 failed for branch '$Branch' (exit $LASTEXITCODE)"
        }
    } else {
        Info "Reusing existing worktree for '$Branch': $($existingEntry.Path)"
    }

    # Re-read the worktree table so a freshly created entry is visible.
    # (WorktreeLib was dot-sourced above; no redundant load needed.)
    $wtEntry = Get-WorktreeEntries | Where-Object { $_.Branch -eq $Branch } | Select-Object -First 1
    if (-not $wtEntry) {
        throw "Worktree for branch '$Branch' not found after creation"
    }
    $worktreePath = $wtEntry.Path

    # Copy config dirs to worktree (agents, skills, instructions, prompts, top-level md)
    # These aren't on the PR branch so the CLI can't find them without this.
+ $sourceCfg = Join-Path $repoRoot $_cfgDir + $destCfg = Join-Path $worktreePath $_cfgDir + if (Test-Path $sourceCfg) { + if (-not (Test-Path $destCfg)) { + New-Item -ItemType Directory -Path $destCfg -Force | Out-Null + } + foreach ($sub in @('agents', 'skills', 'instructions', 'prompts')) { + $src = Join-Path $sourceCfg $sub + $dst = Join-Path $destCfg $sub + if ((Test-Path $src) -and -not (Test-Path $dst)) { + Copy-Item -Path $src -Destination $dst -Recurse -Force + Info "Copied $_cfgDir/$sub to worktree" + } + } + # Top-level instruction file (copilot-instructions.md / CLAUDE.md) + foreach ($mdFile in @('copilot-instructions.md', 'CLAUDE.md')) { + $src = Join-Path $sourceCfg $mdFile + $dst = Join-Path $destCfg $mdFile + if ((Test-Path $src) -and -not (Test-Path $dst)) { + Copy-Item -Path $src -Destination $dst -Force + Info "Copied $_cfgDir/$mdFile to worktree" + } + } + } + + Info "Worktree ready at: $worktreePath" + return $worktreePath +} + +# ── Helper: Run CLI with Timeout ─────────────────────────────────────────── + +function Invoke-CLIWithTimeout { + param( + [string]$Prompt, + [string]$WorkDir, + [int]$TimeoutMinutes, + [string]$LogFile, + [string]$AgentName + ) + + $mcpConfigPath = Join-Path $repoRoot "$_cfgDir/skills/pr-rework/references/mcp-config.json" + $mcpConfig = "@$mcpConfigPath" + + # Find the copilot.ps1 bootstrapper that lives next to copilot.bat. + # We call copilot.ps1 directly to avoid the .bat wrapper which triggers + # "Terminate batch job (Y/N)?" prompts in interactive terminals. + function Resolve-CopilotPS1 { + $candidates = Get-Command copilot -CommandType Application -ErrorAction SilentlyContinue -All + foreach ($c in $candidates) { + $dir = Split-Path $c.Source -Parent + $ps1 = Join-Path $dir 'copilot.ps1' + if (Test-Path $ps1) { return $ps1 } + } + return $null + } + + # Ensure log directory exists. 
+ $logDir = Split-Path $LogFile -Parent + if (-not (Test-Path $logDir)) { New-Item -ItemType Directory -Path $logDir -Force | Out-Null } + + # Launch the CLI as a child pwsh process with Start-Process for full I/O. + # Start-Job was used previously but it breaks the CLI's interactive tool + # protocol — the CLI's file-edit and shell tool calls silently fail because + # Start-Job captures all output streams (*> redirect) in a separate runspace + # that doesn't share the real filesystem working directory. Start-Process + # gives the CLI a real process with proper working directory and I/O. + + switch ($CLIType) { + 'copilot' { + $copilotPS1 = Resolve-CopilotPS1 + if (-not $copilotPS1) { throw 'Cannot find copilot.ps1 bootstrapper. Is GitHub Copilot CLI installed?' } + Info " Using copilot CLI: $copilotPS1" + + # Write a thin wrapper script that the child process will execute. + # This avoids argument-escaping hell with Start-Process -ArgumentList. + $wrapperScript = Join-Path $logDir '_cli-wrapper.ps1' + $modelArg = if ($Model) { "--model '$Model'" } else { '' } + $agentArg = if ($AgentName) { "--agent '$AgentName'" } else { '' } + @" +`$ErrorActionPreference = 'Continue' +Set-Location -LiteralPath '$($WorkDir -replace "'","''")' +& '$($copilotPS1 -replace "'","''")' --additional-mcp-config '$($mcpConfig -replace "'","''")' $agentArg -p @' +$Prompt +'@ --allow-all -s $modelArg *> '$($LogFile -replace "'","''")' +exit `$LASTEXITCODE +"@ | Set-Content $wrapperScript -Force + $psi = [System.Diagnostics.ProcessStartInfo]::new() + $psi.FileName = 'pwsh' + $psi.Arguments = "-nop -nol -noe -File `"$wrapperScript`"" + $psi.WorkingDirectory = $WorkDir + $psi.UseShellExecute = $false + $psi.CreateNoWindow = $true + $proc = [System.Diagnostics.Process]::new() + $proc.StartInfo = $psi + [void]$proc.Start() + } + 'claude' { + $wrapperScript = Join-Path $logDir '_cli-wrapper.ps1' + $agentArg = if ($AgentName) { "--agent '$AgentName'" } else { '' } + @" +`$ErrorActionPreference = 
'Continue' +Set-Location -LiteralPath '$($WorkDir -replace "'","''")' +& claude --print --dangerously-skip-permissions $agentArg --prompt @' +$Prompt +'@ *> '$($LogFile -replace "'","''")' +exit `$LASTEXITCODE +"@ | Set-Content $wrapperScript -Force + $psi = [System.Diagnostics.ProcessStartInfo]::new() + $psi.FileName = 'pwsh' + $psi.Arguments = "-nop -nol -noe -File `"$wrapperScript`"" + $psi.WorkingDirectory = $WorkDir + $psi.UseShellExecute = $false + $psi.CreateNoWindow = $true + $proc = [System.Diagnostics.Process]::new() + $proc.StartInfo = $psi + [void]$proc.Start() + } + } + + # Wait with timeout + early-exit detection. + # The CLI often finishes its work (writes all files) but hangs indefinitely. + # We poll the log file: if it stops growing for 60s after reaching 500+ bytes, + # the CLI is done but hung — kill it and return success. + $timeoutMs = $TimeoutMinutes * 60 * 1000 + $pollMs = 15000 # check every 15 seconds + $staleLimit = 60000 # 60s of no growth = done + $minLogBytes = 500 # minimum log size before early-exit kicks in + $elapsed = 0 + $lastLogSize = 0 + $staleSince = $null + $earlyExit = $false + + while ($elapsed -lt $timeoutMs) { + $exited = $proc.WaitForExit($pollMs) + if ($exited) { break } + $elapsed += $pollMs + + # Check log file growth + if (Test-Path $LogFile) { + $currentSize = (Get-Item $LogFile).Length + if ($currentSize -ne $lastLogSize) { + $lastLogSize = $currentSize + $staleSince = $null # reset stale timer + } elseif ($currentSize -ge $minLogBytes) { + if (-not $staleSince) { $staleSince = $elapsed } + elseif (($elapsed - $staleSince) -ge $staleLimit) { + Info " CLI log stable for 60s at $([math]::Round($currentSize/1024,1))KB — early exit (work complete)" + $earlyExit = $true + try { $proc.Kill($true) } catch { } + break + } + } + } + } + + if ($earlyExit) { + $proc.Dispose() + return @{ Success = $true; TimedOut = $false; ExitCode = 0 } + } + + if (-not $exited) { + try { $proc.Kill($true) } catch { } + Warn "CLI timed out 
after $TimeoutMinutes minutes" + $proc.Dispose() + return @{ Success = $false; TimedOut = $true; ExitCode = -1 } + } + + $exitCode = $proc.ExitCode + if ($null -eq $exitCode) { $exitCode = 0 } + $proc.Dispose() + + return @{ Success = ($exitCode -eq 0); TimedOut = $false; ExitCode = $exitCode } +} + +# ── Helper: Isolated Build Execution ─────────────────────────────────────── + +function Invoke-BuildIsolated { + <# + .SYNOPSIS + Runs a build command in an isolated process with a SEPARATE console + window (hidden). This prevents MSBuild's console signal handlers from + killing the parent pwsh process. + + Uses Start-Process -WindowStyle Hidden to create a completely separate + console session. The child inherits the parent's env vars (including + VSINSTALLDIR, PATH with MSBuild, etc.) so build tools work correctly. + + IMPORTANT: The parent process MUST have VS developer environment variables + already set (e.g., launched from a VS Developer Command Prompt or terminal + with Enter-VsDevShell already run). The child build process checks + $env:VSINSTALLDIR and skips Enter-VsDevShell, avoiding the crash. + #> + param( + [string]$Command, + [string[]]$Arguments, + [string]$WorkDir, + [string]$LogFile, + [int]$TimeoutSeconds = 600 + ) + + $combinedLog = if ($LogFile) { $LogFile } else { [System.IO.Path]::GetTempFileName() } + + # If Command is already cmd.exe, extract the inner command from Arguments. 
+ if ($Command -match '(?i)^cmd(\.exe)?$' -and $Arguments.Count -ge 2 -and $Arguments[0] -match '(?i)^/[ck]$') { + $innerCmd = ($Arguments | Select-Object -Skip 1) -join ' ' + } else { + $innerCmd = if ($Arguments) { "`"$Command`" $($Arguments -join ' ')" } else { "`"$Command`"" } + } + + # Create wrapper .cmd that does redirection and captures exit code + $exitCodeFile = [System.IO.Path]::GetTempFileName() + $wrapperFile = [System.IO.Path]::GetTempFileName() + '.cmd' + @" +@echo off +cd /d "$WorkDir" +call $innerCmd > "$combinedLog" 2>&1 +echo %ERRORLEVEL% > "$exitCodeFile" +"@ | Set-Content $wrapperFile -Force -Encoding ASCII + + try { + # Start-Process -WindowStyle Hidden creates a separate console session, + # isolating MSBuild's CTRL+C handlers from the parent pwsh process. + $timeoutMs = $TimeoutSeconds * 1000 + $buildProc = Start-Process -FilePath 'cmd.exe' ` + -ArgumentList "/c `"$wrapperFile`"" ` + -WindowStyle Hidden -PassThru + $exited = $buildProc.WaitForExit($timeoutMs) + if (-not $exited) { + try { $buildProc.Kill() } catch {} + Warn "Build timed out after ${TimeoutSeconds}s" + } + + $exitCode = if (Test-Path $exitCodeFile) { + $raw = (Get-Content $exitCodeFile -Raw).Trim() + if ($raw -match '^\d+$') { [int]$raw } else { -1 } + } else { -1 } + + $stdoutText = if (Test-Path $combinedLog) { Get-Content $combinedLog -Raw -ErrorAction SilentlyContinue } else { '' } + + return @{ ExitCode = $exitCode; Stdout = $stdoutText; Stderr = '' } + } + finally { + Remove-Item $wrapperFile -ErrorAction SilentlyContinue + Remove-Item $exitCodeFile -ErrorAction SilentlyContinue + if (-not $LogFile) { Remove-Item $combinedLog -ErrorAction SilentlyContinue } + } +} + +# ── Phase: Review ────────────────────────────────────────────────────────── + +function Invoke-ReviewPhase { + param($State, [int]$Iteration) + + $iterDir = Join-Path $genRoot "iteration-$Iteration" + $reviewDir = Join-Path $iterDir 'review' + if (-not (Test-Path $reviewDir)) { New-Item -ItemType 
Directory -Path $reviewDir -Force | Out-Null } + + # Build the previous-findings reference for iteration 2+ + $prevFindingsArg = '' + if ($Iteration -gt 1) { + $prevFindingsFile = Join-Path $genRoot "iteration-$($Iteration - 1)" 'findings.json' + if (Test-Path $prevFindingsFile) { + $prevFindingsArg = "`nPrevious iteration findings are at: $prevFindingsFile" + } + } + + $prompt = @" +You are reviewing PR #$PRNumber locally in a worktree. Do NOT post any comments to GitHub. + +Follow the LOCAL review methodology from $_cfgDir/skills/pr-rework/references/rework-local-review.prompt.md + +Inputs: +- pr_number: $PRNumber +- output_dir: $reviewDir +- iteration: $Iteration +$prevFindingsArg + +CRITICAL — DATA SOURCE (use two-dot diff to include uncommitted fix changes): +- Get changed files via: git diff origin/main --name-only +- Get file diffs via: git diff origin/main -- <file> +- Read file content via: cat <file> or Get-Content <file> +- Do NOT use gh pr view, gh api, Get-GitHubRawFile.ps1, or any GitHub API +- Do NOT use three-dot diff (main...HEAD) — it misses uncommitted changes +- The worktree contains the LATEST local state including uncommitted fix changes +- IMPORTANT: Always use 'origin/main' (not 'main') to avoid stale local refs + +Write all step files to: $reviewDir/ +For each finding, use mcp-review-comment blocks with severity, file, line, and description. +"@ + + Info " Running local review (timeout: ${ReviewTimeoutMin}m)..." 
+ $State.currentPhase = 'review' + Add-PhaseRecord $State 'review' 'in-progress' + + $logFile = Join-Path $iterDir 'review-cli.log' + # Remove stale log from previous runs to avoid false-positive fallback + if (Test-Path $logFile) { Remove-Item $logFile -Force -ErrorAction SilentlyContinue } + $result = Invoke-CLIWithTimeout -Prompt $prompt -WorkDir $State.worktreePath ` + -TimeoutMinutes $ReviewTimeoutMin -LogFile $logFile -AgentName 'ReviewPR' + + if ($result.TimedOut) { + Add-PhaseRecord $State 'review' 'timeout' + Warn " Review timed out — will retry on next run" + return $false + } + + # Check if review files were created + $overviewFile = Join-Path $reviewDir '00-OVERVIEW.md' + $stepFiles = Get-ChildItem -Path $reviewDir -Filter '*.md' -ErrorAction SilentlyContinue | + Where-Object { $_.Name -match '^\d{2}-' } + + if ($stepFiles.Count -ge 3 -or (Test-Path $overviewFile)) { + Add-PhaseRecord $State 'review' 'done' @{ stepFiles = $stepFiles.Count } + Info " Review complete ($($stepFiles.Count) step files)" + return $true + } + + # Review may have run via non-redirect path; check if log file has content + # Only trust the log if the CLI exited successfully (exit code 0) + if ($result.ExitCode -eq 0 -and (Test-Path $logFile) -and (Get-Item $logFile).Length -gt 100) { + Add-PhaseRecord $State 'review' 'done' @{ stepFiles = 0; note = 'output-in-log-only' } + Info " Review ran but files may be in alternate location. Checking..." + # Also check the standard prReview output path (CLI may have ignored our custom path) + $altPath = Join-Path $repoRoot "Generated Files/prReview/$PRNumber" + if (Test-Path $altPath) { + $altSteps = Get-ChildItem -Path $altPath -Filter '*.md' -ErrorAction SilentlyContinue | + Where-Object { $_.Name -match '^\d{2}-' } + if ($altSteps.Count -gt 0) { + Info " Found $($altSteps.Count) step files in standard prReview path, copying..." 
+ Copy-Item -Path "$altPath\*" -Destination $reviewDir -Recurse -Force + return $true + } + } + # CLI exited 0 but produced no review files anywhere — treat as failure + Add-PhaseRecord $State 'review' 'failed' @{ exitCode = 0; note = 'no-output-files' } + Warn " Review CLI exited 0 but produced no step files" + return $false + } + + Add-PhaseRecord $State 'review' 'failed' @{ exitCode = $result.ExitCode } + Warn " Review produced no output files" + return $false +} + +# ── Phase: Parse Findings ────────────────────────────────────────────────── + +function Invoke-ParsePhase { + param($State, [int]$Iteration) + + $iterDir = Join-Path $genRoot "iteration-$Iteration" + $reviewDir = Join-Path $iterDir 'review' + $findingsFile = Join-Path $iterDir 'findings.json' + + $findings = @() + $stepFiles = Get-ChildItem -Path $reviewDir -Filter '*.md' -ErrorAction SilentlyContinue | + Where-Object { $_.Name -match '^\d{2}-' } + + foreach ($file in $stepFiles) { + $content = Get-Content $file.FullName -Raw -ErrorAction SilentlyContinue + if (-not $content) { continue } + + $stepName = $file.BaseName + + # Parse mcp-review-comment blocks (machine-readable findings) + $commentPattern = '(?s)```mcp-review-comment\s*\n(.+?)```' + $commentMatches = [regex]::Matches($content, $commentPattern) + foreach ($m in $commentMatches) { + try { + $parsed = $m.Groups[1].Value | ConvertFrom-Json + $sev = if ($parsed.severity) { $parsed.severity.ToLower() } else { 'info' } + if (($severityRank.ContainsKey($sev)) -and ($severityRank[$sev] -ge $minRank)) { + # Accept both pr-review format (start_line/end_line) and local format (line/endLine) + $parsedLine = if ($parsed.line) { $parsed.line } elseif ($parsed.start_line) { $parsed.start_line } else { 0 } + $parsedEndLine = if ($parsed.endLine) { $parsed.endLine } elseif ($parsed.end_line) { $parsed.end_line } else { 0 } + $findings += [PSCustomObject]@{ + id = "F-{0:D3}" -f ($findings.Count + 1) + step = $stepName + severity = $sev + file = 
$parsed.file
                        line = $parsedLine
                        endLine = $parsedEndLine
                        title = if ($parsed.title) { $parsed.title } else { '' }
                        description = $parsed.body
                        suggestedFix = if ($parsed.suggestedFix) { $parsed.suggestedFix } else { '' }
                    }
                }
            } catch {
                # Not valid JSON, skip
            }
        }

        # Fallback only when no mcp blocks were found (avoids duplicates):
        # heuristically parse heading-based findings with inline severity markers.
        if ($commentMatches.Count -eq 0) {
            $sectionPattern = '(?ms)^###?\s+(.+?)$\s*(?:.*?severity:\s*(high|medium|low).*?)(?=^###?\s|\z)'
            $secMatches = [regex]::Matches($content, $sectionPattern)
            foreach ($sm in $secMatches) {
                $sev = $sm.Groups[2].Value.ToLower()
                if (($severityRank.ContainsKey($sev)) -and ($severityRank[$sev] -ge $minRank)) {
                    $findings += [PSCustomObject]@{
                        # FIX: the old expression
                        #   "F-$($findings.Count + 1)".PadLeft(5,'0').Replace('F-0','F-')
                        # produced malformed ids like '00F-1'. Use the same
                        # zero-padded format as the mcp-block branch.
                        id = "F-{0:D3}" -f ($findings.Count + 1)
                        step = $stepName
                        severity = $sev
                        file = ''
                        line = 0
                        endLine = 0
                        title = $sm.Groups[1].Value.Trim()
                        description = $sm.Value.Substring(0, [Math]::Min(500, $sm.Value.Length))
                        suggestedFix = ''
                    }
                }
            }
        }
    }

    # Deduplicate by file+line+title
    $unique = @{}
    foreach ($f in $findings) {
        $key = "$($f.file):$($f.line):$($f.title)"
        if (-not $unique.ContainsKey($key)) { $unique[$key] = $f }
    }
    $findings = @($unique.Values)

    # Assign sequential IDs
    for ($i = 0; $i -lt $findings.Count; $i++) {
        $findings[$i].id = "F-{0:D3}" -f ($i + 1)
    }

    # FIX: serialize via -InputObject so the file is always a JSON array.
    # Piping unwraps: zero findings wrote an empty file and one finding wrote
    # a bare object, breaking downstream readers that expect an array.
    ConvertTo-Json -InputObject @($findings) -Depth 5 | Set-Content $findingsFile -Force

    Add-PhaseRecord $State 'parse' 'done' @{
        totalFindings = $findings.Count
        high = ($findings | Where-Object severity -eq 'high').Count
        medium = ($findings | Where-Object severity -eq 'medium').Count
        low = ($findings | Where-Object severity -eq 'low').Count
    }

    Info " Parsed 
$($findings.Count) actionable findings (high: $(($findings | Where-Object severity -eq 'high').Count), medium: $(($findings | Where-Object severity -eq 'medium').Count))" + return $findings +} + +# ── Phase: Fix ───────────────────────────────────────────────────────────── + +function Invoke-FixPhase { + param($State, [int]$Iteration, [array]$Findings) + + $iterDir = Join-Path $genRoot "iteration-$Iteration" + $findingsFile = Join-Path $iterDir 'findings.json' + $fixLogFile = Join-Path $iterDir 'fix.log' + + # Build the fix prompt with inline findings summary for the CLI + $findingsSummary = ($Findings | ForEach-Object { + "- [$($_.id)] $($_.severity.ToUpper()) in $($_.file):$($_.line) — $($_.title): $($_.description)" + }) -join "`n" + + # Feed previous iteration's build/test failures to help the AI fix them + $buildErrArg = '' + $testErrArg = '' + if ($Iteration -gt 1) { + $prevIterDir = Join-Path $genRoot "iteration-$($Iteration - 1)" + $prevBuildLog = Join-Path $prevIterDir 'build.log' + $prevTestLog = Join-Path $prevIterDir 'test.log' + if (Test-Path $prevBuildLog) { $buildErrArg = "Build errors from previous iteration: $prevBuildLog" } + if (Test-Path $prevTestLog) { $testErrArg = "Test failures from previous iteration: $prevTestLog" } + } + + $prompt = @" +You are fixing review findings for PR #$PRNumber. All changes stay LOCAL — do NOT commit, push, or post to GitHub. + +Read the detailed prompt at $_cfgDir/skills/pr-rework/references/rework-fix.prompt.md for full instructions. + +The findings file is at: $findingsFile + +Here is a summary of the $($Findings.Count) findings to fix: + +$findingsSummary + +$buildErrArg +$testErrArg + +After fixing, build the changed projects using tools/build/build.cmd scoped to the changed directories. +If there are related unit test projects (*UnitTests*), build and run them too. 
+ +CRITICAL: +- Do NOT commit or push any changes +- Do NOT post comments to GitHub or resolve threads +- DO fix all findings listed above +- DO build and verify the fix compiles +- DO run related unit tests if found +"@ + + Info " Running fix pass (timeout: ${FixTimeoutMin}m, findings: $($Findings.Count))..." + $State.currentPhase = 'fix' + Add-PhaseRecord $State 'fix' 'in-progress' + + # Remove stale log from previous runs + if (Test-Path $fixLogFile) { Remove-Item $fixLogFile -Force -ErrorAction SilentlyContinue } + $result = Invoke-CLIWithTimeout -Prompt $prompt -WorkDir $State.worktreePath ` + -TimeoutMinutes $FixTimeoutMin -LogFile $fixLogFile -AgentName 'FixPR' + + if ($result.TimedOut) { + Add-PhaseRecord $State 'fix' 'timeout' + Warn " Fix timed out after $FixTimeoutMin minutes" + return $false + } + + # Check if any files were modified (both staged and unstaged) + Push-Location $State.worktreePath + try { + $unstaged = git diff --name-only 2>$null + $staged = git diff --staged --name-only 2>$null + $modified = @($unstaged) + @($staged) | Where-Object { $_ } | Sort-Object -Unique + $modCount = ($modified | Measure-Object).Count + } finally { Pop-Location } + + Add-PhaseRecord $State 'fix' 'done' @{ filesModified = $modCount; exitCode = $result.ExitCode } + Info " Fix pass complete ($modCount files modified)" + return $true +} + +# ── Phase: Build ─────────────────────────────────────────────────────────── + +function Invoke-BuildPhase { + param($State, [int]$Iteration) + + $iterDir = Join-Path $genRoot "iteration-$Iteration" + $buildLog = Join-Path $iterDir 'build.log' + + Info " Building changed projects..." 
+ $State.currentPhase = 'build' + Add-PhaseRecord $State 'build' 'in-progress' + + Push-Location $State.worktreePath + try { + # Find changed files (two-dot diff includes uncommitted working tree changes) + $changedFiles = git diff --name-only origin/main 2>$null + $changedDirs = $changedFiles | ForEach-Object { Split-Path $_ -Parent } | Sort-Object -Unique | + Where-Object { $_ -match '^src/' } + + if ($changedDirs.Count -eq 0) { + Add-PhaseRecord $State 'build' 'skipped' @{ reason = 'no-src-changes' } + Info " No src/ changes to build" + return $true + } + + # Collect ALL changed project directories (not just the first) + $buildDirs = @() + foreach ($dir in $changedDirs) { + $fullDir = Join-Path $State.worktreePath $dir + if (Test-Path $fullDir) { + $projFiles = Get-ChildItem -Path $fullDir -Filter '*.csproj' -ErrorAction SilentlyContinue + if (-not $projFiles) { + $projFiles = Get-ChildItem -Path $fullDir -Filter '*.vcxproj' -ErrorAction SilentlyContinue + } + foreach ($pf in $projFiles) { + if ($pf.DirectoryName -notin $buildDirs) { + $buildDirs += $pf.DirectoryName + } + } + } + } + + if ($buildDirs.Count -eq 0) { + # Fall back to the first changed src/ directory + $buildDirs = @(Join-Path $State.worktreePath $changedDirs[0]) + } + + $buildScript = Join-Path $State.worktreePath 'tools/build/build.cmd' + if (-not (Test-Path $buildScript)) { + $buildScript = Join-Path $repoRoot 'tools/build/build.cmd' + } + + $allBuildOk = $true + $buildOutput = @() + foreach ($buildDir in $buildDirs) { + Info " Building: $(Split-Path $buildDir -Leaf)" + $bResult = Invoke-BuildIsolated -Command 'cmd.exe' ` + -Arguments @('/c', "`"$buildScript`" -Path `"$buildDir`"") ` + -WorkDir $State.worktreePath ` + -LogFile $null ` + -TimeoutSeconds 600 + $buildOutput += $bResult.Stdout + if ($bResult.ExitCode -ne 0) { $allBuildOk = $false } + } + $buildOutput | Out-File $buildLog -Force + + if ($allBuildOk) { + Add-PhaseRecord $State 'build' 'done' @{ exitCode = 0; projects = 
$buildDirs.Count } + Info " Build succeeded ($($buildDirs.Count) project(s))" + return $true + } else { + Add-PhaseRecord $State 'build' 'failed' @{ exitCode = 1; projects = $buildDirs.Count } + Warn " Build failed. Errors logged to: $buildLog" + return $false + } + } finally { Pop-Location } +} + +# ── Phase: Test ──────────────────────────────────────────────────────────── + +function Invoke-TestPhase { + param($State, [int]$Iteration) + + if ($SkipTests) { + Info " Tests skipped (-SkipTests)" + Add-PhaseRecord $State 'test' 'skipped' @{ reason = 'user-skipped' } + return $true + } + + $iterDir = Join-Path $genRoot "iteration-$Iteration" + $testLog = Join-Path $iterDir 'test.log' + + Info " Discovering unit tests..." + $State.currentPhase = 'test' + Add-PhaseRecord $State 'test' 'in-progress' + + Push-Location $State.worktreePath + try { + # Find changed modules from git diff (two-dot includes uncommitted changes) + $changedFiles = git diff --name-only origin/main 2>$null + $modules = $changedFiles | ForEach-Object { + if ($_ -match 'src/modules/(\w+)/') { $Matches[1] } + elseif ($_ -match 'src/settings-ui/') { 'Settings' } + elseif ($_ -match 'src/common/') { 'Common' } + elseif ($_ -match 'src/runner/') { 'Runner' } + } | Sort-Object -Unique | Where-Object { $_ } + + if ($modules.Count -eq 0) { + Add-PhaseRecord $State 'test' 'skipped' @{ reason = 'no-module-changes' } + Info " No module changes detected — skipping tests" + return $true + } + + # Search for test projects using targeted directory scans + $testProjects = @() + foreach ($mod in $modules) { + # Build specific search directories for this module rather than + # scanning the entire worktree recursively (which is very slow). 
+ $searchDirs = @( + (Join-Path $State.worktreePath "src/modules/$mod"), + (Join-Path $State.worktreePath "src/settings-ui"), + (Join-Path $State.worktreePath "src/common") + ) | Where-Object { Test-Path $_ } + + foreach ($searchDir in $searchDirs) { + $found = Get-ChildItem -Path $searchDir -Filter '*.csproj' -Recurse -Depth 3 -ErrorAction SilentlyContinue | + Where-Object { $_.BaseName -match 'Test' } | Select-Object -First 1 + if ($found) { + $testProjects += $found.FullName + break + } + } + } + + if ($testProjects.Count -eq 0) { + Add-PhaseRecord $State 'test' 'skipped' @{ reason = 'no-test-projects' } + Info " No test projects found for modules: $($modules -join ', ')" + return $true + } + + Info " Found $($testProjects.Count) test project(s)" + + # Build and run tests + $allPassed = $true + $totalPassed = 0 + $totalFailed = 0 + + foreach ($testProj in $testProjects) { + $testDir = Split-Path $testProj -Parent + Info " Running tests: $(Split-Path $testProj -Leaf)" + + # Build the test project first + $buildScript = Join-Path $State.worktreePath 'tools/build/build.cmd' + if (-not (Test-Path $buildScript)) { $buildScript = Join-Path $repoRoot 'tools/build/build.cmd' } + Invoke-BuildIsolated -Command 'cmd.exe' ` + -Arguments @('/c', "`"$buildScript`" -Path `"$testDir`"") ` + -WorkDir $State.worktreePath ` + -LogFile $null ` + -TimeoutSeconds 600 | Out-Null + + # Try to find the test DLL in the project's bin directory + $testDlls = Get-ChildItem -Path $testDir -Filter '*Test*.dll' -Recurse -Depth 5 -ErrorAction SilentlyContinue | + Where-Object { $_.FullName -match '\\bin\\' -and $_.FullName -notmatch '\\ref\\' } + + if ($testDlls.Count -eq 0) { + Info " No test DLLs found after build" + continue + } + + foreach ($dll in $testDlls) { + $testOutput = dotnet vstest $dll.FullName 2>&1 + $testOutput | Out-File $testLog -Append -Force + + $passMatch = $testOutput | Select-String 'Passed:\s*(\d+)' + $failMatch = $testOutput | Select-String 'Failed:\s*(\d+)' + + if 
($passMatch) { $totalPassed += [int]$passMatch.Matches[0].Groups[1].Value } + if ($failMatch) { + $failCount = [int]$failMatch.Matches[0].Groups[1].Value + $totalFailed += $failCount + if ($failCount -gt 0) { $allPassed = $false } + } + } + } + + if ($allPassed) { + Add-PhaseRecord $State 'test' 'done' @{ passed = $totalPassed; failed = 0 } + Info " Tests passed ($totalPassed passed, 0 failed)" + return $true + } else { + Add-PhaseRecord $State 'test' 'failed' @{ passed = $totalPassed; failed = $totalFailed } + Warn " Tests failed ($totalPassed passed, $totalFailed failed). Log: $testLog" + return $false + } + } finally { Pop-Location } +} + +# ── Write Summary ────────────────────────────────────────────────────────── + +function Write-ReworkSummary { + param($State) + + $sb = [System.Text.StringBuilder]::new() + $sb.AppendLine("# PR Rework Summary — PR #$PRNumber") | Out-Null + $sb.AppendLine("") | Out-Null + $sb.AppendLine("**Branch**: $($State.branch)") | Out-Null + $sb.AppendLine("**Worktree**: $($State.worktreePath)") | Out-Null + $sb.AppendLine("**Iterations**: $($State.currentIteration)") | Out-Null + $sb.AppendLine("**Started**: $($State.startedAt)") | Out-Null + $sb.AppendLine("**Completed**: $(Get-Date -Format 'o')") | Out-Null + $sb.AppendLine("") | Out-Null + + # Changed files summary + Push-Location $State.worktreePath + try { + $changedFiles = git diff --name-only origin/main 2>$null + $uncommitted = git diff --name-only 2>$null + } finally { Pop-Location } + + $sb.AppendLine("## Changed Files") | Out-Null + $sb.AppendLine("") | Out-Null + if ($uncommitted) { + $sb.AppendLine("### Uncommitted Changes (from rework)") | Out-Null + foreach ($f in $uncommitted) { $sb.AppendLine("- ``$f``") | Out-Null } + $sb.AppendLine("") | Out-Null + } + if ($changedFiles) { + $sb.AppendLine("### All Changes vs main") | Out-Null + $maxFiles = 30 + $shown = @($changedFiles) | Select-Object -First $maxFiles + foreach ($f in $shown) { $sb.AppendLine("- ``$f``") | Out-Null 
} + $totalChanged = @($changedFiles).Count + if ($totalChanged -gt $maxFiles) { + $sb.AppendLine("_...and $($totalChanged - $maxFiles) more files_") | Out-Null + } + $sb.AppendLine("") | Out-Null + } + + # Iteration history + $sb.AppendLine("## Iteration History") | Out-Null + $sb.AppendLine("") | Out-Null + for ($i = 1; $i -le $State.currentIteration; $i++) { + $sb.AppendLine("### Iteration $i") | Out-Null + $iterPhases = $State.phaseHistory | Where-Object { $_.iteration -eq $i } + $sb.AppendLine("") | Out-Null + $sb.AppendLine("| Phase | Status | Details |") | Out-Null + $sb.AppendLine("|-------|--------|---------|") | Out-Null + foreach ($p in $iterPhases) { + $details = ($p.PSObject.Properties | Where-Object { + $_.Name -notin @('iteration', 'phase', 'status', 'timestamp') + } | ForEach-Object { "$($_.Name)=$($_.Value)" }) -join ', ' + $sb.AppendLine("| $($p.phase) | $($p.status) | $details |") | Out-Null + } + $sb.AppendLine("") | Out-Null + + # Show findings count for this iteration + $findingsFile = Join-Path $genRoot "iteration-$i" 'findings.json' + if (Test-Path $findingsFile) { + $findings = Get-Content $findingsFile -Raw | ConvertFrom-Json + $sb.AppendLine("Findings: $($findings.Count) actionable") | Out-Null + $sb.AppendLine("") | Out-Null + } + } + + $sb.AppendLine("## Next Steps") | Out-Null + $sb.AppendLine("") | Out-Null + $sb.AppendLine("Review the changes in the worktree and decide:") | Out-Null + $sb.AppendLine("") | Out-Null + $sb.AppendLine("``````powershell") | Out-Null + $sb.AppendLine("# Review the diff") | Out-Null + $sb.AppendLine("cd `"$($State.worktreePath)`"") | Out-Null + $sb.AppendLine("git diff") | Out-Null + $sb.AppendLine("") | Out-Null + $sb.AppendLine("# If satisfied, stage and push:") | Out-Null + $sb.AppendLine("git add -A") | Out-Null + $sb.AppendLine("git commit -m `"fix: address review findings for PR #$PRNumber`"") | Out-Null + $sb.AppendLine("git push") | Out-Null + $sb.AppendLine("``````") | Out-Null + + $sb.ToString() | 
Set-Content $summaryFile -Force + Info "Summary written to: $summaryFile" +} + +# ════════════════════════════════════════════════════════════════════════════ +# MAIN +# ════════════════════════════════════════════════════════════════════════════ + +try { + Write-Host "" + Write-Host ("=" * 70) -ForegroundColor Cyan + Write-Host " PR REWORK — PR #$PRNumber" -ForegroundColor Cyan + Write-Host ("=" * 70) -ForegroundColor Cyan + Write-Host "" + + # Ensure output directory exists + if (-not (Test-Path $genRoot)) { + New-Item -ItemType Directory -Path $genRoot -Force | Out-Null + } + + # ── Get PR info ──────────────────────────────────────────────────────── + $prInfo = gh pr view $PRNumber --json state,headRefName,url,title 2>$null | ConvertFrom-Json + if (-not $prInfo) { throw "PR #$PRNumber not found" } + if ($prInfo.state -ne 'OPEN') { + Warn "PR #$PRNumber is $($prInfo.state), not OPEN" + return [PSCustomObject]@{ + PRNumber = $PRNumber + Status = 'Skipped' + Iterations = 0 + FinalFindings = -1 + WorktreePath = '' + SummaryPath = '' + Error = "PR is $($prInfo.state), not OPEN" + } + } + + Info "PR: #$PRNumber — $($prInfo.title)" + Info "Branch: $($prInfo.headRefName)" + Info "CLI: $CLIType $(if ($Model) { "(model: $Model)" })" + Info "Max iterations: $MaxIterations" + Info "Min severity: $MinSeverity" + Info "" + + # ── Resume or fresh start ────────────────────────────────────────────── + $state = Read-State + if ($state) { + Info "Resuming from previous state (iteration $($state.currentIteration), phase: $($state.currentPhase))" + + # Verify worktree still exists + if (-not (Test-Path $state.worktreePath)) { + Warn "Previous worktree not found at $($state.worktreePath) — recreating..." 
+ $state.worktreePath = Get-OrCreateWorktree -Branch $prInfo.headRefName + } + } else { + # Fresh start — create worktree + $worktreePath = Get-OrCreateWorktree -Branch $prInfo.headRefName + $state = New-State -Branch $prInfo.headRefName -WorktreePath $worktreePath + + # Save worktree info for external tools + @{ + prNumber = $PRNumber + branch = $prInfo.headRefName + worktreePath = $worktreePath + createdAt = (Get-Date).ToString('o') + } | ConvertTo-Json | Set-Content $worktreeInfoFile -Force + + Save-State $state + } + + Info "Worktree: $($state.worktreePath)" + + # ── Confirm ──────────────────────────────────────────────────────────── + if (-not $Force) { + $confirm = Read-Host "Rework PR #$PRNumber with up to $MaxIterations iterations? (y/N)" + if ($confirm -notmatch '^[yY]') { Info "Cancelled."; return } + } + + # ── Verify VS environment ───────────────────────────────────────────── + # Build processes use Start-Process -WindowStyle Hidden to create isolated + # console sessions. This prevents MSBuild's console signal handlers from + # killing the parent pwsh process. The child build process will call + # Enter-VsDevShell on its own if needed. + + $essentialsDone = $state.phaseHistory | Where-Object { $_.phase -eq 'build-essentials' -and $_.status -eq 'done' } + if (-not $essentialsDone) { + Info "Running build-essentials (one-time NuGet restore + baseline build)..." 
+ Add-PhaseRecord $state 'build-essentials' 'in-progress' + + $essentialsLog = Join-Path $genRoot 'build-essentials.log' + $buildEssentials = Join-Path $state.worktreePath 'tools/build/build-essentials.cmd' + if (-not (Test-Path $buildEssentials)) { + $buildEssentials = Join-Path $repoRoot 'tools/build/build-essentials.cmd' + } + + $bResult = Invoke-BuildIsolated -Command 'cmd.exe' ` + -Arguments @('/c', "`"$buildEssentials`"") ` + -WorkDir $state.worktreePath ` + -LogFile $essentialsLog ` + -TimeoutSeconds 600 + + $essExitCode = $bResult.ExitCode + + if ($essExitCode -eq 0) { + Add-PhaseRecord $state 'build-essentials' 'done' @{ exitCode = 0 } + Info "Build-essentials succeeded" + } else { + Add-PhaseRecord $state 'build-essentials' 'failed' @{ exitCode = $essExitCode } + Warn "Build-essentials failed (exit code $essExitCode) — the PR may already have build issues." + Warn "Log: $essentialsLog" + Warn "Continuing anyway — the review/fix loop will attempt to address build errors." + } + } else { + Info "Build-essentials already completed (skipping)" + } + + # ── Main Loop ────────────────────────────────────────────────────────── + $startIter = $state.currentIteration + for ($iter = $startIter; $iter -le $MaxIterations; $iter++) { + + $state.currentIteration = $iter + Save-State $state + + Write-Host "" + Write-Host ("─" * 50) -ForegroundColor DarkCyan + Write-Host " Iteration $iter / $MaxIterations" -ForegroundColor DarkCyan + Write-Host ("─" * 50) -ForegroundColor DarkCyan + + $iterDir = Join-Path $genRoot "iteration-$iter" + if (-not (Test-Path $iterDir)) { New-Item -ItemType Directory -Path $iterDir -Force | Out-Null } + + # ── Check if review phase already done for this iteration (resume) ── + $reviewDone = Get-LastPhaseOfType $state 'review' $iter + if (-not $reviewDone -or $reviewDone.status -notin @('done')) { + $reviewOk = Invoke-ReviewPhase -State $state -Iteration $iter + if (-not $reviewOk) { + Warn "Review phase failed/timed out in iteration $iter — 
stopping" + break + } + } else { + Info " Review already done for iteration $iter (resuming)" + } + + # ── Parse findings ── + $parseDone = Get-LastPhaseOfType $state 'parse' $iter + if (-not $parseDone -or $parseDone.status -ne 'done') { + $findings = Invoke-ParsePhase -State $state -Iteration $iter + } else { + $findingsFile = Join-Path $iterDir 'findings.json' + if (Test-Path $findingsFile) { + $findings = Get-Content $findingsFile -Raw | ConvertFrom-Json + } else { + $findings = @() + } + Info " Findings already parsed ($($findings.Count) actionable)" + } + + # ── Check if done ── + if ($findings.Count -eq 0) { + Write-Host "" + Write-Host " ✅ No actionable findings — PR is CLEAN!" -ForegroundColor Green + Add-PhaseRecord $state 'done' 'success' @{ finalFindings = 0 } + + # Write signal + @{ + status = 'success' + prNumber = $PRNumber + timestamp = (Get-Date).ToString('o') + iterations = $iter + finalFindingsCount = 0 + worktreePath = $state.worktreePath + } | ConvertTo-Json | Set-Content $signalFile -Force + + Write-ReworkSummary -State $state + + Write-Host "" + Write-Host ("=" * 70) -ForegroundColor Green + Write-Host " PR #$PRNumber is CLEAN after $iter iteration(s)" -ForegroundColor Green + Write-Host " Worktree: $($state.worktreePath)" -ForegroundColor Green + Write-Host " Summary: $summaryFile" -ForegroundColor Green + Write-Host ("=" * 70) -ForegroundColor Green + Write-Host "" + Write-Host "Review changes and push when ready:" -ForegroundColor Yellow + Write-Host " cd `"$($state.worktreePath)`"" -ForegroundColor White + Write-Host " git diff" -ForegroundColor White + Write-Host " git add -A && git commit -m `"fix: address review findings`" && git push" -ForegroundColor White + + return [PSCustomObject]@{ + PRNumber = $PRNumber + Status = 'Clean' + Iterations = $iter + FinalFindings = 0 + WorktreePath = $state.worktreePath + SummaryPath = $summaryFile + } + } + + Info " $($findings.Count) findings to fix — proceeding to fix phase" + + # ── Fix ── + 
$fixDone = Get-LastPhaseOfType $state 'fix' $iter + if (-not $fixDone -or $fixDone.status -notin @('done')) { + $fixOk = Invoke-FixPhase -State $state -Iteration $iter -Findings $findings + if (-not $fixOk) { + Warn "Fix phase failed/timed out in iteration $iter" + # Continue to next iteration anyway — review will detect remaining issues + } + } else { + Info " Fix already done for iteration $iter (resuming)" + } + + # ── Build ── + $buildDone = Get-LastPhaseOfType $state 'build' $iter + if (-not $buildDone -or $buildDone.status -notin @('done', 'skipped')) { + $buildOk = Invoke-BuildPhase -State $state -Iteration $iter + if (-not $buildOk) { + Warn "Build failed — fix phase in next iteration will receive build errors" + # Don't break — next iteration's fix will see the build log + } + } else { + Info " Build already done for iteration $iter (resuming)" + } + + # ── Test ── + $testDone = Get-LastPhaseOfType $state 'test' $iter + if (-not $testDone -or $testDone.status -notin @('done', 'skipped')) { + $testOk = Invoke-TestPhase -State $state -Iteration $iter + if (-not $testOk) { + Warn "Tests failed — fix phase in next iteration will receive test failures" + } + } else { + Info " Test already done for iteration $iter (resuming)" + } + } + + # ── Final verification review after the last fix ── + # The loop above reviewed iteration N, fixed, built, tested — but never + # re-reviewed to confirm the fixes worked. Run one more review-only pass. 
+ $verifyIter = $MaxIterations + 1 + $state.currentIteration = $verifyIter + Save-State $state + + Write-Host "" + Write-Host ("─" * 50) -ForegroundColor DarkCyan + Write-Host " Final verification review" -ForegroundColor DarkCyan + Write-Host ("─" * 50) -ForegroundColor DarkCyan + + $verifyDir = Join-Path $genRoot "iteration-$verifyIter" + if (-not (Test-Path $verifyDir)) { New-Item -ItemType Directory -Path $verifyDir -Force | Out-Null } + + $verifyReviewOk = Invoke-ReviewPhase -State $state -Iteration $verifyIter + $finalFindingsCount = 0 + if ($verifyReviewOk) { + $verifyFindings = Invoke-ParsePhase -State $state -Iteration $verifyIter + $finalFindingsCount = $verifyFindings.Count + if ($finalFindingsCount -eq 0) { + Write-Host " ✅ Final verification: PR is CLEAN!" -ForegroundColor Green + Add-PhaseRecord $state 'done' 'success' @{ finalFindings = 0 } + + @{ + status = 'success' + prNumber = $PRNumber + timestamp = (Get-Date).ToString('o') + iterations = $MaxIterations + finalFindingsCount = 0 + worktreePath = $state.worktreePath + } | ConvertTo-Json | Set-Content $signalFile -Force + + Write-ReworkSummary -State $state + return [PSCustomObject]@{ + PRNumber = $PRNumber + Status = 'Clean' + Iterations = $MaxIterations + FinalFindings = 0 + WorktreePath = $state.worktreePath + SummaryPath = $summaryFile + } + } + Info " Final verification: $finalFindingsCount findings remain" + } else { + Warn " Final verification review failed — using last known findings count" + $lastFindingsFile = Join-Path $genRoot "iteration-$MaxIterations" 'findings.json' + if (Test-Path $lastFindingsFile) { + $finalFindingsCount = (Get-Content $lastFindingsFile -Raw | ConvertFrom-Json).Count + } + } + + @{ + status = 'max-iterations' + prNumber = $PRNumber + timestamp = (Get-Date).ToString('o') + iterations = $MaxIterations + finalFindingsCount = $finalFindingsCount + worktreePath = $state.worktreePath + } | ConvertTo-Json | Set-Content $signalFile -Force + + Write-ReworkSummary -State 
$state + + Write-Host "" + Write-Host ("=" * 70) -ForegroundColor Yellow + Write-Host " PR #$PRNumber — max iterations ($MaxIterations) reached" -ForegroundColor Yellow + Write-Host " Remaining findings: $finalFindingsCount" -ForegroundColor Yellow + Write-Host " Worktree: $($state.worktreePath)" -ForegroundColor Yellow + Write-Host " Summary: $summaryFile" -ForegroundColor Yellow + Write-Host ("=" * 70) -ForegroundColor Yellow + + return [PSCustomObject]@{ + PRNumber = $PRNumber + Status = 'MaxIterations' + Iterations = $MaxIterations + FinalFindings = $finalFindingsCount + WorktreePath = $state.worktreePath + SummaryPath = $summaryFile + } +} +catch { + Err "Error: $($_.Exception.Message)" + + # Write failure signal + @{ + status = 'failure' + prNumber = $PRNumber + timestamp = (Get-Date).ToString('o') + error = $_.Exception.Message + } | ConvertTo-Json | Set-Content $signalFile -Force + + return [PSCustomObject]@{ + PRNumber = $PRNumber + Status = 'Failed' + Error = $_.Exception.Message + } +} diff --git a/.github/skills/pr-rework/scripts/Start-PRReworkParallel.ps1 b/.github/skills/pr-rework/scripts/Start-PRReworkParallel.ps1 new file mode 100644 index 000000000000..ecb15e359950 --- /dev/null +++ b/.github/skills/pr-rework/scripts/Start-PRReworkParallel.ps1 @@ -0,0 +1,304 @@ +<# +.SYNOPSIS + Rework multiple PRs in parallel using Start-PRRework.ps1. + +.DESCRIPTION + Accepts a list of PR numbers and runs Start-PRRework.ps1 for each one in + parallel using ForEach-Object -Parallel. Each PR gets its own worktree, + state file, and iteration loop. + + Results are collected and displayed as a summary table. + +.PARAMETER PRNumbers + Array of PR numbers to rework. + +.PARAMETER CLIType + AI CLI to use: copilot or claude. Default: copilot. + +.PARAMETER Model + Copilot CLI model override. Default: claude-opus-4.6. + +.PARAMETER MaxIterations + Maximum review/fix loop iterations per PR. Default: 5. + +.PARAMETER MinSeverity + Minimum severity to fix: high, medium, low. 
Default: medium. + +.PARAMETER ThrottleLimit + Number of PRs to process in parallel. Default: 2. + +.PARAMETER ReviewTimeoutMin + Timeout in minutes for the review CLI call. Default: 10. + +.PARAMETER FixTimeoutMin + Timeout in minutes for the fix CLI call. Default: 15. + +.PARAMETER Force + Skip confirmation prompts. + +.PARAMETER Fresh + Discard previous state and start over for all PRs. + +.PARAMETER SkipTests + Skip the unit test phase after each fix. + +.EXAMPLE + ./Start-PRReworkParallel.ps1 -PRNumbers 45365,45370,45380 -CLIType copilot -Model claude-sonnet-4 -Force + +.EXAMPLE + # Resume with higher parallelism + ./Start-PRReworkParallel.ps1 -PRNumbers 45365,45370 -ThrottleLimit 3 -Force +#> +[CmdletBinding()] +param( + [Parameter(Mandatory)] + [int[]]$PRNumbers, + + [ValidateSet('copilot', 'claude')] + [string]$CLIType = 'copilot', + + [string]$Model = 'claude-opus-4.6', + + [int]$MaxIterations = 5, + + [ValidateSet('high', 'medium', 'low')] + [string]$MinSeverity = 'medium', + + [int]$ThrottleLimit = 2, + + [int]$ReviewTimeoutMin = 10, + + [int]$FixTimeoutMin = 15, + + [switch]$Force, + + [switch]$Fresh, + + [switch]$SkipTests +) + +$ErrorActionPreference = 'Continue' + +$scriptDir = Split-Path -Parent $MyInvocation.MyCommand.Path +$reworkScript = Join-Path $scriptDir 'Start-PRRework.ps1' + +if (-not (Test-Path $reworkScript)) { + Write-Error "Start-PRRework.ps1 not found at: $reworkScript" + return +} + +$uniquePRs = $PRNumbers | Sort-Object -Unique + +Write-Host "" +Write-Host ("=" * 70) -ForegroundColor Cyan +Write-Host " PR REWORK — PARALLEL MODE" -ForegroundColor Cyan +Write-Host " PRs: $($uniquePRs -join ', ')" -ForegroundColor Cyan +Write-Host " Parallelism: $ThrottleLimit" -ForegroundColor Cyan +Write-Host " CLI: $CLIType $(if ($Model) { "(model: $Model)" })" -ForegroundColor Cyan +Write-Host ("=" * 70) -ForegroundColor Cyan +Write-Host "" + +# ── Phase 1: Pre-validate PRs and create worktrees sequentially ───────── +# Git worktree operations are 
NOT safe for concurrent execution — they +# modify .git/worktrees and FETCH_HEAD which causes lock contention. +# We serialize this phase, then parallelize the CLI rework phase. + +Write-Host "Phase 1: Validating PRs and creating worktrees sequentially..." -ForegroundColor Cyan +$openPRs = @() +$skippedResults = @() + +$repoRoot = git rev-parse --show-toplevel 2>$null +. (Join-Path $scriptDir 'IssueReviewLib.ps1') +$worktreeLib = Join-Path $repoRoot 'tools/build/WorktreeLib.ps1' + +foreach ($prNum in $uniquePRs) { + $prInfo = $null + try { + $prInfo = gh pr view $prNum --json state,headRefName,url,title 2>$null | ConvertFrom-Json + } catch { } + if (-not $prInfo) { + Write-Host " PR #$prNum — NOT FOUND (skipping)" -ForegroundColor Red + $skippedResults += [PSCustomObject]@{ + PRNumber = $prNum; Status = 'Skipped'; Iterations = 0 + FinalFindings = -1; WorktreePath = ''; SummaryPath = '' + Error = 'PR not found' + } + continue + } + if ($prInfo.state -ne 'OPEN') { + Write-Host " PR #$prNum — $($prInfo.state) (skipping)" -ForegroundColor DarkGray + $skippedResults += [PSCustomObject]@{ + PRNumber = $prNum; Status = 'Skipped'; Iterations = 0 + FinalFindings = -1; WorktreePath = ''; SummaryPath = '' + Error = "PR is $($prInfo.state), not OPEN" + } + continue + } + + # Create worktree sequentially to avoid git lock contention. + # We inline the git commands instead of calling New-WorktreeFromBranch.ps1 + # because that script: (1) calls `code --new-window` which opens unwanted + # VS Code windows, and (2) has `exit 1` in its catch block which can + # terminate callers unpredictably depending on invocation method. + $branch = $prInfo.headRefName + $currentBranch = git branch --show-current 2>$null + if ($currentBranch -ne $branch) { + . 
$worktreeLib + $existingWt = Get-WorktreeEntries | Where-Object { $_.Branch -eq $branch } | Select-Object -First 1 + if ($existingWt) { + Write-Host " PR #$prNum — reusing worktree at $($existingWt.Path)" -ForegroundColor DarkCyan + } else { + Write-Host " PR #$prNum — creating worktree for $branch..." -ForegroundColor White + try { + # Ensure local tracking branch exists + git show-ref --verify --quiet "refs/heads/$branch" + if ($LASTEXITCODE -ne 0) { + git fetch origin "$branch" 2>&1 | Out-Null + git branch --track $branch "origin/$branch" 2>&1 | Out-Null + if ($LASTEXITCODE -ne 0) { throw "Failed to create tracking branch '$branch'" } + } + # Create the worktree using WorktreeLib naming convention + $safeBranch = ($branch -replace '[\\/:*?"<>|]','-') + $hash = Get-ShortHashFromString -Text $safeBranch + $folderName = "$(Split-Path -Leaf $repoRoot)-$hash" + $base = Get-WorktreeBasePath -RepoRoot $repoRoot + $folder = Join-Path $base $folderName + if (Test-Path $folder) { + # Orphaned directory from a previous failed run — remove it + Write-Host " PR #$prNum — removing orphaned directory $folder" -ForegroundColor Yellow + Remove-Item $folder -Recurse -Force -ErrorAction SilentlyContinue + git worktree prune 2>$null + if (Test-Path $folder) { + # Still locked — use an alternate path with timestamp suffix + $ts = [DateTimeOffset]::UtcNow.ToUnixTimeSeconds() + $folder = Join-Path $base "$folderName-$ts" + Write-Host " PR #$prNum — orphan locked, using $folder" -ForegroundColor Yellow + } + } + $wtAddOutput = git worktree add $folder $branch 2>&1 + if ($LASTEXITCODE -ne 0) { throw "git worktree add failed for '$branch' (exit $LASTEXITCODE): $wtAddOutput" } + # Skip submodule init here — Start-PRRework.ps1's own + # Get-OrCreateWorktree handles it, and calling it here can + # crash the process due to git stderr interaction with pwsh. 
+ Write-Host " PR #$prNum — worktree created at $folder" -ForegroundColor DarkCyan + } + catch { + Write-Host " PR #$prNum — worktree creation FAILED: $($_.Exception.Message)" -ForegroundColor Red + $skippedResults += [PSCustomObject]@{ + PRNumber = $prNum; Status = 'Failed'; Iterations = 0 + FinalFindings = -1; WorktreePath = ''; SummaryPath = '' + Error = "Worktree creation failed: $($_.Exception.Message)" + } + continue + } + } + } + + $openPRs += $prNum + Write-Host " PR #$prNum — $($prInfo.title)" -ForegroundColor Green +} + +Write-Host "" +Write-Host "Phase 1 complete: $($openPRs.Count) open PRs ready, $($skippedResults.Count) skipped" -ForegroundColor Cyan + +if ($openPRs.Count -eq 0) { + Write-Host "No open PRs to process." -ForegroundColor Yellow + return $skippedResults +} + +# ── Phase 2: Run CLI rework in parallel ───────────────────────────────── +Write-Host "" +Write-Host "Phase 2: Running CLI rework in parallel (ThrottleLimit=$ThrottleLimit)..." -ForegroundColor Cyan +Write-Host "" + +$startTime = Get-Date + +$parallelResults = $openPRs | ForEach-Object -ThrottleLimit $ThrottleLimit -Parallel { + $prNum = $_ + $script = $using:reworkScript + $cli = $using:CLIType + $mdl = $using:Model + $maxIter = $using:MaxIterations + $minSev = $using:MinSeverity + $rvTimeout = $using:ReviewTimeoutMin + $fxTimeout = $using:FixTimeoutMin + $doForce = $using:Force + $doFresh = $using:Fresh + $doSkipTests = $using:SkipTests + + try { + $params = @{ + PRNumber = $prNum + CLIType = $cli + MaxIterations = $maxIter + MinSeverity = $minSev + ReviewTimeoutMin = $rvTimeout + FixTimeoutMin = $fxTimeout + } + if ($mdl) { $params['Model'] = $mdl } + if ($doForce) { $params['Force'] = $true } + if ($doFresh) { $params['Fresh'] = $true } + if ($doSkipTests) { $params['SkipTests'] = $true } + + $result = & $script @params + $result + } + catch { + [PSCustomObject]@{ + PRNumber = $prNum + Status = 'Failed' + Iterations = 0 + FinalFindings = -1 + WorktreePath = '' + SummaryPath = '' 
+ Error = $_.Exception.Message + } + } +} + +$elapsed = (Get-Date) - $startTime + +# Merge skipped + parallel results +$results = @($skippedResults) + @($parallelResults) | Sort-Object PRNumber + +Write-Host "" +Write-Host ("=" * 70) -ForegroundColor Cyan +Write-Host " PR REWORK PARALLEL — RESULTS" -ForegroundColor Cyan +Write-Host ("=" * 70) -ForegroundColor Cyan +Write-Host "" +Write-Host "Elapsed: $($elapsed.ToString('hh\:mm\:ss'))" +Write-Host "" + +# Display results table +$results | Format-Table @( + @{Label = 'PR'; Expression = { $_.PRNumber }; Width = 8} + @{Label = 'Status'; Expression = { $_.Status }; Width = 15} + @{Label = 'Iters'; Expression = { $_.Iterations }; Width = 6} + @{Label = 'Findings'; Expression = { $_.FinalFindings }; Width = 10} + @{Label = 'Worktree'; Expression = { $_.WorktreePath }; Width = 50} +) -AutoSize + +# Summary stats +$clean = ($results | Where-Object Status -eq 'Clean').Count +$maxed = ($results | Where-Object Status -eq 'MaxIterations').Count +$failed = ($results | Where-Object Status -eq 'Failed').Count +$skipped = ($results | Where-Object Status -eq 'Skipped').Count + +Write-Host "" +Write-Host "Summary: $clean clean, $maxed max-iterations, $failed failed, $skipped skipped (of $($uniquePRs.Count) total)" -ForegroundColor $(if ($failed -gt 0) { 'Yellow' } elseif ($maxed -gt 0) { 'DarkYellow' } else { 'Green' }) +Write-Host "" + +if ($clean -gt 0) { + Write-Host "Clean PRs — ready for review and push:" -ForegroundColor Green + $results | Where-Object Status -eq 'Clean' | ForEach-Object { + Write-Host " PR #$($_.PRNumber): $($_.SummaryPath)" -ForegroundColor White + } +} +if ($maxed -gt 0) { + Write-Host "Max-iteration PRs — review summaries for remaining findings:" -ForegroundColor Yellow + $results | Where-Object Status -eq 'MaxIterations' | ForEach-Object { + Write-Host " PR #$($_.PRNumber): $($_.FinalFindings) findings remaining — $($_.SummaryPath)" -ForegroundColor White + } +} + +return $results diff --git 
a/.github/skills/pr-triage/LICENSE.txt b/.github/skills/pr-triage/LICENSE.txt new file mode 100644 index 000000000000..c9766a251fed --- /dev/null +++ b/.github/skills/pr-triage/LICENSE.txt @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to the Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. 
Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative 
Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2026 Microsoft Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/.github/skills/pr-triage/SKILL.md b/.github/skills/pr-triage/SKILL.md new file mode 100644 index 000000000000..fb21091e848a --- /dev/null +++ b/.github/skills/pr-triage/SKILL.md @@ -0,0 +1,120 @@ +--- +name: pr-triage +description: Toolkit for triaging, categorizing, and prioritizing open pull requests. Use when asked to triage PRs, categorize stale PRs, prioritize pending reviews, identify abandoned PRs, suggest PR actions, find PRs needing attention, generate PR triage reports, analyze PR backlogs, or recommend next steps for pending PRs. Supports categorization by staleness, review status, build failures, design gaps, and suggests actionable next steps including reviewer assignment. +license: Complete terms in LICENSE.txt +--- + +# PR Triage Skill + +Triage, categorize, and prioritize open pull requests. 
Generate reports with recommended actions per category. + +## What to Do + +Before running, confirm scope with the end-user first: + +- Which PR numbers should be triaged? +- Which AI engine should be used (`copilot` or `claude`)? +- Should step 2 reviews be reused (`-SkipReview`) or regenerated? + +Then run the orchestrator: + +```powershell +.github/skills/pr-triage/scripts/Start-PrTriage.ps1 -PRNumbers <N1,N2,...> +``` + +It runs 5 steps sequentially, writing results to `<OutputRoot>/<date>/<label>/` (default `Generated Files/pr-triage/<date>/<label>/`). Re-running resumes from where it left off. + +### Options + +| Parameter | Default | Description | +|-----------|---------|-------------| +| `-Repository` | `microsoft/PowerToys` | GitHub repo to triage | +| `-PRNumbers` | — | PR numbers to triage (**required**) | +| `-ThrottleLimit` | `5` | Max concurrent parallel jobs | +| `-RunDate` | today | Date folder name (YYYY-MM-DD) | +| `-CliType` | `copilot` | AI engine: `copilot` or `claude` | +| `-RunLabel` | engine name | Subfolder label under run date | +| `-OutputRoot` | `Generated Files/pr-triage` | Root folder for triage run outputs | +| `-ReviewOutputRoot` | `Generated Files/prReview` | Folder used by Step 2 PR reviews | +| `-LogPath` | `triage.log` under run folder | Main orchestration log file (step logs are also created) | +| `-Force` | `false` | Re-run even if output exists | +| `-SkipAiEnrichment` | `false` | Skip step 3 (use rules only) | +| `-SkipReview` | `false` | Skip step 2 and reuse existing review outputs | + +### Pipeline + +``` +1. Collect → all-prs.json (Get-OpenPrs.ps1) +2. Review → prReview/<N>/ (Start-PRReviewWorkflow.ps1, parallel) +3. AI Enrich → ai-enrichment.json (Invoke-AiEnrichment.ps1, sequential) +4. Categorize → categorized-prs.json (Invoke-PrCategorization.ps1, parallel enrichment) +5. Report → summary.md + cats/ (Export-TriageReport.ps1) +``` + +Each step produces a file. If the file exists on re-run, the step is skipped. 
Delete the file (or pass `-Force`) to redo it. Step 2 delegates to the pr-review skill. Step 3 can be skipped with `-SkipAiEnrichment` (Step 4 falls back to rule-based categorization). + +### Delta Tracking + +Step 5 automatically compares against the most recent previous run. The summary shows: new PRs, closed/merged PRs, category changes, and recurring action items still unresolved. Pass `-PreviousInputPath` to `Export-TriageReport.ps1` to compare against a specific run. + +### Check Progress + +```powershell +.github/skills/pr-triage/scripts/Get-TriageProgress.ps1 -Detailed +``` + +### Execution & Monitoring Rules + +This pipeline takes **20–60 minutes** for 10 PRs. The agent MUST: + +1. **Launch as a detached process** — VS Code terminal idle detection kills background processes after ~60s. Use `Start-Process -WindowStyle Hidden` with `Tee-Object` to a log file. +2. **Poll the log file every 60–120 seconds** until the pipeline prints the final `Triage complete!` line in `triage.log`. +3. **Track all 5 steps** — do NOT report success after Step 2 finishes; continue monitoring through Steps 3–5. +4. **On process death**, check `triage.log` and orchestrator logs, clean up partial output, and relaunch automatically. +5. **Report final results** only after `summary.md` is written — include category breakdown and quick-wins table. + +## Step References (load per step) + +| Step | Reference | Script | +|------|-----------|--------| +| 1 | [Collection](./references/step1-collection.md) | `Get-OpenPrs.ps1` | +| 2 | [Review](./references/step2-review.md) | `Start-PRReviewWorkflow.ps1` | +| 3 | [AI Enrichment](./references/step3-ai-enrichment.md) | `Invoke-AiEnrichment.ps1` | +| 4 | [Categorization](./references/step4-categorization.md) | `Invoke-PrCategorization.ps1` | +| 5 | [Reporting](./references/step5-reporting.md) | `Export-TriageReport.ps1` | + +Read each reference only when executing that step. 
+ + ## Scripts + + | Script | Purpose | +|--------|---------| +| [Start-PrTriage.ps1](./scripts/Start-PrTriage.ps1) | **Run this** — full pipeline | +| [Get-TriageProgress.ps1](./scripts/Get-TriageProgress.ps1) | Check run status | +| [Get-OpenPrs.ps1](./scripts/Get-OpenPrs.ps1) | Step 1: Collect | +| [Invoke-AiEnrichment.ps1](./scripts/Invoke-AiEnrichment.ps1) | Step 3: AI enrichment (dimension scoring) | +| [Invoke-PrCategorization.ps1](./scripts/Invoke-PrCategorization.ps1) | Step 4: Categorize | +| [Export-TriageReport.ps1](./scripts/Export-TriageReport.ps1) | Step 5: Report | +| [Get-PrDetails.ps1](./scripts/Get-PrDetails.ps1) | Utility: detailed PR enrichment | +| [Get-ReviewerSuggestions.ps1](./scripts/Get-ReviewerSuggestions.ps1) | Utility: suggest reviewers | + + ## Dependencies + + | Skill | Used For | +|-------|----------| +| `parallel-job-orchestrator` | AI CLI execution in Step 2 (reviews, parallel) and Step 3 (enrichment, sequential) | +| `pr-review` | Step 2 PR reviews — delegates to `Start-PRReviewWorkflow.ps1` | + + Both `Start-PRReviewWorkflow.ps1` and `Invoke-AiEnrichment.ps1` delegate AI CLI +execution to the shared orchestrator (Step 2 runs in parallel; Step 3 runs sequentially — see the step 3 reference). Do NOT introduce custom `ForEach-Object -Parallel`, +`Start-Job`, or `Start-Process` patterns — use the orchestrator instead. + + ## Post-Execution Review + + After each triage run, review results and tighten instructions when needed: + + 1. Verify run artifacts under `Generated Files/pr-triage/<date>/<label>/` are complete. + 2. Validate category distribution and sampled actions in `summary.md` for plausibility. + 3. Compare `ai-enrichment.json` success/failure counts and investigate unusual failure spikes. + 4. If criteria or prompts produced noisy outcomes, update the relevant step reference in [references](./references). + 5. If script parameters or behavior changed, keep this `SKILL.md` options/workflow in sync in the same PR. 
diff --git a/.github/skills/pr-triage/references/categorize-pr.prompt.md b/.github/skills/pr-triage/references/categorize-pr.prompt.md new file mode 100644 index 000000000000..ac5b1cdcbb64 --- /dev/null +++ b/.github/skills/pr-triage/references/categorize-pr.prompt.md @@ -0,0 +1,190 @@ +```prompt +# PR Triage Evaluation + +You are evaluating a pull request for the **PowerToys** repository. +Read ALL information below — metadata, discussion, images, AI code review — then score each evaluation dimension. + +## The PR + +- **PR #{{PR_NUMBER}}**: {{PR_TITLE}} +- **Author**: @{{PR_AUTHOR}} +- **URL**: {{PR_URL}} +- **Age**: {{AGE_DAYS}} days +- **Days since last activity**: {{DAYS_SINCE_ACTIVITY}} +- **Days since author last active**: {{DAYS_SINCE_AUTHOR_ACTIVITY}} +- **Size**: +{{ADDITIONS}} / -{{DELETIONS}} ({{CHANGED_FILES}} files) +- **Labels**: {{LABELS}} +- **Linked issues**: {{LINKED_ISSUES}} +- **Is draft**: {{IS_DRAFT}} + +### Review Status +- **Human approvals**: {{APPROVAL_COUNT}} +- **Changes requested**: {{CHANGES_REQUESTED_COUNT}} +- **CI status**: {{CHECKS_STATUS}} +- **Failing checks**: {{FAILING_CHECKS}} +- **Mergeable**: {{MERGEABLE}} + +### AI Code Review Summary +{{AI_REVIEW_SUMMARY}} + +## Your Tasks + +### Step 1: Read the PR discussion + +Use the GitHub MCP tools to understand the FULL context: + +1. **Fetch images and attachments** from PR #{{PR_NUMBER}} in `microsoft/PowerToys`: + - Use `github_issue_images` tool with owner=`microsoft`, repo=`PowerToys`, issueNumber={{PR_NUMBER}} + - Use `github_issue_attachments` tool with owner=`microsoft`, repo=`PowerToys`, issueNumber={{PR_NUMBER}}, extractFolder=`{{EXTRACT_FOLDER}}` + +2. **Read the full discussion** using: + ``` + gh pr view {{PR_NUMBER}} --repo microsoft/PowerToys --json body,comments,reviews,reviewRequests + ``` + +Pay attention to: +- What do reviewers actually think? Read between the lines. +- Does anyone say the fix **doesn't work** or is **broken**? 
+- Has the author said they'll open a **replacement PR**? +- Is there **disagreement** about the approach? +- Are reviewers asking for **fundamental redesign** vs minor tweaks? +- Are there **images** showing bugs, test results, or UI changes? + +### Step 2: Score each dimension + +Evaluate these 7 dimensions based on everything you read. Each dimension is independent. + +## Evaluation Dimensions + +### 1. `review_sentiment` — What do reviewers think? +How positive or negative is the overall reviewer sentiment? +- **1.0** = Enthusiastic approval, "LGTM", "great work" +- **0.7** = Positive, minor nits only +- **0.5** = Mixed — some approve, some have concerns +- **0.3** = Negative — significant objections raised +- **0.0** = Hostile rejection or "this doesn't work at all" +- If no reviews exist, score **0.5** with low confidence. + +### 2. `author_responsiveness` — Is the author engaged? +How actively is the author responding to feedback and keeping the PR moving? +- **1.0** = Responding promptly, pushing fixes, actively engaged +- **0.7** = Responding but slowly +- **0.5** = Unclear or no feedback to respond to yet +- **0.3** = Asked to make changes, hasn't responded in a while +- **0.0** = Author has gone silent for weeks, or explicitly abandoned + +### 3. `code_health` — Is the code ready? +Based on AI review findings AND human review comments, how healthy is the code? +- **1.0** = Clean — no issues found by AI or humans +- **0.7** = Minor issues only (style, naming, small improvements) +- **0.5** = Moderate concerns — some functional issues but fixable +- **0.3** = Serious problems — bugs, security issues, or design flaws found +- **0.0** = Fundamentally broken — "this doesn't work" confirmed in discussion + +### 4. `merge_readiness` — How close to merge? +Considering approvals, CI, discussion, and code health — how merge-ready is this? 
+- **1.0** = Ready to merge right now — approved, CI green, no objections +- **0.7** = Almost ready — just needs final approval or CI to finish +- **0.5** = Needs some work but on the right track +- **0.3** = Significant work remaining — redesign, major fixes, or blocked +- **0.0** = Not mergeable — should be closed, superseded, or fundamentally reworked + +### 5. `activity_level` — How alive is this PR? +How actively is this PR being worked on? +- **1.0** = Active discussion/commits in the last few days +- **0.7** = Some activity in the last 1-2 weeks +- **0.5** = Last activity was 2-4 weeks ago +- **0.3** = Stale — no activity for 1-2 months +- **0.0** = Dead — no activity for 3+ months, likely abandoned + +### 6. `direction_clarity` — Is there agreement on approach? +Do reviewers and the author agree on the direction/design of this PR? +- **1.0** = Clear agreement — everyone aligned on approach +- **0.7** = Mostly aligned, minor design suggestions +- **0.5** = Some open questions about approach, not yet resolved +- **0.3** = Significant disagreement — conflicting reviewer opinions +- **0.0** = Fundamental disagreement or no one knows what this should look like + +### 7. `superseded` — Has this been replaced? +Is there evidence this PR has been replaced by another PR or is obsolete? +- **1.0** = Explicitly superseded — author or maintainer links to replacement PR +- **0.7** = Author said "I'll open a new PR" or "this approach won't work" +- **0.3** = Hints of replacement but not confirmed +- **0.0** = No evidence of replacement + +## Output Format + +Respond with ONLY a JSON block (no other text): + +```json +{ + "dimensions": { + "review_sentiment": { + "score": 0.7, + "confidence": 0.85, + "reasoning": "One reviewer approved with minor nits, no objections raised." + }, + "author_responsiveness": { + "score": 0.5, + "confidence": 0.6, + "reasoning": "No feedback to respond to yet — PR is new with no review comments." 
+ }, + "code_health": { + "score": 0.7, + "confidence": 0.8, + "reasoning": "AI review found 2 low-severity style issues. No functional problems." + }, + "merge_readiness": { + "score": 0.5, + "confidence": 0.75, + "reasoning": "Has 1 approval but CI is still running. No blockers in discussion." + }, + "activity_level": { + "score": 0.8, + "confidence": 0.95, + "reasoning": "Last commit was 3 days ago, author responded to comment yesterday." + }, + "direction_clarity": { + "score": 0.9, + "confidence": 0.8, + "reasoning": "Reviewers agree this is the right approach, small scope." + }, + "superseded": { + "score": 0.0, + "confidence": 0.95, + "reasoning": "No mention of replacement PR in discussion." + } + }, + "suggested_category": "approved-pending-merge", + "discussion_summary": "2-3 sentence summary of the key discussion points and any images you saw.", + "superseded_by": null, + "tags": ["tag1", "tag2"] +} +``` + +### Category Reference (for your `suggested_category` field) +| Category | Typical dimension pattern | +|----------|--------------------------| +| `ready-to-merge` | High merge_readiness, high review_sentiment, high code_health | +| `review-concerns` | Low code_health or low review_sentiment due to issues found | +| `approved-pending-merge` | High review_sentiment, moderate merge_readiness (CI pending) | +| `build-failures` | CI failing (from metadata), otherwise healthy | +| `fresh-awaiting-review` | High activity, no reviews yet, young PR | +| `in-active-review` | Active discussion, reviews happening | +| `stale-no-review` | Low activity, no reviews | +| `awaiting-author` | Low author_responsiveness, changes requested | +| `stale-with-feedback` | Low activity, has reviews but no resolution | +| `likely-abandoned` | Very low activity + very low author_responsiveness | +| `direction-unclear` | Low direction_clarity | +| `design-needed` | Low direction_clarity + large scope | +| `needs-attention` | Doesn't fit patterns above | +| `superseded` | High 
superseded score | + +### Critical Rules +- If discussion says "this doesn't work" → `code_health` must be ≤ 0.2 and `merge_readiness` must be ≤ 0.2. +- If author says "I'll open a new PR" → `superseded` must be ≥ 0.7. +- Read images! Screenshots might show the fix doesn't work. + +### Valid tags +`quick-win`, `large-pr`, `external-contributor`, `rescue-candidate`, `recommend-close`, `review-clean`, `review-high-severity`, `review-quality`, `review-design`, `review-security`, `has-replacement-pr`, `fix-does-not-work`, `author-unresponsive`. +``` diff --git a/.github/skills/pr-triage/references/step1-collection.md b/.github/skills/pr-triage/references/step1-collection.md new file mode 100644 index 000000000000..fbab32488c7b --- /dev/null +++ b/.github/skills/pr-triage/references/step1-collection.md @@ -0,0 +1,95 @@ +# Step 1: Collection - Fetch Selected PRs + +Collect the user-selected pull requests from the repository with core metadata needed for triage. + +## First ask the user for scope + +Before running collection, confirm: + +- PR numbers to triage +- AI engine preference (`copilot` or `claude`) +- Whether to reuse existing review output (`-SkipReview`) or regenerate + +## Data Collection + +### Primary Query + +Use the `Get-OpenPrs.ps1` script with explicit PR numbers: + +```powershell +.github/skills/pr-triage/scripts/Get-OpenPrs.ps1 ` + -Repository microsoft/PowerToys ` + -PRNumbers 45234,45235,45236 ` + -OutputPath all-prs.json +``` + +### Additional Metadata Per PR + +For each PR, also fetch: + +```powershell +# Check status (CI/checks) +gh pr checks $prNumber --json name,state,conclusion + +# Get linked issues +gh pr view $prNumber --json closingIssuesReferences +``` + +## Output Schema + +Save to `all-prs.json`: + +```json +{ + "collectedAt": "2026-02-04T10:30:00Z", + "totalCount": 47, + "prs": [ + { + "number": 12345, + "title": "Fix crash in FancyZones", + "author": "contributor123", + "createdAt": "2025-12-15T08:00:00Z", + "updatedAt": 
"2026-01-20T14:30:00Z", + "ageInDays": 51, + "daysSinceUpdate": 15, + "baseRefName": "main", + "headRefName": "fix/fancyzones-crash", + "labels": ["bug", "Area-FancyZones"], + "assignees": [], + "reviewRequests": [], + "isDraft": false, + "mergeable": "MERGEABLE", + "additions": 45, + "deletions": 12, + "changedFiles": 3, + "linkedIssues": [9876], + "checksStatus": "PENDING" + } + ] +} +``` + +## Scope model + +Collection is intentionally PR-number driven to keep triage reproducible and side-by-side comparable across engines. + +## Calculated Fields + +The script computes these derived fields: + +| Field | Calculation | +|-------|-------------| +| `ageInDays` | `(Now - createdAt).TotalDays` | +| `daysSinceUpdate` | `(Now - updatedAt).TotalDays` | +| `sizeCategory` | XS (<10), S (<50), M (<200), L (<500), XL (500+) | +| `checksStatus` | Aggregate: PASSING, FAILING, PENDING, NONE | + +## Error Handling + +- **Rate limiting**: If `gh` returns 403, wait and retry with exponential backoff +- **Missing fields**: Use null/defaults for optional fields +- **Large repos**: Use pagination (`--limit` + `--cursor`) for 500+ PRs + +## Next Step + +After collection, proceed to [Step 2: Review](./step2-review.md) to run detailed AI reviews on all collected PRs. diff --git a/.github/skills/pr-triage/references/step2-review.md b/.github/skills/pr-triage/references/step2-review.md new file mode 100644 index 000000000000..466cc7f8de47 --- /dev/null +++ b/.github/skills/pr-triage/references/step2-review.md @@ -0,0 +1,215 @@ +# Step 2: Review — Detailed PR Reviews via pr-review Skill + +`Start-PrTriage.ps1` delegates to the **pr-review** skill's `Start-PRReviewWorkflow.ps1` for every PR collected in Step 1. 
+ + --- + + ## Delegation + + The orchestrator passes the PR numbers collected in Step 1: + + ```powershell +$reviewPrNumbers = ($allPrsData.Prs | ForEach-Object { [int]$_.Number }) + +& "$skillRoot/../pr-review/scripts/Start-PRReviewWorkflow.ps1" ` + -PRNumbers $reviewPrNumbers ` + -CLIType $CliType ` + -OutputRoot $ReviewOutputRoot ` + -MaxParallel $ThrottleLimit +``` + +Review output lands in `$ReviewOutputRoot/{N}/` (owned by pr-review skill). + +For the review-pr prompt specification, see [review-pr.prompt.md](../pr-review/references/review-pr.prompt.md). + +## Next Step + +After reviews complete, proceed to [Step 3: AI Enrichment](./step3-ai-enrichment.md) to enrich and score PRs (incorporating review results); final category assignment happens in Step 4. + +--- + +## Action Templates by Category + +The review skill produces detailed findings. The templates below describe the **recommended follow-up actions** per category. + +### Ready to Merge (`ready-to-merge`) + +**Who**: Maintainer with merge permissions + +```markdown +- [ ] Verify CI is still green +- [ ] Check for any last-minute comments +- [ ] Merge PR using squash/rebase per repo convention +- [ ] Delete branch if from fork +``` + +### Build Failures (`build-failures`) + +**Who**: PR author (if minor), or maintainer (if author inactive) + +**Minor failures** (lint, style, warnings): +```markdown +- [ ] Review failing checks: {list failing checks} +- [ ] Fix: {specific fix suggestion} +- [ ] Consider: Maintainer quick-fix if author inactive 30+ days +``` + +**Major failures** (compile, tests): +```markdown +- [ ] Identify root cause from CI logs +- [ ] Comment on PR with specific failure details +- [ ] If author inactive: Consider closing with "please reopen when fixed" +``` + +### Stale — No Review (`stale-no-review`) + +**Who**: Maintainer to assign reviewer + +```markdown +- [ ] Assign reviewer: {suggested_reviewer} (based on: {reason}) +- [ ] Alternative reviewers: {list} +- [ ] Set review deadline: {date} +- [ ] If no reviewer 
available: Post call for reviewers +``` + +### Awaiting Author (`awaiting-author`) + +**Who**: PR author, or maintainer to close + +**Author responsive recently**: +```markdown +- [ ] Ping author: @{author} — friendly reminder about pending changes +- [ ] Summarize outstanding items +- [ ] Set response deadline: {date} +``` + +**Author unresponsive 30+ days**: +```markdown +- [ ] Post closing notice: "Closing due to inactivity. Please reopen when ready." +- [ ] Close PR +- [ ] Add label: stale-closed +``` + +**Author unresponsive 60+ days**: +```markdown +- [ ] Close PR with thank-you message +- [ ] If changes valuable: Consider maintainer takeover PR +``` + +### Direction Unclear (`direction-unclear`) + +**Who**: Maintainer or technical lead + +```markdown +- [ ] Identify conflicting viewpoints +- [ ] Schedule decision meeting or async discussion +- [ ] Post decision summary to PR +- [ ] Update PR with clear next steps +``` + +### Design Needed (`design-needed`) + +**Who**: PR author with guidance from maintainer + +```markdown +- [ ] Request design document covering: + - Problem statement + - Proposed solution + - Alternatives considered + - Impact on existing functionality +- [ ] Suggest design reviewers +- [ ] Set design deadline +- [ ] Consider: Close PR, request design-first approach +``` + +### Issue Mismatch (`issue-mismatch`) + +**Who**: PR author with maintainer clarification + +```markdown +- [ ] Clarify mismatch between issue and PR +- [ ] Options: + 1. Update PR to address issue correctly + 2. Close PR and suggest correct approach + 3. 
Create new issue for what PR actually fixes +``` + +### Needs Attention (`needs-attention`) + +**Who**: Maintainer to investigate + +```markdown +- [ ] Review detailed findings from pr-review output +- [ ] Determine appropriate category after analysis +- [ ] Document findings in triage report +- [ ] Assign specific action based on review results +``` + +--- + +## Reviewer Suggestion Algorithm + +```powershell +function Get-SuggestedReviewers { + param($pr) + + $suggestions = @() + + # 1. CODEOWNERS matches + $codeowners = Get-CodeownersForFiles $pr.changedFiles + if ($codeowners) { + $suggestions += @{ + User = $codeowners[0] + Reason = "CODEOWNERS for $($pr.changedFiles[0])" + Confidence = "High" + } + } + + # 2. Recent reviewers of area + $areaLabel = $pr.labels | Where-Object { $_ -match "^Area-" } | Select-Object -First 1 + if ($areaLabel) { + $recentReviewers = Get-RecentReviewers -Area $areaLabel -Days 90 -Limit 3 + $suggestions += $recentReviewers | ForEach-Object { + @{ User = $_; Reason = "Recently reviewed $areaLabel PRs"; Confidence = "Medium" } + } + } + + # 3. 
File history (git blame) + $topCommitters = Get-TopCommitters -Files $pr.changedFiles -Limit 3 + $suggestions += $topCommitters | ForEach-Object { + @{ User = $_; Reason = "Frequent committer to changed files"; Confidence = "Medium" } + } + + # Deduplicate and exclude PR author + $suggestions | + Where-Object { $_.User -ne $pr.author } | + Sort-Object -Property Confidence -Descending | + Select-Object -First 5 +} +``` + +## Batch Actions + +Group similar actions for efficiency: + +```markdown +## Batch: Assign Reviewers (8 PRs) + +| PR | Suggested Reviewer | Reason | Alt 1 | Alt 2 | +|----|-------------------|--------|-------|-------| +| #12345 | @reviewer1 | CODEOWNERS | @r2 | @r3 | +| #12400 | @reviewer1 | Area-FancyZones | @r2 | @r4 | +``` + +## Review Output + +Each PR reviewed by the pr-review skill produces: + +``` +Generated Files/prReview/{N}/ +├── 00-OVERVIEW.md +├── 01-ANALYSIS.md +└── … +``` + +The triage summary in `summary.md` can link to these for deep-dive details. diff --git a/.github/skills/pr-triage/references/step3-ai-enrichment.md b/.github/skills/pr-triage/references/step3-ai-enrichment.md new file mode 100644 index 000000000000..d2592130c8da --- /dev/null +++ b/.github/skills/pr-triage/references/step3-ai-enrichment.md @@ -0,0 +1,104 @@ +# Step 3: AI Enrichment — AI CLI per PR + +`Invoke-AiEnrichment.ps1` enriches each PR with AI-derived signals by invoking the selected AI CLI (`copilot` or `claude`) with MCP tools. +It reads the full PR discussion, images, and AI code review findings (from Step 2), +then scores 7 dimensions. The actual category assignment happens in Step 4. + +--- + +## How It Works + +For each PR, the script: + +1. Builds a prompt from `categorize-pr.prompt.md` with PR metadata filled in +2. Launches the selected AI CLI +3. AI reads the PR discussion via `gh pr view` and fetches images/attachments via MCP tools +4. 
AI returns a JSON block with 7 dimension scores + +### Sequential Execution + +PRs are processed one at a time (not parallel) because the AI CLI + MCP server +are stateful. The script saves results incrementally so it can resume after interruption. + +### Resume & Cache + +- Existing results in `ai-enrichment.json` are loaded and skipped +- Per-PR raw output is cached under `<OutputRoot>/__tmp/cat-output-{N}.txt` +- If cached output exists and parses successfully, the CLI is not re-invoked +- Pass `-Force` to re-evaluate all PRs + +--- + +## 7 Evaluation Dimensions + +Each dimension is scored 0.0–1.0 with a confidence level and reasoning string. + +| Dimension | What it measures | +|-----------|-----------------| +| `review_sentiment` | How positive/negative reviewer feedback is | +| `author_responsiveness` | Is the author actively engaged? | +| `code_health` | Are there bugs, security issues, or design problems? | +| `merge_readiness` | How close to merge (approvals, CI, discussion)? | +| `activity_level` | How recently was this PR active? | +| `direction_clarity` | Do reviewers agree on the approach? | +| `superseded` | Has this PR been replaced by another? | + +See [categorize-pr.prompt.md](./categorize-pr.prompt.md) for full scoring rubrics. + +--- + +## Output + +`ai-enrichment.json`: + +```json +{ + "CategorizedAt": "2026-02-12T10:00:00Z", + "Repository": "microsoft/PowerToys", + "TotalCount": 112, + "AiSuccessCount": 108, + "AiFailedCount": 4, + "Results": [ + { + "Number": 45542, + "Dimensions": { + "review_sentiment": { "Score": 0.7, "Confidence": 0.85, "Reasoning": "..." }, + "author_responsiveness": { "Score": 0.5, "Confidence": 0.6, "Reasoning": "..." }, + "code_health": { "Score": 0.8, "Confidence": 0.9, "Reasoning": "..." }, + "merge_readiness": { "Score": 0.6, "Confidence": 0.75, "Reasoning": "..." }, + "activity_level": { "Score": 0.4, "Confidence": 0.95, "Reasoning": "..." 
}, + "direction_clarity": { "Score": 0.9, "Confidence": 0.8, "Reasoning": "..." }, + "superseded": { "Score": 0.0, "Confidence": 0.95, "Reasoning": "..." } + }, + "SuggestedCategory": "in-active-review", + "DiscussionSummary": "Reviewer approved with minor nits...", + "SupersededBy": null, + "Tags": ["review-clean"], + "Source": "ai" + } + ] +} +``` + +PRs where `Source` is `"failed"` will fall back to rule-based categorization in Step 4. + +--- + +## Script Parameters + +| Parameter | Default | Description | +|-----------|---------|-------------| +| `-InputPath` | (required) | Path to `all-prs.json` | +| `-OutputPath` | `ai-enrichment.json` | Where to write results | +| `-OutputRoot` | — | Run output root for temporary/cache files | +| `-Repository` | `microsoft/PowerToys` | GitHub repo | +| `-CliType` | `copilot` | AI engine: `copilot` or `claude` | +| `-ReviewOutputRoot` | `Generated Files/prReview` | Where Step 2 review output is read from | +| `-TimeoutMin` | `5` | Per-PR timeout in minutes | +| `-Force` | `false` | Re-evaluate all PRs | + +--- + +## Next Step + +After AI enrichment, proceed to [Step 4: Categorization](./step4-categorization.md) which merges AI dimensions with GitHub API data and assigns final categories. diff --git a/.github/skills/pr-triage/references/step4-categorization.md b/.github/skills/pr-triage/references/step4-categorization.md new file mode 100644 index 000000000000..5c927d995903 --- /dev/null +++ b/.github/skills/pr-triage/references/step4-categorization.md @@ -0,0 +1,180 @@ +# Step 4: Categorization + +**Script:** `Invoke-PrCategorization.ps1` + +Loads AI dimension scores from Step 3, enriches each PR with live GitHub API data, +and assigns one of 14 triage categories. 
+ +## Inputs + +| Source | File | Contents | +|--------|------|----------| +| Step 1 | `all-prs.json` | Base PR metadata (number, title, author, labels, dates, diff stats) | +| Step 2 | `Generated Files/prReview/<N>/` | Per-PR review output (`.signal`, `00-OVERVIEW.md`, step files with `mcp-review-comment` blocks) | +| Step 3 | `ai-enrichment.json` | Per-PR dimension scores, suggested category, tags, discussion summary | + +## Phase 1 — Parallel Enrichment + +Each PR is enriched via `gh api` in parallel (`ForEach-Object -Parallel`). +The parallel block cannot call module functions, so API calls are inlined. + +| API call | Data extracted | +|----------|----------------| +| `repos/{repo}/pulls/{n}/reviews` | `ApprovalCount`, `ChangesRequestedCount`, `ReviewerLogins` | +| `repos/{repo}/issues/{n}/comments` | `CommentCount`, `LastCommentAt`, `AuthorLastActivityAt` | +| `repos/{repo}/commits/{ref}/check-runs` | `ChecksStatus` (SUCCESS/FAILURE/PENDING/UNKNOWN), `FailingChecks` | +| `repos/{repo}/pulls/{n}/commits` | `LastCommitAt` | + +Results are stored in a `ConcurrentDictionary[int, PSObject]`. + +## Phase 2 — Categorization + +After enrichment, each PR is categorized sequentially. +The script also loads review findings from Step 2 (`Get-ReviewFindings`). + +### Priority: AI dimensions → AI suggestion → deterministic rules + +1. **If AI dimensions exist** (from `ai-enrichment.json`): use `Get-CategoryFromDimensions` +2. **Otherwise**: fall back to `Get-CategoryFromRules` (deterministic) + +### AI Dimension Rules (`Get-CategoryFromDimensions`) + +Applied in priority order. First match wins. +Dimension abbreviations: `sup` = superseded, `mr` = merge_readiness, `rs` = review_sentiment, +`ch` = code_health, `ar` = author_responsiveness, `al` = activity_level, `dc` = direction_clarity. +Missing dimensions default to 0.5. Neutral band: 0.45–0.55 treated as "no signal" (replaces fragile `== 0.5`). 
+ +| Rule | Condition | Category | +|------|-----------|----------| +| R-AI-1 | `sup ≥ 0.7` | `superseded` | +| R-AI-2 | `al ≤ 0.2 AND ar ≤ 0.2` | `likely-abandoned` | +| R-AI-3 | `mr ≥ 0.8 AND rs ≥ 0.7 AND ch ≥ 0.7` | `ready-to-merge` | +| R-AI-4 | `rs ≥ 0.7 AND 0.5 ≤ mr < 0.8 AND ch ≥ 0.4` | `approved-pending-merge` | +| R-AI-5 | `ch ≤ 0.3 AND mr ≤ 0.3` | `build-failures` | +| R-AI-6 | `rs ≤ 0.3` | `review-concerns` | +| R-AI-7 | `dc ≤ 0.3` | `design-needed` | +| R-AI-8 | `dc ≤ 0.5 AND rs ≤ 0.5` | `direction-unclear` | +| R-AI-9 | `ch ≤ 0.3` | `review-concerns` (reduced confidence) | +| R-AI-10 | `ar ≤ 0.3 AND al ≥ 0.3` | `awaiting-author` | +| R-AI-11 | `al ≤ 0.3 AND rs outside neutral band` | `stale-with-feedback` | +| R-AI-12 | `al ≤ 0.3 AND rs in neutral band` | `stale-no-review` | +| R-AI-13 | `al ≥ 0.6 AND rs in neutral band` | `fresh-awaiting-review` | +| R-AI-14 | `al ≥ 0.4 AND rs ≥ 0.5` | `in-active-review` | +| R-AI-15 | *(none matched)* | AI's `suggested_category` or `needs-attention` | + +Design principles: +- Terminal states first (superseded, abandoned) — no point evaluating quality on dead PRs +- Positive outcomes next (ready, approved) — with code-health guards +- Technical blockers (build failures) — no sentiment gate +- Human blockers split: reviewer pushback (R-AI-6) vs AI-detected code issues (R-AI-9, lower confidence) +- Activity buckets use neutral band instead of fragile float equality + +**Enrichment cross-check** (applied after dimension rules in the caller): +- `ready-to-merge` + CI actually FAILURE → `build-failures` (source: `ai-corrected`) +- `ready-to-merge` + changes requested > 0 → `review-concerns` (source: `ai-corrected`) + +Confidence = average of all dimension confidence scores. +Source = `ai-dimensions` (R-AI-1–14), `ai-suggested` (R-AI-15 with suggestion), `ai-fallback`, or `ai-corrected`. + +### Deterministic Fallback Rules (`Get-CategoryFromRules`) + +Used when AI categorization is unavailable. 
Based on enrichment data and PR dates. + +| Rule | Condition | Category | Confidence | +|------|-----------|----------|------------| +| 1 | Approved + CI green + mergeable + no changes requested | `ready-to-merge` | 0.80 | +| 2 | CI failing | `build-failures` | 0.85 | +| 3 | Approved + CI not failing + no changes requested | `approved-pending-merge` | 0.70 | +| 4 | Changes requested + author silent ≥ 14 days | `awaiting-author` | 0.75 | +| 5 | No activity ≥ 90 days | `likely-abandoned` | 0.80 | +| 6 | No activity ≥ 30 days + has reviews | `stale-with-feedback` | 0.65 | +| 7 | No activity ≥ 30 days + no reviews | `stale-no-review` | 0.65 | +| 8 | Changes requested + author responded within 14 days | `review-concerns` | 0.60 | +| 9 | Age ≤ 7 days + no reviews | `fresh-awaiting-review` | 0.70 | +| 10 | Activity ≤ 7 days + has comments or reviews | `in-active-review` | 0.60 | +| 11 | No reviews + age 7–30 days | `stale-no-review` | 0.50 | +| 12 | Has comments + no formal reviews + activity ≤ 14 days | `in-active-review` | 0.40 | +| 13 | *(none matched)* | `needs-attention` | 0.30 | + +## Phase 3 — Assembly + +`New-CategorizedPr` merges all data into the final per-PR object: + +### Review Findings (`Get-ReviewFindings`) + +Parses Step 2 prReview output for each PR: +- Reads `.signal` file for review signal +- Parses `mcp-review-comment` JSON blocks from step markdown files +- Counts findings by severity (high / medium / low) + +### Effort Estimation (`Get-EffortEstimate`) + +| Condition | Effort | +|-----------|--------| +| No review data | `unknown` | +| 0 findings | `trivial` | +| ≥ 3 high | `rework` | +| ≥ 1 high + ≥ 2 medium | `major` | +| ≥ 1 high OR ≥ 3 medium | `moderate` | +| ≥ 1 medium | `minor` | +| Else | `trivial` | + +### Signals and Tags + +**Signals** (quick-scan indicators): +- ✅ N approvals, ❌ N changes requested, 🔴 CI failing, 🟢 CI passing, 🔥 N high-sev, 💤 N days stale + +**Tags** (from AI + computed): +- AI-supplied tags from Step 3 +- `large-pr` if 
additions + deletions ≥ 500 +- `review-high-severity` if any high-severity findings +- `review-clean` if review exists with 0 findings + +## Output + +`categorized-prs.json` — top-level schema: + +```json +{ + "CategorizedAt": "ISO-8601", + "Repository": "microsoft/PowerToys", + "TotalCount": 42, + "CategoryCounts": { "ready-to-merge": 3, "review-concerns": 8, ... }, + "Prs": [ ... ] +} +``` + +Per-PR fields: + +| Field | Source | +|-------|--------| +| `Number`, `Title`, `Author`, `Url`, `Labels`, `LinkedIssues`, `Additions`, `Deletions`, `ChangedFiles` | Step 1 | +| `AgeInDays`, `DaysSinceActivity` | Computed from dates | +| `Category`, `Confidence`, `CategorizationSource` | Phase 2 | +| `Signals`, `Tags`, `Effort`, `EffortLabel` | Phase 3 | +| `DimensionScores`, `DiscussionSummary`, `SupersededBy` | Step 3 (AI) | +| `ChecksStatus`, `FailingChecks`, `ApprovalCount`, `ChangesRequestedCount` | Phase 1 enrichment | +| `ReviewData` | Step 2 review findings (severity counts, signal, summaries) | + +## 14 Categories + +| Category | Description | +|----------|-------------| +| `ready-to-merge` | Approved, CI green, no blockers | +| `approved-pending-merge` | Approved but CI pending or minor gap | +| `build-failures` | CI failing | +| `review-concerns` | Reviewers flagged issues | +| `design-needed` | Needs design discussion | +| `direction-unclear` | Purpose or approach unclear | +| `awaiting-author` | Waiting for author response | +| `fresh-awaiting-review` | New PR, no reviews yet | +| `in-active-review` | Active discussion happening | +| `stale-with-feedback` | Inactive, has reviewer feedback | +| `stale-no-review` | Inactive, never reviewed | +| `likely-abandoned` | No activity for extended period | +| `superseded` | Replaced by another PR | +| `needs-attention` | Fallback — doesn't fit other categories | + +## Next Step + +→ [Step 5: Reporting](./step5-reporting.md) diff --git a/.github/skills/pr-triage/references/step5-reporting.md 
b/.github/skills/pr-triage/references/step5-reporting.md new file mode 100644 index 000000000000..6b68af495787 --- /dev/null +++ b/.github/skills/pr-triage/references/step5-reporting.md @@ -0,0 +1,185 @@ +# Step 5: Reporting — Generate Triage Reports + +`Export-TriageReport.ps1` produces a human-readable summary and per-category markdown files from `categorized-prs.json`. This is the final step — AI enrichment (Step 3) and categorization (Step 4) are already complete. + +--- + +## Parameters + +| Parameter | Type | Required | Description | +|-----------|------|----------|-------------| +| `-InputPath` | string | Yes | Path to `categorized-prs.json` from Step 4 | +| `-OutputDir` | string | Yes | Run output directory (e.g., `Generated Files/pr-triage/2026-02-12`) | +| `-Repository` | string | Yes | GitHub repo in `owner/repo` format | +| `-IncludeDetailedReview` | switch | No | Include AI review details in category reports | +| `-PreviousInputPath` | string | No | Path to a previous run's `categorized-prs.json` for delta comparison | + +## Report Structure + +``` +Generated Files/pr-triage/{date}/ +├── summary.md # Executive summary with deltas +├── categorized-prs.json # Machine-readable data (from Step 4) +├── all-prs.json # Raw collection data (from Step 1) +├── ai-enrichment.json # AI dimension scores (from Step 3) +└── categories/ + ├── fresh-awaiting-review.md + ├── in-active-review.md + ├── approved-pending-merge.md + ├── review-concerns.md + ├── build-failures.md + ├── awaiting-author.md + ├── stale-with-feedback.md + ├── stale-no-review.md + ├── direction-unclear.md + ├── design-needed.md + ├── likely-abandoned.md + └── superseded.md +``` + +## Summary Sections + +The `summary.md` report contains these sections in order: + +### 1. Header + +```markdown +# PR Triage Summary — microsoft/PowerToys + +**Generated:** {timestamp} +**Total open PRs:** {n} +**AI categorized:** {n} | **Rule-based:** {n} +``` + +### 2. 
Delta Sections (when `-PreviousInputPath` provided) + +Four delta sections appear after the header — see [Delta Tracking](#delta-tracking) below. + +### 3. Category Breakdown + +Bar-chart table linking to per-category reports: + +```markdown +| Category | Count | | +|----------|------:|---| +| 🆕 [Fresh - Awaiting Review](categories/fresh-awaiting-review.md) | 30 | █████ | +| 🔧 [Build Failures](categories/build-failures.md) | 20 | ███ | +| ... +``` + +### 4. 🚨 Critical — Needs Immediate Attention + +PRs with high-severity AI review findings, long staleness, or both: + +```markdown +| PR | Author | Category | Age | Signals | +|-----|--------|----------|----:|---------| +``` + +### 5. ⚡ Quick Wins + +PRs tagged as `quick-win` by the AI enrichment, sorted by effort level: + +```markdown +| PR | Author | Effort | Approvals | +|-----|--------|--------|----------:| +``` + +### 6. Category Reports (links) + +Footer with links to all per-category detail files. + +--- + +## Delta Tracking + +When `-PreviousInputPath` is supplied, the report compares the current run against a previous run to show what changed. The orchestrator (`Start-PrTriage.ps1`) automatically finds the most recent previous run folder. + +### How It Works + +1. **`Get-RunDelta`** compares current vs previous `categorized-prs.json` by PR number +2. **`Format-DeltaMarkdown`** generates four markdown sections from the delta + +### Delta Sections + +#### 📊 Changes Since Last Run + +Overview counts: previous total, current total (with delta), new PRs, closed/merged, category changes, unchanged, recurring action items. + +```markdown +| Metric | Count | +|--------|------:| +| Previous total | 119 | +| Current total | 112 (-7) | +| New PRs | 3 | +| Closed/merged | 10 | +| Category changed | 103 | +| Unchanged | 6 | +``` + +#### 🔀 Category Changes + +PRs that changed category between runs. 
Shows before→after with signal icons: + +```markdown +| PR | Author | Before | After | Signals | +|-----|--------|--------|-------|---------| +| [#45506](...) | @jiripolasek | 💬 in-active-review | ✅ approved-pending-merge | ✅1 approvals | +``` + +#### 🆕 New PRs Since Last Run + +PRs present in current but absent from previous: + +```markdown +| PR | Author | Category | Age | Signals | +|-----|--------|----------|----:|---------| +``` + +#### ✅ Closed/Merged Since Last Run + +PRs in previous but absent from current (merged or closed): + +```markdown +| PR | Author | Was | Age | +|-----|--------|-----|----:| +``` + +#### ⚠️ Recurring — Action Still Needed + +PRs stuck in the same actionable category across both runs. These are items where suggested actions from the previous triage have **not** been taken. + +Actionable categories: `review-concerns`, `build-failures`, `awaiting-author`, `stale-no-review`, `stale-with-feedback`, `direction-unclear`, `design-needed`, `needs-attention`. + +### Orchestrator Integration + +`Start-PrTriage.ps1` automatically finds the previous run: + +```powershell +$prevRun = Get-ChildItem $triageRoot -Directory | + Where-Object { $_.Name -match '^\d{4}-\d{2}-\d{2}$' -and $_.Name -lt $RunDate } | + Sort-Object Name -Descending | Select-Object -First 1 +``` + +It passes `-PreviousInputPath` pointing to the previous run's `categorized-prs.json`. + +--- + +## Category Report Template + +Each `categories/{name}.md` file contains: + +1. **Category header** with count +2. **PR table** with columns: PR link+title, author, age, signals (approvals, CI status, high-severity count, staleness) +3. 
**Per-PR detail blocks** (when `-IncludeDetailedReview` is set): AI review findings, discussion summary, dimension scores + +## Report Freshness + +- Reports are point-in-time snapshots +- Generation timestamp is shown prominently +- Delta sections highlight what changed since the last run +- Recommend running triage at least weekly + +## Done + +This is the final pipeline step. Open `summary.md` to review the triage results. diff --git a/.github/skills/pr-triage/scripts/Export-TriageReport.ps1 b/.github/skills/pr-triage/scripts/Export-TriageReport.ps1 new file mode 100644 index 000000000000..99b1f24bd980 --- /dev/null +++ b/.github/skills/pr-triage/scripts/Export-TriageReport.ps1 @@ -0,0 +1,548 @@ +<# +.SYNOPSIS + Generate triage reports (summary.md + per-category .md) from categorized PRs. + Business logic returns data; formatting functions convert to markdown. + +.PARAMETER InputPath + Path to categorized-prs.json. +.PARAMETER OutputDir + Directory for generated reports. +.PARAMETER Repository + GitHub repository (owner/repo). +.PARAMETER IncludeDetailedReview + Include dimension score tables and review findings per PR. +.PARAMETER PreviousInputPath + Path to previous run's categorized-prs.json for delta comparison. + When provided, summary.md includes sections showing what changed. 
function Write-LogHost {
    <#
    .SYNOPSIS
        Drop-in Write-Host shim that also appends every message to $LogPath.
    .DESCRIPTION
        Mirrors the Write-Host parameters this script uses. The rendered message
        is first appended to the log file with an ISO-8601 timestamp, then
        forwarded to the real Write-Host with whatever display options were
        explicitly supplied.
    .NOTES
        Relies on the script-scope $LogPath variable initialised at startup.
    #>
    [CmdletBinding()]
    param(
        [Parameter(Position = 0, ValueFromRemainingArguments = $true)]
        [object[]]$Object,
        [object]$ForegroundColor,
        [object]$BackgroundColor,
        [switch]$NoNewline,
        [Object]$Separator
    )

    # Honour -Separator the way Write-Host does; default to a single space.
    $sep = if ($PSBoundParameters.ContainsKey('Separator')) { [string]$Separator } else { ' ' }

    # @() guard: with no positional arguments the pipeline yields $null and
    # [string]::Join would throw ArgumentNullException.
    $message = [string]::Join($sep, @($Object | ForEach-Object { [string]$_ }))
    "[$(Get-Date -Format o)] $message" | Out-File -FilePath $LogPath -Encoding utf8 -Append

    # Only forward display options that were explicitly supplied and non-blank,
    # so Write-Host never receives an invalid or empty color value.
    $invokeParams = @{}
    if ($PSBoundParameters.ContainsKey('ForegroundColor') -and -not [string]::IsNullOrWhiteSpace([string]$ForegroundColor)) { $invokeParams.ForegroundColor = $ForegroundColor }
    if ($PSBoundParameters.ContainsKey('BackgroundColor') -and -not [string]::IsNullOrWhiteSpace([string]$BackgroundColor)) { $invokeParams.BackgroundColor = $BackgroundColor }
    if ($NoNewline) { $invokeParams.NoNewline = $true }

    # $message is already joined, so -Separator is not re-passed to Write-Host.
    Microsoft.PowerShell.Utility\Write-Host @invokeParams -Object $message
}
function Get-PriorityScore ([PSCustomObject]$Pr) {
    <# Weighted composite urgency score used to order PRs inside a category.
       Higher = more urgent. Inputs: age, staleness, approvals, failing CI,
       high-severity AI review findings, and overall diff size. #>

    # Continuous components: ~10 pts per month of age, ~15 pts per two weeks idle.
    $ageWeight   = ($Pr.AgeInDays / 30) * 10
    $staleWeight = ($Pr.DaysSinceActivity / 14) * 15
    $total = $ageWeight + $staleWeight

    # Flat bonuses for binary signals.
    if ($Pr.ApprovalCount -gt 0)        { $total += 20 }
    if ($Pr.ChecksStatus -eq 'FAILURE') { $total += 15 }
    if ($Pr.ReviewData -and $Pr.ReviewData.HighSeverity -gt 0) { $total += 25 }
    if (($Pr.Additions + $Pr.Deletions) -ge 500) { $total += 10 }

    return [Math]::Round($total, 1)
}
function Get-SummaryData ([PSCustomObject[]]$Prs) {
    <# Aggregates the statistics that drive summary.md: per-category counts,
       the "critical" shortlist, quick wins, and AI-vs-rules coverage. #>

    # Category breakdown, largest category first.
    $breakdown = @(
        $Prs | Group-Object Category | Sort-Object Count -Descending | ForEach-Object {
            $meta = Get-CategoryMeta $_.Name
            [PSCustomObject]@{
                Code  = $_.Name
                Name  = $meta.Name
                Emoji = $meta.Emoji
                Count = $_.Count
            }
        }
    )

    # Critical: ready-to-merge, build-failures, or review-concerns that carry
    # high-severity AI findings — capped at the ten most urgent.
    $critical = @($Prs |
        Where-Object {
            $_.Category -in 'ready-to-merge', 'build-failures' -or
            ($_.Category -eq 'review-concerns' -and $_.ReviewData -and $_.ReviewData.HighSeverity -gt 0)
        } |
        Sort-Object { Get-PriorityScore $_ } -Descending |
        Select-Object -First 10)

    # Quick wins: small-effort PRs that already have approvals or a clean review.
    $quickWins = @($Prs |
        Where-Object {
            $_.Effort -in 'trivial', 'minor' -and
            ($_.ApprovalCount -gt 0 -or $_.Tags -contains 'review-clean')
        } |
        Sort-Object AgeInDays |
        Select-Object -First 10)

    # How many PRs each categorization path handled.
    $aiTotal   = @($Prs | Where-Object { $_.CategorizationSource -like 'ai-*' }).Count
    $ruleTotal = @($Prs | Where-Object { $_.CategorizationSource -like 'rules*' }).Count

    return [PSCustomObject]@{
        TotalCount        = $Prs.Count
        CategoryBreakdown = $breakdown
        CriticalPrs       = $critical
        QuickWins         = $quickWins
        AiCategorized     = $aiTotal
        RuleCategorized   = $ruleTotal
    }
}
function Get-RunDelta ([PSCustomObject[]]$CurrentPrs, [PSCustomObject[]]$PreviousPrs) {
    <#
    Compares two triage runs by PR number. Returns an object with:
      NewPrs          - in current, not in previous (new or reopened)
      ClosedPrs       - in previous, not in current (merged or closed)
      CategoryChanges - same PR, different category (with before/after)
      Unchanged       - same PR, same category
      RecurringAction - unchanged AND still in an actionable category (2+ runs)
    #>
    # Index both runs by PR number for O(1) membership checks.
    $byNumPrev = @{}
    foreach ($p in $PreviousPrs) { $byNumPrev[[int]$p.Number] = $p }
    $byNumCur = @{}
    foreach ($p in $CurrentPrs) { $byNumCur[[int]$p.Number] = $p }

    $newPrs = @()
    $categoryChanges = @()
    $unchanged = @()

    foreach ($cur in $CurrentPrs) {
        $num = [int]$cur.Number
        $old = $byNumPrev[$num]
        if ($null -eq $old) {
            $newPrs += $cur
            continue
        }
        if ($old.Category -eq $cur.Category) {
            $unchanged += $cur
            continue
        }
        $categoryChanges += [PSCustomObject]@{
            Number      = $num
            Title       = $cur.Title
            Author      = $cur.Author
            Url         = $cur.Url
            OldCategory = $old.Category
            NewCategory = $cur.Category
            AgeInDays   = $cur.AgeInDays
            Signals     = $cur.Signals
        }
    }

    # PRs that disappeared since the previous run were merged or closed.
    $closedPrs = @($PreviousPrs | Where-Object { -not $byNumCur.ContainsKey([int]$_.Number) })

    # Categories where "still here, unchanged" means nobody took the suggested action.
    $actionableCategories = @(
        'review-concerns', 'build-failures', 'awaiting-author',
        'stale-no-review', 'stale-with-feedback', 'direction-unclear',
        'design-needed', 'needs-attention'
    )
    $recurringAction = @($unchanged | Where-Object { $_.Category -in $actionableCategories })

    return [PSCustomObject]@{
        NewPrs          = $newPrs
        ClosedPrs       = $closedPrs
        CategoryChanges = $categoryChanges
        Unchanged       = $unchanged
        RecurringAction = $recurringAction
        PreviousTotal   = $PreviousPrs.Count
        CurrentTotal    = $CurrentPrs.Count
    }
}
function Format-DeltaMarkdown ([PSCustomObject]$Delta, [string]$PreviousDate) {
    <# Generates delta sections for summary.md.
       Emits up to five sections from a Get-RunDelta result: the overview table
       (always), then Category Changes, New PRs, Closed/Merged, and Recurring —
       each only when it has rows. Returns one newline-joined markdown string. #>
    $lines = @()

    # ── Overview ──
    $lines += "## 📊 Changes Since Last Run ($PreviousDate)"
    $lines += ''
    $diffTotal = $Delta.CurrentTotal - $Delta.PreviousTotal
    # Prefix '+' only for non-negative deltas; negatives already carry '-'.
    $diffSign = if ($diffTotal -ge 0) { '+' } else { '' }
    $lines += "| Metric | Count |"
    $lines += "|--------|------:|"
    $lines += "| Previous total | $($Delta.PreviousTotal) |"
    $lines += "| Current total | $($Delta.CurrentTotal) ($diffSign$diffTotal) |"
    $lines += "| New PRs | $($Delta.NewPrs.Count) |"
    $lines += "| Closed/merged | $($Delta.ClosedPrs.Count) |"
    $lines += "| Category changed | $($Delta.CategoryChanges.Count) |"
    $lines += "| Unchanged | $($Delta.Unchanged.Count) |"
    $lines += ''

    # ── Category changes ──
    if ($Delta.CategoryChanges.Count -gt 0) {
        $lines += '## 🔀 Category Changes'
        $lines += ''
        $lines += '| PR | Author | Before | After | Signals |'
        $lines += '|-----|--------|--------|-------|---------|'
        foreach ($ch in $Delta.CategoryChanges) {
            # Before/after category emoji looked up independently.
            $oldMeta = Get-CategoryMeta $ch.OldCategory
            $newMeta = Get-CategoryMeta $ch.NewCategory
            $sigs = if ($ch.Signals) { $ch.Signals -join ', ' } else { '' }
            $lines += "| [#$($ch.Number)]($($ch.Url)) $($ch.Title) | @$($ch.Author) | $($oldMeta.Emoji) $($ch.OldCategory) | $($newMeta.Emoji) $($ch.NewCategory) | $sigs |"
        }
        $lines += ''
    }

    # ── New PRs ──
    if ($Delta.NewPrs.Count -gt 0) {
        $lines += '## 🆕 New PRs Since Last Run'
        $lines += ''
        $lines += '| PR | Author | Category | Age | Signals |'
        $lines += '|-----|--------|----------|----:|---------|'
        # Youngest first — new PRs are most interesting when freshest.
        foreach ($pr in $Delta.NewPrs | Sort-Object AgeInDays) {
            $meta = Get-CategoryMeta $pr.Category
            $sigs = if ($pr.Signals) { $pr.Signals -join ', ' } else { '' }
            $lines += "| [#$($pr.Number)]($($pr.Url)) $($pr.Title) | @$($pr.Author) | $($meta.Emoji) $($pr.Category) | $($pr.AgeInDays)d | $sigs |"
        }
        $lines += ''
    }

    # ── Closed/merged ──
    if ($Delta.ClosedPrs.Count -gt 0) {
        $lines += '## ✅ Closed/Merged Since Last Run'
        $lines += ''
        $lines += '| PR | Author | Was | Age |'
        $lines += '|-----|--------|-----|----:|'
        # Oldest first, table capped at 20 rows to keep the summary readable.
        foreach ($pr in $Delta.ClosedPrs | Sort-Object AgeInDays -Descending | Select-Object -First 20) {
            $meta = Get-CategoryMeta $pr.Category
            $lines += "| [#$($pr.Number)]($($pr.Url)) $($pr.Title) | @$($pr.Author) | $($meta.Emoji) $($pr.Category) | $($pr.AgeInDays)d |"
        }
        if ($Delta.ClosedPrs.Count -gt 20) {
            $lines += "| | | *...and $($Delta.ClosedPrs.Count - 20) more* | |"
        }
        $lines += ''
    }

    # ── Recurring action needed ──
    if ($Delta.RecurringAction.Count -gt 0) {
        $lines += '## ⚠️ Recurring — Action Still Needed'
        $lines += ''
        $lines += 'These PRs were in an actionable category last run and **still are**. Consider taking action.'
        $lines += ''
        $lines += '| PR | Author | Category | Age | Stale |'
        $lines += '|-----|--------|----------|----:|------:|'
        # Stalest first, capped at 20 rows like the closed table.
        foreach ($pr in $Delta.RecurringAction | Sort-Object DaysSinceActivity -Descending | Select-Object -First 20) {
            $meta = Get-CategoryMeta $pr.Category
            $lines += "| [#$($pr.Number)]($($pr.Url)) $($pr.Title) | @$($pr.Author) | $($meta.Emoji) $($pr.Category) | $($pr.AgeInDays)d | $($pr.DaysSinceActivity)d |"
        }
        if ($Delta.RecurringAction.Count -gt 20) {
            $lines += "| | | *...and $($Delta.RecurringAction.Count - 20) more* | | |"
        }
        $lines += ''
    }

    return $lines -join "`n"
}
function Format-SummaryMarkdown ([PSCustomObject]$Summary, [string]$Repository, [string]$DeltaMarkdown) {
    <# Generates the full summary.md content.
       $Summary comes from Get-SummaryData; $DeltaMarkdown is the pre-rendered
       Format-DeltaMarkdown output (may be null when no previous run exists). #>
    $lines = @()
    $lines += "# PR Triage Summary — $Repository"
    $lines += ''
    $lines += "**Generated:** $(Get-Date -Format 'yyyy-MM-dd HH:mm:ss')"
    $lines += "**Total open PRs:** $($Summary.TotalCount)"
    $lines += "**AI categorized:** $($Summary.AiCategorized) | **Rule-based:** $($Summary.RuleCategorized)"
    $lines += ''

    # Delta sections go right after the header — most actionable info first
    if ($DeltaMarkdown) {
        $lines += $DeltaMarkdown
    }

    # Category breakdown table
    $lines += '## Category Breakdown'
    $lines += ''
    $lines += '| Category | Count | |'
    $lines += '|----------|------:|---|'
    foreach ($cat in $Summary.CategoryBreakdown) {
        # One bar glyph per PR, capped at 40 so huge categories don't wrap.
        $bar = '█' * [Math]::Min($cat.Count, 40)
        $lines += "| $($cat.Emoji) [$($cat.Name)](categories/$($cat.Code).md) | $($cat.Count) | $bar |"
    }
    $lines += ''

    # Critical PRs
    if ($Summary.CriticalPrs.Count -gt 0) {
        $lines += '## 🚨 Critical — Needs Immediate Attention'
        $lines += ''
        $lines += '| PR | Author | Category | Age | Signals |'
        $lines += '|-----|--------|----------|----:|---------|'
        foreach ($pr in $Summary.CriticalPrs) {
            $sigs = ($pr.Signals -join ', ')
            $lines += "| [#$($pr.Number)]($($pr.Url)) $($pr.Title) | @$($pr.Author) | $($pr.Category) | $($pr.AgeInDays)d | $sigs |"
        }
        $lines += ''
    }

    # Quick wins
    if ($Summary.QuickWins.Count -gt 0) {
        $lines += '## ⚡ Quick Wins'
        $lines += ''
        $lines += '| PR | Author | Effort | Approvals |'
        $lines += '|-----|--------|--------|----------:|'
        foreach ($pr in $Summary.QuickWins) {
            $lines += "| [#$($pr.Number)]($($pr.Url)) $($pr.Title) | @$($pr.Author) | $($pr.EffortLabel) | $($pr.ApprovalCount) |"
        }
        $lines += ''
    }

    # Category links
    $lines += '## Category Reports'
    $lines += ''
    foreach ($cat in $Summary.CategoryBreakdown) {
        $lines += "- $($cat.Emoji) [$($cat.Name)](categories/$($cat.Code).md) ($($cat.Count) PRs)"
    }

    return $lines -join "`n"
}
function Format-DimensionTable ([PSCustomObject]$DimScores) {
    <# Renders the per-dimension AI scores as a markdown table with 10-cell
       bar charts. Accepts either a hashtable or a deserialized PSObject and
       returns '' when no scores are available. #>
    if (-not $DimScores) { return '' }

    # Fixed display order matching the 7 enrichment dimensions.
    $ordered = @('review_sentiment','author_responsiveness','code_health','merge_readiness','activity_level','direction_clarity','superseded')

    $rows = @(
        '| Dimension | Score | Confidence | |'
        '|-----------|------:|-----------:|---|'
    )

    foreach ($key in $ordered) {
        # JSON round-trips arrive as PSObjects; in-process data may be a hashtable.
        $entry = if ($DimScores -is [hashtable]) {
            $DimScores[$key]
        } elseif ($DimScores.PSObject.Properties[$key]) {
            $DimScores.$key
        } else {
            $null
        }

        # Skip dimensions that are missing or have no score at all.
        if (-not $entry -or $null -eq $entry.Score) { continue }

        $cells = [Math]::Round($entry.Score * 10)
        $chart = ('█' * $cells) + ('░' * (10 - $cells))
        $rows += "| $($key -replace '_', ' ') | $($entry.Score) | $($entry.Confidence) | $chart |"
    }

    return $rows -join "`n"
}
function Format-PrDetail ([PSCustomObject]$Pr, [switch]$IncludeReview) {
    <# Formats a single PR's detail section for a category report.
       Optional blocks (signals, tags, labels, AI summary, superseded link,
       dimension table, review findings) are emitted only when present;
       dimension/review blocks additionally require -IncludeReview. #>
    # NOTE: PowerShell variables are case-insensitive — $pr below is the $Pr parameter.
    $lines = @()
    $lines += "### [#$($pr.Number)]($($pr.Url)) $($pr.Title)"
    $lines += ''
    $lines += "**Author:** @$($pr.Author) | **Age:** $($pr.AgeInDays) days | **Last Activity:** $($pr.DaysSinceActivity) days ago"
    $lines += "**Size:** +$($pr.Additions)/-$($pr.Deletions) ($($pr.ChangedFiles) files) | **Effort:** $($pr.EffortLabel)"

    # Source badge — how this PR's category was decided (AI vs deterministic rules).
    $srcBadge = switch -Wildcard ($pr.CategorizationSource) {
        'ai-dimensions' { '🤖 AI (dimensions)' }
        'ai-suggested'  { '🤖 AI (suggested)' }
        'rules'         { '📏 Rules' }
        default         { $pr.CategorizationSource }
    }
    $lines += "**Categorized by:** $srcBadge (confidence: $($pr.Confidence))"

    if ($pr.Signals -and $pr.Signals.Count -gt 0) {
        $lines += "**Signals:** $($pr.Signals -join ' | ')"
    }
    if ($pr.Tags -and $pr.Tags.Count -gt 0) {
        $lines += "**Tags:** $($pr.Tags -join ', ')"
    }
    if ($pr.Labels -and $pr.Labels.Count -gt 0) {
        $lines += "**Labels:** $($pr.Labels -join ', ')"
    }

    # Discussion summary from AI
    if ($pr.DiscussionSummary) {
        $lines += ''
        $lines += "> 💬 $($pr.DiscussionSummary)"
    }

    # Superseded by
    if ($pr.SupersededBy) {
        $lines += ''
        $lines += "> 🔄 **Superseded by:** $($pr.SupersededBy)"
    }

    # Dimension scores
    if ($IncludeReview -and $pr.DimensionScores) {
        $lines += ''
        $lines += Format-DimensionTable $pr.DimensionScores
    }

    # Review findings
    if ($IncludeReview -and $pr.ReviewData -and $pr.ReviewData.HasReview) {
        $rd = $pr.ReviewData
        $lines += ''
        $lines += "**AI Review:** $($rd.TotalFindings) findings ($($rd.HighSeverity)H/$($rd.MedSeverity)M/$($rd.LowSeverity)L)"
        if ($rd.FindingSummaries -and $rd.FindingSummaries.Count -gt 0) {
            foreach ($f in $rd.FindingSummaries) { $lines += "  - $f" }
        }
    }

    $lines += ''
    $lines += '---'
    return $lines -join "`n"
}
#> + $meta = Get-CategoryMeta $CategoryCode + + $lines = @() + $lines += "# $($meta.Emoji) $($meta.Name)" + $lines += '' + $lines += "[← Back to Summary](../summary.md)" + $lines += '' + $lines += "**$($Prs.Count) PRs** in this category" + $lines += '' + + # Quick table + $lines += '| PR | Author | Age | Signals |' + $lines += '|-----|--------|----:|---------|' + foreach ($pr in $Prs) { + $sigs = if ($pr.Signals) { $pr.Signals -join ', ' } else { '' } + $lines += "| [#$($pr.Number)]($($pr.Url)) $($pr.Title) | @$($pr.Author) | $($pr.AgeInDays)d | $sigs |" + } + $lines += '' + $lines += '---' + $lines += '' + + # Detailed entries + foreach ($pr in $Prs) { + $lines += Format-PrDetail -Pr $pr -IncludeReview:$IncludeReview + $lines += '' + } + + return $lines -join "`n" +} + +# ═════════════════════════════════════════════════════════════════════════ +# Main — load data, compute, write files +# ═════════════════════════════════════════════════════════════════════════ + +$inputData = Get-Content $InputPath -Raw | ConvertFrom-Json +$prs = if ($inputData.Prs) { $inputData.Prs } else { $inputData } + +if ($prs.Count -eq 0) { + Write-LogHost 'No PRs to report on.' 
-ForegroundColor Yellow + return +} + +$catDir = Join-Path $OutputDir 'categories' +if (-not (Test-Path $catDir)) { New-Item -ItemType Directory -Path $catDir -Force | Out-Null } + +# Load previous run for delta comparison +$deltaMarkdown = $null +if ($PreviousInputPath -and (Test-Path $PreviousInputPath)) { + $prevData = Get-Content $PreviousInputPath -Raw | ConvertFrom-Json + $prevPrs = if ($prevData.Prs) { $prevData.Prs } else { $prevData } + $prevDate = if ($PreviousInputPath -match '(\d{4}-\d{2}-\d{2})') { + $Matches[1] + } elseif ($prevData.CategorizedAt) { + ([DateTime]::Parse($prevData.CategorizedAt)).ToString('yyyy-MM-dd') + } else { 'previous run' } + + $delta = Get-RunDelta -CurrentPrs $prs -PreviousPrs $prevPrs + $deltaMarkdown = Format-DeltaMarkdown -Delta $delta -PreviousDate $prevDate + + Write-LogHost " Delta: $($delta.NewPrs.Count) new, $($delta.ClosedPrs.Count) closed, $($delta.CategoryChanges.Count) changed, $($delta.RecurringAction.Count) recurring" -ForegroundColor Cyan +} + +# Compute summary data +$summary = Get-SummaryData -Prs $prs + +# Write summary.md +$summaryMd = Format-SummaryMarkdown -Summary $summary -Repository $Repository -DeltaMarkdown $deltaMarkdown +$summaryMd | Set-Content (Join-Path $OutputDir 'summary.md') -Encoding UTF8 +Write-LogHost " summary.md ($($summary.TotalCount) PRs, $($summary.CategoryBreakdown.Count) categories)" -ForegroundColor Green + +# Write per-category reports +foreach ($cat in $summary.CategoryBreakdown) { + $catPrs = Get-CategoryPrs -AllPrs $prs -CategoryCode $cat.Code + if ($catPrs.Count -eq 0) { continue } + + $report = Format-CategoryReport -CategoryCode $cat.Code -Prs $catPrs -Repository $Repository -IncludeReview:$IncludeDetailedReview + $report | Set-Content (Join-Path $catDir "$($cat.Code).md") -Encoding UTF8 + Write-LogHost " categories/$($cat.Code).md ($($catPrs.Count) PRs)" -ForegroundColor Green +} + +Write-LogHost "Reports generated: $OutputDir" -ForegroundColor Cyan diff --git 
a/.github/skills/pr-triage/scripts/Get-OpenPrs.ps1 b/.github/skills/pr-triage/scripts/Get-OpenPrs.ps1
new file mode 100644
index 000000000000..3ba5c7472e8e
--- /dev/null
+++ b/.github/skills/pr-triage/scripts/Get-OpenPrs.ps1
@@ -0,0 +1,215 @@
+<#
+.SYNOPSIS
+    Fetches all open pull requests from a GitHub repository with core metadata.
+
+.DESCRIPTION
+    Queries GitHub for all open PRs and returns structured data suitable for triage.
+    Computes derived fields like age, staleness, and size category.
+
+.PARAMETER Repository
+    GitHub repository in owner/repo format. Default: microsoft/PowerToys
+
+.PARAMETER PRNumbers
+    Specific PR numbers to fetch. If provided, list/query filters are skipped.
+
+.PARAMETER Limit
+    Maximum number of PRs to fetch. Default: 500
+
+.PARAMETER ExcludeDrafts
+    If set, excludes draft PRs from results.
+
+.PARAMETER MinAgeDays
+    Only include PRs older than this many days. Default: 0 (all)
+
+.PARAMETER Labels
+    Filter by label(s). Multiple labels use AND logic.
+
+.PARAMETER OutputPath
+    Path to save JSON output. If not specified, outputs to pipeline.
+
+.EXAMPLE
+    .\Get-OpenPrs.ps1 -Repository "microsoft/PowerToys" -MinAgeDays 30
+    Fetches all open PRs older than 30 days.
+
+.EXAMPLE
+    .\Get-OpenPrs.ps1 -ExcludeDrafts -OutputPath ".\all-prs.json"
+    Fetches non-draft PRs and saves to JSON file.
+
+.NOTES
+    Requires: gh CLI authenticated with repo access.
+#>
+[CmdletBinding()]
+param(
+    [string]$Repository = "microsoft/PowerToys",
+    [int[]]$PRNumbers,
+    [int]$Limit = 500,
+    [switch]$ExcludeDrafts,
+    [int]$MinAgeDays = 0,
+    [string[]]$Labels,
+    [string]$OutputPath,
+    [string]$LogPath
+)
+
+$ErrorActionPreference = "Stop"
+
+# Default the log next to the current working directory when not supplied.
+if ([string]::IsNullOrWhiteSpace($LogPath)) {
+    $LogPath = Join-Path (Get-Location) 'Get-OpenPrs.log'
+}
+
+$logDir = Split-Path -Parent $LogPath
+if (-not [string]::IsNullOrWhiteSpace($logDir) -and -not (Test-Path $logDir)) {
+    New-Item -ItemType Directory -Path $logDir -Force | Out-Null
+}
+
+"[$(Get-Date -Format o)] Starting Get-OpenPrs" | Out-File -FilePath $LogPath -Encoding utf8 -Append
+
+# Tees every console message into $LogPath, then forwards to the real
+# Write-Host (module-qualified so this shadowing function doesn't recurse).
+# Color/NoNewline/Separator arguments are passed through only when bound.
+function Write-LogHost {
+    [CmdletBinding()]
+    param(
+        [Parameter(Position = 0, ValueFromRemainingArguments = $true)]
+        [object[]]$Object,
+        [object]$ForegroundColor,
+        [object]$BackgroundColor,
+        [switch]$NoNewline,
+        [Object]$Separator
+    )
+
+    $message = [string]::Join(' ', ($Object | ForEach-Object { [string]$_ }))
+    "[$(Get-Date -Format o)] $message" | Out-File -FilePath $LogPath -Encoding utf8 -Append
+
+    $invokeParams = @{}
+    if ($PSBoundParameters.ContainsKey('ForegroundColor') -and -not [string]::IsNullOrWhiteSpace([string]$ForegroundColor)) { $invokeParams.ForegroundColor = $ForegroundColor }
+    if ($PSBoundParameters.ContainsKey('BackgroundColor') -and -not [string]::IsNullOrWhiteSpace([string]$BackgroundColor)) { $invokeParams.BackgroundColor = $BackgroundColor }
+    if ($NoNewline) { $invokeParams.NoNewline = $true }
+    if ($PSBoundParameters.ContainsKey('Separator')) { $invokeParams.Separator = $Separator }
+
+    Microsoft.PowerShell.Utility\Write-Host @invokeParams -Object $message
+}
+
+function Write-Info($msg) { Write-LogHost $msg -ForegroundColor Cyan }
+function Write-Warn($msg) { Write-LogHost $msg -ForegroundColor Yellow }
+
+# Build gh query: explicit PR numbers take precedence over list filters.
+if ($PRNumbers -and $PRNumbers.Count -gt 0) {
+    Write-Info "Fetching selected PRs from ${Repository}: $($PRNumbers -join ', ')"
+    $rawPrs = @()
+
    foreach ($n in ($PRNumbers | Sort-Object -Unique)) {
+        try {
+            $pr = gh pr view $n --repo $Repository --json number,title,author,createdAt,updatedAt,headRefName,baseRefName,labels,assignees,reviewRequests,isDraft,mergeable,url,additions,deletions,changedFiles,state | ConvertFrom-Json
+            # Only open PRs are useful for triage; closed/merged are skipped.
+            if ($pr -and $pr.state -eq 'OPEN') {
+                $rawPrs += $pr
+            } else {
+                Write-Warn "PR #$n is not OPEN (skipped)."
+            }
+        } catch {
+            # Best-effort: a bad/missing number should not abort the whole fetch.
+            Write-Warn "Failed to fetch PR #$n (skipped): $($_.Exception.Message)"
+        }
+    }
+} else {
+    $ghArgs = @(
+        "pr", "list",
+        "--repo", $Repository,
+        "--state", "open",
+        "--limit", $Limit,
+        "--json", "number,title,author,createdAt,updatedAt,headRefName,baseRefName,labels,assignees,reviewRequests,isDraft,mergeable,url,additions,deletions,changedFiles"
+    )
+
+    # Multiple --label flags give AND semantics in gh.
+    if ($Labels) {
+        foreach ($label in $Labels) {
+            $ghArgs += "--label"
+            $ghArgs += $label
+        }
+    }
+
+    Write-Info "Fetching open PRs from $Repository..."
+    $rawPrs = & gh @ghArgs | ConvertFrom-Json
+}
+
+if (-not $rawPrs) {
+    Write-Warn "No PRs found."
+    return
+}
+
+Write-Info "Found $($rawPrs.Count) open PRs. Processing..."
+
+$now = Get-Date
+
+# Process each PR: compute age/staleness/size and flatten nested gh objects.
+$processedPrs = $rawPrs | ForEach-Object {
+    $pr = $_
+    $createdAt = [DateTime]::Parse($pr.createdAt)
+    $updatedAt = [DateTime]::Parse($pr.updatedAt)
+    $ageInDays = [Math]::Floor(($now - $createdAt).TotalDays)
+    $daysSinceUpdate = [Math]::Floor(($now - $updatedAt).TotalDays)
+    $linesChanged = $pr.additions + $pr.deletions
+
+    # Size category (T-shirt sizing by total lines changed)
+    $sizeCategory = switch ($true) {
+        ($linesChanged -lt 10) { "XS" }
+        ($linesChanged -lt 50) { "S" }
+        ($linesChanged -lt 200) { "M" }
+        ($linesChanged -lt 500) { "L" }
+        default { "XL" }
+    }
+
+    # Extract label names
+    $labelNames = $pr.labels | ForEach-Object { $_.name }
+
+    # Author login
+    $authorLogin = $pr.author.login
+
+    [PSCustomObject]@{
+        Number = $pr.number
+        Title = $pr.title
+        Author = $authorLogin
+        Url = $pr.url
+        CreatedAt = $pr.createdAt
+        UpdatedAt = $pr.updatedAt
+        AgeInDays = $ageInDays
+        DaysSinceUpdate = $daysSinceUpdate
+        BaseRefName = $pr.baseRefName
+        HeadRefName = $pr.headRefName
+        Labels = $labelNames
+        Assignees = ($pr.assignees | ForEach-Object { $_.login })
+        ReviewRequests = ($pr.reviewRequests | ForEach-Object { $_.login })
+        IsDraft = $pr.isDraft
+        Mergeable = $pr.mergeable
+        Additions = $pr.additions
+        Deletions = $pr.deletions
+        ChangedFiles = $pr.changedFiles
+        LinesChanged = $linesChanged
+        SizeCategory = $sizeCategory
+    }
+}
+
+# Apply filters
+if ($ExcludeDrafts) {
+    $processedPrs = $processedPrs | Where-Object { -not $_.IsDraft }
+}
+
+if ($MinAgeDays -gt 0) {
+    $processedPrs = $processedPrs | Where-Object { $_.AgeInDays -ge $MinAgeDays }
+}
+
+# Build output object (filters are echoed back for reproducibility)
+$output = [PSCustomObject]@{
+    CollectedAt = $now.ToString("o")
+    Repository = $Repository
+    TotalCount = $processedPrs.Count
+    Filters = @{
+        ExcludeDrafts = $ExcludeDrafts.IsPresent
+        MinAgeDays = $MinAgeDays
+        Labels = $Labels
+    }
+    Prs = @($processedPrs)
+}
+
+Write-Info "Processed $($processedPrs.Count) PRs after filtering."
+
+# Output
+if ($OutputPath) {
+    $output | ConvertTo-Json -Depth 10 | Set-Content -Path $OutputPath -Encoding UTF8
+    Write-Info "Saved to $OutputPath"
+} else {
+    $output | ConvertTo-Json -Depth 10
+}
diff --git a/.github/skills/pr-triage/scripts/Get-PrDetails.ps1 b/.github/skills/pr-triage/scripts/Get-PrDetails.ps1
new file mode 100644
index 000000000000..e65add99d2b0
--- /dev/null
+++ b/.github/skills/pr-triage/scripts/Get-PrDetails.ps1
@@ -0,0 +1,200 @@
+<#
+.SYNOPSIS
+    Enriches a PR with detailed metadata including reviews, comments, and CI status.
+
+.DESCRIPTION
+    Fetches additional data for a single PR to enable accurate categorization:
+    - Review history and states
+    - Comment activity
+    - CI/check status
+    - Commit history
+    - Linked issue details
+
+.PARAMETER PullRequestNumber
+    The PR number to enrich.
+
+.PARAMETER Repository
+    GitHub repository in owner/repo format. Default: microsoft/PowerToys
+
+.PARAMETER OutputPath
+    Path to save JSON output. If not specified, outputs to pipeline.
+
+.EXAMPLE
+    .\Get-PrDetails.ps1 -PullRequestNumber 12345
+    Returns enriched PR data as JSON.
+
+.EXAMPLE
+    .\Get-PrDetails.ps1 -PullRequestNumber 12345 -OutputPath ".\pr-12345-details.json"
+    Saves enriched PR data to file.
+
+.NOTES
+    Requires: gh CLI authenticated with repo access.
+#>
+[CmdletBinding()]
+param(
+    [Parameter(Mandatory = $true)]
+    [int]$PullRequestNumber,
+
+    [string]$Repository = "microsoft/PowerToys",
+
+    [string]$OutputPath
+)
+
+$ErrorActionPreference = "Stop"
+
+function Write-Info($msg) { Write-Host $msg -ForegroundColor Cyan }
+function Write-Warn($msg) { Write-Host $msg -ForegroundColor Yellow }
+
+$owner, $repo = $Repository -split "/"
+$now = Get-Date
+
+Write-Info "Fetching details for PR #$PullRequestNumber..."
+
+# 1.
Base PR data
+$jsonFields = @(
+    'number','title','author','createdAt','updatedAt',
+    'headRefName','baseRefName','headRefOid',
+    'labels','assignees','reviewRequests',
+    'isDraft','mergeable','url',
+    'additions','deletions','changedFiles',
+    'closingIssuesReferences','files'
+) -join ','
+$prData = gh pr view $PullRequestNumber --repo $Repository --json $jsonFields | ConvertFrom-Json
+
+# 2. Reviews (REST; one entry per submitted review)
+Write-Info " Fetching reviews..."
+$reviews = gh api "repos/$owner/$repo/pulls/$PullRequestNumber/reviews" 2>$null | ConvertFrom-Json
+$reviewData = $reviews | ForEach-Object {
+    [PSCustomObject]@{
+        Id = $_.id
+        User = $_.user.login
+        State = $_.state
+        SubmittedAt = $_.submitted_at
+        BodyLength = if ($_.body) { $_.body.Length } else { 0 }
+    }
+}
+
+# 3. Comments (issue comments)
+Write-Info " Fetching comments..."
+$comments = gh api "repos/$owner/$repo/issues/$PullRequestNumber/comments" 2>$null | ConvertFrom-Json
+$commentData = $comments | ForEach-Object {
+    [PSCustomObject]@{
+        Id = $_.id
+        User = $_.user.login
+        CreatedAt = $_.created_at
+        BodyLength = if ($_.body) { $_.body.Length } else { 0 }
+    }
+}
+
+# 4. Review comments (inline)
+$reviewCommentCount = (gh api "repos/$owner/$repo/pulls/$PullRequestNumber/comments" 2>$null | ConvertFrom-Json).Count
+
+# 5. CI Status
+Write-Info " Fetching CI status..."
+# FIX: 'conclusion' is not an exported field of `gh pr checks --json`
+# (available fields: bucket, completedAt, description, event, link, name,
+# startedAt, state, workflow), so the previous query failed / $_.conclusion
+# was always empty and no check ever counted as failing. Use 'bucket'
+# (pass|fail|pending|skipping|cancel) instead; the downstream property name
+# 'Conclusion' is kept so consumers of the JSON are unaffected.
+$checksRaw = gh pr checks $PullRequestNumber --repo $Repository --json name,state,bucket 2>$null
+$checks = if ($checksRaw) { $checksRaw | ConvertFrom-Json } else { @() }
+
+$checksDetail = $checks | ForEach-Object {
+    [PSCustomObject]@{
+        Name = $_.name
+        State = $_.state
+        Conclusion = $_.bucket   # normalized verdict, kept under legacy name
+    }
+}
+
+$failingChecks = $checks | Where-Object { $_.bucket -eq "fail" } | ForEach-Object { $_.name }
+$checksStatus = if ($checks.Count -eq 0) {
+    "NONE"
+} elseif ($failingChecks.Count -gt 0) {
+    "FAILING"
+} elseif ($checks | Where-Object { $_.bucket -eq "pending" }) {
+    "PENDING"
+} else {
+    "PASSING"
+}
+
+# 6.
Commits
+Write-Info " Fetching commits..."
+$commits = gh api "repos/$owner/$repo/pulls/$PullRequestNumber/commits" 2>$null | ConvertFrom-Json
+$commitData = $commits | ForEach-Object {
+    [PSCustomObject]@{
+        Sha = $_.sha.Substring(0, 7)
+        # First line of the commit message only
+        Message = ($_.commit.message -split "`n")[0]
+        Author = $_.commit.author.name
+        Date = $_.commit.author.date
+    }
+}
+
+# Calculate derived fields (timestamps are ISO-8601 strings; string sort works)
+$authorLogin = $prData.author.login
+$lastReviewAt = ($reviewData | Sort-Object SubmittedAt -Descending | Select-Object -First 1).SubmittedAt
+$lastCommentAt = ($commentData | Sort-Object CreatedAt -Descending | Select-Object -First 1).CreatedAt
+$lastCommentBy = ($commentData | Sort-Object CreatedAt -Descending | Select-Object -First 1).User
+$lastCommitAt = ($commitData | Sort-Object Date -Descending | Select-Object -First 1).Date
+
+# Author's last activity (commit or comment)
+$authorComments = $commentData | Where-Object { $_.User -eq $authorLogin }
+$authorLastComment = ($authorComments | Sort-Object CreatedAt -Descending | Select-Object -First 1).CreatedAt
+$authorLastCommit = $lastCommitAt # Assuming author made commits — TODO confirm; co-contributor commits are attributed to the author here
+$authorLastActivityAt = @($authorLastComment, $authorLastCommit) |
+    Where-Object { $_ } |
+    Sort-Object -Descending |
+    Select-Object -First 1
+
+# Review counts by state
+$approvalCount = ($reviewData | Where-Object { $_.State -eq "APPROVED" }).Count
+$changesRequestedCount = ($reviewData | Where-Object { $_.State -eq "CHANGES_REQUESTED" }).Count
+
+# Build enrichment object
+$enrichment = [PSCustomObject]@{
+    Reviews = @($reviewData)
+    LastReviewAt = $lastReviewAt
+    ApprovalCount = $approvalCount
+    ChangesRequestedCount = $changesRequestedCount
+    CommentCount = $commentData.Count
+    ReviewCommentCount = $reviewCommentCount
+    LastCommentAt = $lastCommentAt
+    LastCommentBy = $lastCommentBy
+    AuthorLastActivityAt = $authorLastActivityAt
+    CommitCount = $commitData.Count
+    LastCommitAt = $lastCommitAt
+    Commits = @($commitData)
+    ChecksStatus = $checksStatus
+    ChecksDetail = @($checksDetail)
+    FailingChecks = @($failingChecks)
+}
+
+# Build output: flat PR metadata plus the nested Enrichment payload
+$output = [PSCustomObject]@{
+    Number = $prData.number
+    Title = $prData.title
+    Author = $authorLogin
+    Url = $prData.url
+    CreatedAt = $prData.createdAt
+    UpdatedAt = $prData.updatedAt
+    HeadRefOid = $prData.headRefOid
+    BaseRefName = $prData.baseRefName
+    HeadRefName = $prData.headRefName
+    Labels = ($prData.labels | ForEach-Object { $_.name })
+    Assignees = ($prData.assignees | ForEach-Object { $_.login })
+    ReviewRequests = ($prData.reviewRequests | ForEach-Object { $_.login })
+    IsDraft = $prData.isDraft
+    Mergeable = $prData.mergeable
+    Additions = $prData.additions
+    Deletions = $prData.deletions
+    ChangedFiles = $prData.changedFiles
+    Files = ($prData.files | ForEach-Object { $_.path })
+    LinkedIssues = ($prData.closingIssuesReferences | ForEach-Object { $_.number })
+    Enrichment = $enrichment
+    EnrichedAt = $now.ToString("o")
+}
+
+Write-Info "Enrichment complete for PR #$PullRequestNumber"
+
+# Output
+if ($OutputPath) {
+    $output | ConvertTo-Json -Depth 10 | Set-Content -Path $OutputPath -Encoding UTF8
+    Write-Info "Saved to $OutputPath"
+} else {
+    $output | ConvertTo-Json -Depth 10
+}
diff --git a/.github/skills/pr-triage/scripts/Get-ReviewerSuggestions.ps1 b/.github/skills/pr-triage/scripts/Get-ReviewerSuggestions.ps1
new file mode 100644
index 000000000000..f5a767f00168
--- /dev/null
+++ b/.github/skills/pr-triage/scripts/Get-ReviewerSuggestions.ps1
@@ -0,0 +1,210 @@
+<#
+.SYNOPSIS
+    Suggests reviewers for a PR based on file ownership and history.
+
+.DESCRIPTION
+    Analyzes changed files in a PR and suggests appropriate reviewers using:
+    1. CODEOWNERS file matches
+    2. Recent reviewers of similar PRs (by area label)
+    3. Recent committers to the changed files
+    4. Fallback to team defaults
+
+.PARAMETER PullRequestNumber
+    The PR number to analyze.
+
+.PARAMETER Repository
+    GitHub repository in owner/repo format.
    Default: microsoft/PowerToys
+
+.PARAMETER ChangedFiles
+    Array of file paths changed in the PR. If not provided, fetches from GitHub.
+
+.PARAMETER CacheDir
+    Directory for caching reviewer history. Default: __cache
+
+.PARAMETER MaxSuggestions
+    Maximum number of reviewers to suggest. Default: 5
+
+.EXAMPLE
+    .\Get-ReviewerSuggestions.ps1 -PullRequestNumber 12345
+    Suggests reviewers for PR #12345.
+
+.EXAMPLE
+    .\Get-ReviewerSuggestions.ps1 -PullRequestNumber 12345 -ChangedFiles @("src/modules/FancyZones/file.cpp")
+    Suggests reviewers based on provided file list.
+
+.NOTES
+    Requires: gh CLI authenticated with repo access.
+#>
+[CmdletBinding()]
+param(
+    [Parameter(Mandatory = $true)]
+    [int]$PullRequestNumber,
+
+    [string]$Repository = "microsoft/PowerToys",
+
+    [string[]]$ChangedFiles,
+
+    # NOTE(review): $CacheDir is never referenced in the visible body of this
+    # script — TODO wire up the reviewer-history cache or remove the parameter.
+    [string]$CacheDir = "__cache",
+
+    [int]$MaxSuggestions = 5,
+
+    [string]$PrAuthor
+)
+
+$ErrorActionPreference = "Stop"
+
+function Write-Info($msg) { Write-Host $msg -ForegroundColor Cyan }
+
+$owner, $repo = $Repository -split "/"
+
+# Get PR author and changed files if not provided (single gh call for both)
+if (-not $PrAuthor -or -not $ChangedFiles) {
+    Write-Info "Fetching PR details..."
+    $prData = gh pr view $PullRequestNumber --repo $Repository --json author,files | ConvertFrom-Json
+    if (-not $PrAuthor) { $PrAuthor = $prData.author.login }
+    if (-not $ChangedFiles) { $ChangedFiles = $prData.files | ForEach-Object { $_.path } }
+}
+
+$suggestions = @()
+
+# 1. Check CODEOWNERS
+Write-Info "Checking CODEOWNERS..."
+$codeownersContent = $null
+try {
+    # Contents API returns base64; FromBase64String tolerates the embedded newlines.
+    $codeownersContent = gh api "repos/$owner/$repo/contents/.github/CODEOWNERS" --jq ".content" 2>$null
+    if ($codeownersContent) {
+        $codeownersContent = [System.Text.Encoding]::UTF8.GetString([Convert]::FromBase64String($codeownersContent))
+    }
+} catch {
+    # CODEOWNERS may not exist
+}
+
+if ($codeownersContent) {
+    $codeownersLines = $codeownersContent -split "`n" | Where-Object { $_ -and $_ -notmatch "^\s*#" }
+
+    foreach ($file in $ChangedFiles) {
+        foreach ($line in $codeownersLines) {
+            $parts = $line -split "\s+"
+            if ($parts.Count -ge 2) {
+                $pattern = $parts[0]
+                $owners = $parts[1..($parts.Count - 1)] | ForEach-Object { $_ -replace "^@", "" }
+
+                # Simple pattern matching (not full glob support).
+                # FIX: the previous chain (-replace '\*\*','.*' -replace '\*','[^/]*')
+                # re-replaced the '*' inside its own '.*' output, turning 'docs/**'
+                # into 'docs/.[^/]*', and left '.' unescaped so '.cpp' matched any
+                # character. Escape the pattern first, then expand the (now
+                # escaped) glob tokens — '.*' contains no backslash, so the second
+                # replace cannot corrupt the first one's output.
+                $regexPattern = [regex]::Escape($pattern) -replace '\\\*\\\*', '.*' -replace '\\\*', '[^/]*'
+                if ($file -match $regexPattern) {
+                    foreach ($ownerEntry in $owners) {
+                        # Never suggest the PR author as their own reviewer.
+                        if ($ownerEntry -ne $PrAuthor) {
+                            $suggestions += [PSCustomObject]@{
+                                User = $ownerEntry
+                                Reason = "CODEOWNERS for $pattern"
+                                Confidence = "High"
+                                Source = "CODEOWNERS"
+                            }
+                        }
+                    }
+                }
+            }
+        }
+    }
+}
+
+# 2. Find area labels and recent reviewers
+Write-Info "Finding recent reviewers by area..."
+$prLabels = gh pr view $PullRequestNumber --repo $Repository --json labels --jq ".labels[].name" 2>$null
+$areaLabels = $prLabels | Where-Object { $_ -match "^Area-" }
+
+foreach ($areaLabel in $areaLabels) {
+    # Get recent PRs with this label that have been reviewed
+    # NOTE(review): assumes 'reviews' is a supported --json field of `gh pr list`
+    # — verify against the installed gh version; if unsupported the call errors
+    # to stderr (suppressed) and this section yields no suggestions.
+    $recentPrs = gh pr list --repo $Repository --state merged --label $areaLabel --limit 10 --json number,reviews 2>$null | ConvertFrom-Json
+
+    # Tally review counts per reviewer, excluding the PR author.
+    $recentReviewers = @{}
+    foreach ($rpr in $recentPrs) {
+        foreach ($review in $rpr.reviews) {
+            if ($review.author.login -and $review.author.login -ne $PrAuthor) {
+                $reviewer = $review.author.login
+                if (-not $recentReviewers[$reviewer]) {
+                    $recentReviewers[$reviewer] = 0
+                }
+                $recentReviewers[$reviewer]++
+            }
+        }
+    }
+
+    $topReviewers = $recentReviewers.GetEnumerator() | Sort-Object Value -Descending | Select-Object -First 3
+    foreach ($entry in $topReviewers) {
+        $suggestions += [PSCustomObject]@{
+            User = $entry.Key
+            Reason = "Recently reviewed $($entry.Value) PRs with label $areaLabel"
+            Confidence = "Medium"
+            Source = "RecentReviewer"
+        }
+    }
+}
+
+# 3. Git blame for changed files (recent committers)
+Write-Info "Finding recent committers to changed files..."
+$topFiles = $ChangedFiles | Select-Object -First 5 # Limit to avoid too many API calls
+
+foreach ($file in $topFiles) {
+    try {
+        $commits = gh api "repos/$owner/$repo/commits?path=$([Uri]::EscapeDataString($file))&per_page=10" 2>$null | ConvertFrom-Json
+        $committers = @{}
+        foreach ($commit in $commits) {
+            # commit.author can be null for commits without a linked GitHub account.
+            if ($commit.author -and $commit.author.login -and $commit.author.login -ne $PrAuthor) {
+                $committer = $commit.author.login
+                if (-not $committers[$committer]) {
+                    $committers[$committer] = 0
+                }
+                $committers[$committer]++
+            }
+        }
+
+        $topCommitter = $committers.GetEnumerator() | Sort-Object Value -Descending | Select-Object -First 1
+        if ($topCommitter) {
+            $suggestions += [PSCustomObject]@{
+                User = $topCommitter.Key
+                Reason = "Frequent committer to $file"
+                Confidence = "Medium"
+                Source = "GitHistory"
+            }
+        }
+    } catch {
+        # File may be new or API error
+    }
+}
+
+# Deduplicate and rank: keep one entry per user, preferring higher confidence.
+$uniqueSuggestions = @{}
+foreach ($s in $suggestions) {
+    if (-not $uniqueSuggestions[$s.User]) {
+        $uniqueSuggestions[$s.User] = $s
+    } else {
+        # Keep the higher confidence one
+        $existing = $uniqueSuggestions[$s.User]
+        $confOrder = @{ "High" = 3; "Medium" = 2; "Low" = 1 }
+        if ($confOrder[$s.Confidence] -gt $confOrder[$existing.Confidence]) {
+            $uniqueSuggestions[$s.User] = $s
+        }
+    }
+}
+
+# Sort by confidence and return top N
+$confOrder = @{ "High" = 3; "Medium" = 2; "Low" = 1 }
+$finalSuggestions = $uniqueSuggestions.Values |
+    Sort-Object { $confOrder[$_.Confidence] } -Descending |
+    Select-Object -First $MaxSuggestions
+
+# Output (JSON to the pipeline; human-readable echo to the host)
+$output = [PSCustomObject]@{
+    PullRequestNumber = $PullRequestNumber
+    Author = $PrAuthor
+    ChangedFilesCount = $ChangedFiles.Count
+    Suggestions = @($finalSuggestions)
+}
+
+$output | ConvertTo-Json -Depth 5
+
+Write-Info "`nTop suggestions:"
+foreach ($s in $finalSuggestions) {
+    Write-Info " @$($s.User) - $($s.Reason) [$($s.Confidence)]"
+}
diff --git a/.github/skills/pr-triage/scripts/Get-TriageProgress.ps1
b/.github/skills/pr-triage/scripts/Get-TriageProgress.ps1
new file mode 100644
index 000000000000..156f055f456a
--- /dev/null
+++ b/.github/skills/pr-triage/scripts/Get-TriageProgress.ps1
@@ -0,0 +1,273 @@
+<#
+.SYNOPSIS
+    Inspect a pr-triage run and report progress without modifying anything.
+
+.DESCRIPTION
+    Reads the result files on disk for a given run date and reports:
+    - Which tasks (PRs) have been started / completed / failed / timed out
+    - Per-PR enrichment progress and log file status
+    - Aggregate counts and overall pipeline status
+    - Heartbeat and log-file liveness for running tasks
+
+    This script is safe to call at any time — it only reads, never writes.
+    Other skills or humans can call this to decide whether to wait, resume, or kill.
+
+.PARAMETER RunDate
+    Date folder to inspect (YYYY-MM-DD). Default: today.
+
+.PARAMETER RunRoot
+    Override the run root directory. Default: Generated Files/pr-triage/<RunDate>
+
+.PARAMETER Detailed
+    Show per-step status for every task.
+
+.PARAMETER AsJson
+    Output machine-readable JSON instead of human-readable text.
+
+.EXAMPLE
+    .\Get-TriageProgress.ps1
+    Shows progress for today's run.
+
+.EXAMPLE
+    .\Get-TriageProgress.ps1 -RunDate 2026-02-10 -Detailed
+    Shows per-step progress for the 2026-02-10 run.
+
+.EXAMPLE
+    .\Get-TriageProgress.ps1 -AsJson | ConvertFrom-Json
+    Returns structured progress data.
+#>
+[CmdletBinding()]
+param(
+    [string]$RunDate,
+    [string]$RunRoot,
+    [string]$ReviewOutputRoot = 'Generated Files/prReview',
+    [switch]$Detailed,
+    [switch]$AsJson
+)
+
+$ErrorActionPreference = 'Stop'
+
+# Load TaskRunner library (provides Get-TaskLogSummary / Test-HeartbeatAlive)
+$scriptDir = Split-Path -Parent $MyInvocation.MyCommand.Path
+. (Join-Path $scriptDir 'TaskRunner.ps1')
+
+# Resolve paths (fall back to CWD when not inside a git repo)
+$repoRoot = git rev-parse --show-toplevel 2>$null
+if (-not $repoRoot) { $repoRoot = (Get-Location).Path }
+
+$resolvedReviewOutputRoot = if ([System.IO.Path]::IsPathRooted($ReviewOutputRoot)) {
+    $ReviewOutputRoot
+} else {
+    Join-Path $repoRoot $ReviewOutputRoot
+}
+
+if (-not $RunDate) { $RunDate = (Get-Date).ToString('yyyy-MM-dd') }
+if (-not $RunRoot) { $RunRoot = Join-Path $repoRoot "Generated Files/pr-triage/$RunDate" }
+
+if (-not (Test-Path $RunRoot)) {
+    if ($AsJson) {
+        @{ Error = "No run found at $RunRoot" } | ConvertTo-Json
+    } else {
+        Write-Host "No triage run found at: $RunRoot" -ForegroundColor Yellow
+    }
+    return
+}
+
+# Step definitions match the orchestrator
+
+# ── Scan global output files ───────────────────────────────────────────────
+
+$globalFiles = @(
+    @{ Name = 'all-prs.json'; Path = Join-Path $RunRoot 'all-prs.json' }
+    @{ Name = 'categorized-prs.json'; Path = Join-Path $RunRoot 'categorized-prs.json' }
+    @{ Name = 'summary.md'; Path = Join-Path $RunRoot 'summary.md' }
+)
+
+# ── Scan review task folders (Step 2 — reviews) ──────────────────────────────
+
+$reviewRunRoot = Join-Path $RunRoot 'reviews'
+$reviewDirs = Get-ChildItem -Path $reviewRunRoot -Directory -ErrorAction SilentlyContinue
+$prReviewRoot = $resolvedReviewOutputRoot
+
+$reviewSummaries = @()
+if ($reviewDirs) {
+    foreach ($d in $reviewDirs) {
+        $dir = $d.FullName
+        $prNum = $d.Name
+        # Marker files written by the orchestrator define terminal states.
+        $completed = Test-Path (Join-Path $dir '.completed')
+        $failed = Test-Path (Join-Path $dir '.failed')
+        $timedOut = Test-Path (Join-Path $dir '.timeout')
+        $terminal = $completed -or $failed -or $timedOut
+        $logSummary = Get-TaskLogSummary -TaskDir $dir
+        $hbAlive = Test-HeartbeatAlive -TaskDir $dir
+        # "Alive" = not terminal AND either heartbeat or log is still moving.
+        $alive = (-not $terminal) -and ($hbAlive -or $logSummary.LogAlive)
+
+        # Check actual review output (step files in prReview/<PR>/)
+        $reviewOutDir = Join-Path $prReviewRoot $prNum
+        $stepFileCount = 0
+        $hasOverview = $false
+        $hasSignal = $false
+        $signalStatus = $null
+        $signalCompletedCount = 0
+        $signalSkippedCount = 0
+        $signalLastStep = $null
+        if (Test-Path $reviewOutDir) {
+            $stepFiles = Get-ChildItem -Path $reviewOutDir -Filter '*.md' -ErrorAction SilentlyContinue
+            # Step files are named NN-*.md
+            $stepFileCount = ($stepFiles | Where-Object { $_.Name -match '^\d{2}-' }).Count
+            $hasOverview = Test-Path (Join-Path $reviewOutDir '00-OVERVIEW.md')
+            $signalPath = Join-Path $reviewOutDir '.signal'
+            $hasSignal = Test-Path $signalPath
+            if ($hasSignal) {
+                # .signal is best-effort JSON; ignore parse failures.
+                try {
+                    $sig = Get-Content $signalPath -Raw | ConvertFrom-Json
+                    $signalStatus = $sig.status
+                    $signalCompletedCount = @($sig.completedSteps).Count
+                    $signalSkippedCount = @($sig.skippedSteps).Count
+                    $signalLastStep = $sig.lastStep
+                } catch { }
+            }
+        }
+
+        $reviewSummaries += [PSCustomObject]@{
+            PR = $prNum
+            Completed = $completed
+            Failed = $failed
+            TimedOut = $timedOut
+            Alive = $alive
+            LogAlive = (-not $terminal) -and $logSummary.LogAlive
+            LogCount = $logSummary.LogCount
+            LatestLog = $logSummary.LatestLog
+            LogSizeKB = $logSummary.LatestSizeKB
+            StepFiles = $stepFileCount
+            HasOverview = $hasOverview
+            HasSignal = $hasSignal
+            SignalStatus = $signalStatus
+            SignalStepsDone = $signalCompletedCount
+            SignalStepsSkip = $signalSkippedCount
+            SignalLastStep = $signalLastStep
+        }
+    }
+}
+
+$reviewCompleted = ($reviewSummaries | Where-Object { $_.Completed }).Count
+$reviewFailed = ($reviewSummaries | Where-Object { $_.Failed }).Count
+$reviewTimedOut = ($reviewSummaries | Where-Object { $_.TimedOut }).Count
+$reviewAlive = ($reviewSummaries | Where-Object { $_.Alive }).Count
+$reviewTotal = $reviewSummaries.Count
+$hasReviewStep = $reviewTotal -gt 0
+
+# Count PRs from all-prs.json for overall totals
+$allPrsFile = Join-Path $RunRoot 'all-prs.json'
+$totalPRs = 0
+if (Test-Path $allPrsFile) {
+    try {
+        $allPrsData = Get-Content $allPrsFile -Raw | ConvertFrom-Json
+        $totalPRs = $allPrsData.TotalCount
+    } catch { }
+}
+
+$globalStatus = [ordered]@{}
+foreach
($gf in $globalFiles) {
+    # A global file counts as present only when it exists AND is non-empty.
+    $globalStatus[$gf.Name] = (Test-Path $gf.Path) -and ((Get-Item $gf.Path -ErrorAction SilentlyContinue).Length -gt 0)
+}
+
+# ── Build result ──────────────────────────────────────────────────────────
+
+$result = [PSCustomObject]@{
+    RunDate = $RunDate
+    RunRoot = $RunRoot
+    TotalPRs = $totalPRs
+    GlobalFiles = $globalStatus
+    HasReviewStep = $hasReviewStep
+    Reviews = if ($hasReviewStep) {
+        [PSCustomObject]@{
+            Total = $reviewTotal
+            Completed = $reviewCompleted
+            Failed = $reviewFailed
+            TimedOut = $reviewTimedOut
+            Running = $reviewAlive
+            Pending = $reviewTotal - $reviewCompleted - $reviewFailed - $reviewTimedOut - $reviewAlive
+        }
+    } else { $null }
+    # Done = every global file present, at least one PR, and all reviews finished.
+    AllDone = ($globalStatus.Values | Where-Object { -not $_ }).Count -eq 0 -and
+        $totalPRs -gt 0 -and
+        (-not $hasReviewStep -or $reviewCompleted -eq $reviewTotal)
+}
+
+if ($Detailed) {
+    if ($hasReviewStep) {
+        $result | Add-Member -NotePropertyName 'ReviewTasks' -NotePropertyValue $reviewSummaries
+    }
+}
+
+# ── Output ────────────────────────────────────────────────────────────────
+
+if ($AsJson) {
+    $result | ConvertTo-Json -Depth 5
+    return
+}
+
+# Human-readable output
+Write-Host ''
+Write-Host "═══════════════════════════════════════════════════════════════" -ForegroundColor Cyan
+Write-Host " PR Triage Progress — $RunDate" -ForegroundColor Cyan
+Write-Host "═══════════════════════════════════════════════════════════════" -ForegroundColor Cyan
+Write-Host ''
+
+# FIX: the original summary printed $completedPRs/$failedPRs/$timedOutPRs/
+# $alivePRs/$logAlivePRs/$totalLogFiles — none of which are defined anywhere
+# in this script (the per-PR enrichment scan no longer exists), so the header
+# silently rendered blanks/0. Derive the headline numbers from the review
+# task scan, which is the only task data this script collects.
+$logAlivePRs = ($reviewSummaries | Where-Object { $_.LogAlive }).Count
+$totalLogFiles = [int](($reviewSummaries | Measure-Object -Property LogCount -Sum).Sum)
+$pct = if ($reviewTotal -gt 0) { [math]::Round(($reviewCompleted / $reviewTotal) * 100) } else { 0 }
+Write-Host " Review tasks: $reviewCompleted/$reviewTotal ($pct%)" -ForegroundColor $(if ($pct -eq 100) { 'Green' } else { 'Yellow' })
+
+if ($reviewFailed -gt 0) { Write-Host " Failed: $reviewFailed" -ForegroundColor Red }
+if ($reviewTimedOut -gt 0) { Write-Host " Timed out: $reviewTimedOut" -ForegroundColor Red }
+if ($reviewAlive -gt 0) { Write-Host " Running: $reviewAlive" -ForegroundColor Cyan }
+if ($logAlivePRs -gt 0) { Write-Host " Log active: $logAlivePRs (CLI producing output)" -ForegroundColor Cyan }
+Write-Host " Log files: $totalLogFiles total across all tasks" -ForegroundColor Gray
+
+Write-Host ''
+Write-Host ' Global files:' -ForegroundColor Gray
+foreach ($gf in $globalFiles) {
+    $exists = $globalStatus[$gf.Name]
+    $icon = if ($exists) { '✓' } else { '○' }
+    $color = if ($exists) { 'Green' } else { 'DarkGray' }
+    Write-Host " $icon $($gf.Name)" -ForegroundColor $color
+}
+
+if ($Detailed -and $hasReviewStep) {
+    Write-Host ''
+    $reviewPct = if ($reviewTotal -gt 0) { [math]::Round(($reviewCompleted / $reviewTotal) * 100) } else { 0 }
+    Write-Host " PR reviews (Step 2): $reviewCompleted/$reviewTotal ($reviewPct%)" -ForegroundColor $(if ($reviewPct -eq 100) { 'Green' } else { 'Yellow' })
+
+    if ($reviewFailed -gt 0) { Write-Host " Failed: $reviewFailed" -ForegroundColor Red }
+    if ($reviewTimedOut -gt 0) { Write-Host " Timed out: $reviewTimedOut" -ForegroundColor Red }
+    if ($reviewAlive -gt 0) { Write-Host " Running: $reviewAlive" -ForegroundColor Cyan }
+
+    # NOTE: the inner $Detailed check is redundant (already guarded above);
+    # kept for behavioral parity.
+    if ($Detailed -and $reviewSummaries.Count -gt 0) {
+        Write-Host ''
+        Write-Host ' Per-PR detail (reviews):' -ForegroundColor Gray
+        foreach ($rs in $reviewSummaries | Sort-Object PR) {
+            $icon = if ($rs.Completed) { '✓' } elseif ($rs.Failed) { '✗' } elseif ($rs.Alive) { '⟳' } else { '○' }
+            $color = if ($rs.Completed) { 'Green' } elseif ($rs.Failed) { 'Red' } elseif ($rs.Alive) { 'Cyan' } else { 'DarkGray' }
+            $stepInfo = if ($rs.StepFiles -gt 0) { " [$($rs.StepFiles) step files]" } else { '' }
+            $signalInfo = if ($rs.HasSignal) {
+                $done = $rs.SignalStepsDone
+                $skip = $rs.SignalStepsSkip
+                $last = if ($rs.SignalLastStep) { " → $($rs.SignalLastStep)" } else { '' }
+                " ✔signal($($rs.SignalStatus): ${done}done/${skip}skip${last})"
+            } else { '' }
+            $logInfo = ''
+            if ($rs.LogCount -gt 0) {
+                $logAliveTag = if ($rs.LogAlive) { ' ✉️ active' } else { '' }
+                $logInfo = " [log: $($rs.LatestLog) $($rs.LogSizeKB)KB$logAliveTag]"
+            }
Write-Host "    $icon PR #$($rs.PR)$stepInfo$signalInfo$logInfo" -ForegroundColor $color
+        }
+    }
+}
+
+Write-Host ''
+if ($result.AllDone) {
+    Write-Host " ✅ Triage run complete! Open summary.md to review." -ForegroundColor Green
+} else {
+    Write-Host " ⏳ Run in progress or incomplete. Re-run orchestrator to resume." -ForegroundColor Yellow
+}
+Write-Host ''
diff --git a/.github/skills/pr-triage/scripts/Invoke-AiEnrichment.ps1 b/.github/skills/pr-triage/scripts/Invoke-AiEnrichment.ps1
new file mode 100644
index 000000000000..627e0df96614
--- /dev/null
+++ b/.github/skills/pr-triage/scripts/Invoke-AiEnrichment.ps1
@@ -0,0 +1,509 @@
+<#
+.SYNOPSIS
+    Evaluates PRs using Copilot/Claude CLI via the parallel-job-orchestrator.
+    Returns dimension scores per PR; category derivation happens in Step 4.
+
+.DESCRIPTION
+    For each PR, builds a categorization prompt, delegates execution to the
+    parallel-job-orchestrator skill, and parses structured JSON results from
+    each job's output file. Completed PR results are cached on disk (resumable).
+
+    DO NOT add [CmdletBinding()], [Parameter(Mandatory)], or [ValidateSet()]
+    here — those attributes make the script "advanced" which propagates
+    ErrorActionPreference and can crash the orchestrator's monitoring loop.
+
+.PARAMETER InputPath
+    Path to JSON with PR data (from Get-OpenPrs.ps1).
+.PARAMETER OutputPath
+    Where to save evaluation results. Default: ai-enrichment.json
+.PARAMETER Repository
+    GitHub repo in owner/repo format. Default: microsoft/PowerToys
+.PARAMETER MaxConcurrent
+    Maximum parallel AI CLI jobs. Default: 20.
+.PARAMETER InactivityTimeoutSeconds
+    Kill CLI if log doesn't grow for this many seconds. Default: 120.
+.PARAMETER MaxRetryCount
+    Retry attempts after inactivity kill. Default: 2.
+.PARAMETER TimeoutMin
+    Legacy param — converted to InactivityTimeoutSeconds if set. Default: 5.
+.PARAMETER Force
+    Re-evaluate PRs that already have results.
+#> +param( + [string]$InputPath, + [string]$OutputPath = 'ai-enrichment.json', + [string]$Repository = 'microsoft/PowerToys', + [int]$MaxConcurrent = 20, + [int]$InactivityTimeoutSeconds = 120, + [int]$MaxRetryCount = 2, + [int]$TimeoutMin = 5, + [string]$CLIType = 'copilot', + [string]$OutputRoot, + [string]$ReviewOutputRoot = 'Generated Files/prReview', + [string]$LogPath, + [switch]$Force +) + +$ErrorActionPreference = 'Stop' + +# Manual validation +if (-not $InputPath -or -not (Test-Path $InputPath)) { + Write-Error "Invoke-AiEnrichment: -InputPath is required and must exist. Got: '$InputPath'" + return +} +if ($CLIType -notin 'copilot', 'claude') { + Write-Error "Invoke-AiEnrichment: Invalid -CLIType '$CLIType'. Must be 'copilot' or 'claude'." + return +} + +# ── logging ────────────────────────────────────────────────────────────── + +if ([string]::IsNullOrWhiteSpace($LogPath)) { + $LogPath = Join-Path (Get-Location) 'Invoke-AiEnrichment.log' +} + +$logDir = Split-Path -Parent $LogPath +if (-not [string]::IsNullOrWhiteSpace($logDir) -and -not (Test-Path $logDir)) { + New-Item -ItemType Directory -Path $logDir -Force | Out-Null +} + +"[$(Get-Date -Format o)] Starting Invoke-AiEnrichment" | Out-File -FilePath $LogPath -Encoding utf8 -Append + +function Write-LogHost { + param( + [Parameter(Position = 0, ValueFromRemainingArguments = $true)] + [object[]]$Object, + [object]$ForegroundColor, + [object]$BackgroundColor, + [switch]$NoNewline, + [Object]$Separator + ) + + $message = [string]::Join(' ', ($Object | ForEach-Object { [string]$_ })) + "[$(Get-Date -Format o)] $message" | Out-File -FilePath $LogPath -Encoding utf8 -Append + + $invokeParams = @{} + if ($PSBoundParameters.ContainsKey('ForegroundColor') -and -not [string]::IsNullOrWhiteSpace([string]$ForegroundColor)) { $invokeParams.ForegroundColor = $ForegroundColor } + if ($PSBoundParameters.ContainsKey('BackgroundColor') -and -not [string]::IsNullOrWhiteSpace([string]$BackgroundColor)) { 
$invokeParams.BackgroundColor = $BackgroundColor } + if ($NoNewline) { $invokeParams.NoNewline = $true } + if ($PSBoundParameters.ContainsKey('Separator')) { $invokeParams.Separator = $Separator } + + Microsoft.PowerShell.Utility\Write-Host @invokeParams -Object $message +} + +# ── Resolve paths ──────────────────────────────────────────────────────── +$repoRoot = git rev-parse --show-toplevel 2>$null +if (-not $repoRoot) { $repoRoot = (Get-Location).Path } + +$scriptDir = Split-Path -Parent $MyInvocation.MyCommand.Path +$promptTemplatePath = Join-Path (Split-Path $scriptDir) 'references' 'categorize-pr.prompt.md' +$mcpConfigPath = Join-Path $repoRoot '.github' 'skills' 'pr-review' 'references' 'mcp-config.json' +$tmpDir = if ($OutputRoot) { Join-Path $OutputRoot '__tmp' } else { Join-Path $repoRoot 'Generated Files' 'pr-triage' '__tmp' } +$reviewRoot = if ([System.IO.Path]::IsPathRooted($ReviewOutputRoot)) { $ReviewOutputRoot } else { Join-Path $repoRoot $ReviewOutputRoot } + +if (-not (Test-Path $promptTemplatePath)) { throw "Prompt template not found: $promptTemplatePath" } +if (-not (Test-Path $mcpConfigPath)) { throw "MCP config not found: $mcpConfigPath" } +if (-not (Test-Path $tmpDir)) { New-Item -ItemType Directory -Path $tmpDir -Force | Out-Null } + +$promptTemplate = Get-Content $promptTemplatePath -Raw +$promptTemplate = $promptTemplate -replace '^\s*```prompt\s*', '' +$promptTemplate = $promptTemplate -replace '\s*```\s*$', '' + +$ClaudeEnrichmentJsonSchema = 
'{"type":"object","properties":{"dimensions":{"type":"object","properties":{"review_sentiment":{"type":"object","properties":{"score":{"type":"number"},"confidence":{"type":"number"},"reasoning":{"type":"string"}},"required":["score","confidence","reasoning"]},"author_responsiveness":{"type":"object","properties":{"score":{"type":"number"},"confidence":{"type":"number"},"reasoning":{"type":"string"}},"required":["score","confidence","reasoning"]},"code_health":{"type":"object","properties":{"score":{"type":"number"},"confidence":{"type":"number"},"reasoning":{"type":"string"}},"required":["score","confidence","reasoning"]},"merge_readiness":{"type":"object","properties":{"score":{"type":"number"},"confidence":{"type":"number"},"reasoning":{"type":"string"}},"required":["score","confidence","reasoning"]},"activity_level":{"type":"object","properties":{"score":{"type":"number"},"confidence":{"type":"number"},"reasoning":{"type":"string"}},"required":["score","confidence","reasoning"]},"direction_clarity":{"type":"object","properties":{"score":{"type":"number"},"confidence":{"type":"number"},"reasoning":{"type":"string"}},"required":["score","confidence","reasoning"]},"superseded":{"type":"object","properties":{"score":{"type":"number"},"confidence":{"type":"number"},"reasoning":{"type":"string"}},"required":["score","confidence","reasoning"]}},"required":["review_sentiment","author_responsiveness","code_health","merge_readiness","activity_level","direction_clarity","superseded"]},"suggested_category":{"type":"string"},"discussion_summary":{"type":"string"},"superseded_by":{"type":["string","null"]},"tags":{"type":"array","items":{"type":"string"}}},"required":["dimensions","suggested_category","discussion_summary","superseded_by","tags"]}' + +# ── Resolve real copilot binary (skip the .ps1 bootstrapper) ───────────── +function Get-CopilotExecutablePath { + $copilotCmd = Get-Command copilot -ErrorAction SilentlyContinue + if (-not $copilotCmd) { return 'copilot' } + + 
if ($copilotCmd.Source -match '\.ps1$') { + $bootstrapDir = Split-Path $copilotCmd.Source -Parent + $savedPath = $env:PATH + $env:PATH = ($env:PATH -split ';' | Where-Object { $_ -ne $bootstrapDir }) -join ';' + $realCmd = Get-Command copilot -ErrorAction SilentlyContinue + $env:PATH = $savedPath + if ($realCmd) { return $realCmd.Source } + } + + return $copilotCmd.Source +} + +$CopilotExe = if ($CLIType -eq 'copilot') { Get-CopilotExecutablePath } else { $null } + +# ── Dimension names — single source of truth ───────────────────────────── +$DimensionNames = @( + 'review_sentiment' + 'author_responsiveness' + 'code_health' + 'merge_readiness' + 'activity_level' + 'direction_clarity' + 'superseded' +) + +# ═════════════════════════════════════════════════════════════════════════ +# Pure functions — return objects, no Write-Host +# ═════════════════════════════════════════════════════════════════════════ + +function Get-AiReviewSummary ([int]$PRNumber) { + $reviewDir = Join-Path $reviewRoot $PRNumber.ToString() + if (-not (Test-Path $reviewDir)) { return 'No AI code review available for this PR.' } + + $overviewPath = Join-Path $reviewDir '00-OVERVIEW.md' + if (-not (Test-Path $overviewPath)) { return 'AI review directory exists but no overview found.' 
} + + $overview = Get-Content $overviewPath -Raw + $findings = @() + Get-ChildItem -Path $reviewDir -Filter '*.md' -ErrorAction SilentlyContinue | + Where-Object { $_.Name -match '^\d{2}-' } | ForEach-Object { + $content = Get-Content $_.FullName -Raw -ErrorAction SilentlyContinue + if ($content -match 'mcp-review-comment') { + [regex]::Matches($content, '(?s)```mcp-review-comment\s*\n(.*?)```') | ForEach-Object { + try { + $json = $_.Groups[1].Value | ConvertFrom-Json -ErrorAction SilentlyContinue + if ($json.severity) { $findings += "- [$($json.severity.ToUpper())] $($json.body)" } + } catch { } + } + } + } + + return "**Overview:**`n$overview`n`n**Findings ($($findings.Count) total):**`n$($findings -join "`n")" +} + +function Build-PrPrompt ([PSCustomObject]$Pr) { + $e = $Pr.Enrichment + $now = Get-Date + $createdAt = [DateTime]::Parse($Pr.CreatedAt) + $ageInDays = [Math]::Floor(($now - $createdAt).TotalDays) + + $timestamps = @($Pr.UpdatedAt) + if ($e) { $timestamps += $e.LastCommentAt, $e.LastCommitAt } + $lastActivity = $timestamps | + Where-Object { -not [string]::IsNullOrWhiteSpace($_) } | + ForEach-Object { [DateTime]::Parse($_) } | + Sort-Object -Descending | Select-Object -First 1 + $daysSinceActivity = if ($lastActivity) { [Math]::Floor(($now - $lastActivity).TotalDays) } else { $ageInDays } + + $authorLastStr = if ($e) { $e.AuthorLastActivityAt } else { $null } + $authorLast = if ($authorLastStr) { [DateTime]::Parse($authorLastStr) } else { $null } + $daysSinceAuthor = if ($authorLast) { [Math]::Floor(($now - $authorLast).TotalDays) } else { $ageInDays } + + $replacements = @{ + '{{PR_NUMBER}}' = $Pr.Number + '{{PR_TITLE}}' = ($Pr.Title -replace '[{}]', '') + '{{PR_AUTHOR}}' = $Pr.Author + '{{PR_URL}}' = $Pr.Url + '{{AGE_DAYS}}' = $ageInDays + '{{DAYS_SINCE_ACTIVITY}}' = $daysSinceActivity + '{{DAYS_SINCE_AUTHOR_ACTIVITY}}' = $daysSinceAuthor + '{{ADDITIONS}}' = $Pr.Additions + '{{DELETIONS}}' = $Pr.Deletions + '{{CHANGED_FILES}}' = $Pr.ChangedFiles + 
'{{LABELS}}' = $(if ($Pr.Labels) { $Pr.Labels -join ', ' } else { '(none)' })
+        '{{LINKED_ISSUES}}' = $(if ($Pr.LinkedIssues) { $Pr.LinkedIssues -join ', ' } else { '(none)' })
+        '{{IS_DRAFT}}' = $Pr.IsDraft
+        '{{APPROVAL_COUNT}}' = $(if ($e) { $e.ApprovalCount } else { 'UNKNOWN' })
+        '{{CHANGES_REQUESTED_COUNT}}' = $(if ($e) { $e.ChangesRequestedCount } else { 'UNKNOWN' })
+        '{{CHECKS_STATUS}}' = $(if ($e -and $e.ChecksStatus) { $e.ChecksStatus } else { 'UNKNOWN' })
+        '{{FAILING_CHECKS}}' = $(if ($e -and $e.FailingChecks) { $e.FailingChecks -join ', ' } else { '(none)' })
+        '{{MERGEABLE}}' = $(if ($Pr.Mergeable) { $Pr.Mergeable } else { 'UNKNOWN' })
+        '{{AI_REVIEW_SUMMARY}}' = (Get-AiReviewSummary -PRNumber ([int]$Pr.Number))
+        '{{EXTRACT_FOLDER}}' = ((Join-Path $tmpDir "pr-$($Pr.Number)") -replace '\\', '/')
+    }
+
+    $prompt = $promptTemplate
+    foreach ($kv in $replacements.GetEnumerator()) {
+        $prompt = $prompt.Replace($kv.Key, [string]$kv.Value)  # ordinal replace: values (titles, AI summaries) may contain '$' that -replace would expand as substitution tokens
+    }
+    return $prompt
+}
+
+function ConvertFrom-AiResponse ([string]$RawOutput) {
+    if ([string]::IsNullOrWhiteSpace($RawOutput)) { return $null }
+
+    try {
+        $root = $RawOutput | ConvertFrom-Json -ErrorAction Stop
+        if ($root.dimensions) {
+            # Direct dimensions object (e.g. raw JSON response)
+            $RawOutput = ($root | ConvertTo-Json -Depth 20)
+        } elseif ($root.structured_output -and $root.structured_output.dimensions) {
+            # Claude CLI --output-format json --json-schema puts the result
+            # in "structured_output", not "result".
+            $RawOutput = ($root.structured_output | ConvertTo-Json -Depth 20)
+        } elseif ($root.result -and ($root.result -is [string]) -and $root.result.Length -gt 0) {
+            $RawOutput = [string]$root.result
+        } elseif ($root.content -and $root.content.Count -gt 0) {
+            $textParts = @()
+            foreach ($c in $root.content) {
+                if ($c.text) { $textParts += [string]$c.text }
+            }
+            if ($textParts.Count -gt 0) { $RawOutput = ($textParts -join "`n") }
+        }
+    } catch { }
+
+    $jsonMatch = [regex]::Match($RawOutput, '(?s)```(?:json)?\s*\n(\{.*\})\s*\n```')
+    if ($jsonMatch.Success) { $jsonText = $jsonMatch.Groups[1].Value }
+    else {
+        $jsonMatch = [regex]::Match($RawOutput, '(?s)(\{[^{}]*"dimensions"\s*:\s*\{.*\})')
+        if (-not $jsonMatch.Success) { return $null }
+        $jsonText = $jsonMatch.Value
+    }
+
+    try {
+        $parsed = $jsonText | ConvertFrom-Json -ErrorAction Stop
+        if (-not $parsed.dimensions) { return $null }
+
+        $dims = @{}
+        foreach ($name in $DimensionNames) {
+            $d = $parsed.dimensions.$name
+            if ($d -and $null -ne $d.score) {
+                $dims[$name] = [PSCustomObject]@{
+                    Score = [Math]::Round([double]$d.score, 2)
+                    Confidence = if ($d.confidence) { [Math]::Round([double]$d.confidence, 2) } else { 0.5 }
+                    Reasoning = if ($d.reasoning) { [string]$d.reasoning } else { '' }
+                }
+            }
+        }
+        if ($dims.Count -eq 0) { return $null }
+
+        return [PSCustomObject]@{
+            Dimensions = $dims
+            SuggestedCategory = $parsed.suggested_category
+            DiscussionSummary = $parsed.discussion_summary
+            SupersededBy = $parsed.superseded_by
+            Tags = @($parsed.tags)
+        }
+    } catch { return $null }
+}
+
+function New-EvalResult ([int]$Number, $AiResult, [string]$Source) {
+    return [PSCustomObject]@{
+        Number = $Number
+        Dimensions = if ($AiResult) { $AiResult.Dimensions } else { @{} }
+        SuggestedCategory = if ($AiResult) { $AiResult.SuggestedCategory } else { $null }
+        Tags = if ($AiResult) { $AiResult.Tags } else { @() }
+        DiscussionSummary = if ($AiResult) { $AiResult.DiscussionSummary } else { $null }
+        SupersededBy = if
($AiResult) { $AiResult.SupersededBy } else { $null } + Source = $Source + } +} + +function New-FallbackAiResult ([PSCustomObject]$Pr, [string]$Reason) { + $normalizedReason = if ([string]::IsNullOrWhiteSpace($Reason)) { 'AI output not parseable' } else { $Reason } + $dims = @{} + foreach ($name in $DimensionNames) { + $dims[$name] = [PSCustomObject]@{ + Score = 0.5 + Confidence = 0.2 + Reasoning = "Fallback value: $normalizedReason" + } + } + + return [PSCustomObject]@{ + Dimensions = $dims + SuggestedCategory = $null + DiscussionSummary = "Fallback enrichment used for PR #$($Pr.Number): $normalizedReason" + SupersededBy = $null + Tags = @('ai-fallback') + } +} + +function New-EvalOutput ([hashtable]$Results, [string]$RepoName) { + return [PSCustomObject]@{ + CategorizedAt = (Get-Date).ToString('o') + Repository = $RepoName + AiEngine = $CLIType + TotalCount = $Results.Count + AiSuccessCount = @($Results.Values | Where-Object { $_.Source -eq 'ai' }).Count + AiFallbackCount = @($Results.Values | Where-Object { $_.Source -eq 'fallback' }).Count + AiFailedCount = @($Results.Values | Where-Object { $_.Source -eq 'failed' }).Count + Results = @($Results.Values | Sort-Object Number) + } +} + +# ═════════════════════════════════════════════════════════════════════════ +# Main — build job definitions → orchestrator → parse results +# ═════════════════════════════════════════════════════════════════════════ + +$inputData = Get-Content $InputPath -Raw | ConvertFrom-Json +$prs = if ($inputData.Prs) { $inputData.Prs } else { $inputData } + +# Resume: load existing +$allResults = @{} +if (-not $Force -and (Test-Path $OutputPath)) { + try { + (Get-Content $OutputPath -Raw | ConvertFrom-Json).Results | + ForEach-Object { $allResults[[int]$_.Number] = $_ } + Write-LogHost " Resumed: $($allResults.Count) existing results" -ForegroundColor DarkGray + } catch { } +} + +$prsToProcess = @($prs | Where-Object { $Force -or -not $allResults.ContainsKey([int]$_.Number) }) +Write-LogHost 
"AI Evaluation: $($prs.Count) total, $($prsToProcess.Count) to process, $($prs.Count - $prsToProcess.Count) skipped" -ForegroundColor Cyan + +if ($prsToProcess.Count -eq 0) { + Write-LogHost ' Nothing to do' -ForegroundColor Green + return +} + +# Build all prompts upfront +$promptMap = @{} +foreach ($pr in $prsToProcess) { $promptMap[[int]$pr.Number] = Build-PrPrompt -Pr $pr } + +# ── Build orchestrator job definitions ────────────────────────────────── + +$jobDefs = @(foreach ($pr in $prsToProcess) { + $n = [int]$pr.Number + $prompt = $promptMap[$n] + $flatPrompt = ($prompt -replace "[\r\n]+", ' ').Trim() + + $prOutputDir = Join-Path $tmpDir "enrich-$n" + New-Item -ItemType Directory -Path $prOutputDir -Force | Out-Null + + if ($CLIType -eq 'copilot') { + $logFile = Join-Path $prOutputDir "_copilot-enrich.log" + $cliArgs = @( + '--additional-mcp-config', "@$mcpConfigPath", + '-p', $flatPrompt, + '--yolo', '--no-custom-instructions', '-s', '--agent', 'TriagePR' + ) + + @{ + Label = "enrich-pr-$n" + ExecutionParameters = @{ + JobName = "enrich-pr-$n" + Command = $CopilotExe + Arguments = $cliArgs + WorkingDir = $repoRoot + OutputDir = $prOutputDir + LogPath = $logFile + } + MonitorFiles = @($logFile) + CleanupTask = $null + } + } + else { + $debugFile = Join-Path $prOutputDir "_claude-debug.log" + $logFile = Join-Path $prOutputDir "_claude-enrich.log" + $cliArgs = @( + '-p', $flatPrompt, + '--dangerously-skip-permissions', + '--output-format', 'json', + '--json-schema', $ClaudeEnrichmentJsonSchema, + '--debug', 'all', '--debug-file', $debugFile, + '--agent', 'TriagePR' + ) + + @{ + Label = "enrich-pr-$n" + ExecutionParameters = @{ + JobName = "enrich-pr-$n" + Command = 'claude' + Arguments = $cliArgs + WorkingDir = $repoRoot + OutputDir = $prOutputDir + LogPath = $logFile + } + MonitorFiles = @($debugFile) + CleanupTask = { + param($Tracker) + $outDir = $Tracker.ExecutionParameters.OutputDir + $dbg = Join-Path $outDir '_claude-debug.log' + if (Test-Path $dbg) { + 
$fi = [System.IO.FileInfo]::new($dbg) + if ($fi.Length -gt 0) { + $sizeMB = [math]::Round($fi.Length / 1MB, 1) + Remove-Item $dbg -Force + Write-Host "[$($Tracker.Label)] Cleaned debug log (${sizeMB} MB)" + } + } + # Claude CLI auto-creates a 0-byte 'latest' marker file — remove it. + $latest = Join-Path $outDir 'latest' + if (Test-Path $latest) { Remove-Item $latest -Force } + } + } + } +}) + +Write-LogHost "`nBuilt $($jobDefs.Count) enrichment job(s)" -ForegroundColor Cyan +$jobDefs | ForEach-Object { Write-LogHost " $($_.Label)" -ForegroundColor Gray } + +# ── Run orchestrator ──────────────────────────────────────────────────── + +$orchestratorPath = Join-Path $scriptDir '..\..\parallel-job-orchestrator\scripts\Invoke-SimpleJobOrchestrator.ps1' +if (-not (Test-Path $orchestratorPath)) { + throw "Orchestrator not found: $orchestratorPath" +} + +$savedEAP = $ErrorActionPreference +$ErrorActionPreference = 'Continue' + +$orchResults = & $orchestratorPath ` + -JobDefinitions $jobDefs ` + -MaxConcurrent $MaxConcurrent ` + -InactivityTimeoutSeconds $InactivityTimeoutSeconds ` + -MaxRetryCount $MaxRetryCount ` + -PollIntervalSeconds 5 ` + -LogDir $tmpDir + +$ErrorActionPreference = $savedEAP + +# ── Parse results from output files ──────────────────────────────────── + +$prLookup = @{} +foreach ($pr in $prsToProcess) { $prLookup[[int]$pr.Number] = $pr } + +foreach ($r in $orchResults) { + # Extract PR number from label (e.g. 
"enrich-pr-45601" → 45601) + if ($r.Label -notmatch '(\d+)$') { continue } + $n = [int]$Matches[1] + $pr = $prLookup[$n] + if (-not $pr) { continue } + + $prOutputDir = Join-Path $tmpDir "enrich-$n" + + if ($r.Status -eq 'Completed') { + # Read the CLI output from the log file + $logFile = $r.LogPath + $outputFile = Join-Path $tmpDir "cat-output-$n.txt" + + $raw = $null + if ($logFile -and (Test-Path $logFile)) { + $raw = Get-Content $logFile -Raw -ErrorAction SilentlyContinue + } + + if ($raw) { + $raw | Set-Content $outputFile -Encoding UTF8 -ErrorAction SilentlyContinue + } + + $parsed = if ($raw) { ConvertFrom-AiResponse -RawOutput $raw } else { $null } + + if ($parsed) { + $allResults[$n] = New-EvalResult -Number $n -AiResult $parsed -Source 'ai' + $scores = ($DimensionNames | ForEach-Object { + $d = $parsed.Dimensions[$_]; if ($d) { "$($_.Substring(0,4))=$($d.Score)" } + }) -join ' ' + Write-LogHost " ✓ #$n $scores" -ForegroundColor Green + } + else { + $fallback = New-FallbackAiResult -Pr $pr -Reason 'CLI completed but no parseable AI response' + $allResults[$n] = New-EvalResult -Number $n -AiResult $fallback -Source 'fallback' + Write-LogHost " ⚠ #$n FALLBACK: no parseable response" -ForegroundColor Yellow + } + } + else { + $fallback = New-FallbackAiResult -Pr $pr -Reason "Job status: $($r.Status)" + $allResults[$n] = New-EvalResult -Number $n -AiResult $fallback -Source 'fallback' + Write-LogHost " ⚠ #$n FALLBACK: $($r.Status)" -ForegroundColor Yellow + } +} + +# Final save +$output = New-EvalOutput -Results $allResults -RepoName $Repository +$output | ConvertTo-Json -Depth 10 | Set-Content $OutputPath -Encoding UTF8 +Write-LogHost "Done: $($output.AiSuccessCount) succeeded, $($output.AiFallbackCount) fallback, $($output.AiFailedCount) failed → $OutputPath" -ForegroundColor Cyan + +# Cleanup temp +Get-ChildItem $tmpDir -File -Filter 'cat-*' -ErrorAction SilentlyContinue | Remove-Item -Force -ErrorAction SilentlyContinue +Get-ChildItem $tmpDir -Directory 
-Filter 'enrich-*' -ErrorAction SilentlyContinue | Remove-Item -Recurse -Force -ErrorAction SilentlyContinue
+Get-ChildItem $tmpDir -Directory -Filter 'pr-*' -ErrorAction SilentlyContinue | Remove-Item -Recurse -Force -ErrorAction SilentlyContinue
diff --git a/.github/skills/pr-triage/scripts/Invoke-PrCategorization.ps1 b/.github/skills/pr-triage/scripts/Invoke-PrCategorization.ps1
new file mode 100644
index 000000000000..3563079de42f
--- /dev/null
+++ b/.github/skills/pr-triage/scripts/Invoke-PrCategorization.ps1
@@ -0,0 +1,606 @@
+<#
+.SYNOPSIS
+    Enrich PRs with GitHub API data, merge AI evaluation, and categorize.
+    Pure business-logic — all functions return objects, no Write-Host in helpers.
+
+.PARAMETER InputPath
+    Path to all-prs.json (from Get-OpenPrs.ps1).
+.PARAMETER OutputPath
+    Where to write categorized-prs.json. Default: categorized-prs.json
+.PARAMETER Repository
+    GitHub repo (owner/repo). Default: microsoft/PowerToys
+.PARAMETER ThrottleLimit
+    Max parallel API calls. Default: 20.
+.PARAMETER AiEnrichmentPath
+    Path to ai-enrichment.json (from Invoke-AiEnrichment.ps1).
+#>
+# NOTE: Do NOT use [CmdletBinding()] or [Parameter(Mandatory)] here.
+# This script may be called from within an orchestrator job scope where
+# advanced function attributes propagate ErrorActionPreference and crash.
+param(
+    [string]$InputPath,
+    [string]$OutputPath = 'categorized-prs.json',
+    [string]$Repository = 'microsoft/PowerToys',
+    [int]$ThrottleLimit = 20,
+    [string]$AiEnrichmentPath,
+    [string]$ReviewOutputRoot = 'Generated Files/prReview',
+    [string]$LogPath
+)
+
+$ErrorActionPreference = 'Stop'
+
+# Manual validation (replacing [Parameter(Mandatory)])
+if (-not $InputPath -or -not (Test-Path $InputPath)) {
+    Write-Error "Invoke-PrCategorization: -InputPath is required and must exist.
Got: '$InputPath'" + return +} + +if ([string]::IsNullOrWhiteSpace($LogPath)) { + $LogPath = Join-Path (Get-Location) 'Invoke-PrCategorization.log' +} + +$logDir = Split-Path -Parent $LogPath +if (-not [string]::IsNullOrWhiteSpace($logDir) -and -not (Test-Path $logDir)) { + New-Item -ItemType Directory -Path $logDir -Force | Out-Null +} + +"[$(Get-Date -Format o)] Starting Invoke-PrCategorization" | Out-File -FilePath $LogPath -Encoding utf8 -Append + +function Write-LogHost { + param( + [Parameter(Position = 0, ValueFromRemainingArguments = $true)] + [object[]]$Object, + [object]$ForegroundColor, + [object]$BackgroundColor, + [switch]$NoNewline, + [Object]$Separator + ) + + $message = [string]::Join(' ', ($Object | ForEach-Object { [string]$_ })) + "[$(Get-Date -Format o)] $message" | Out-File -FilePath $LogPath -Encoding utf8 -Append + + $invokeParams = @{} + if ($PSBoundParameters.ContainsKey('ForegroundColor') -and -not [string]::IsNullOrWhiteSpace([string]$ForegroundColor)) { $invokeParams.ForegroundColor = $ForegroundColor } + if ($PSBoundParameters.ContainsKey('BackgroundColor') -and -not [string]::IsNullOrWhiteSpace([string]$BackgroundColor)) { $invokeParams.BackgroundColor = $BackgroundColor } + if ($NoNewline) { $invokeParams.NoNewline = $true } + if ($PSBoundParameters.ContainsKey('Separator')) { $invokeParams.Separator = $Separator } + + Microsoft.PowerShell.Utility\Write-Host @invokeParams -Object $message +} + +$repoRoot = git rev-parse --show-toplevel 2>$null +if (-not $repoRoot) { $repoRoot = (Get-Location).Path } + +$resolvedReviewOutputRoot = if ([System.IO.Path]::IsPathRooted($ReviewOutputRoot)) { + $ReviewOutputRoot +} else { + Join-Path $repoRoot $ReviewOutputRoot +} + +# ═════════════════════════════════════════════════════════════════════════ +# Pure functions — return objects, zero side effects +# ═════════════════════════════════════════════════════════════════════════ + +#region Review findings + +function Get-ReviewFindings 
([int]$PRNumber, [string]$ReviewRoot) { + <# Parses prReview output. Returns structured review data object. #> + $reviewDir = Join-Path $ReviewRoot $PRNumber.ToString() + $result = [PSCustomObject]@{ + HasReview = $false + Signal = $null + HighSeverity = 0 + MedSeverity = 0 + LowSeverity = 0 + TotalFindings = 0 + StepIssues = @() + Findings = @() + } + + if (-not (Test-Path $reviewDir)) { return $result } + + # Signal file + $signalFile = Join-Path $reviewDir '.signal' + if (Test-Path $signalFile) { + $result.Signal = (Get-Content $signalFile -Raw).Trim() + } + + # Overview + $overviewPath = Join-Path $reviewDir '00-OVERVIEW.md' + if (-not (Test-Path $overviewPath)) { return $result } + $result.HasReview = $true + + # Parse step files for mcp-review-comment blocks + Get-ChildItem -Path $reviewDir -Filter '*.md' -ErrorAction SilentlyContinue | + Where-Object { $_.Name -match '^\d{2}-' } | ForEach-Object { + $stepName = $_.BaseName + $content = Get-Content $_.FullName -Raw -ErrorAction SilentlyContinue + if (-not $content) { return } + + [regex]::Matches($content, '(?s)```mcp-review-comment\s*\n(.*?)```') | ForEach-Object { + try { + $json = $_.Groups[1].Value | ConvertFrom-Json -ErrorAction SilentlyContinue + if ($json.severity) { + $result.Findings += [PSCustomObject]@{ + Severity = $json.severity.ToLower() + Body = $json.body + Path = $json.path + Line = $json.line + Step = $stepName + } + } + } catch { } + } + } + + $result.TotalFindings = $result.Findings.Count + $result.HighSeverity = @($result.Findings | Where-Object { $_.Severity -eq 'high' }).Count + $result.MedSeverity = @($result.Findings | Where-Object { $_.Severity -eq 'medium' }).Count + $result.LowSeverity = @($result.Findings | Where-Object { $_.Severity -eq 'low' }).Count + + return $result +} + +function Get-FindingSummaries ([PSCustomObject[]]$Findings, [int]$MaxPerSeverity = 3) { + <# Sorts findings by severity, truncates body, returns string array. 
#> + $severityOrder = @{ 'high' = 0; 'medium' = 1; 'low' = 2 } + $sorted = $Findings | Sort-Object { $severityOrder[$_.Severity] ?? 3 } + + $lines = @() + $grouped = $sorted | Group-Object Severity + foreach ($g in $grouped) { + $items = $g.Group | Select-Object -First $MaxPerSeverity + foreach ($f in $items) { + $body = if ($f.Body.Length -gt 120) { $f.Body.Substring(0, 117) + '...' } else { $f.Body } + $lines += "[$($f.Severity.ToUpper())] $body" + } + } + return $lines +} + +#endregion + +#region Effort estimation + +function Get-EffortEstimate ([PSCustomObject]$ReviewData) { + <# Returns effort string based on review severity counts. #> + if (-not $ReviewData.HasReview) { return 'unknown' } + $h = $ReviewData.HighSeverity; $m = $ReviewData.MedSeverity; $l = $ReviewData.LowSeverity + $total = $ReviewData.TotalFindings + + if ($total -eq 0) { return 'trivial' } + if ($h -ge 3) { return 'rework' } + if ($h -ge 1 -and $m -ge 2) { return 'major' } + if ($h -ge 1 -or $m -ge 3) { return 'moderate' } + if ($m -ge 1) { return 'minor' } + return 'trivial' +} + +function Get-EffortLabel ([string]$Effort) { + <# Maps effort string to human-readable label. #> + switch ($Effort) { + 'trivial' { '✅ Trivial (clean or near-clean)' } + 'minor' { '🔧 Minor (small fixes needed)' } + 'moderate' { '⚠️ Moderate (several issues)' } + 'major' { '🔴 Major (significant work)' } + 'rework' { '🚨 Rework (fundamental problems)' } + default { '❓ Unknown (no review data)' } + } +} + +#endregion + +#region AI dimension rules + +function Get-CategoryFromDimensions ([hashtable]$Dims, [string]$SuggestedCategory) { + <# + Derives a triage category from AI evaluation dimension scores. + Uses rules R-AI-1 through R-AI-15 in priority order. + Returns @{ Category; Confidence; Source }. 
+ + Design principles: + - Superseded/abandoned checked first (terminal states trump quality signals) + - Positive outcomes next (ready, approved) — with code-health guards + - Technical blockers (build failures) — no sentiment gate + - Human blockers (review concerns, direction) — split rs vs ch + - Activity-based buckets last (stale, fresh, active) + - Neutral band (0.45–0.55) instead of float equality on 0.5 + - Enrichment cross-check happens in the caller after this returns + #> + # Helper to safely get a dimension score + $s = { param([string]$Name) if ($Dims[$Name]) { $Dims[$Name].Score } else { 0.5 } } + + $sup = & $s 'superseded' + $mr = & $s 'merge_readiness' + $rs = & $s 'review_sentiment' + $ch = & $s 'code_health' + $ar = & $s 'author_responsiveness' + $al = & $s 'activity_level' + $dc = & $s 'direction_clarity' + + # Neutral band — AI rarely returns exactly 0.5; treat 0.45–0.55 as "no signal" + $isNeutral = { param([double]$v) $v -ge 0.45 -and $v -le 0.55 } + + # Get average confidence + $allConf = @($Dims.Values | ForEach-Object { $_.Confidence } | Where-Object { $_ }) + $avgConf = if ($allConf.Count -gt 0) { ($allConf | Measure-Object -Average).Average } else { 0.5 } + + # ── Terminal states ────────────────────────────────────────────────── + + # R-AI-1: Superseded — binary signal, highest priority + if ($sup -ge 0.7) { return @{ Category = 'superseded'; Confidence = $avgConf; Source = 'ai-dimensions' } } + + # R-AI-2: Likely abandoned — dead PR; no point evaluating quality + if ($al -le 0.2 -and $ar -le 0.2) { return @{ Category = 'likely-abandoned'; Confidence = $avgConf; Source = 'ai-dimensions' } } + + # ── Positive outcomes ──────────────────────────────────────────────── + + # R-AI-3: Ready to merge — all three quality gates high + if ($mr -ge 0.8 -and $rs -ge 0.7 -and $ch -ge 0.7) { return @{ Category = 'ready-to-merge'; Confidence = $avgConf; Source = 'ai-dimensions' } } + + # R-AI-4: Approved pending merge — positive reviews + moderate 
readiness + acceptable code health + if ($rs -ge 0.7 -and $mr -ge 0.5 -and $mr -lt 0.8 -and $ch -ge 0.4) { return @{ Category = 'approved-pending-merge'; Confidence = $avgConf; Source = 'ai-dimensions' } } + + # ── Technical blockers ─────────────────────────────────────────────── + + # R-AI-5: Build failures — low code health + low merge readiness (no sentiment gate) + if ($ch -le 0.3 -and $mr -le 0.3) { return @{ Category = 'build-failures'; Confidence = $avgConf; Source = 'ai-dimensions' } } + + # ── Human blockers ─────────────────────────────────────────────────── + + # R-AI-6: Review concerns — reviewers explicitly flagged problems + if ($rs -le 0.3) { return @{ Category = 'review-concerns'; Confidence = $avgConf; Source = 'ai-dimensions' } } + + # R-AI-7: Design needed — direction very unclear + if ($dc -le 0.3) { return @{ Category = 'design-needed'; Confidence = $avgConf; Source = 'ai-dimensions' } } + + # R-AI-8: Direction unclear — moderate confusion + lukewarm reviews + if ($dc -le 0.5 -and $rs -le 0.5) { return @{ Category = 'direction-unclear'; Confidence = $avgConf; Source = 'ai-dimensions' } } + + # R-AI-9: Low code health — AI sees quality issues reviewers may have missed + # Separate from R-AI-6 (reviewer pushback) — lower confidence since reviewers aren't complaining + if ($ch -le 0.3) { return @{ Category = 'review-concerns'; Confidence = [Math]::Max($avgConf - 0.1, 0.1); Source = 'ai-dimensions' } } + + # ── Author responsiveness ──────────────────────────────────────────── + + # R-AI-10: Awaiting author — unresponsive but PR still has activity from others + if ($ar -le 0.3 -and $al -ge 0.3) { return @{ Category = 'awaiting-author'; Confidence = $avgConf; Source = 'ai-dimensions' } } + + # ── Activity-based buckets (use neutral band) ──────────────────────── + + # R-AI-11: Stale with feedback — low activity, has reviewer signal (not neutral) + if ($al -le 0.3 -and -not (& $isNeutral $rs)) { return @{ Category = 'stale-with-feedback'; Confidence 
function Get-CategoryFromRules ([PSCustomObject]$Pr, [PSCustomObject]$Enrichment) {
    <#
    .SYNOPSIS
        Deterministic, rule-based categorization used when no AI evaluation
        is available for a PR.
    .DESCRIPTION
        Evaluates an ordered list of rules (first match wins) over PR metadata
        and GitHub enrichment signals: review verdicts, CI status, and
        comment/commit activity timestamps.
    .OUTPUTS
        Hashtable with Category, Confidence, and Source keys.
    #>
    $enr     = $Enrichment
    $refTime = Get-Date

    # Whole days since the PR was opened.
    # NOTE(review): [DateTime]::Parse is culture-sensitive; assumes ISO-8601
    # timestamps from the GitHub API — TODO confirm upstream format.
    $ageDays = [Math]::Floor(($refTime - [DateTime]::Parse($Pr.CreatedAt)).TotalDays)

    # Staleness is driven by the most recent of: PR update, last comment, last commit.
    $latestStamp = @($Pr.UpdatedAt, $enr.LastCommentAt, $enr.LastCommitAt) |
        Where-Object { -not [string]::IsNullOrWhiteSpace($_) } |
        ForEach-Object { [DateTime]::Parse($_) } |
        Sort-Object -Descending |
        Select-Object -First 1
    $idleDays = if ($latestStamp) { [Math]::Floor(($refTime - $latestStamp).TotalDays) } else { $ageDays }

    # Days since the author last commented; falls back to PR age when unknown.
    $authorStamp    = if ($enr.AuthorLastActivityAt) { [DateTime]::Parse($enr.AuthorLastActivityAt) } else { $null }
    $authorIdleDays = if ($authorStamp) { [Math]::Floor(($refTime - $authorStamp).TotalDays) } else { $ageDays }

    $approved   = $enr.ApprovalCount -gt 0
    $changesReq = $enr.ChangesRequestedCount -gt 0
    $ciGreen    = $enr.ChecksStatus -eq 'SUCCESS'
    $ciRed      = $enr.ChecksStatus -eq 'FAILURE'
    $commented  = $enr.CommentCount -gt 0
    $reviewed   = $approved -or $changesReq

    # ── Ordered rules: first match wins ─────────────────────────────────
    # Rule 1: Approved + CI green + no unresolved objections
    if ($approved -and $ciGreen -and -not $changesReq -and $Pr.Mergeable -eq 'MERGEABLE') {
        return @{ Category = 'ready-to-merge'; Confidence = 0.8; Source = 'rules' }
    }
    # Rule 2: CI failing
    if ($ciRed) {
        return @{ Category = 'build-failures'; Confidence = 0.85; Source = 'rules' }
    }
    # Rule 3: Approved but CI not green
    if ($approved -and -not $ciRed -and -not $changesReq) {
        return @{ Category = 'approved-pending-merge'; Confidence = 0.7; Source = 'rules' }
    }
    # Rule 4: Changes requested + author silent > 14 days
    if ($changesReq -and $authorIdleDays -ge 14) {
        return @{ Category = 'awaiting-author'; Confidence = 0.75; Source = 'rules' }
    }
    # Rule 5: Likely abandoned (90+ days no activity)
    if ($idleDays -ge 90) {
        return @{ Category = 'likely-abandoned'; Confidence = 0.8; Source = 'rules' }
    }
    # Rule 6: Stale with feedback (30+ days, has reviews)
    if ($idleDays -ge 30 -and $reviewed) {
        return @{ Category = 'stale-with-feedback'; Confidence = 0.65; Source = 'rules' }
    }
    # Rule 7: Stale, no review (30+ days, no reviews)
    if ($idleDays -ge 30 -and -not $reviewed) {
        return @{ Category = 'stale-no-review'; Confidence = 0.65; Source = 'rules' }
    }
    # Rule 8: Changes requested, author responding
    if ($changesReq -and $authorIdleDays -lt 14) {
        return @{ Category = 'review-concerns'; Confidence = 0.6; Source = 'rules' }
    }
    # Rule 9: Fresh PR, no reviews yet (< 7 days)
    if ($ageDays -le 7 -and -not $reviewed) {
        return @{ Category = 'fresh-awaiting-review'; Confidence = 0.7; Source = 'rules' }
    }
    # Rule 10: In active review (recent activity + has comments/reviews)
    if ($idleDays -le 7 -and ($commented -or $reviewed)) {
        return @{ Category = 'in-active-review'; Confidence = 0.6; Source = 'rules' }
    }
    # Rule 11: Medium age, no review
    if (-not $reviewed -and $ageDays -gt 7 -and $ageDays -le 30) {
        return @{ Category = 'stale-no-review'; Confidence = 0.5; Source = 'rules' }
    }
    # Rule 12: Has comments but not formally reviewed
    if ($commented -and -not $reviewed -and $idleDays -le 14) {
        return @{ Category = 'in-active-review'; Confidence = 0.4; Source = 'rules' }
    }
    # Rule 13: Fallback
    return @{ Category = 'needs-attention'; Confidence = 0.3; Source = 'rules-fallback' }
}
function New-CategorizedPr {
    <#
    .SYNOPSIS
        Assemble the final categorized-PR record from raw PR metadata,
        GitHub enrichment, review findings, and the categorization result.
    .OUTPUTS
        PSCustomObject ready for JSON serialization into categorized-prs.json.
    #>
    param(
        [PSCustomObject]$Pr,
        [PSCustomObject]$Enrichment,
        [PSCustomObject]$ReviewData,
        [hashtable]$CatResult,
        [PSCustomObject]$AiEval
    )

    $stamp   = Get-Date
    $ageDays = [Math]::Floor(($stamp - [DateTime]::Parse($Pr.CreatedAt)).TotalDays)

    # Staleness uses the most recent of: PR update, last comment, last commit.
    $latest = @($Pr.UpdatedAt, $Enrichment.LastCommentAt, $Enrichment.LastCommitAt) |
        Where-Object { -not [string]::IsNullOrWhiteSpace($_) } |
        ForEach-Object { [DateTime]::Parse($_) } |
        Sort-Object -Descending |
        Select-Object -First 1
    $idleDays = if ($latest) { [Math]::Floor(($stamp - $latest).TotalDays) } else { $ageDays }

    $effort           = Get-EffortEstimate -ReviewData $ReviewData
    $findingSummaries = Get-FindingSummaries -Findings $ReviewData.Findings

    # Compact one-glance signal strings for the reports.
    $signals = [System.Collections.Generic.List[string]]::new()
    if ($Enrichment.ApprovalCount -gt 0)         { $signals.Add("✅$($Enrichment.ApprovalCount) approvals") }
    if ($Enrichment.ChangesRequestedCount -gt 0) { $signals.Add("❌$($Enrichment.ChangesRequestedCount) changes requested") }
    if ($Enrichment.ChecksStatus -eq 'FAILURE')  { $signals.Add('🔴 CI failing') }
    if ($Enrichment.ChecksStatus -eq 'SUCCESS')  { $signals.Add('🟢 CI passing') }
    if ($ReviewData.HighSeverity -gt 0)          { $signals.Add("🔥$($ReviewData.HighSeverity) high-sev") }
    if ($idleDays -ge 30)                        { $signals.Add("💤 $idleDays days stale") }

    # Tags: AI-provided tags plus computed ones, de-duplicated.
    $tagList = [System.Collections.Generic.List[string]]::new()
    if ($AiEval -and $AiEval.Tags) { foreach ($t in $AiEval.Tags) { $tagList.Add($t) } }
    if (($Pr.Additions + $Pr.Deletions) -ge 500)                     { $tagList.Add('large-pr') }
    if ($ReviewData.HighSeverity -gt 0)                              { $tagList.Add('review-high-severity') }
    if ($ReviewData.HasReview -and $ReviewData.TotalFindings -eq 0)  { $tagList.Add('review-clean') }
    $tags = @($tagList | Select-Object -Unique)

    return [PSCustomObject]@{
        Number                = $Pr.Number
        Title                 = $Pr.Title
        Author                = $Pr.Author
        Url                   = $Pr.Url
        AgeInDays             = $ageDays
        DaysSinceActivity     = $idleDays
        Category              = $CatResult.Category
        Confidence            = [Math]::Round($CatResult.Confidence, 2)
        CategorizationSource  = $CatResult.Source
        Signals               = @($signals)
        Tags                  = $tags
        Effort                = $effort
        EffortLabel           = Get-EffortLabel $effort
        DimensionScores       = if ($AiEval) { $AiEval.Dimensions } else { $null }
        DiscussionSummary     = if ($AiEval) { $AiEval.DiscussionSummary } else { $null }
        SupersededBy          = if ($AiEval) { $AiEval.SupersededBy } else { $null }
        Labels                = $Pr.Labels
        LinkedIssues          = $Pr.LinkedIssues
        Additions             = $Pr.Additions
        Deletions             = $Pr.Deletions
        ChangedFiles          = $Pr.ChangedFiles
        ChecksStatus          = $Enrichment.ChecksStatus
        FailingChecks         = $Enrichment.FailingChecks
        ApprovalCount         = $Enrichment.ApprovalCount
        ChangesRequestedCount = $Enrichment.ChangesRequestedCount
        ReviewData            = [PSCustomObject]@{
            HasReview        = $ReviewData.HasReview
            Signal           = $ReviewData.Signal
            HighSeverity     = $ReviewData.HighSeverity
            MedSeverity      = $ReviewData.MedSeverity
            LowSeverity      = $ReviewData.LowSeverity
            TotalFindings    = $ReviewData.TotalFindings
            FindingSummaries = $findingSummaries
        }
    }
}
# ── Parallel enrichment of all PRs, then sequential categorization ──────
$enriched = [System.Collections.Concurrent.ConcurrentDictionary[int, PSCustomObject]]::new()
$total = $prs.Count

$prs | ForEach-Object -ThrottleLimit $ThrottleLimit -Parallel {
    $pr = $_
    $n = [int]$pr.Number
    $repo = $using:Repository
    $dict = $using:enriched

    # Inline enrichment (module functions are not visible inside -Parallel).
    # NOTE(review): `gh api --paginate` emits one JSON array per page; this
    # assumes a single page or ConvertFrom-Json tolerating concatenated
    # documents — TODO confirm (newer gh supports --slurp).
    $reviews = @()
    try { $reviews = gh api "repos/$repo/pulls/$n/reviews" --paginate 2>$null | ConvertFrom-Json } catch { }
    $approvals = @($reviews | Where-Object { $_.state -eq 'APPROVED' }).Count
    $changesReq = @($reviews | Where-Object { $_.state -eq 'CHANGES_REQUESTED' }).Count
    $reviewerLogins = @($reviews | Where-Object { $_.state -in 'APPROVED', 'CHANGES_REQUESTED' } | ForEach-Object { $_.user.login } | Select-Object -Unique)

    $comments = @()
    try { $comments = gh api "repos/$repo/issues/$n/comments" --paginate 2>$null | ConvertFrom-Json } catch { }

    # Last comment overall and last comment by the PR author.
    $lastCommentAt = $null; $authorLastAt = $null
    if ($comments.Count -gt 0) {
        $lastCommentAt = ($comments | Sort-Object created_at -Descending | Select-Object -First 1).created_at
        $ac = @($comments | Where-Object { $_.user.login -eq $pr.Author })
        if ($ac.Count -gt 0) { $authorLastAt = ($ac | Sort-Object created_at -Descending | Select-Object -First 1).created_at }
    }

    # CI status from check-runs on the head ref.
    # NOTE(review): uses the head *branch name* as the commit ref — works for
    # same-repo branches; presumably fails for fork PRs (status stays UNKNOWN
    # via the catch). TODO confirm.
    $checksStatus = 'UNKNOWN'; $failingChecks = @()
    try {
        $cj = gh api "repos/$repo/commits/$($pr.HeadRefName)/check-runs" 2>$null | ConvertFrom-Json
        if ($cj.check_runs) {
            $failed  = @($cj.check_runs | Where-Object { $_.conclusion -eq 'failure' })
            $pending = @($cj.check_runs | Where-Object { $_.status -ne 'completed' })
            $failingChecks = @($failed | ForEach-Object { $_.name })
            if ($failed.Count -gt 0) { $checksStatus = 'FAILURE' }
            elseif ($pending.Count -gt 0) { $checksStatus = 'PENDING' }
            else { $checksStatus = 'SUCCESS' }
        }
    } catch { }

    $lastCommitAt = $null
    try {
        $commits = gh api "repos/$repo/pulls/$n/commits" --paginate 2>$null | ConvertFrom-Json
        if ($commits.Count -gt 0) {
            $lastCommitAt = ($commits | Sort-Object { $_.commit.committer.date } -Descending | Select-Object -First 1).commit.committer.date
        }
    } catch { }

    $dict[$n] = [PSCustomObject]@{
        ApprovalCount         = $approvals
        ChangesRequestedCount = $changesReq
        ReviewerLogins        = $reviewerLogins
        CommentCount          = $comments.Count
        ChecksStatus          = $checksStatus
        FailingChecks         = $failingChecks
        LastCommentAt         = $lastCommentAt
        LastCommitAt          = $lastCommitAt
        AuthorLastActivityAt  = $authorLastAt
    }
}

Write-LogHost " Enriched $($enriched.Count)/$total PRs" -ForegroundColor Green

# ── Categorize each PR ────────────────────────────────────────────────────

$categorized = @()
$done = 0

foreach ($pr in $prs) {
    $n = [int]$pr.Number
    $done++

    $e = $enriched[$n]
    if (-not $e) {
        # Should not happen but handle gracefully with a neutral enrichment.
        $e = [PSCustomObject]@{
            ApprovalCount = 0; ChangesRequestedCount = 0; ReviewerLogins = @()
            CommentCount = 0; ChecksStatus = 'UNKNOWN'; FailingChecks = @()
            LastCommentAt = $null; LastCommitAt = $null; AuthorLastActivityAt = $null
        }
    }

    $reviewData = Get-ReviewFindings -PRNumber $n -ReviewRoot $resolvedReviewOutputRoot
    $aiEval = $aiLookup[$n]

    # Determine whether usable AI dimension scores exist.
    # FIX: the original expression relied on -and binding tighter than -or, and
    # a hashtable's .PSObject.Properties.Count is always > 0 (adapted members
    # such as Keys/Count), so an EMPTY dimensions hashtable wrongly entered the
    # dimension-scoring path. Split the check explicitly by type instead.
    $hasDims = $false
    if ($aiEval -and $aiEval.Dimensions) {
        if ($aiEval.Dimensions -is [hashtable]) {
            $hasDims = $aiEval.Dimensions.Count -gt 0
        } else {
            $hasDims = @($aiEval.Dimensions.PSObject.Properties).Count -gt 0
        }
    }

    # Determine category: AI dimensions > AI suggestion > deterministic rules.
    if ($hasDims) {
        # Normalize PSCustomObject dimensions into a hashtable.
        $dims = @{}
        if ($aiEval.Dimensions -is [hashtable]) {
            $dims = $aiEval.Dimensions
        } else {
            foreach ($prop in $aiEval.Dimensions.PSObject.Properties) {
                $dims[$prop.Name] = $prop.Value
            }
        }
        $catResult = Get-CategoryFromDimensions -Dims $dims -SuggestedCategory $aiEval.SuggestedCategory

        # Cross-check: enrichment hard facts override obvious AI misjudgments.
        if ($catResult.Category -eq 'ready-to-merge' -and $e.ChecksStatus -eq 'FAILURE') {
            $catResult = @{ Category = 'build-failures'; Confidence = 0.9; Source = 'ai-corrected' }
        } elseif ($catResult.Category -eq 'ready-to-merge' -and $e.ChangesRequestedCount -gt 0) {
            $catResult = @{ Category = 'review-concerns'; Confidence = 0.7; Source = 'ai-corrected' }
        }
    } else {
        $catResult = Get-CategoryFromRules -Pr $pr -Enrichment $e
    }

    $catPr = New-CategorizedPr -Pr $pr -Enrichment $e -ReviewData $reviewData -CatResult $catResult -AiEval $aiEval
    $categorized += $catPr

    Write-LogHost " [$done/$total] #$n → $($catResult.Category) ($($catResult.Source))" -ForegroundColor Gray
}
$e.ChecksStatus -eq 'FAILURE') { + $catResult = @{ Category = 'build-failures'; Confidence = 0.9; Source = 'ai-corrected' } + } elseif ($catResult.Category -eq 'ready-to-merge' -and $e.ChangesRequestedCount -gt 0) { + $catResult = @{ Category = 'review-concerns'; Confidence = 0.7; Source = 'ai-corrected' } + } + } else { + $catResult = Get-CategoryFromRules -Pr $pr -Enrichment $e + } + + $catPr = New-CategorizedPr -Pr $pr -Enrichment $e -ReviewData $reviewData -CatResult $catResult -AiEval $aiEval + $categorized += $catPr + + Write-LogHost " [$done/$total] #$n → $($catResult.Category) ($($catResult.Source))" -ForegroundColor Gray +} + +# ── Write output ──────────────────────────────────────────────────────── + +$output = [PSCustomObject]@{ + CategorizedAt = (Get-Date).ToString('o') + Repository = $Repository + TotalCount = $categorized.Count + CategoryCounts = @{} + Prs = $categorized +} + +# Build category counts +$categorized | Group-Object Category | ForEach-Object { + $output.CategoryCounts[$_.Name] = $_.Count +} + +$output | ConvertTo-Json -Depth 10 | Set-Content $OutputPath -Encoding UTF8 +Write-LogHost "Done: $($categorized.Count) PRs categorized → $OutputPath" -ForegroundColor Cyan diff --git a/.github/skills/pr-triage/scripts/Start-PrTriage.ps1 b/.github/skills/pr-triage/scripts/Start-PrTriage.ps1 new file mode 100644 index 000000000000..0a121fd9aa8c --- /dev/null +++ b/.github/skills/pr-triage/scripts/Start-PrTriage.ps1 @@ -0,0 +1,415 @@ +<# +.SYNOPSIS + Orchestrate a full PR triage run: collect → review → categorize → report. + Fully resumable — skips any step whose output file already exists. + +.DESCRIPTION + This is the main entry point for the pr-triage skill. It: + + 1. Collects all open PRs via Get-OpenPrs.ps1 → all-prs.json + 2. Runs detailed AI reviews via the pr-review skill → prReview/<N>/ + 3. AI enrichment via Invoke-AiEnrichment.ps1 → ai-enrichment.json + 4. Categorizes via Invoke-PrCategorization.ps1 → categorized-prs.json + 5. 
Generates summary.md and per-category reports via Export-TriageReport.ps1 + + Resume: Just re-run the same command. The orchestrator checks which output + files exist on disk and skips completed work. No external state JSON needed. + + Progress: While this is running (or after a crash), call: + .\Get-TriageProgress.ps1 [-Detailed] [-AsJson] + to see exactly where things stand. + +.PARAMETER Repository + GitHub repository in owner/repo format. Default: microsoft/PowerToys + +.PARAMETER PRNumbers + PR numbers to triage. Required. + +.PARAMETER MaxConcurrent + Max parallel review / enrichment jobs. Default: 20. + +.PARAMETER TimeoutMin + Per-job timeout in minutes (used by review step). Default: 5. + +.PARAMETER RunDate + Date folder name (YYYY-MM-DD). Default: today. + +.PARAMETER Force + Re-run all steps even if output files exist. + +.EXAMPLE + .\Start-PrTriage.ps1 -PRNumbers 45234,45235 + Run full triage for specific PRs. + +.EXAMPLE + # Resume after crash + .\Start-PrTriage.ps1 + # It auto-detects completed steps and picks up where it left off. +#> +# NOTE: Do NOT use [CmdletBinding()], [Parameter(Mandatory)], or [ValidateSet()] +# here. These make the script "advanced" which propagates ErrorActionPreference +# through PS7's plumbing and can silently crash child scope monitoring loops. +param( + [string]$Repository = 'microsoft/PowerToys', + [int[]]$PRNumbers, + [int]$MaxConcurrent = 20, + [int]$TimeoutMin = 5, + [string]$RunDate, + [string]$CLIType = 'copilot', + [string]$RunLabel, + [string]$OutputRoot = 'Generated Files/pr-triage', + [string]$ReviewOutputRoot = 'Generated Files/prReview', + [string]$LogPath, + [switch]$Force, + [switch]$SkipAiEnrichment, + [switch]$SkipReview +) + +# Use 'Stop' so any gh-cli or JSON error is immediately caught. +$ErrorActionPreference = 'Stop' + +# Manual validation (replacing [Parameter(Mandatory)] and [ValidateSet()]) +if (-not $PRNumbers -or $PRNumbers.Count -eq 0) { + Write-Error 'Start-PrTriage: -PRNumbers is required.' 
+ return +} +if ($CLIType -notin 'copilot', 'claude') { + Write-Error "Start-PrTriage: Invalid -CLIType '$CLIType'. Must be 'copilot' or 'claude'." + return +} + +# ── Load libraries ────────────────────────────────────────────────────────── + +$scriptDir = Split-Path -Parent $MyInvocation.MyCommand.Path + +# ── Resolve paths ─────────────────────────────────────────────────────────── + +$repoRoot = git rev-parse --show-toplevel 2>$null +if (-not $repoRoot) { $repoRoot = (Get-Location).Path } + +$resolvedReviewOutputRoot = if ([System.IO.Path]::IsPathRooted($ReviewOutputRoot)) { + $ReviewOutputRoot +} else { + Join-Path $repoRoot $ReviewOutputRoot +} + +$resolvedOutputRoot = if ([System.IO.Path]::IsPathRooted($OutputRoot)) { + $OutputRoot +} else { + Join-Path $repoRoot $OutputRoot +} + +if (-not $RunDate) { $RunDate = (Get-Date).ToString('yyyy-MM-dd') } +if (-not $RunLabel) { $RunLabel = $CLIType } + +$triageRoot = $resolvedOutputRoot +$runFolder = Join-Path $RunDate $RunLabel +$runRoot = Join-Path $triageRoot $runFolder +$cacheDir = Join-Path $triageRoot '__cache' + +foreach ($d in @($runRoot, $cacheDir)) { + if (-not (Test-Path $d)) { New-Item -ItemType Directory -Path $d -Force | Out-Null } +} + +if (-not $LogPath) { $LogPath = Join-Path $runRoot 'triage.log' } +if (-not [System.IO.Path]::IsPathRooted($LogPath)) { $LogPath = Join-Path $runRoot $LogPath } +$logDir = Split-Path -Parent $LogPath +if (-not (Test-Path $logDir)) { New-Item -ItemType Directory -Path $logDir -Force | Out-Null } + +function Write-LogHost { + param( + [Parameter(Position = 0, ValueFromRemainingArguments = $true)] + [object[]]$Object, + [object]$ForegroundColor, + [object]$BackgroundColor, + [switch]$NoNewline, + [object]$Separator = ' ' + ) + + $message = if ($null -eq $Object) { '' } else { ($Object -join [string]$Separator) } + Add-Content -Path $LogPath -Value ("[{0}] {1}" -f (Get-Date).ToString('o'), $message) + + $writeParams = @{} + if ($PSBoundParameters.ContainsKey('Object')) { 
$writeParams.Object = $Object } + if ($PSBoundParameters.ContainsKey('ForegroundColor') -and -not [string]::IsNullOrWhiteSpace([string]$ForegroundColor)) { $writeParams.ForegroundColor = $ForegroundColor } + if ($PSBoundParameters.ContainsKey('BackgroundColor') -and -not [string]::IsNullOrWhiteSpace([string]$BackgroundColor)) { $writeParams.BackgroundColor = $BackgroundColor } + if ($PSBoundParameters.ContainsKey('NoNewline')) { $writeParams.NoNewline = $NoNewline } + if ($PSBoundParameters.ContainsKey('Separator')) { $writeParams.Separator = $Separator } + Microsoft.PowerShell.Utility\Write-Host @writeParams +} + +$allPrsFile = Join-Path $runRoot 'all-prs.json' +$aiCatFile = Join-Path $runRoot 'ai-enrichment.json' +$categorizedFile = Join-Path $runRoot 'categorized-prs.json' +$summaryFile = Join-Path $runRoot 'summary.md' + +$totalSteps = 5 + +Write-LogHost '' +Write-LogHost '═══════════════════════════════════════════════════════════════' -ForegroundColor Cyan +Write-LogHost " PR Triage — $RunDate" -ForegroundColor Cyan +Write-LogHost " Repository: $Repository" -ForegroundColor Cyan +Write-LogHost " PRs: $($PRNumbers -join ', ')" -ForegroundColor Cyan +Write-LogHost " AI Engine: $CLIType" -ForegroundColor Cyan +Write-LogHost " Run Label: $RunLabel" -ForegroundColor Cyan +Write-LogHost " Triage Root: $triageRoot" -ForegroundColor Cyan +Write-LogHost " Review Root: $resolvedReviewOutputRoot" -ForegroundColor Cyan +Write-LogHost " Log File: $LogPath" -ForegroundColor Cyan +Write-LogHost ' Pipeline: Collect → Review → AI Enrich → Categorize → Report' -ForegroundColor Cyan +Write-LogHost '═══════════════════════════════════════════════════════════════' -ForegroundColor Cyan +Write-LogHost '' + +$batchStart = Get-Date + +# ═══════════════════════════════════════════════════════════════════════════ +# STEP 1 — Collect open PRs +# ═══════════════════════════════════════════════════════════════════════════ + +$step1Done = (Test-Path $allPrsFile) -and ((Get-Item 
$allPrsFile).Length -gt 0) -and -not $Force + +if ($step1Done) { + Write-LogHost "[1/$totalSteps] Collection — already done (all-prs.json exists)" -ForegroundColor Green + $allPrsData = Get-Content $allPrsFile -Raw | ConvertFrom-Json +} else { + Write-LogHost "[1/$totalSteps] Collecting selected PRs..." -ForegroundColor Yellow + + $collectParams = @{ + Repository = $Repository + PRNumbers = $PRNumbers + OutputPath = $allPrsFile + LogPath = (Join-Path $runRoot 'step1-collection.log') + } + + & (Join-Path $scriptDir 'Get-OpenPrs.ps1') @collectParams + + if (-not (Test-Path $allPrsFile)) { + Write-LogHost 'Collection failed — all-prs.json not created.' -ForegroundColor Red + return + } + $allPrsData = Get-Content $allPrsFile -Raw | ConvertFrom-Json + Write-LogHost " Collected $($allPrsData.TotalCount) PRs" -ForegroundColor Green +} + +$prNumbers = $allPrsData.Prs | ForEach-Object { $_.Number } + +# ═══════════════════════════════════════════════════════════════════════════ +# STEP 2 — Detailed PR reviews via pr-review skill +# ═══════════════════════════════════════════════════════════════════════════ +# +# Run reviews immediately after collection so their output is available +# for categorization and reporting. Delegates to the pr-review skill's +# Start-PRReviewWorkflow.ps1 in the current PowerShell context. +# Output goes to Generated Files/prReview/<PR>/ as usual. +# ═══════════════════════════════════════════════════════════════════════════ + +Write-LogHost '' +Write-LogHost "[2/$totalSteps] Running detailed PR reviews..." 
-ForegroundColor Yellow + +$reviewWorkflow = Join-Path $repoRoot '.github' 'skills' 'pr-review' 'scripts' 'Start-PRReviewWorkflow.ps1' + +if (-not (Test-Path $reviewWorkflow)) { + Write-LogHost "pr-review skill not found — expected: $reviewWorkflow" -ForegroundColor Red + return +} + +$reviewPrNumbers = @($allPrsData.Prs | ForEach-Object { [int]$_.Number }) + +if ($reviewPrNumbers.Count -eq 0) { + Write-LogHost '[2] No PRs to review' -ForegroundColor Green +} elseif ($SkipReview) { + Write-LogHost ' Review step skipped (-SkipReview)' -ForegroundColor Yellow +} else { + Write-LogHost " PRs to review: $($reviewPrNumbers.Count)" -ForegroundColor Gray + Write-LogHost " PR numbers: $($reviewPrNumbers -join ', ')" -ForegroundColor Gray + + $reviewLogFile = Join-Path $runRoot 'step2-review.log' + + # Check if all PRs already have completed reviews (skip condition) + $reviewOutDir = $resolvedReviewOutputRoot + $alreadyReviewed = 0 + foreach ($n in $reviewPrNumbers) { + $overview = Join-Path $reviewOutDir $n.ToString() '00-OVERVIEW.md' + if (Test-Path $overview) { $alreadyReviewed++ } + } + + if ($alreadyReviewed -eq $reviewPrNumbers.Count -and -not $Force) { + Write-LogHost " All $alreadyReviewed/$($reviewPrNumbers.Count) PRs already reviewed — skipping" -ForegroundColor Green + } else { + $reviewParams = @{ + PRNumbers = $reviewPrNumbers + CLIType = $CLIType + OutputRoot = $resolvedReviewOutputRoot + MaxConcurrent = $MaxConcurrent + InactivityTimeoutSeconds = [Math]::Max(($TimeoutMin * 60), 60) + LogPath = $reviewLogFile + } + if ($Force) { $reviewParams.Force = $true } + + try { + & $reviewWorkflow @reviewParams + } catch { + Write-LogHost " Review step failed (non-fatal — continuing): $($_.Exception.Message)" -ForegroundColor Yellow + } + + $completedReviews = 0 + foreach ($n in $reviewPrNumbers) { + $overview = Join-Path $reviewOutDir $n.ToString() '00-OVERVIEW.md' + if (Test-Path $overview) { $completedReviews++ } + } + Write-LogHost " Reviews: 
$completedReviews/$($reviewPrNumbers.Count) completed" -ForegroundColor $(if ($completedReviews -eq $reviewPrNumbers.Count) { 'Green' } else { 'Yellow' }) + } +} + +# ═══════════════════════════════════════════════════════════════════════════ +# STEP 3 — AI Enrichment (sequential AI CLI per PR) +# ═══════════════════════════════════════════════════════════════════════════ +# +# Each PR gets its own AI CLI invocation that reads: +# - PR metadata and AI code review findings (from Step 2) +# - Full discussion comments via gh CLI +# - Images and attachments via GitHub MCP tools +# This enriches each PR with 7 dimension scores and context-aware signals. +# Actual category assignment happens in Step 4. +# ═══════════════════════════════════════════════════════════════════════════ + +$step3Done = (Test-Path $aiCatFile) -and ((Get-Item $aiCatFile).Length -gt 0) -and -not $Force + +if ($SkipAiEnrichment) { + Write-LogHost '' + Write-LogHost "[3/$totalSteps] AI Enrichment — skipped (-SkipAiEnrichment)" -ForegroundColor Yellow +} elseif ($step3Done) { + Write-LogHost '' + Write-LogHost "[3/$totalSteps] AI Enrichment — already done (ai-enrichment.json exists)" -ForegroundColor Green +} else { + Write-LogHost '' + Write-LogHost "[3/$totalSteps] AI Enrichment ($CLIType) — reading discussions + images per PR..." 
-ForegroundColor Yellow + + $aiCatScript = Join-Path $scriptDir 'Invoke-AiEnrichment.ps1' + if (-not (Test-Path $aiCatScript)) { + Write-LogHost " AI enrichment script not found: $aiCatScript" -ForegroundColor Red + Write-LogHost " Falling back to rule-based categorization only" -ForegroundColor Yellow + } else { + $aiCatParams = @{ + InputPath = $allPrsFile + OutputPath = $aiCatFile + Repository = $Repository + TimeoutMin = $TimeoutMin + CLIType = $CLIType + OutputRoot = $runRoot + ReviewOutputRoot = $resolvedReviewOutputRoot + LogPath = (Join-Path $runRoot 'step3-ai-enrichment.log') + MaxConcurrent = $MaxConcurrent + } + if ($Force) { $aiCatParams.Force = $true } + + & $aiCatScript @aiCatParams + + if (Test-Path $aiCatFile) { + $aiCatData = Get-Content $aiCatFile -Raw | ConvertFrom-Json + Write-LogHost " AI enriched: $($aiCatData.AiSuccessCount) PRs" -ForegroundColor Green + if ($aiCatData.AiFailedCount -gt 0) { + Write-LogHost " AI failed (will use rule fallback): $($aiCatData.AiFailedCount) PRs" -ForegroundColor Yellow + } + } else { + Write-LogHost " AI enrichment did not produce output (non-fatal — Step 4 uses rules only)" -ForegroundColor Yellow + } + } +} + +# ═══════════════════════════════════════════════════════════════════════════ +# STEP 4 — Categorization +# ═══════════════════════════════════════════════════════════════════════════ +# +# Enriches PRs via GitHub API (reviews, CI, activity timestamps), merges +# with AI enrichment from Step 3, and assigns final triage categories. +# PRs with AI dimensions use dimension rules; others get rule-based fallback. 
+# ═══════════════════════════════════════════════════════════════════════════ + +$step4Done = (Test-Path $categorizedFile) -and ((Get-Item $categorizedFile).Length -gt 0) -and -not $Force + +if ($step4Done) { + Write-LogHost '' + Write-LogHost "[4/$totalSteps] Categorization — already done (categorized-prs.json exists)" -ForegroundColor Green +} else { + Write-LogHost '' + Write-LogHost "[4/$totalSteps] Categorizing PRs (parallel, max-concurrent: $MaxConcurrent)..." -ForegroundColor Yellow + + $catParams = @{ + InputPath = $allPrsFile + OutputPath = $categorizedFile + Repository = $Repository + ThrottleLimit = $MaxConcurrent + ReviewOutputRoot = $resolvedReviewOutputRoot + LogPath = (Join-Path $runRoot 'step4-categorization.log') + } + + # Pass AI enrichment results if available + if ((Test-Path $aiCatFile) -and -not $SkipAiEnrichment) { + $catParams.AiEnrichmentPath = $aiCatFile + } + + & (Join-Path $scriptDir 'Invoke-PrCategorization.ps1') @catParams + + if (-not (Test-Path $categorizedFile)) { + Write-LogHost ' [ERROR] Categorization failed — categorized-prs.json not created.' -ForegroundColor Red + return + } + + Write-LogHost ' Categorization complete' -ForegroundColor Green +} + +# ═══════════════════════════════════════════════════════════════════════════ +# STEP 5 — Generate reports (summary.md + per-category .md) +# ═══════════════════════════════════════════════════════════════════════════ + +$step5Done = (Test-Path $summaryFile) -and ((Get-Item $summaryFile).Length -gt 0) -and -not $Force + +if ($step5Done) { + Write-LogHost '' + Write-LogHost "[5/$totalSteps] Reporting — already done (summary.md exists)" -ForegroundColor Green +} else { + Write-LogHost '' + Write-LogHost "[5/$totalSteps] Generating reports..." 
-ForegroundColor Yellow + + $reportParams = @{ + InputPath = $categorizedFile + OutputDir = $runRoot + Repository = $Repository + IncludeDetailedReview = $true + LogPath = (Join-Path $runRoot 'step5-reporting.log') + } + + # Find previous run for delta comparison + $prevRun = Get-ChildItem $triageRoot -Directory | + Where-Object { $_.Name -match '^\d{4}-\d{2}-\d{2}$' -and $_.Name -lt $RunDate } | + Sort-Object Name -Descending | Select-Object -First 1 + if ($prevRun) { + $prevCatFile = Join-Path $prevRun.FullName 'categorized-prs.json' + if (Test-Path $prevCatFile) { + $reportParams.PreviousInputPath = $prevCatFile + Write-LogHost " Comparing against previous run: $($prevRun.Name)" -ForegroundColor Gray + } + } + + & (Join-Path $scriptDir 'Export-TriageReport.ps1') @reportParams + + Write-LogHost ' Reports generated' -ForegroundColor Green +} + +# ═══════════════════════════════════════════════════════════════════════════ +# Done +# ═══════════════════════════════════════════════════════════════════════════ + +$totalElapsed = (Get-Date) - $batchStart + +Write-LogHost '' +Write-LogHost '═══════════════════════════════════════════════════════════════' -ForegroundColor Cyan +Write-LogHost ' Triage complete!' 
-ForegroundColor Green +Write-LogHost " Duration: $([math]::Round($totalElapsed.TotalMinutes, 1)) minutes" -ForegroundColor Cyan +Write-LogHost " Reports: $runRoot" -ForegroundColor Cyan +Write-LogHost ' Start with: summary.md' -ForegroundColor Cyan +Write-LogHost '' +Write-LogHost ' Check progress any time:' -ForegroundColor Gray +Write-LogHost " .\Get-TriageProgress.ps1 -RunDate $RunDate -Detailed" -ForegroundColor Gray +Write-LogHost '═══════════════════════════════════════════════════════════════' -ForegroundColor Cyan +Write-LogHost '' diff --git a/.github/skills/pr-triage/scripts/TaskRunner.ps1 b/.github/skills/pr-triage/scripts/TaskRunner.ps1 new file mode 100644 index 000000000000..f800af3646fc --- /dev/null +++ b/.github/skills/pr-triage/scripts/TaskRunner.ps1 @@ -0,0 +1,545 @@ +<# +.SYNOPSIS + Shared task-runner library: file-based progress, parallel execution, timeout, + heartbeat liveness, and crash-resume — all driven by result files on disk + (no separate JSON state). + +.DESCRIPTION + Design principles: + ───────────────── + a. PARALLEL CONTROL — Start-TaskBatch queues PowerShell jobs up to a + configurable throttle limit, polling for completion every N seconds. + + b. TIMEOUT / LIVENESS — Every running task writes a heartbeat file + (.heartbeat) periodically. The supervisor detects stale heartbeats + and kills the job, then optionally retries. + + c. RESUMABLE VIA RESULT FILES — Each step of each task writes a + well-known output file (e.g., enriched.json, categorized.json, + summary.md). Get-StepStatus inspects the task folder to decide + what has already completed. No extra state JSON is required. + + d. SELF-CONTAINED STEPS — Every step is a script block that receives + the task folder path. A step is "done" when its expected output + file exists and is non-empty. 
#     File layout per task:
#       <RunRoot>/<TaskId>/
#         .started                 — touched when the task begins
#         .heartbeat               — updated every HeartbeatSec seconds
#         .completed               — touched on success (contains exit code 0)
#         .failed                  — touched on failure (contains error text)
#         .timeout                 — touched when killed by supervisor
#         run-YYYYMMDD-HHmmss.log  — full stdout/stderr capture (unique per attempt)
#         step1-collect.json
#         step2-enrich.json
#         step3-categorize.json
#         ...                      — any file the step produces
#
#     e. LOG CAPTURE — Every job attempt writes a timestamped log file
#        (run-<ts>.log) capturing all stdout/stderr. Each retry gets its
#        own unique file. Test-LogAlive checks whether the latest log was
#        written to recently (secondary liveness: even when heartbeat is
#        stale, a growing log means the CLI is still producing output).
#
#     Usage:
#       . ./TaskRunner.ps1   # dot-source
#       Start-TaskBatch -Tasks $list -RunRoot $dir -MaxConcurrent 5
#
# .NOTES
#     Dot-source this file; all functions become available in the caller's scope.
#>

#requires -Version 7.0

# ── Console helpers ─────────────────────────────────────────────────────────
# Uniform "[TR]"-prefixed console output, color-coded by severity.

function Write-TRInfo { param([string]$M) Write-Host " [TR] $M" -ForegroundColor Cyan }
function Write-TRWarn { param([string]$M) Write-Host " [TR] $M" -ForegroundColor Yellow }
function Write-TRErr  { param([string]$M) Write-Host " [TR] $M" -ForegroundColor Red }
function Write-TROk   { param([string]$M) Write-Host " [TR] $M" -ForegroundColor Green }

# ── Step status (resume detection) ──────────────────────────────────────────

function Get-StepStatus {
    <#
    .SYNOPSIS
        Inspect a task folder and return which steps have completed.
    .DESCRIPTION
        Looks for expected output files to decide whether a step is done.
        A step counts as done when its output file exists AND is non-empty.
        Returns an ordered hashtable: @{ 'step1-collect' = $true; 'step2-enrich' = $false; ... }
    .PARAMETER TaskDir
        Absolute path to the task folder.
    .PARAMETER Steps
        Array of step descriptors:
        @( @{ Name='step1-collect'; OutputFile='step1-collect.json' }, ... )
    #>
    param(
        [Parameter(Mandatory)]
        [string]$TaskDir,

        [Parameter(Mandatory)]
        [array]$Steps
    )

    $status = [ordered]@{}
    foreach ($step in $Steps) {
        $outFile = Join-Path $TaskDir $step.OutputFile
        # Non-empty check: a zero-byte file (e.g. from a crashed writer) does
        # not count as a completed step.
        $done = (Test-Path $outFile) -and ((Get-Item $outFile).Length -gt 0)
        $status[$step.Name] = $done
    }
    return $status
}

function Get-NextPendingStep {
    <#
    .SYNOPSIS
        Return the first step that has NOT completed (output file missing/empty),
        or $null when all steps are done.
    #>
    param(
        [Parameter(Mandatory)]
        [string]$TaskDir,

        [Parameter(Mandatory)]
        [array]$Steps
    )

    $status = Get-StepStatus -TaskDir $TaskDir -Steps $Steps
    foreach ($step in $Steps) {
        if (-not $status[$step.Name]) {
            return $step
        }
    }
    return $null   # all done
}

function Get-TaskProgress {
    <#
    .SYNOPSIS
        Returns a progress object: completed/total counts, percentage,
        the next pending step name, and the full per-step status map.
    #>
    param(
        [Parameter(Mandatory)]
        [string]$TaskDir,

        [Parameter(Mandatory)]
        [array]$Steps
    )

    $status = Get-StepStatus -TaskDir $TaskDir -Steps $Steps
    $done   = ($status.Values | Where-Object { $_ }).Count
    $total  = $Steps.Count
    $pct    = if ($total -gt 0) { [math]::Round(($done / $total) * 100) } else { 0 }
    $next   = Get-NextPendingStep -TaskDir $TaskDir -Steps $Steps

    return [PSCustomObject]@{
        CompletedCount = $done
        TotalCount     = $total
        Percent        = $pct
        NextStep       = if ($next) { $next.Name } else { $null }
        Status         = $status
    }
}

# ── Heartbeat helpers ───────────────────────────────────────────────────────

function Test-HeartbeatAlive {
    <#
    .SYNOPSIS
        Returns $true if the heartbeat was updated within the staleness window.
    .PARAMETER TaskDir
        Task folder containing the .heartbeat file.
    .PARAMETER StaleSec
        Maximum age in seconds before the heartbeat is considered stale.
    #>
    param(
        [string]$TaskDir,
        [int]$StaleSec = 120
    )
    $hb = Join-Path $TaskDir '.heartbeat'
    if (-not (Test-Path $hb)) { return $false }
    try {
        # FIX: read inside the try — the owning job deletes .heartbeat in its
        # finally block, so the file can vanish between Test-Path and
        # Get-Content. Any read/parse failure is treated as "not alive".
        $ts = Get-Content $hb -Raw -ErrorAction Stop
        $lastBeat = [datetime]::Parse($ts.Trim())
        return ((Get-Date) - $lastBeat).TotalSeconds -lt $StaleSec
    } catch { return $false }
}

# ── Log-capture helpers ─────────────────────────────────────────────────────

function Get-LatestLogFile {
    <#
    .SYNOPSIS
        Return the most recently written .log file in a task folder (or $null).
    #>
    param([Parameter(Mandatory)] [string]$TaskDir)

    Get-ChildItem -Path $TaskDir -Filter '*.log' -File -ErrorAction SilentlyContinue |
        Sort-Object LastWriteTime -Descending |
        Select-Object -First 1
}

function Test-LogAlive {
    <#
    .SYNOPSIS
        Returns $true if the latest log file was written to within $StaleSec seconds.
        This is a secondary liveness signal: if the log keeps growing, the CLI is
        still producing output even if the heartbeat thread is lagging.
    #>
    param(
        [Parameter(Mandatory)]
        [string]$TaskDir,

        [int]$StaleSec = 90
    )
    $latest = Get-LatestLogFile -TaskDir $TaskDir
    if (-not $latest) { return $false }
    return ((Get-Date) - $latest.LastWriteTime).TotalSeconds -lt $StaleSec
}

function Get-TaskLogSummary {
    <#
    .SYNOPSIS
        Return a summary of log files in a task folder: count, latest name,
        latest size in KB, last-write timestamp (ISO 8601), and whether the
        latest log is still being written to (within $StaleSec).
    #>
    param(
        [Parameter(Mandatory)]
        [string]$TaskDir,

        [int]$StaleSec = 90
    )
    $logs   = Get-ChildItem -Path $TaskDir -Filter '*.log' -File -ErrorAction SilentlyContinue
    $latest = $logs | Sort-Object LastWriteTime -Descending | Select-Object -First 1

    return [PSCustomObject]@{
        LogCount      = ($logs | Measure-Object).Count
        LatestLog     = if ($latest) { $latest.Name } else { $null }
        LatestSizeKB  = if ($latest) { [math]::Round($latest.Length / 1024, 1) } else { 0 }
        LatestWritten = if ($latest) { $latest.LastWriteTime.ToString('o') } else { $null }
        LogAlive      = if ($latest) { ((Get-Date) - $latest.LastWriteTime).TotalSeconds -lt $StaleSec } else { $false }
    }
}

# ── Signal files ────────────────────────────────────────────────────────────

function Set-TaskFailed {
    # Mark a task as failed: write the error text to .failed and remove the
    # (now meaningless) heartbeat so liveness checks stop reporting it.
    param(
        [string]$TaskDir,
        [string]$ErrorText
    )
    $ErrorText | Set-Content (Join-Path $TaskDir '.failed') -Force
    Remove-Item (Join-Path $TaskDir '.heartbeat') -ErrorAction SilentlyContinue
}

function Set-TaskTimeout {
    # Mark a task as killed by the supervisor: timestamp into .timeout and
    # remove the heartbeat.
    param([string]$TaskDir)
    "Timeout at $(Get-Date -Format 'o')" | Set-Content (Join-Path $TaskDir '.timeout') -Force
    Remove-Item (Join-Path $TaskDir '.heartbeat') -ErrorAction SilentlyContinue
}

function Clear-TaskSignals {
    <#
    .SYNOPSIS
        Remove all signal files so the task can be retried with a clean slate.
    #>
    param([string]$TaskDir)
    @('.started', '.heartbeat', '.completed', '.failed', '.timeout') | ForEach-Object {
        Remove-Item (Join-Path $TaskDir $_) -ErrorAction SilentlyContinue
    }
}

# ── Batch execution engine ──────────────────────────────────────────────────

function Start-TaskBatch {
    <#
    .SYNOPSIS
        Execute a list of tasks in parallel with throttling, timeout, heartbeat
        liveness detection, and automatic retry.

    .PARAMETER Tasks
        Array of task descriptors. Each must have:
          Id          — unique string identifying this task (used as subfolder name)
          ScriptBlock — the code to run, receives ($TaskDir, $TaskDescriptor)
          Label       — human-readable label for progress display

    .PARAMETER RunRoot
        Root directory for this batch run. Each task gets <RunRoot>/<Id>/.

    .PARAMETER MaxConcurrent
        Maximum concurrent jobs. Default: 5.

    .PARAMETER TimeoutMin
        Per-task wall-clock timeout in minutes. Default: 10.

    .PARAMETER HeartbeatStaleSec
        Seconds without heartbeat before considering a task stuck. Default: 120.

    .PARAMETER MaxRetryCount
        How many times to retry a failed/timed-out task. Default: 2.

    .PARAMETER PollIntervalSec
        Supervisor polling interval. Default: 5.

    .OUTPUTS
        PSCustomObject with TotalTasks, Succeeded, Failed, TimedOut, ElapsedSec.
    #>
    [CmdletBinding()]
    param(
        [Parameter(Mandatory)]
        [array]$Tasks,

        [Parameter(Mandatory)]
        [string]$RunRoot,

        [int]$MaxConcurrent = 5,

        [int]$TimeoutMin = 10,

        [int]$HeartbeatStaleSec = 120,

        [int]$MaxRetryCount = 2,

        [int]$PollIntervalSec = 5
    )

    # The caller may have set $ErrorActionPreference = 'Stop' (common in robust
    # scripts). Inside the batch engine, non-terminating errors from Start-Job
    # serialization, Receive-Job, or Remove-Job must NOT crash the supervisor
    # loop — they are handled explicitly via job state inspection.
    $ErrorActionPreference = 'Continue'

    # Diagnostic trace file — aids troubleshooting when the batch supervisor
    # exits unexpectedly (VS Code terminal kills, resource limits, etc.)
    $debugLog = Join-Path $RunRoot '_taskrunner-debug.log'

    if (-not (Test-Path $RunRoot)) {
        New-Item -ItemType Directory -Path $RunRoot -Force | Out-Null
    }

    # Build work queue (skip tasks already completed on disk)
    $queue = [System.Collections.Queue]::new()
    $alreadyDone = @()
    foreach ($t in $Tasks) {
        $taskDir = Join-Path $RunRoot $t.Id
        if (Test-Path (Join-Path $taskDir '.completed')) {
            $alreadyDone += $t.Id
        } else {
            # Clear any leftover failure/timeout signals so we can retry
            if (Test-Path $taskDir) { Clear-TaskSignals -TaskDir $taskDir }
            $queue.Enqueue(@{ Task = $t; Attempt = 0 })
        }
    }

    if ($alreadyDone.Count -gt 0) {
        Write-TROk "Resuming — $($alreadyDone.Count) task(s) already completed on disk"
    }

    # Result accumulators
    $succeeded = [System.Collections.ArrayList]::new()
    $failed    = [System.Collections.ArrayList]::new()
    $timedOut  = [System.Collections.ArrayList]::new()

    $alreadyDone | ForEach-Object { [void]$succeeded.Add($_) }

    $activeJobs = [System.Collections.ArrayList]::new()
    $totalTasks = $Tasks.Count
    $startTime  = Get-Date

    # Retry helper: failed/timed-out tasks wait here until the main queue drains
    $retryQueue = [System.Collections.Queue]::new()

    $loopIter = 0
    while ($queue.Count -gt 0 -or $activeJobs.Count -gt 0 -or $retryQueue.Count -gt 0) {
        $loopIter++
        "[$(Get-Date -Format 'o')] LOOP iter=$loopIter q=$($queue.Count) active=$($activeJobs.Count) retry=$($retryQueue.Count)" | Out-File $debugLog -Append

        # Move retry items back when main queue empty
        if ($queue.Count -eq 0 -and $retryQueue.Count -gt 0 -and $activeJobs.Count -lt $MaxConcurrent) {
            $ri = $retryQueue.Dequeue()
            Write-TRWarn "Retrying $($ri.Task.Id) (attempt $($ri.Attempt + 1)/$($MaxRetryCount + 1))"
            # FIX: clear stale .failed/.timeout markers left by the previous
            # attempt. Without this a retried-then-successful task dir carries
            # both .timeout and .completed, which confuses resume/progress
            # inspection of the signal files.
            $riDir = Join-Path $RunRoot $ri.Task.Id
            if (Test-Path $riDir) { Clear-TaskSignals -TaskDir $riDir }
            Start-Sleep -Seconds 5
            $queue.Enqueue(@{ Task = $ri.Task; Attempt = $ri.Attempt + 1 })
        }

        # Launch new jobs up to throttle
        while ($activeJobs.Count -lt $MaxConcurrent -and $queue.Count -gt 0) {
            $item    = $queue.Dequeue()
            $t       = $item.Task
            $attempt = $item.Attempt
            $taskDir = Join-Path $RunRoot $t.Id

            $label      = if ($t.Label) { $t.Label } else { $t.Id }
            $attemptTag = if ($attempt -gt 0) { " (retry $attempt)" } else { '' }
            Write-TRInfo "Starting: $label$attemptTag"

            # Generate a unique log file name for this attempt
            $logTs = (Get-Date).ToString('yyyyMMdd-HHmmss')
            if (-not (Test-Path $taskDir)) { New-Item -ItemType Directory -Path $taskDir -Force | Out-Null }
            $logFile = Join-Path $taskDir "run-$logTs.log"

            $job = Start-Job -Name "TR-$($t.Id)" -ScriptBlock {
                param($TaskDir, $TaskDescriptor, $ScriptBlockText, $LogFile)

                # Recreate the script block inside the job (script blocks do not
                # survive Start-Job serialization, so it is passed as text)
                $sb = [scriptblock]::Create($ScriptBlockText)

                # Mark started + first heartbeat
                if (-not (Test-Path $TaskDir)) {
                    New-Item -ItemType Directory -Path $TaskDir -Force | Out-Null
                }
                (Get-Date).ToString('o') | Set-Content (Join-Path $TaskDir '.started') -Force
                $hbPath = Join-Path $TaskDir '.heartbeat'
                (Get-Date).ToString('o') | Set-Content $hbPath -Force

                # Start a background runspace that keeps the heartbeat alive
                # every 30s. This is essential for tasks that invoke long-
                # running external processes which produce no stdout (e.g.
                # copilot.exe reviews).
                $hbPs = [powershell]::Create()
                [void]$hbPs.AddScript({
                    param($Path, $Sec)
                    while ($true) {
                        Start-Sleep -Seconds $Sec
                        try { (Get-Date).ToString('o') | Set-Content $Path -Force } catch {}
                    }
                }).AddArgument($hbPath).AddArgument(30)
                [void]$hbPs.BeginInvoke()

                # Write log header
                "[$(Get-Date -Format 'o')] Task started: $($TaskDescriptor.Id)" | Out-File $LogFile -Encoding utf8

                try {
                    # Run the script block, capturing all output streams to log
                    & $sb $TaskDir $TaskDescriptor 2>&1 | ForEach-Object {
                        # "$_" instead of .ToString(): safe for $null stream objects
                        $line = "$_"
                        $line | Out-File $LogFile -Append -Encoding utf8
                        # Also emit to job output so Receive-Job still works
                        $_
                    }
                    "`n[$(Get-Date -Format 'o')] Task completed successfully" | Out-File $LogFile -Append -Encoding utf8
                    '0' | Set-Content (Join-Path $TaskDir '.completed') -Force
                } catch {
                    "`n[$(Get-Date -Format 'o')] Task FAILED: $($_.Exception.Message)" | Out-File $LogFile -Append -Encoding utf8
                    $_.Exception.Message | Set-Content (Join-Path $TaskDir '.failed') -Force
                    throw
                } finally {
                    # Stop the heartbeat runspace and clean up
                    try { $hbPs.Stop() } catch {}
                    try { $hbPs.Dispose() } catch {}
                    Remove-Item (Join-Path $TaskDir '.heartbeat') -ErrorAction SilentlyContinue
                }
            } -ArgumentList $taskDir, $t, $t.ScriptBlock.ToString(), $logFile

            [void]$activeJobs.Add(@{
                Job       = $job
                Task      = $t
                TaskDir   = $taskDir
                StartTime = Get-Date
                Attempt   = $attempt
            })
        }

        # Poll active jobs
        $justFinished = @()
        foreach ($aj in $activeJobs) {
            $job     = $aj.Job
            $t       = $aj.Task
            $taskDir = $aj.TaskDir
            $elapsed = (Get-Date) - $aj.StartTime

            if ($job.State -eq 'Completed') {
                try { Receive-Job -Job $job -ErrorAction SilentlyContinue | Out-Null } catch {}
                Remove-Job -Job $job -Force
                Write-TROk "✓ $($t.Id) completed ($([math]::Round($elapsed.TotalSeconds))s)"
                [void]$succeeded.Add($t.Id)
                $justFinished += $aj
            }
            elseif ($job.State -eq 'Failed') {
                # FIX: "$x = try {...} catch {...}" is not a valid PowerShell
                # assignment form (only if/switch/loop statements are
                # assignable) — pre-seed the default and overwrite in the try.
                $errMsg = 'Unknown error'
                try { $errMsg = $job.ChildJobs[0].JobStateInfo.Reason.Message } catch {}
                Remove-Job -Job $job -Force

                if ($aj.Attempt -lt $MaxRetryCount) {
                    Write-TRWarn "⚠ $($t.Id) failed — queueing retry: $errMsg"
                    $retryQueue.Enqueue(@{ Task = $t; Attempt = $aj.Attempt })
                } else {
                    Write-TRErr "✗ $($t.Id) failed after $($aj.Attempt + 1) attempt(s): $errMsg"
                    Set-TaskFailed -TaskDir $taskDir -ErrorText $errMsg
                    [void]$failed.Add($t.Id)
                }
                $justFinished += $aj
            }
            elseif ($elapsed.TotalMinutes -ge $TimeoutMin) {
                # Wall-clock timeout
                Stop-Job -Job $job -ErrorAction SilentlyContinue
                Remove-Job -Job $job -Force
                Set-TaskTimeout -TaskDir $taskDir

                if ($aj.Attempt -lt $MaxRetryCount) {
                    Write-TRWarn "⏱ $($t.Id) timed out — queueing retry"
                    $retryQueue.Enqueue(@{ Task = $t; Attempt = $aj.Attempt })
                } else {
                    Write-TRErr "⏱ $($t.Id) timed out after $($aj.Attempt + 1) attempt(s)"
                    [void]$timedOut.Add($t.Id)
                }
                $justFinished += $aj
            }
            elseif ($elapsed.TotalSeconds -gt 30 -and -not (Test-HeartbeatAlive -TaskDir $taskDir -StaleSec $HeartbeatStaleSec)) {
                # Heartbeat stale (30s startup grace elapsed) — check if log is
                # still growing (CLI may be producing output without the script
                # hitting a Beat call)
                if (Test-LogAlive -TaskDir $taskDir -StaleSec $HeartbeatStaleSec) {
                    # Log is still being written — CLI is alive, skip killing
                    Write-TRWarn "⚡ $($t.Id) heartbeat stale but log still growing — keeping alive"
                } else {
                    # Both heartbeat and log are stale — truly stuck
                    $hbFile = Join-Path $taskDir '.heartbeat'
                    if (Test-Path $hbFile) {
                        $lastBeat = Get-Content $hbFile -Raw
                        Write-TRWarn "💀 $($t.Id) heartbeat stale (last: $lastBeat), log also stale"
                    } else {
                        Write-TRWarn "💀 $($t.Id) no heartbeat file, log also stale"
                    }
                    Stop-Job -Job $job -ErrorAction SilentlyContinue
                    Remove-Job -Job $job -Force
                    Set-TaskTimeout -TaskDir $taskDir

                    if ($aj.Attempt -lt $MaxRetryCount) {
                        $retryQueue.Enqueue(@{ Task = $t; Attempt = $aj.Attempt })
                    } else {
                        Write-TRErr "💀 $($t.Id) stuck after $($aj.Attempt + 1) attempt(s)"
                        [void]$timedOut.Add($t.Id)
                    }
                    $justFinished += $aj
                }
            }
        }

        # Remove finished from active list (safe: we are done enumerating)
        foreach ($fin in $justFinished) {
            $activeJobs.Remove($fin) | Out-Null
        }

        # Progress line
        $doneCount = $succeeded.Count + $failed.Count + $timedOut.Count
        $elapsed = (Get-Date) - $startTime
        "[$(Get-Date -Format 'o')] PROGRESS done=$doneCount/$totalTasks active=$($activeJobs.Count) q=$($queue.Count) retry=$($retryQueue.Count) elapsed=$([math]::Round($elapsed.TotalSeconds))s" | Out-File $debugLog -Append
        $jobStates = $activeJobs | ForEach-Object { "$($_.Task.Id):$($_.Job.State)" }
        "[$(Get-Date -Format 'o')] JOB_STATES $($jobStates -join ', ')" | Out-File $debugLog -Append
        if ($activeJobs.Count -gt 0) {
            Write-Host "`r [$doneCount/$totalTasks] active: $($activeJobs.Count) | elapsed: $([math]::Round($elapsed.TotalSeconds))s" `
                -ForegroundColor DarkGray -NoNewline
        }

        if ($activeJobs.Count -gt 0 -or $queue.Count -gt 0 -or $retryQueue.Count -gt 0) {
            "[$(Get-Date -Format 'o')] SLEEPING ${PollIntervalSec}s" | Out-File $debugLog -Append
            Start-Sleep -Seconds $PollIntervalSec
        }
    }

    "[$(Get-Date -Format 'o')] LOOP EXITED iter=$loopIter" | Out-File $debugLog -Append
    Write-Host '' # clear progress line
    $totalElapsed = (Get-Date) - $startTime

    return [PSCustomObject]@{
        TotalTasks = $totalTasks
        Succeeded  = @($succeeded)
        Failed     = @($failed)
        TimedOut   = @($timedOut)
        ElapsedSec = [math]::Round($totalElapsed.TotalSeconds)
    }
}