1→#!/usr/bin/env bun 2→/** 3→ * Generate SKILL.md files from .tmpl templates. 4→ * 5→ * Pipeline: 6→ * read .tmpl → find {{PLACEHOLDERS}} → resolve from source → format → write .md 7→ * 8→ * Supports --dry-run: generate to memory, exit 1 if different from committed file. 9→ * Used by skill:check and CI freshness checks. 10→ */ 11→ 12→import { COMMAND_DESCRIPTIONS } from '../browse/src/commands'; 13→import { SNAPSHOT_FLAGS } from '../browse/src/snapshot'; 14→import * as fs from 'fs'; 15→import * as path from 'path'; 16→ 17→const ROOT = path.resolve(import.meta.dir, '..'); 18→const DRY_RUN = process.argv.includes('--dry-run'); 19→ 20→// ─── Placeholder Resolvers ────────────────────────────────── 21→ 22→function generateCommandReference(): string { 23→ // Group commands by category 24→ const groups = new Map>(); 25→ for (const [cmd, meta] of Object.entries(COMMAND_DESCRIPTIONS)) { 26→ const list = groups.get(meta.category) || []; 27→ list.push({ command: cmd, description: meta.description, usage: meta.usage }); 28→ groups.set(meta.category, list); 29→ } 30→ 31→ // Category display order 32→ const categoryOrder = [ 33→ 'Navigation', 'Reading', 'Interaction', 'Inspection', 34→ 'Visual', 'Snapshot', 'Meta', 'Tabs', 'Server', 35→ ]; 36→ 37→ const sections: string[] = []; 38→ for (const category of categoryOrder) { 39→ const commands = groups.get(category); 40→ if (!commands || commands.length === 0) continue; 41→ 42→ // Sort alphabetically within category 43→ commands.sort((a, b) => a.command.localeCompare(b.command)); 44→ 45→ sections.push(`### ${category}`); 46→ sections.push('| Command | Description |'); 47→ sections.push('|---------|-------------|'); 48→ for (const cmd of commands) { 49→ const display = cmd.usage ? 
`\`${cmd.usage}\`` : `\`${cmd.command}\``; 50→ sections.push(`| ${display} | ${cmd.description} |`); 51→ } 52→ sections.push(''); 53→ } 54→ 55→ return sections.join('\n').trimEnd(); 56→} 57→ 58→function generateSnapshotFlags(): string { 59→ const lines: string[] = [ 60→ 'The snapshot is your primary tool for understanding and interacting with pages.', 61→ '', 62→ '```', 63→ ]; 64→ 65→ for (const flag of SNAPSHOT_FLAGS) { 66→ const label = flag.valueHint ? `${flag.short} ${flag.valueHint}` : flag.short; 67→ lines.push(`${label.padEnd(10)}${flag.long.padEnd(24)}${flag.description}`); 68→ } 69→ 70→ lines.push('```'); 71→ lines.push(''); 72→ lines.push('All flags can be combined freely. `-o` only applies when `-a` is also used.'); 73→ lines.push('Example: `$B snapshot -i -a -C -o /tmp/annotated.png`'); 74→ lines.push(''); 75→ lines.push('**Ref numbering:** @e refs are assigned sequentially (@e1, @e2, ...) in tree order.'); 76→ lines.push('@c refs from `-C` are numbered separately (@c1, @c2, ...).'); 77→ lines.push(''); 78→ lines.push('After snapshot, use @refs as selectors in any command:'); 79→ lines.push('```bash'); 80→ lines.push('$B click @e3 $B fill @e4 "value" $B hover @e1'); 81→ lines.push('$B html @e2 $B css @e5 "color" $B attrs @e6'); 82→ lines.push('$B click @c1 # cursor-interactive ref (from -C)'); 83→ lines.push('```'); 84→ lines.push(''); 85→ lines.push('**Output format:** indented accessibility tree with @ref IDs, one element per line.'); 86→ lines.push('```'); 87→ lines.push(' @e1 [heading] "Welcome" [level=1]'); 88→ lines.push(' @e2 [textbox] "Email"'); 89→ lines.push(' @e3 [button] "Submit"'); 90→ lines.push('```'); 91→ lines.push(''); 92→ lines.push('Refs are invalidated on navigation — run `snapshot` again after `goto`.'); 93→ 94→ return lines.join('\n'); 95→} 96→ 97→function generatePreamble(): string { 98→ return `## Preamble (run first) 99→ 100→\`\`\`bash 101→_UPD=$(~/.claude/skills/gstack/bin/gstack-update-check 2>/dev/null || 
.claude/skills/gstack/bin/gstack-update-check 2>/dev/null || true) 102→[ -n "$_UPD" ] && echo "$_UPD" || true 103→mkdir -p ~/.gstack/sessions 104→touch ~/.gstack/sessions/"$PPID" 105→_SESSIONS=$(find ~/.gstack/sessions -mmin -120 -type f 2>/dev/null | wc -l | tr -d ' ') 106→find ~/.gstack/sessions -mmin +120 -type f -delete 2>/dev/null || true 107→_CONTRIB=$(~/.claude/skills/gstack/bin/gstack-config get gstack_contributor 2>/dev/null || true) 108→_BRANCH=$(git branch --show-current 2>/dev/null || echo "unknown") 109→echo "BRANCH: $_BRANCH" 110→_LAKE_SEEN=$([ -f ~/.gstack/.completeness-intro-seen ] && echo "yes" || echo "no") 111→echo "LAKE_INTRO: $_LAKE_SEEN" 112→\`\`\` 113→ 114→If output shows \`UPGRADE_AVAILABLE \`: read \`~/.claude/skills/gstack/gstack-upgrade/SKILL.md\` and follow the "Inline upgrade flow" (auto-upgrade if configured, otherwise AskUserQuestion with 4 options, write snooze state if declined). If \`JUST_UPGRADED \`: tell user "Running gstack v{to} (just updated!)" and continue. 115→ 116→If \`LAKE_INTRO\` is \`no\`: Before continuing, introduce the Completeness Principle. 117→Tell the user: "gstack follows the **Boil the Lake** principle — always do the complete 118→thing when AI makes the marginal cost near-zero. Read more: https://garryslist.org/posts/boil-the-ocean" 119→Then offer to open the essay in their default browser: 120→ 121→\`\`\`bash 122→open https://garryslist.org/posts/boil-the-ocean 123→touch ~/.gstack/.completeness-intro-seen 124→\`\`\` 125→ 126→Only run \`open\` if the user says yes. Always run \`touch\` to mark as seen. This only happens once. 127→ 128→## AskUserQuestion Format 129→ 130→**ALWAYS follow this structure for every AskUserQuestion call:** 131→1. **Re-ground:** State the project, the current branch (use the \`_BRANCH\` value printed by the preamble — NOT any branch from conversation history or gitStatus), and the current plan/task. (1-2 sentences) 132→2. 
**Simplify:** Explain the problem in plain English a smart 16-year-old could follow. No raw function names, no internal jargon, no implementation details. Use concrete examples and analogies. Say what it DOES, not what it's called. 133→3. **Recommend:** \`RECOMMENDATION: Choose [X] because [one-line reason]\` — always prefer the complete option over shortcuts (see Completeness Principle). Include \`Completeness: X/10\` for each option. Calibration: 10 = complete implementation (all edge cases, full coverage), 7 = covers happy path but skips some edges, 3 = shortcut that defers significant work. If both options are 8+, pick the higher; if one is ≤5, flag it. 134→4. **Options:** Lettered options: \`A) ... B) ... C) ...\` — when an option involves effort, show both scales: \`(human: ~X / CC: ~Y)\` 135→ 136→Assume the user hasn't looked at this window in 20 minutes and doesn't have the code open. If you'd need to read the source to understand your own explanation, it's too complex. 137→ 138→Per-skill instructions may add additional formatting rules on top of this baseline. 139→ 140→## Completeness Principle — Boil the Lake 141→ 142→AI-assisted coding makes the marginal cost of completeness near-zero. When you present options: 143→ 144→- If Option A is the complete implementation (full parity, all edge cases, 100% coverage) and Option B is a shortcut that saves modest effort — **always recommend A**. The delta between 80 lines and 150 lines is meaningless with CC+gstack. "Good enough" is the wrong instinct when "complete" costs minutes more. 145→- **Lake vs. ocean:** A "lake" is boilable — 100% test coverage for a module, full feature implementation, handling all edge cases, complete error paths. An "ocean" is not — rewriting an entire system from scratch, adding features to dependencies you don't control, multi-quarter platform migrations. Recommend boiling lakes. Flag oceans as out of scope. 
146→- **When estimating effort**, always show both scales: human team time and CC+gstack time. The compression ratio varies by task type — use this reference: 147→ 148→| Task type | Human team | CC+gstack | Compression | 149→|-----------|-----------|-----------|-------------| 150→| Boilerplate / scaffolding | 2 days | 15 min | ~100x | 151→| Test writing | 1 day | 15 min | ~50x | 152→| Feature implementation | 1 week | 30 min | ~30x | 153→| Bug fix + regression test | 4 hours | 15 min | ~20x | 154→| Architecture / design | 2 days | 4 hours | ~5x | 155→| Research / exploration | 1 day | 3 hours | ~3x | 156→ 157→- This principle applies to test coverage, error handling, documentation, edge cases, and feature completeness. Don't skip the last 10% to "save time" — with AI, that 10% costs seconds. 158→ 159→**Anti-patterns — DON'T do this:** 160→- BAD: "Choose B — it covers 90% of the value with less code." (If A is only 70 lines more, choose A.) 161→- BAD: "We can skip edge case handling to save time." (Edge case handling costs minutes with CC.) 162→- BAD: "Let's defer test coverage to a follow-up PR." (Tests are the cheapest lake to boil.) 163→- BAD: Quoting only human-team effort: "This would take 2 weeks." (Say: "2 weeks human / ~1 hour CC.") 164→ 165→## Contributor Mode 166→ 167→If \`_CONTRIB\` is \`true\`: you are in **contributor mode**. You're a gstack user who also helps make it better. 168→ 169→**At the end of each major workflow step** (not after every single command), reflect on the gstack tooling you used. Rate your experience 0 to 10. If it wasn't a 10, think about why. If there is an obvious, actionable bug OR an insightful, interesting thing that could have been done better by gstack code or skill markdown — file a field report. Maybe our contributor will help make us better! 

**Calibration — this is the bar:** For example, \`$B js "await fetch(...)"\` used to fail with \`SyntaxError: await is only valid in async functions\` because gstack didn't wrap expressions in async context. Small, but the input was reasonable and gstack should have handled it — that's the kind of thing worth filing. Things less consequential than this, ignore.

**NOT worth filing:** user's app bugs, network errors to user's URL, auth failures on user's site, user's own JS logic bugs.

**To file:** write \`~/.gstack/contributor-logs/{slug}.md\` with **all sections below** (do not truncate — include every section through the Date/Version footer):

\`\`\`\`
# {Title}

Hey gstack team — ran into this while using /{skill-name}:

**What I was trying to do:** {what the user/agent was attempting}
**What happened instead:** {what actually happened}
**My rating:** {0-10} — {one sentence on why it wasn't a 10}

## Steps to reproduce
1. {step}

## Raw output
\`\`\`
{paste the actual error or unexpected output here}
\`\`\`

## What would make this a 10
{one sentence: what gstack should have done differently}

**Date:** {YYYY-MM-DD} | **Version:** {gstack version} | **Skill:** /{skill}
\`\`\`\`

Slug: lowercase, hyphens, max 60 chars (e.g. \`browse-js-no-await\`). Skip if file already exists. Max 3 reports per session. File inline and continue — don't stop the workflow.
Tell user: "Filed gstack field report: {title}"`; 201→} 202→ 203→function generateBrowseSetup(): string { 204→ return `## SETUP (run this check BEFORE any browse command) 205→ 206→\`\`\`bash 207→_ROOT=$(git rev-parse --show-toplevel 2>/dev/null) 208→B="" 209→[ -n "$_ROOT" ] && [ -x "$_ROOT/.claude/skills/gstack/browse/dist/browse" ] && B="$_ROOT/.claude/skills/gstack/browse/dist/browse" 210→[ -z "$B" ] && B=~/.claude/skills/gstack/browse/dist/browse 211→if [ -x "$B" ]; then 212→ echo "READY: $B" 213→else 214→ echo "NEEDS_SETUP" 215→fi 216→\`\`\` 217→ 218→If \`NEEDS_SETUP\`: 219→1. Tell the user: "gstack browse needs a one-time build (~10 seconds). OK to proceed?" Then STOP and wait. 220→2. Run: \`cd && ./setup\` 221→3. If \`bun\` is not installed: \`curl -fsSL https://bun.sh/install | bash\``; 222→} 223→ 224→function generateBaseBranchDetect(): string { 225→ return `## Step 0: Detect base branch 226→ 227→Determine which branch this PR targets. Use the result as "the base branch" in all subsequent steps. 228→ 229→1. Check if a PR already exists for this branch: 230→ \`gh pr view --json baseRefName -q .baseRefName\` 231→ If this succeeds, use the printed branch name as the base branch. 232→ 233→2. If no PR exists (command fails), detect the repo's default branch: 234→ \`gh repo view --json defaultBranchRef -q .defaultBranchRef.name\` 235→ 236→3. If both commands fail, fall back to \`main\`. 237→ 238→Print the detected base branch name. In every subsequent \`git diff\`, \`git log\`, 239→\`git fetch\`, \`git merge\`, and \`gh pr create\` command, substitute the detected 240→branch name wherever the instructions say "the base branch." 241→ 242→---`; 243→} 244→ 245→function generateQAMethodology(): string { 246→ return `## Modes 247→ 248→### Diff-aware (automatic when on a feature branch with no URL) 249→ 250→This is the **primary mode** for developers verifying their work. 
When the user says \`/qa\` without a URL and the repo is on a feature branch, automatically: 251→ 252→1. **Analyze the branch diff** to understand what changed: 253→ \`\`\`bash 254→ git diff main...HEAD --name-only 255→ git log main..HEAD --oneline 256→ \`\`\` 257→ 258→2. **Identify affected pages/routes** from the changed files: 259→ - Controller/route files → which URL paths they serve 260→ - View/template/component files → which pages render them 261→ - Model/service files → which pages use those models (check controllers that reference them) 262→ - CSS/style files → which pages include those stylesheets 263→ - API endpoints → test them directly with \`$B js "await fetch('/api/...')"\` 264→ - Static pages (markdown, HTML) → navigate to them directly 265→ 266→3. **Detect the running app** — check common local dev ports: 267→ \`\`\`bash 268→ $B goto http://localhost:3000 2>/dev/null && echo "Found app on :3000" || \\ 269→ $B goto http://localhost:4000 2>/dev/null && echo "Found app on :4000" || \\ 270→ $B goto http://localhost:8080 2>/dev/null && echo "Found app on :8080" 271→ \`\`\` 272→ If no local app is found, check for a staging/preview URL in the PR or environment. If nothing works, ask the user for the URL. 273→ 274→4. **Test each affected page/route:** 275→ - Navigate to the page 276→ - Take a screenshot 277→ - Check console for errors 278→ - If the change was interactive (forms, buttons, flows), test the interaction end-to-end 279→ - Use \`snapshot -D\` before and after actions to verify the change had the expected effect 280→ 281→5. **Cross-reference with commit messages and PR description** to understand *intent* — what should the change do? Verify it actually does that. 282→ 283→6. **Check TODOS.md** (if it exists) for known bugs or issues related to the changed files. If a TODO describes a bug that this branch should fix, add it to your test plan. If you find a new bug during QA that isn't in TODOS.md, note it in the report. 284→ 285→7. 
**Report findings** scoped to the branch changes: 286→ - "Changes tested: N pages/routes affected by this branch" 287→ - For each: does it work? Screenshot evidence. 288→ - Any regressions on adjacent pages? 289→ 290→**If the user provides a URL with diff-aware mode:** Use that URL as the base but still scope testing to the changed files. 291→ 292→### Full (default when URL is provided) 293→Systematic exploration. Visit every reachable page. Document 5-10 well-evidenced issues. Produce health score. Takes 5-15 minutes depending on app size. 294→ 295→### Quick (\`--quick\`) 296→30-second smoke test. Visit homepage + top 5 navigation targets. Check: page loads? Console errors? Broken links? Produce health score. No detailed issue documentation. 297→ 298→### Regression (\`--regression \`) 299→Run full mode, then load \`baseline.json\` from a previous run. Diff: which issues are fixed? Which are new? What's the score delta? Append regression section to report. 300→ 301→--- 302→ 303→## Workflow 304→ 305→### Phase 1: Initialize 306→ 307→1. Find browse binary (see Setup above) 308→2. Create output directories 309→3. Copy report template from \`qa/templates/qa-report-template.md\` to output dir 310→4. Start timer for duration tracking 311→ 312→### Phase 2: Authenticate (if needed) 313→ 314→**If the user specified auth credentials:** 315→ 316→\`\`\`bash 317→$B goto 318→$B snapshot -i # find the login form 319→$B fill @e3 "user@example.com" 320→$B fill @e4 "[REDACTED]" # NEVER include real passwords in report 321→$B click @e5 # submit 322→$B snapshot -D # verify login succeeded 323→\`\`\` 324→ 325→**If the user provided a cookie file:** 326→ 327→\`\`\`bash 328→$B cookie-import cookies.json 329→$B goto 330→\`\`\` 331→ 332→**If 2FA/OTP is required:** Ask the user for the code and wait. 333→ 334→**If CAPTCHA blocks you:** Tell the user: "Please complete the CAPTCHA in the browser, then tell me to continue." 
335→ 336→### Phase 3: Orient 337→ 338→Get a map of the application: 339→ 340→\`\`\`bash 341→$B goto 342→$B snapshot -i -a -o "$REPORT_DIR/screenshots/initial.png" 343→$B links # map navigation structure 344→$B console --errors # any errors on landing? 345→\`\`\` 346→ 347→**Detect framework** (note in report metadata): 348→- \`__next\` in HTML or \`_next/data\` requests → Next.js 349→- \`csrf-token\` meta tag → Rails 350→- \`wp-content\` in URLs → WordPress 351→- Client-side routing with no page reloads → SPA 352→ 353→**For SPAs:** The \`links\` command may return few results because navigation is client-side. Use \`snapshot -i\` to find nav elements (buttons, menu items) instead. 354→ 355→### Phase 4: Explore 356→ 357→Visit pages systematically. At each page: 358→ 359→\`\`\`bash 360→$B goto 361→$B snapshot -i -a -o "$REPORT_DIR/screenshots/page-name.png" 362→$B console --errors 363→\`\`\` 364→ 365→Then follow the **per-page exploration checklist** (see \`qa/references/issue-taxonomy.md\`): 366→ 367→1. **Visual scan** — Look at the annotated screenshot for layout issues 368→2. **Interactive elements** — Click buttons, links, controls. Do they work? 369→3. **Forms** — Fill and submit. Test empty, invalid, edge cases 370→4. **Navigation** — Check all paths in and out 371→5. **States** — Empty state, loading, error, overflow 372→6. **Console** — Any new JS errors after interactions? 373→7. **Responsiveness** — Check mobile viewport if relevant: 374→ \`\`\`bash 375→ $B viewport 375x812 376→ $B screenshot "$REPORT_DIR/screenshots/page-mobile.png" 377→ $B viewport 1280x720 378→ \`\`\` 379→ 380→**Depth judgment:** Spend more time on core features (homepage, dashboard, checkout, search) and less on secondary pages (about, terms, privacy). 381→ 382→**Quick mode:** Only visit homepage + top 5 navigation targets from the Orient phase. Skip the per-page checklist — just check: loads? Console errors? Broken links visible? 
383→ 384→### Phase 5: Document 385→ 386→Document each issue **immediately when found** — don't batch them. 387→ 388→**Two evidence tiers:** 389→ 390→**Interactive bugs** (broken flows, dead buttons, form failures): 391→1. Take a screenshot before the action 392→2. Perform the action 393→3. Take a screenshot showing the result 394→4. Use \`snapshot -D\` to show what changed 395→5. Write repro steps referencing screenshots 396→ 397→\`\`\`bash 398→$B screenshot "$REPORT_DIR/screenshots/issue-001-step-1.png" 399→$B click @e5 400→$B screenshot "$REPORT_DIR/screenshots/issue-001-result.png" 401→$B snapshot -D 402→\`\`\` 403→ 404→**Static bugs** (typos, layout issues, missing images): 405→1. Take a single annotated screenshot showing the problem 406→2. Describe what's wrong 407→ 408→\`\`\`bash 409→$B snapshot -i -a -o "$REPORT_DIR/screenshots/issue-002.png" 410→\`\`\` 411→ 412→**Write each issue to the report immediately** using the template format from \`qa/templates/qa-report-template.md\`. 413→ 414→### Phase 6: Wrap Up 415→ 416→1. **Compute health score** using the rubric below 417→2. **Write "Top 3 Things to Fix"** — the 3 highest-severity issues 418→3. **Write console health summary** — aggregate all console errors seen across pages 419→4. **Update severity counts** in the summary table 420→5. **Fill in report metadata** — date, duration, pages visited, screenshot count, framework 421→6. **Save baseline** — write \`baseline.json\` with: 422→ \`\`\`json 423→ { 424→ "date": "YYYY-MM-DD", 425→ "url": "", 426→ "healthScore": N, 427→ "issues": [{ "id": "ISSUE-001", "title": "...", "severity": "...", "category": "..." }], 428→ "categoryScores": { "console": N, "links": N, ... } 429→ } 430→ \`\`\` 431→ 432→**Regression mode:** After writing the report, load the baseline file. 
Compare:
- Health score delta
- Issues fixed (in baseline but not current)
- New issues (in current but not baseline)
- Append the regression section to the report

---

## Health Score Rubric

Compute each category score (0-100), then take the weighted average.

### Console (weight: 15%)
- 0 errors → 100
- 1-3 errors → 70
- 4-10 errors → 40
- 11+ errors → 10

### Links (weight: 10%)
- 0 broken → 100
- Each broken link → -15 (minimum 0)

### Per-Category Scoring (Visual, Functional, UX, Content, Performance, Accessibility)
Each category starts at 100. Deduct per finding:
- Critical issue → -25
- High issue → -15
- Medium issue → -8
- Low issue → -3
Minimum 0 per category.

### Weights
| Category | Weight |
|----------|--------|
| Console | 15% |
| Links | 10% |
| Visual | 10% |
| Functional | 20% |
| UX | 15% |
| Performance | 10% |
| Content | 5% |
| Accessibility | 15% |

### Final Score
\`score = Σ (category_score × weight)\`

---

## Framework-Specific Guidance

### Next.js
- Check console for hydration errors (\`Hydration failed\`, \`Text content did not match\`)
- Monitor \`_next/data\` requests in network — 404s indicate broken data fetching
- Test client-side navigation (click links, don't just \`goto\`) — catches routing issues
- Check for CLS (Cumulative Layout Shift) on pages with dynamic content

### Rails
- Check for N+1 query warnings in console (if development mode)
- Verify CSRF token presence in forms
- Test Turbo/Stimulus integration — do page transitions work smoothly?
491→- Check for flash messages appearing and dismissing correctly 492→ 493→### WordPress 494→- Check for plugin conflicts (JS errors from different plugins) 495→- Verify admin bar visibility for logged-in users 496→- Test REST API endpoints (\`/wp-json/\`) 497→- Check for mixed content warnings (common with WP) 498→ 499→### General SPA (React, Vue, Angular) 500→- Use \`snapshot -i\` for navigation — \`links\` command misses client-side routes 501→- Check for stale state (navigate away and back — does data refresh?) 502→- Test browser back/forward — does the app handle history correctly? 503→- Check for memory leaks (monitor console after extended use) 504→ 505→--- 506→ 507→## Important Rules 508→ 509→1. **Repro is everything.** Every issue needs at least one screenshot. No exceptions. 510→2. **Verify before documenting.** Retry the issue once to confirm it's reproducible, not a fluke. 511→3. **Never include credentials.** Write \`[REDACTED]\` for passwords in repro steps. 512→4. **Write incrementally.** Append each issue to the report as you find it. Don't batch. 513→5. **Never read source code.** Test as a user, not a developer. 514→6. **Check console after every interaction.** JS errors that don't surface visually are still bugs. 515→7. **Test like a user.** Use realistic data. Walk through complete workflows end-to-end. 516→8. **Depth over breadth.** 5-10 well-documented issues with evidence > 20 vague descriptions. 517→9. **Never delete output files.** Screenshots and reports accumulate — that's intentional. 518→10. **Use \`snapshot -C\` for tricky UIs.** Finds clickable divs that the accessibility tree misses. 519→11. **Show screenshots to the user.** After every \`$B screenshot\`, \`$B snapshot -a -o\`, or \`$B responsive\` command, use the Read tool on the output file(s) so the user can see them inline. For \`responsive\` (3 files), Read all three. 
This is critical — without it, screenshots are invisible to the user.`; 520→} 521→ 522→function generateDesignReviewLite(): string { 523→ return `## Design Review (conditional, diff-scoped) 524→ 525→Check if the diff touches frontend files using \`gstack-diff-scope\`: 526→ 527→\`\`\`bash 528→eval $(~/.claude/skills/gstack/bin/gstack-diff-scope 2>/dev/null) 529→\`\`\` 530→ 531→**If \`SCOPE_FRONTEND=false\`:** Skip design review silently. No output. 532→ 533→**If \`SCOPE_FRONTEND=true\`:** 534→ 535→1. **Check for DESIGN.md.** If \`DESIGN.md\` or \`design-system.md\` exists in the repo root, read it. All design findings are calibrated against it — patterns blessed in DESIGN.md are not flagged. If not found, use universal design principles. 536→ 537→2. **Read \`.claude/skills/review/design-checklist.md\`.** If the file cannot be read, skip design review with a note: "Design checklist not found — skipping design review." 538→ 539→3. **Read each changed frontend file** (full file, not just diff hunks). Frontend files are identified by the patterns listed in the checklist. 540→ 541→4. **Apply the design checklist** against the changed files. For each item: 542→ - **[HIGH] mechanical CSS fix** (\`outline: none\`, \`!important\`, \`font-size < 16px\`): classify as AUTO-FIX 543→ - **[HIGH/MEDIUM] design judgment needed**: classify as ASK 544→ - **[LOW] intent-based detection**: present as "Possible — verify visually or run /design-review" 545→ 546→5. **Include findings** in the review output under a "Design Review" header, following the output format in the checklist. Design findings merge with code review findings into the same Fix-First flow. 547→ 548→6. 
**Log the result** for the Review Readiness Dashboard: 549→ 550→\`\`\`bash 551→eval $(~/.claude/skills/gstack/bin/gstack-slug 2>/dev/null) 552→mkdir -p ~/.gstack/projects/$SLUG 553→echo '{"skill":"design-review-lite","timestamp":"TIMESTAMP","status":"STATUS","findings":N,"auto_fixed":M}' >> ~/.gstack/projects/$SLUG/$BRANCH-reviews.jsonl 554→\`\`\` 555→ 556→Substitute: TIMESTAMP = ISO 8601 datetime, STATUS = "clean" if 0 findings or "issues_found", N = total findings, M = auto-fixed count.`; 557→} 558→ 559→// NOTE: design-checklist.md is a subset of this methodology for code-level detection. 560→// When adding items here, also update review/design-checklist.md, and vice versa. 561→function generateDesignMethodology(): string { 562→ return `## Modes 563→ 564→### Full (default) 565→Systematic review of all pages reachable from homepage. Visit 5-8 pages. Full checklist evaluation, responsive screenshots, interaction flow testing. Produces complete design audit report with letter grades. 566→ 567→### Quick (\`--quick\`) 568→Homepage + 2 key pages only. First Impression + Design System Extraction + abbreviated checklist. Fastest path to a design score. 569→ 570→### Deep (\`--deep\`) 571→Comprehensive review: 10-15 pages, every interaction flow, exhaustive checklist. For pre-launch audits or major redesigns. 572→ 573→### Diff-aware (automatic when on a feature branch with no URL) 574→When on a feature branch, scope to pages affected by the branch changes: 575→1. Analyze the branch diff: \`git diff main...HEAD --name-only\` 576→2. Map changed files to affected pages/routes 577→3. Detect running app on common local ports (3000, 4000, 8080) 578→4. Audit only affected pages, compare design quality before/after 579→ 580→### Regression (\`--regression\` or previous \`design-baseline.json\` found) 581→Run full audit, then load previous \`design-baseline.json\`. Compare: per-category grade deltas, new findings, resolved findings. Output regression table in report. 
582→ 583→--- 584→ 585→## Phase 1: First Impression 586→ 587→The most uniquely designer-like output. Form a gut reaction before analyzing anything. 588→ 589→1. Navigate to the target URL 590→2. Take a full-page desktop screenshot: \`$B screenshot "$REPORT_DIR/screenshots/first-impression.png"\` 591→3. Write the **First Impression** using this structured critique format: 592→ - "The site communicates **[what]**." (what it says at a glance — competence? playfulness? confusion?) 593→ - "I notice **[observation]**." (what stands out, positive or negative — be specific) 594→ - "The first 3 things my eye goes to are: **[1]**, **[2]**, **[3]**." (hierarchy check — are these intentional?) 595→ - "If I had to describe this in one word: **[word]**." (gut verdict) 596→ 597→This is the section users read first. Be opinionated. A designer doesn't hedge — they react. 598→ 599→--- 600→ 601→## Phase 2: Design System Extraction 602→ 603→Extract the actual design system the site uses (not what a DESIGN.md says, but what's rendered): 604→ 605→\`\`\`bash 606→# Fonts in use (capped at 500 elements to avoid timeout) 607→$B js "JSON.stringify([...new Set([...document.querySelectorAll('*')].slice(0,500).map(e => getComputedStyle(e).fontFamily))])" 608→ 609→# Color palette in use 610→$B js "JSON.stringify([...new Set([...document.querySelectorAll('*')].slice(0,500).flatMap(e => [getComputedStyle(e).color, getComputedStyle(e).backgroundColor]).filter(c => c !== 'rgba(0, 0, 0, 0)'))])" 611→ 612→# Heading hierarchy 613→$B js "JSON.stringify([...document.querySelectorAll('h1,h2,h3,h4,h5,h6')].map(h => ({tag:h.tagName, text:h.textContent.trim().slice(0,50), size:getComputedStyle(h).fontSize, weight:getComputedStyle(h).fontWeight})))" 614→ 615→# Touch target audit (find undersized interactive elements) 616→$B js "JSON.stringify([...document.querySelectorAll('a,button,input,[role=button]')].filter(e => {const r=e.getBoundingClientRect(); return r.width>0 && (r.width<44||r.height<44)}).map(e => 
({tag:e.tagName, text:(e.textContent||'').trim().slice(0,30), w:Math.round(e.getBoundingClientRect().width), h:Math.round(e.getBoundingClientRect().height)})).slice(0,20))" 617→ 618→# Performance baseline 619→$B perf 620→\`\`\` 621→ 622→Structure findings as an **Inferred Design System**: 623→- **Fonts:** list with usage counts. Flag if >3 distinct font families. 624→- **Colors:** palette extracted. Flag if >12 unique non-gray colors. Note warm/cool/mixed. 625→- **Heading Scale:** h1-h6 sizes. Flag skipped levels, non-systematic size jumps. 626→- **Spacing Patterns:** sample padding/margin values. Flag non-scale values. 627→ 628→After extraction, offer: *"Want me to save this as your DESIGN.md? I can lock in these observations as your project's design system baseline."* 629→ 630→--- 631→ 632→## Phase 3: Page-by-Page Visual Audit 633→ 634→For each page in scope: 635→ 636→\`\`\`bash 637→$B goto 638→$B snapshot -i -a -o "$REPORT_DIR/screenshots/{page}-annotated.png" 639→$B responsive "$REPORT_DIR/screenshots/{page}" 640→$B console --errors 641→$B perf 642→\`\`\` 643→ 644→### Auth Detection 645→ 646→After the first navigation, check if the URL changed to a login-like path: 647→\`\`\`bash 648→$B url 649→\`\`\` 650→If URL contains \`/login\`, \`/signin\`, \`/auth\`, or \`/sso\`: the site requires authentication. AskUserQuestion: "This site requires authentication. Want to import cookies from your browser? Run \`/setup-browser-cookies\` first if needed." 651→ 652→### Design Audit Checklist (10 categories, ~80 items) 653→ 654→Apply these at each page. Each finding gets an impact rating (high/medium/polish) and category. 655→ 656→**1. Visual Hierarchy & Composition** (8 items) 657→- Clear focal point? One primary CTA per view? 658→- Eye flows naturally top-left to bottom-right? 659→- Visual noise — competing elements fighting for attention? 660→- Information density appropriate for content type? 661→- Z-index clarity — nothing unexpectedly overlapping? 
662→- Above-the-fold content communicates purpose in 3 seconds? 663→- Squint test: hierarchy still visible when blurred? 664→- White space is intentional, not leftover? 665→ 666→**2. Typography** (15 items) 667→- Font count <=3 (flag if more) 668→- Scale follows ratio (1.25 major third or 1.333 perfect fourth) 669→- Line-height: 1.5x body, 1.15-1.25x headings 670→- Measure: 45-75 chars per line (66 ideal) 671→- Heading hierarchy: no skipped levels (h1→h3 without h2) 672→- Weight contrast: >=2 weights used for hierarchy 673→- No blacklisted fonts (Papyrus, Comic Sans, Lobster, Impact, Jokerman) 674→- If primary font is Inter/Roboto/Open Sans/Poppins → flag as potentially generic 675→- \`text-wrap: balance\` or \`text-pretty\` on headings (check via \`$B css text-wrap\`) 676→- Curly quotes used, not straight quotes 677→- Ellipsis character (\`…\`) not three dots (\`...\`) 678→- \`font-variant-numeric: tabular-nums\` on number columns 679→- Body text >= 16px 680→- Caption/label >= 12px 681→- No letterspacing on lowercase text 682→ 683→**3. Color & Contrast** (10 items) 684→- Palette coherent (<=12 unique non-gray colors) 685→- WCAG AA: body text 4.5:1, large text (18px+) 3:1, UI components 3:1 686→- Semantic colors consistent (success=green, error=red, warning=yellow/amber) 687→- No color-only encoding (always add labels, icons, or patterns) 688→- Dark mode: surfaces use elevation, not just lightness inversion 689→- Dark mode: text off-white (~#E0E0E0), not pure white 690→- Primary accent desaturated 10-20% in dark mode 691→- \`color-scheme: dark\` on html element (if dark mode present) 692→- No red/green only combinations (8% of men have red-green deficiency) 693→- Neutral palette is warm or cool consistently — not mixed 694→ 695→**4. 
Spacing & Layout** (12 items) 696→- Grid consistent at all breakpoints 697→- Spacing uses a scale (4px or 8px base), not arbitrary values 698→- Alignment is consistent — nothing floats outside the grid 699→- Rhythm: related items closer together, distinct sections further apart 700→- Border-radius hierarchy (not uniform bubbly radius on everything) 701→- Inner radius = outer radius - gap (nested elements) 702→- No horizontal scroll on mobile 703→- Max content width set (no full-bleed body text) 704→- \`env(safe-area-inset-*)\` for notch devices 705→- URL reflects state (filters, tabs, pagination in query params) 706→- Flex/grid used for layout (not JS measurement) 707→- Breakpoints: mobile (375), tablet (768), desktop (1024), wide (1440) 708→ 709→**5. Interaction States** (10 items) 710→- Hover state on all interactive elements 711→- \`focus-visible\` ring present (never \`outline: none\` without replacement) 712→- Active/pressed state with depth effect or color shift 713→- Disabled state: reduced opacity + \`cursor: not-allowed\` 714→- Loading: skeleton shapes match real content layout 715→- Empty states: warm message + primary action + visual (not just "No items.") 716→- Error messages: specific + include fix/next step 717→- Success: confirmation animation or color, auto-dismiss 718→- Touch targets >= 44px on all interactive elements 719→- \`cursor: pointer\` on all clickable elements 720→ 721→**6. Responsive Design** (8 items) 722→- Mobile layout makes *design* sense (not just stacked desktop columns) 723→- Touch targets sufficient on mobile (>= 44px) 724→- No horizontal scroll on any viewport 725→- Images handle responsive (srcset, sizes, or CSS containment) 726→- Text readable without zooming on mobile (>= 16px body) 727→- Navigation collapses appropriately (hamburger, bottom nav, etc.) 728→- Forms usable on mobile (correct input types, no autoFocus on mobile) 729→- No \`user-scalable=no\` or \`maximum-scale=1\` in viewport meta 730→ 731→**7. 
Motion & Animation** (6 items) 732→- Easing: ease-out for entering, ease-in for exiting, ease-in-out for moving 733→- Duration: 50-700ms range (nothing slower unless page transition) 734→- Purpose: every animation communicates something (state change, attention, spatial relationship) 735→- \`prefers-reduced-motion\` respected (check: \`$B js "matchMedia('(prefers-reduced-motion: reduce)').matches"\`) 736→- No \`transition: all\` — properties listed explicitly 737→- Only \`transform\` and \`opacity\` animated (not layout properties like width, height, top, left) 738→ 739→**8. Content & Microcopy** (8 items) 740→- Empty states designed with warmth (message + action + illustration/icon) 741→- Error messages specific: what happened + why + what to do next 742→- Button labels specific ("Save API Key" not "Continue" or "Submit") 743→- No placeholder/lorem ipsum text visible in production 744→- Truncation handled (\`text-overflow: ellipsis\`, \`line-clamp\`, or \`break-words\`) 745→- Active voice ("Install the CLI" not "The CLI will be installed") 746→- Loading states end with \`…\` ("Saving…" not "Saving...") 747→- Destructive actions have confirmation modal or undo window 748→ 749→**9. AI Slop Detection** (10 anti-patterns — the blacklist) 750→ 751→The test: would a human designer at a respected studio ever ship this? 752→ 753→- Purple/violet/indigo gradient backgrounds or blue-to-purple color schemes 754→- **The 3-column feature grid:** icon-in-colored-circle + bold title + 2-line description, repeated 3x symmetrically. THE most recognizable AI layout. 
755→- Icons in colored circles as section decoration (SaaS starter template look) 756→- Centered everything (\`text-align: center\` on all headings, descriptions, cards) 757→- Uniform bubbly border-radius on every element (same large radius on everything) 758→- Decorative blobs, floating circles, wavy SVG dividers (if a section feels empty, it needs better content, not decoration) 759→- Emoji as design elements (rockets in headings, emoji as bullet points) 760→- Colored left-border on cards (\`border-left: 3px solid \`) 761→- Generic hero copy ("Welcome to [X]", "Unlock the power of...", "Your all-in-one solution for...") 762→- Cookie-cutter section rhythm (hero → 3 features → testimonials → pricing → CTA, every section same height) 763→ 764→**10. Performance as Design** (6 items) 765→- LCP < 2.0s (web apps), < 1.5s (informational sites) 766→- CLS < 0.1 (no visible layout shifts during load) 767→- Skeleton quality: shapes match real content, shimmer animation 768→- Images: \`loading="lazy"\`, width/height dimensions set, WebP/AVIF format 769→- Fonts: \`font-display: swap\`, preconnect to CDN origins 770→- No visible font swap flash (FOUT) — critical fonts preloaded 771→ 772→--- 773→ 774→## Phase 4: Interaction Flow Review 775→ 776→Walk 2-3 key user flows and evaluate the *feel*, not just the function: 777→ 778→\`\`\`bash 779→$B snapshot -i 780→$B click @e3 # perform action 781→$B snapshot -D # diff to see what changed 782→\`\`\` 783→ 784→Evaluate: 785→- **Response feel:** Does clicking feel responsive? Any delays or missing loading states? 786→- **Transition quality:** Are transitions intentional or generic/absent? 787→- **Feedback clarity:** Did the action clearly succeed or fail? Is the feedback immediate? 788→- **Form polish:** Focus states visible? Validation timing correct? Errors near the source? 
789→ 790→--- 791→ 792→## Phase 5: Cross-Page Consistency 793→ 794→Compare screenshots and observations across pages for: 795→- Navigation bar consistent across all pages? 796→- Footer consistent? 797→- Component reuse vs one-off designs (same button styled differently on different pages?) 798→- Tone consistency (one page playful while another is corporate?) 799→- Spacing rhythm carries across pages? 800→ 801→--- 802→ 803→## Phase 6: Compile Report 804→ 805→### Output Locations 806→ 807→**Local:** \`.gstack/design-reports/design-audit-{domain}-{YYYY-MM-DD}.md\` 808→ 809→**Project-scoped:** 810→\`\`\`bash 811→eval $(~/.claude/skills/gstack/bin/gstack-slug 2>/dev/null) 812→mkdir -p ~/.gstack/projects/$SLUG 813→\`\`\` 814→Write to: \`~/.gstack/projects/{slug}/{user}-{branch}-design-audit-{datetime}.md\` 815→ 816→**Baseline:** Write \`design-baseline.json\` for regression mode: 817→\`\`\`json 818→{ 819→ "date": "YYYY-MM-DD", 820→ "url": "", 821→ "designScore": "B", 822→ "aiSlopScore": "C", 823→ "categoryGrades": { "hierarchy": "A", "typography": "B", ... }, 824→ "findings": [{ "id": "FINDING-001", "title": "...", "impact": "high", "category": "typography" }] 825→} 826→\`\`\` 827→ 828→### Scoring System 829→ 830→**Dual headline scores:** 831→- **Design Score: {A-F}** — weighted average of all 10 categories 832→- **AI Slop Score: {A-F}** — standalone grade with pithy verdict 833→ 834→**Per-category grades:** 835→- **A:** Intentional, polished, delightful. Shows design thinking. 836→- **B:** Solid fundamentals, minor inconsistencies. Looks professional. 837→- **C:** Functional but generic. No major problems, no design point of view. 838→- **D:** Noticeable problems. Feels unfinished or careless. 839→- **F:** Actively hurting user experience. Needs significant rework. 840→ 841→**Grade computation:** Each category starts at A. Each High-impact finding drops one letter grade. Each Medium-impact finding drops half a letter grade. 
Polish findings are noted but do not affect grade. Minimum is F. 842→ 843→**Category weights for Design Score:** 844→| Category | Weight | 845→|----------|--------| 846→| Visual Hierarchy | 15% | 847→| Typography | 15% | 848→| Spacing & Layout | 15% | 849→| Color & Contrast | 10% | 850→| Interaction States | 10% | 851→| Responsive | 10% | 852→| Content Quality | 10% | 853→| AI Slop | 5% | 854→| Motion | 5% | 855→| Performance Feel | 5% | 856→ 857→AI Slop is 5% of Design Score but also graded independently as a headline metric. 858→ 859→### Regression Output 860→ 861→When previous \`design-baseline.json\` exists or \`--regression\` flag is used: 862→- Load baseline grades 863→- Compare: per-category deltas, new findings, resolved findings 864→- Append regression table to report 865→ 866→--- 867→ 868→## Design Critique Format 869→ 870→Use structured feedback, not opinions: 871→- "I notice..." — observation (e.g., "I notice the primary CTA competes with the secondary action") 872→- "I wonder..." — question (e.g., "I wonder if users will understand what 'Process' means here") 873→- "What if..." — suggestion (e.g., "What if we moved search to a more prominent position?") 874→- "I think... because..." — reasoned opinion (e.g., "I think the spacing between sections is too uniform because it doesn't create hierarchy") 875→ 876→Tie everything to user goals and product objectives. Always suggest specific improvements alongside problems. 877→ 878→--- 879→ 880→## Important Rules 881→ 882→1. **Think like a designer, not a QA engineer.** You care whether things feel right, look intentional, and respect the user. You do NOT just care whether things "work." 883→2. **Screenshots are evidence.** Every finding needs at least one screenshot. Use annotated screenshots (\`snapshot -a\`) to highlight elements. 884→3. **Be specific and actionable.** "Change X to Y because Z" — not "the spacing feels off." 885→4. 
**Never read source code.** Evaluate the rendered site, not the implementation. (Exception: offer to write DESIGN.md from extracted observations.) 886→5. **AI Slop detection is your superpower.** Most developers can't evaluate whether their site looks AI-generated. You can. Be direct about it. 887→6. **Quick wins matter.** Always include a "Quick Wins" section — the 3-5 highest-impact fixes that take <30 minutes each. 888→7. **Use \`snapshot -C\` for tricky UIs.** Finds clickable divs that the accessibility tree misses. 889→8. **Responsive is design, not just "not broken."** A stacked desktop layout on mobile is not responsive design — it's lazy. Evaluate whether the mobile layout makes *design* sense. 890→9. **Document incrementally.** Write each finding to the report as you find it. Don't batch. 891→10. **Depth over breadth.** 5-10 well-documented findings with screenshots and specific suggestions > 20 vague observations. 892→11. **Show screenshots to the user.** After every \`$B screenshot\`, \`$B snapshot -a -o\`, or \`$B responsive\` command, use the Read tool on the output file(s) so the user can see them inline. For \`responsive\` (3 files), Read all three. This is critical — without it, screenshots are invisible to the user.`; 893→} 894→ 895→function generateReviewDashboard(): string { 896→ return `## Review Readiness Dashboard 897→ 898→After completing the review, read the review log and config to display the dashboard. 899→ 900→\`\`\`bash 901→eval $(~/.claude/skills/gstack/bin/gstack-slug 2>/dev/null) 902→cat ~/.gstack/projects/$SLUG/$BRANCH-reviews.jsonl 2>/dev/null || echo "NO_REVIEWS" 903→echo "---CONFIG---" 904→~/.claude/skills/gstack/bin/gstack-config get skip_eng_review 2>/dev/null || echo "false" 905→\`\`\` 906→ 907→Parse the output. Find the most recent entry for each skill (plan-ceo-review, plan-eng-review, plan-design-review, design-review-lite). Ignore entries with timestamps older than 7 days. 
For Design Review, show whichever is more recent between \`plan-design-review\` (full visual audit) and \`design-review-lite\` (code-level check). Append "(FULL)" or "(LITE)" to the status to distinguish. Display: 908→ 909→\`\`\` 910→+====================================================================+ 911→| REVIEW READINESS DASHBOARD | 912→+====================================================================+ 913→| Review | Runs | Last Run | Status | Required | 914→|-----------------|------|---------------------|-----------|----------| 915→| Eng Review | 1 | 2026-03-16 15:00 | CLEAR | YES | 916→| CEO Review | 0 | — | — | no | 917→| Design Review | 0 | — | — | no | 918→+--------------------------------------------------------------------+ 919→| VERDICT: CLEARED — Eng Review passed | 920→+====================================================================+ 921→\`\`\` 922→ 923→**Review tiers:** 924→- **Eng Review (required by default):** The only review that gates shipping. Covers architecture, code quality, tests, performance. Can be disabled globally with \\\`gstack-config set skip_eng_review true\\\` (the "don't bother me" setting). 925→- **CEO Review (optional):** Use your judgment. Recommend it for big product/business changes, new user-facing features, or scope decisions. Skip for bug fixes, refactors, infra, and cleanup. 926→- **Design Review (optional):** Use your judgment. Recommend it for UI/UX changes. Skip for backend-only, infra, or prompt-only changes. 
927→ 928→**Verdict logic:** 929→- **CLEARED**: Eng Review has >= 1 entry within 7 days with status "clean" (or \\\`skip_eng_review\\\` is \\\`true\\\`) 930→- **NOT CLEARED**: Eng Review missing, stale (>7 days), or has open issues 931→- CEO and Design reviews are shown for context but never block shipping 932→- If \\\`skip_eng_review\\\` config is \\\`true\\\`, Eng Review shows "SKIPPED (global)" and verdict is CLEARED`; 933→} 934→ 935→function generateTestBootstrap(): string { 936→ return `## Test Framework Bootstrap 937→ 938→**Detect existing test framework and project runtime:** 939→ 940→\`\`\`bash 941→# Detect project runtime 942→[ -f Gemfile ] && echo "RUNTIME:ruby" 943→[ -f package.json ] && echo "RUNTIME:node" 944→[ -f requirements.txt ] || [ -f pyproject.toml ] && echo "RUNTIME:python" 945→[ -f go.mod ] && echo "RUNTIME:go" 946→[ -f Cargo.toml ] && echo "RUNTIME:rust" 947→[ -f composer.json ] && echo "RUNTIME:php" 948→[ -f mix.exs ] && echo "RUNTIME:elixir" 949→# Detect sub-frameworks 950→[ -f Gemfile ] && grep -q "rails" Gemfile 2>/dev/null && echo "FRAMEWORK:rails" 951→[ -f package.json ] && grep -q '"next"' package.json 2>/dev/null && echo "FRAMEWORK:nextjs" 952→# Check for existing test infrastructure 953→ls jest.config.* vitest.config.* playwright.config.* .rspec pytest.ini pyproject.toml phpunit.xml 2>/dev/null 954→ls -d test/ tests/ spec/ __tests__/ cypress/ e2e/ 2>/dev/null 955→# Check opt-out marker 956→[ -f .gstack/no-test-bootstrap ] && echo "BOOTSTRAP_DECLINED" 957→\`\`\` 958→ 959→**If test framework detected** (config files or test directories found): 960→Print "Test framework detected: {name} ({N} existing tests). Skipping bootstrap." 961→Read 2-3 existing test files to learn conventions (naming, imports, assertion style, setup patterns). 962→Store conventions as prose context for use in Phase 8e.5 or Step 3.4. **Skip the rest of bootstrap.** 963→ 964→**If BOOTSTRAP_DECLINED** appears: Print "Test bootstrap previously declined — skipping." 
**Skip the rest of bootstrap.** 965→ 966→**If NO runtime detected** (no config files found): Use AskUserQuestion: 967→"I couldn't detect your project's language. What runtime are you using?" 968→Options: A) Node.js/TypeScript B) Ruby/Rails C) Python D) Go E) Rust F) PHP G) Elixir H) This project doesn't need tests. 969→If user picks H → write \`.gstack/no-test-bootstrap\` and continue without tests. 970→ 971→**If runtime detected but no test framework — bootstrap:** 972→ 973→### B2. Research best practices 974→ 975→Use WebSearch to find current best practices for the detected runtime: 976→- \`"[runtime] best test framework 2025 2026"\` 977→- \`"[framework A] vs [framework B] comparison"\` 978→ 979→If WebSearch is unavailable, use this built-in knowledge table: 980→ 981→| Runtime | Primary recommendation | Alternative | 982→|---------|----------------------|-------------| 983→| Ruby/Rails | minitest + fixtures + capybara | rspec + factory_bot + shoulda-matchers | 984→| Node.js | vitest + @testing-library | jest + @testing-library | 985→| Next.js | vitest + @testing-library/react + playwright | jest + cypress | 986→| Python | pytest + pytest-cov | unittest | 987→| Go | stdlib testing + testify | stdlib only | 988→| Rust | cargo test (built-in) + mockall | — | 989→| PHP | phpunit + mockery | pest | 990→| Elixir | ExUnit (built-in) + ex_machina | — | 991→ 992→### B3. Framework selection 993→ 994→Use AskUserQuestion: 995→"I detected this is a [Runtime/Framework] project with no test framework. I researched current best practices. Here are the options: 996→A) [Primary] — [rationale]. Includes: [packages]. Supports: unit, integration, smoke, e2e 997→B) [Alternative] — [rationale]. Includes: [packages] 998→C) Skip — don't set up testing right now 999→RECOMMENDATION: Choose A because [reason based on project context]" 1000→ 1001→If user picks C → write \`.gstack/no-test-bootstrap\`. Tell user: "If you change your mind later, delete \`.gstack/no-test-bootstrap\` and re-run." 
Continue without tests. 1002→ 1003→If multiple runtimes detected (monorepo) → ask which runtime to set up first, with option to do both sequentially. 1004→ 1005→### B4. Install and configure 1006→ 1007→1. Install the chosen packages (npm/bun/gem/pip/etc.) 1008→2. Create minimal config file 1009→3. Create directory structure (test/, spec/, etc.) 1010→4. Create one example test matching the project's code to verify setup works 1011→ 1012→If package installation fails → debug once. If still failing → revert with \`git checkout -- package.json package-lock.json\` (or equivalent for the runtime). Warn user and continue without tests. 1013→ 1014→### B4.5. First real tests 1015→ 1016→Generate 3-5 real tests for existing code: 1017→ 1018→1. **Find recently changed files:** \`git log --since=30.days --name-only --format="" | sort | uniq -c | sort -rn | head -10\` 1019→2. **Prioritize by risk:** Error handlers > business logic with conditionals > API endpoints > pure functions 1020→3. **For each file:** Write one test that tests real behavior with meaningful assertions. Never \`expect(x).toBeDefined()\` — test what the code DOES. 1021→4. Run each test. Passes → keep. Fails → fix once. Still fails → delete silently. 1022→5. Generate at least 1 test, cap at 5. 1023→ 1024→Never import secrets, API keys, or credentials in test files. Use environment variables or test fixtures. 1025→ 1026→### B5. Verify 1027→ 1028→\`\`\`bash 1029→# Run the full test suite to confirm everything works 1030→{detected test command} 1031→\`\`\` 1032→ 1033→If tests fail → debug once. If still failing → revert all bootstrap changes and warn user. 1034→ 1035→### B5.5. 
CI/CD pipeline 1036→ 1037→\`\`\`bash 1038→# Check CI provider 1039→ls -d .github/ 2>/dev/null && echo "CI:github" 1040→ls .gitlab-ci.yml .circleci/ bitrise.yml 2>/dev/null 1041→\`\`\` 1042→ 1043→If \`.github/\` exists (or no CI detected — default to GitHub Actions): 1044→Create \`.github/workflows/test.yml\` with: 1045→- \`runs-on: ubuntu-latest\` 1046→- Appropriate setup action for the runtime (setup-node, setup-ruby, setup-python, etc.) 1047→- The same test command verified in B5 1048→- Trigger: push + pull_request 1049→ 1050→If non-GitHub CI detected → skip CI generation with note: "Detected {provider} — CI pipeline generation supports GitHub Actions only. Add test step to your existing pipeline manually." 1051→ 1052→### B6. Create TESTING.md 1053→ 1054→First check: If TESTING.md already exists → read it and update/append rather than overwriting. Never destroy existing content. 1055→ 1056→Write TESTING.md with: 1057→- Philosophy: "100% test coverage is the key to great vibe coding. Tests let you move fast, trust your instincts, and ship with confidence — without them, vibe coding is just yolo coding. With tests, it's a superpower." 1058→- Framework name and version 1059→- How to run tests (the verified command from B5) 1060→- Test layers: Unit tests (what, where, when), Integration tests, Smoke tests, E2E tests 1061→- Conventions: file naming, assertion style, setup/teardown patterns 1062→ 1063→### B7. Update CLAUDE.md 1064→ 1065→First check: If CLAUDE.md already has a \`## Testing\` section → skip. Don't duplicate. 
1066→ 1067→Append a \`## Testing\` section: 1068→- Run command and test directory 1069→- Reference to TESTING.md 1070→- Test expectations: 1071→ - 100% test coverage is the goal — tests make vibe coding safe 1072→ - When writing new functions, write a corresponding test 1073→ - When fixing a bug, write a regression test 1074→ - When adding error handling, write a test that triggers the error 1075→ - When adding a conditional (if/else, switch), write tests for BOTH paths 1076→ - Never commit code that makes existing tests fail 1077→ 1078→### B8. Commit 1079→ 1080→\`\`\`bash 1081→git status --porcelain 1082→\`\`\` 1083→ 1084→Only commit if there are changes. Stage all bootstrap files (config, test directory, TESTING.md, CLAUDE.md, .github/workflows/test.yml if created): 1085→\`git commit -m "chore: bootstrap test framework ({framework name})"\` 1086→ 1087→---`; 1088→} 1089→ 1090→const RESOLVERS: Record string> = { 1091→ COMMAND_REFERENCE: generateCommandReference, 1092→ SNAPSHOT_FLAGS: generateSnapshotFlags, 1093→ PREAMBLE: generatePreamble, 1094→ BROWSE_SETUP: generateBrowseSetup, 1095→ BASE_BRANCH_DETECT: generateBaseBranchDetect, 1096→ QA_METHODOLOGY: generateQAMethodology, 1097→ DESIGN_METHODOLOGY: generateDesignMethodology, 1098→ DESIGN_REVIEW_LITE: generateDesignReviewLite, 1099→ REVIEW_DASHBOARD: generateReviewDashboard, 1100→ TEST_BOOTSTRAP: generateTestBootstrap, 1101→}; 1102→ 1103→// ─── Template Processing ──────────────────────────────────── 1104→ 1105→const GENERATED_HEADER = `\n\n`; 1106→ 1107→function processTemplate(tmplPath: string): { outputPath: string; content: string } { 1108→ const tmplContent = fs.readFileSync(tmplPath, 'utf-8'); 1109→ const relTmplPath = path.relative(ROOT, tmplPath); 1110→ const outputPath = tmplPath.replace(/\.tmpl$/, ''); 1111→ 1112→ // Replace placeholders 1113→ let content = tmplContent.replace(/\{\{(\w+)\}\}/g, (match, name) => { 1114→ const resolver = RESOLVERS[name]; 1115→ if (!resolver) throw new Error(`Unknown 
placeholder {{${name}}} in ${relTmplPath}`); 1116→ return resolver(); 1117→ }); 1118→ 1119→ // Check for any remaining unresolved placeholders 1120→ const remaining = content.match(/\{\{(\w+)\}\}/g); 1121→ if (remaining) { 1122→ throw new Error(`Unresolved placeholders in ${relTmplPath}: ${remaining.join(', ')}`); 1123→ } 1124→ 1125→ // Prepend generated header (after frontmatter) 1126→ const header = GENERATED_HEADER.replace('{{SOURCE}}', path.basename(tmplPath)); 1127→ const fmEnd = content.indexOf('---', content.indexOf('---') + 3); 1128→ if (fmEnd !== -1) { 1129→ const insertAt = content.indexOf('\n', fmEnd) + 1; 1130→ content = content.slice(0, insertAt) + header + content.slice(insertAt); 1131→ } else { 1132→ content = header + content; 1133→ } 1134→ 1135→ return { outputPath, content }; 1136→} 1137→ 1138→// ─── Main ─────────────────────────────────────────────────── 1139→ 1140→function findTemplates(): string[] { 1141→ const templates: string[] = []; 1142→ const candidates = [ 1143→ path.join(ROOT, 'SKILL.md.tmpl'), 1144→ path.join(ROOT, 'browse', 'SKILL.md.tmpl'), 1145→ path.join(ROOT, 'qa', 'SKILL.md.tmpl'), 1146→ path.join(ROOT, 'qa-only', 'SKILL.md.tmpl'), 1147→ path.join(ROOT, 'setup-browser-cookies', 'SKILL.md.tmpl'), 1148→ path.join(ROOT, 'ship', 'SKILL.md.tmpl'), 1149→ path.join(ROOT, 'review', 'SKILL.md.tmpl'), 1150→ path.join(ROOT, 'plan-ceo-review', 'SKILL.md.tmpl'), 1151→ path.join(ROOT, 'plan-eng-review', 'SKILL.md.tmpl'), 1152→ path.join(ROOT, 'retro', 'SKILL.md.tmpl'), 1153→ path.join(ROOT, 'gstack-upgrade', 'SKILL.md.tmpl'), 1154→ path.join(ROOT, 'plan-design-review', 'SKILL.md.tmpl'), 1155→ path.join(ROOT, 'design-review', 'SKILL.md.tmpl'), 1156→ path.join(ROOT, 'design-consultation', 'SKILL.md.tmpl'), 1157→ path.join(ROOT, 'document-release', 'SKILL.md.tmpl'), 1158→ ]; 1159→ for (const p of candidates) { 1160→ if (fs.existsSync(p)) templates.push(p); 1161→ } 1162→ return templates; 1163→} 1164→ 1165→let hasChanges = false; 1166→ 1167→for 
(const tmplPath of findTemplates()) { 1168→ const { outputPath, content } = processTemplate(tmplPath); 1169→ const relOutput = path.relative(ROOT, outputPath); 1170→ 1171→ if (DRY_RUN) { 1172→ const existing = fs.existsSync(outputPath) ? fs.readFileSync(outputPath, 'utf-8') : ''; 1173→ if (existing !== content) { 1174→ console.log(`STALE: ${relOutput}`); 1175→ hasChanges = true; 1176→ } else { 1177→ console.log(`FRESH: ${relOutput}`); 1178→ } 1179→ } else { 1180→ fs.writeFileSync(outputPath, content); 1181→ console.log(`GENERATED: ${relOutput}`); 1182→ } 1183→} 1184→ 1185→if (DRY_RUN && hasChanges) { 1186→ console.error('\nGenerated SKILL.md files are stale. Run: bun run gen:skill-docs'); 1187→ process.exit(1); 1188→} 1189→