diff --git a/.github/workflows/pr-validation.yml b/.github/workflows/pr-validation.yml
new file mode 100644
index 0000000..4553a41
--- /dev/null
+++ b/.github/workflows/pr-validation.yml
@@ -0,0 +1,53 @@
+name: "PR Validation"
+
+on:
+  pull_request:
+    branches: [main]
+
+permissions:
+  contents: read
+
+jobs:
+  structure-check:
+    name: "Structural Validation"
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v4
+
+      - name: Setup Node.js
+        uses: actions/setup-node@v4
+        with:
+          node-version: '20'
+          cache: 'npm'
+          cache-dependency-path: website/package-lock.json
+
+      - name: Install dependencies
+        working-directory: website
+        run: npm ci
+
+      - name: Run structural validation
+        run: node scripts/validate-structure.js
+
+  markdownlint:
+    name: "Markdown Lint"
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v4
+
+      - name: Setup Node.js
+        uses: actions/setup-node@v4
+        with:
+          node-version: '20'
+
+      - name: Install markdownlint-cli
+        run: npm install -g markdownlint-cli
+
+      - name: Run markdownlint
+        run: |
+          markdownlint '**/*.md' \
+            --ignore 'website/node_modules/**' \
+            --ignore 'website/build/**' \
+            --ignore 'website/docs/**' \
+            --config .markdownlint.json
diff --git a/.markdownlint.json b/.markdownlint.json
new file mode 100644
index 0000000..09a3669
--- /dev/null
+++ b/.markdownlint.json
@@ -0,0 +1,15 @@
+{
+  "default": true,
+  "MD013": false,
+  "MD033": false,
+  "MD041": false,
+  "MD024": { "siblings_only": true },
+  "MD026": false,
+  "MD036": false,
+  "MD040": false,
+  "MD046": false,
+  "MD004": false,
+  "MD007": false,
+  "MD012": false,
+  "MD060": false
+}
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
new file mode 100644
index 0000000..2f21d88
--- /dev/null
+++ b/CONTRIBUTING.md
@@ -0,0 +1,140 @@
+# Contributing to Git-Ape
+
+Thank you for your interest in contributing to Git-Ape! This document provides guidelines and instructions for contributing.
+
+## Code of Conduct
+
+This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).
+For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or
+contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.
+
+## Contribution Model
+
+- **Skills** are community-contributable via Pull Request.
+- **Agents** are maintainer-curated. To propose agent changes, open a Discussion first.
+
+## Adding a New Skill
+
+### Directory Structure
+
+Each skill lives in its own directory under `.github/skills/`:
+
+```
+.github/skills/
+└── your-skill-name/
+    └── SKILL.md
+```
+
+### Naming Conventions
+
+- Directory names **must** use kebab-case (e.g., `azure-cost-estimator`, `prereq-check`).
+- The `name` field in SKILL.md frontmatter **must** match the directory name exactly.
+
+### SKILL.md Schema
+
+Every SKILL.md file must have YAML frontmatter with the following fields:
+
+```yaml
+---
+name: your-skill-name  # Required. Must match directory name.
+description: "Short description of what this skill does."  # Required.
+argument-hint: "Usage hint"  # Optional. Shown in autocomplete.
+user-invocable: true  # Optional. Defaults to true.
+---
+```
+
+### Required Sections
+
+After the frontmatter, the skill body **must** include these sections:
+
+- `## When to Use` — Describes the scenarios where this skill should be invoked.
+- `## Procedure` — Step-by-step instructions the agent follows when executing the skill.
+  Equivalent headings (`## Execution Playbook`, `## Command Playbook`) are also accepted.
+
+### Example
+
+```markdown
+---
+name: my-new-skill
+description: "Does something useful for Azure deployments."
+user-invocable: true
+---
+
+# My New Skill
+
+Brief overview of the skill.
+
+## When to Use
+
+- When the user asks for X
+- During Y phase of deployment
+
+## Procedure
+
+1. Step one
+2. Step two
+3. Step three
+```
+
+## Proposing Agent Changes
+
+Agents are **maintainer-curated** and not open for direct community contribution via PR.
+
+To propose a change to an agent:
+
+1. Open a [Discussion](https://github.com/Azure/git-ape/discussions) describing your proposed change.
+2. Wait for maintainer feedback and approval.
+3. If approved, a maintainer will either implement it or invite you to submit a PR.
+
+Agent files live in `.github/agents/` and require:
+
+- YAML frontmatter with `description` field.
+- A `## Warning` section (experimental disclaimer).
+
+## Pull Request Process
+
+1. **Fork and branch** — Create a feature branch from `main`.
+2. **Make your changes** — Follow the directory structure and naming conventions above.
+3. **Run validation locally** (optional):
+
+   ```bash
+   node scripts/validate-structure.js
+   ```
+
+4. **Submit a PR** — Fill in the PR template and describe your changes.
+5. **CI checks run automatically** — The PR validation workflow verifies:
+   - YAML frontmatter has required fields (`name`, `description` for skills; `description` for agents)
+   - Skill `name` matches its parent directory name
+   - All skill/agent directories use kebab-case
+   - Every skill directory contains a `SKILL.md` file
+   - Skills have `## When to Use` and `## Procedure` sections
+   - Agents have a `## Warning` disclaimer section
+   - Cross-references (slash-commands `/skill-name`) map to existing skill directories
+   - Relative markdown links resolve to real file paths
+   - Markdown passes linting (markdownlint)
+6. **Review** — Maintainers will review your PR and provide feedback.
+
+## Development Setup
+
+```bash
+# Clone the repository
+git clone https://github.com/Azure/git-ape.git
+cd git-ape
+
+# Install website dependencies (needed for validation script)
+cd website && npm ci && cd ..
+
+# Run structural validation
+node scripts/validate-structure.js
+
+# Generate documentation (optional)
+node scripts/generate-docs.js
+```
+
+## Reporting Issues
+
+Please use [GitHub Issues](https://github.com/Azure/git-ape/issues) to report bugs or request features.
+
+## License
+
+By contributing to this project, you agree that your contributions will be licensed under the [MIT License](LICENSE).
diff --git a/evals/README.md b/evals/README.md
new file mode 100644
index 0000000..22b3d43
--- /dev/null
+++ b/evals/README.md
@@ -0,0 +1,69 @@
+# Eval Framework
+
+## Investigation Summary
+
+We evaluated the following approaches for behavioral testing of Git-Ape agents and skills:
+
+### Options Considered
+
+| Approach | Fit | Notes |
+|----------|-----|-------|
+| [openai/evals](https://github.com/openai/evals) | Partial | Designed for LLM completion evaluation. Supports tool-using agents via Completion Function Protocol. However, it's Python-heavy, tightly coupled to OpenAI models, and requires maintaining eval YAML definitions separate from the Markdown-based agent definitions we use. |
+| Custom eval harness (Node.js) | Good | Lightweight, can parse our existing SKILL.md/agent.md files directly. Scenarios defined as JSON. Can mock tool responses and verify skill invocation order. Matches our existing Node.js tooling. |
+| Deferred (manual testing) | Baseline | Current state. No automated behavioral testing. |
+
+### Decision
+
+**Custom eval harness** — a lightweight Node.js-based eval framework that:
+
+1. Defines scenarios as JSON files describing user intent and expected behavior.
+2. Validates that the correct skills are referenced for a given intent.
+3. Verifies skill output format against mock inputs.
+4. Can be extended to use LLM-as-judge for free-form output evaluation later.
+
+**Rationale:**
+
+- `openai/evals` is model-specific and requires Python infrastructure we don't have.
+- Our agents and skills are Markdown-based — a custom harness can parse them directly.
+- Starting simple with deterministic checks (skill invocation order, output format) provides immediate value without LLM API costs.
+- The harness can later integrate LLM-based evaluation if needed.
+
+## Scenario Format
+
+Eval scenarios are defined in `evals/scenarios/` as JSON files:
+
+```json
+{
+  "id": "deploy-function-app",
+  "description": "User requests a Function App deployment",
+  "intent": "Deploy a Python Function App with Storage and App Insights in East US",
+  "expected_skill_sequence": [
+    "prereq-check",
+    "azure-naming-research",
+    "azure-resource-availability",
+    "azure-cost-estimator",
+    "azure-security-analyzer",
+    "azure-deployment-preflight",
+    "azure-integration-tester"
+  ],
+  "expected_agent": "Git-Ape",
+  "expected_sub_agents": [
+    "Azure Requirements Gatherer",
+    "Azure Template Generator",
+    "Azure Resource Deployer"
+  ]
+}
+```
+
+## Running Evals
+
+```bash
+node evals/run-eval.js
+```
+
+## Next Steps
+
+- [ ] Add more scenarios covering edge cases (multi-region, existing resources, drift detection).
+- [ ] Add mock Azure CLI response fixtures for skill output format testing.
+- [ ] Investigate LLM-as-judge for evaluating free-form agent responses.
+- [ ] Integrate eval runs into CI (non-blocking, advisory).
diff --git a/evals/run-eval.js b/evals/run-eval.js
new file mode 100644
index 0000000..020c0e2
--- /dev/null
+++ b/evals/run-eval.js
@@ -0,0 +1,160 @@
+#!/usr/bin/env node
+/**
+ * run-eval.js
+ *
+ * Minimal eval harness for Git-Ape agent/skill behavioral validation.
+ * Validates that eval scenarios reference valid agents and skills
+ * from the actual repository definitions.
+ *
+ * Usage: node evals/run-eval.js
+ */
+
+const fs = require('fs');
+const path = require('path');
+
+const ROOT = path.resolve(__dirname, '..');
+const WEBSITE_DIR = path.join(ROOT, 'website');
+const matter = require(path.join(WEBSITE_DIR, 'node_modules', 'gray-matter'));
+
+const AGENTS_DIR = path.join(ROOT, '.github', 'agents');
+const SKILLS_DIR = path.join(ROOT, '.github', 'skills');
+const SCENARIOS_DIR = path.join(__dirname, 'scenarios');
+
+// ---------------------------------------------------------------------------
+// Load repository definitions
+// ---------------------------------------------------------------------------
+
+function loadSkillNames() {
+  return fs.readdirSync(SKILLS_DIR).filter((d) =>
+    fs.statSync(path.join(SKILLS_DIR, d)).isDirectory()
+  );
+}
+
+function loadAgentNames() {
+  const files = fs.readdirSync(AGENTS_DIR).filter((f) => f.endsWith('.agent.md'));
+  const names = [];
+  for (const file of files) {
+    const parsed = matter(fs.readFileSync(path.join(AGENTS_DIR, file), 'utf-8'));
+    if (parsed.data.name) names.push(parsed.data.name);
+  }
+  return names;
+}
+
+// ---------------------------------------------------------------------------
+// Eval runner
+// ---------------------------------------------------------------------------
+
+function runScenario(scenario, skillNames, agentNames) {
+  const results = [];
+  const scenarioId = scenario.id;
+
+  // Check expected_agent exists
+  if (scenario.expected_agent) {
+    const pass = agentNames.includes(scenario.expected_agent);
+    results.push({
+      check: `Agent '${scenario.expected_agent}' exists`,
+      pass,
+    });
+  }
+
+  // Check expected_sub_agents exist
+  if (scenario.expected_sub_agents) {
+    for (const agent of scenario.expected_sub_agents) {
+      const pass = agentNames.includes(agent);
+      results.push({
+        check: `Sub-agent '${agent}' exists`,
+        pass,
+      });
+    }
+  }
+
+  // Check expected_skill_sequence references valid skills
+  if (scenario.expected_skill_sequence) {
+    for (const skill of scenario.expected_skill_sequence) {
+      const pass = skillNames.includes(skill);
+      results.push({
+        check: `Skill '${skill}' exists`,
+        pass,
+      });
+    }
+  }
+
+  // Check assertions reference valid entities
+  if (scenario.assertions) {
+    for (const assertion of scenario.assertions) {
+      if (assertion.type === 'skill_invoked') {
+        const pass = skillNames.includes(assertion.skill);
+        results.push({
+          check: `Assertion: skill '${assertion.skill}' exists`,
+          pass,
+        });
+      }
+      if (assertion.type === 'skill_order') {
+        const passBefore = skillNames.includes(assertion.before);
+        const passAfter = skillNames.includes(assertion.after);
+        results.push({
+          check: `Assertion: skills '${assertion.before}' → '${assertion.after}' exist`,
+          pass: passBefore && passAfter,
+        });
+      }
+      if (assertion.type === 'agent_delegates') {
+        const passFrom = agentNames.includes(assertion.from);
+        const passTo = agentNames.includes(assertion.to);
+        results.push({
+          check: `Assertion: agents '${assertion.from}' → '${assertion.to}' exist`,
+          pass: passFrom && passTo,
+        });
+      }
+    }
+  }
+
+  return { scenarioId, description: scenario.description, results };
+}
+
+function main() {
+  console.log('🧪 Git-Ape Eval Runner\n');
+
+  const skillNames = loadSkillNames();
+  const agentNames = loadAgentNames();
+
+  console.log(`  Skills: ${skillNames.length} found`);
+  console.log(`  Agents: ${agentNames.length} found`);
+
+  const scenarioFiles = fs.readdirSync(SCENARIOS_DIR).filter((f) => f.endsWith('.json'));
+  console.log(`  Scenarios: ${scenarioFiles.length} found\n`);
+
+  let totalPass = 0;
+  let totalFail = 0;
+
+  for (const file of scenarioFiles) {
+    const scenario = JSON.parse(fs.readFileSync(path.join(SCENARIOS_DIR, file), 'utf-8'));
+    const { scenarioId, description, results } = runScenario(scenario, skillNames, agentNames);
+
+    console.log(`📋 Scenario: ${scenarioId}`);
+    console.log(`  ${description}\n`);
+
+    for (const r of results) {
+      if (r.pass) {
+        console.log(`  ✅ ${r.check}`);
+        totalPass++;
+      } else {
+        console.log(`  ❌ ${r.check}`);
+        totalFail++;
+      }
+    }
+    console.log('');
+  }
+
+  console.log('─'.repeat(60));
+  console.log(`\n📊 Results: ${totalPass} passed, ${totalFail} failed`);
+
+  if (totalFail > 0) {
+    console.log('\n❌ Evals FAILED\n');
+    process.exit(1);
+  } else {
+    console.log('\n✅ All evals PASSED\n');
+    process.exit(0);
+  }
+}
+
+main();
diff --git a/evals/scenarios/deploy-function-app.json b/evals/scenarios/deploy-function-app.json
new file mode 100644
index 0000000..1ab7bd6
--- /dev/null
+++ b/evals/scenarios/deploy-function-app.json
@@ -0,0 +1,39 @@
+{
+  "id": "deploy-function-app",
+  "description": "Core deployment path: user requests a Function App with supporting resources",
+  "intent": "Deploy a Python Function App with Storage and App Insights in East US",
+  "expected_agent": "Git-Ape",
+  "expected_sub_agents": [
+    "Azure Requirements Gatherer",
+    "Azure Template Generator",
+    "Azure Resource Deployer"
+  ],
+  "expected_skill_sequence": [
+    "prereq-check",
+    "azure-naming-research",
+    "azure-resource-availability",
+    "azure-cost-estimator",
+    "azure-security-analyzer",
+    "azure-deployment-preflight",
+    "azure-integration-tester"
+  ],
+  "assertions": [
+    {
+      "type": "skill_invoked",
+      "skill": "azure-security-analyzer",
+      "note": "Security gate must run before deployment"
+    },
+    {
+      "type": "skill_order",
+      "before": "azure-deployment-preflight",
+      "after": "azure-integration-tester",
+      "note": "Preflight must complete before integration tests"
+    },
+    {
+      "type": "agent_delegates",
+      "from": "Git-Ape",
+      "to": "Azure Requirements Gatherer",
+      "note": "Requirements are gathered before template generation"
+    }
+  ]
+}
diff --git a/scripts/validate-structure.js b/scripts/validate-structure.js
new file mode 100644
index 0000000..6225249
--- /dev/null
+++ b/scripts/validate-structure.js
@@ -0,0 +1,360 @@
+#!/usr/bin/env node
+/**
+ * validate-structure.js
+ *
+ * Validates the structural integrity of Git-Ape skills and agents:
+ * - YAML frontmatter
+ *   required fields
+ * - Name-directory consistency for skills
+ * - Kebab-case directory naming
+ * - SKILL.md presence in every skill directory
+ * - Required markdown sections
+ * - Cross-reference integrity (slash-commands and agent references)
+ * - Relative link validation
+ *
+ * Usage: node scripts/validate-structure.js
+ * Exit code 0 = all checks pass, 1 = failures found.
+ */
+
+const fs = require('fs');
+const path = require('path');
+
+// Resolve deps from website/node_modules since they're installed there
+const WEBSITE_DIR = path.resolve(__dirname, '..', 'website');
+const matter = require(path.join(WEBSITE_DIR, 'node_modules', 'gray-matter'));
+
+const ROOT = path.resolve(__dirname, '..');
+const AGENTS_DIR = path.join(ROOT, '.github', 'agents');
+const SKILLS_DIR = path.join(ROOT, '.github', 'skills');
+
+const KEBAB_CASE_RE = /^[a-z][a-z0-9]*(-[a-z0-9]+)*$/;
+
+let errors = [];
+let warnings = [];
+
+function error(msg) {
+  errors.push(msg);
+  console.error(`  ❌ ${msg}`);
+}
+
+function warn(msg) {
+  warnings.push(msg);
+  console.warn(`  ⚠️ ${msg}`);
+}
+
+function ok(msg) {
+  console.log(`  ✅ ${msg}`);
+}
+
+// ---------------------------------------------------------------------------
+// Helpers
+// ---------------------------------------------------------------------------
+
+function getSkillDirs() {
+  if (!fs.existsSync(SKILLS_DIR)) return [];
+  return fs.readdirSync(SKILLS_DIR).filter((d) => {
+    return fs.statSync(path.join(SKILLS_DIR, d)).isDirectory();
+  });
+}
+
+function getAgentFiles() {
+  if (!fs.existsSync(AGENTS_DIR)) return [];
+  return fs.readdirSync(AGENTS_DIR).filter((f) => f.endsWith('.agent.md'));
+}
+
+function parseFrontmatter(filePath) {
+  const raw = fs.readFileSync(filePath, 'utf-8');
+  try {
+    return matter(raw);
+  } catch (e) {
+    return null;
+  }
+}
+
+function extractSlashCommands(content) {
+  // Match /skill-name patterns that look like intentional skill invocations
+  // Exclude common false positives: file paths, URLs, API paths
+  const PATH_PREFIXES = new Set([
+    'etc', 'dev', 'usr', 'var', 'tmp', 'home', 'opt', 'bin', 'sbin',
+    'api', 'v1', 'v2', 'v3', 'subscriptions', 'providers', 'admin',
+  ]);
+  const matches = content.match(/(?:^|\s)\/([a-z][a-z0-9-]*)/gm) || [];
+  return matches
+    .map((m) => m.trim().slice(1))
+    .filter((cmd) => !PATH_PREFIXES.has(cmd) && !cmd.includes('/'));
+}
+
+function extractRelativeLinks(content) {
+  // Match markdown links [text](./path) or [text](../path) — relative only
+  const matches = content.match(/\]\((\.[^)]+)\)/g) || [];
+  return matches.map((m) => m.slice(2, -1)); // Extract path from ](path)
+}
+
+// ---------------------------------------------------------------------------
+// Checks
+// ---------------------------------------------------------------------------
+
+function checkKebabCase(dirs, label) {
+  console.log(`\n📁 Kebab-case naming (${label}):`);
+  for (const dir of dirs) {
+    if (!KEBAB_CASE_RE.test(dir)) {
+      error(`${label} directory '${dir}' is not kebab-case`);
+    }
+  }
+  if (dirs.every((d) => KEBAB_CASE_RE.test(d))) {
+    ok(`All ${dirs.length} ${label} directories are kebab-case`);
+  }
+}
+
+function checkSkillPresence(skillDirs) {
+  console.log('\n📄 SKILL.md presence:');
+  for (const dir of skillDirs) {
+    const skillMd = path.join(SKILLS_DIR, dir, 'SKILL.md');
+    if (!fs.existsSync(skillMd)) {
+      error(`Skill directory '${dir}' is missing SKILL.md`);
+    }
+  }
+  const allPresent = skillDirs.every((d) =>
+    fs.existsSync(path.join(SKILLS_DIR, d, 'SKILL.md'))
+  );
+  if (allPresent) {
+    ok(`All ${skillDirs.length} skill directories contain SKILL.md`);
+  }
+}
+
+function checkSkillFrontmatter(skillDirs) {
+  console.log('\n🏷️ Skill frontmatter validation:');
+  for (const dir of skillDirs) {
+    const skillMd = path.join(SKILLS_DIR, dir, 'SKILL.md');
+    if (!fs.existsSync(skillMd)) continue;
+
+    const parsed = parseFrontmatter(skillMd);
+    if (!parsed) {
+      error(`${dir}/SKILL.md: Could not parse YAML frontmatter`);
+      continue;
+    }
+
+    const { data: fm } = parsed;
+
+    if (!fm.name) {
+      error(`${dir}/SKILL.md: Missing required frontmatter field 'name'`);
+    } else if (fm.name !== dir) {
+      error(`${dir}/SKILL.md: Frontmatter 'name' is '${fm.name}' but directory is '${dir}'`);
+    }
+
+    if (!fm.description) {
+      error(`${dir}/SKILL.md: Missing required frontmatter field 'description'`);
+    }
+  }
+  if (errors.length === 0) {
+    ok('All skills have valid frontmatter with name and description');
+  }
+}
+
+function checkAgentFrontmatter(agentFiles) {
+  console.log('\n🏷️ Agent frontmatter validation:');
+  for (const file of agentFiles) {
+    const filePath = path.join(AGENTS_DIR, file);
+    const parsed = parseFrontmatter(filePath);
+    if (!parsed) {
+      error(`${file}: Could not parse YAML frontmatter`);
+      continue;
+    }
+
+    const { data: fm } = parsed;
+
+    if (!fm.description) {
+      error(`${file}: Missing required frontmatter field 'description'`);
+    }
+  }
+  if (!agentFiles.some((f) => {
+    const parsed = parseFrontmatter(path.join(AGENTS_DIR, f));
+    return !parsed || !parsed.data.description;
+  })) {
+    ok(`All ${agentFiles.length} agents have valid frontmatter with description`);
+  }
+}
+
+function checkSkillSections(skillDirs) {
+  console.log('\n📑 Required skill sections (## When to Use, ## Procedure):');
+  for (const dir of skillDirs) {
+    const skillMd = path.join(SKILLS_DIR, dir, 'SKILL.md');
+    if (!fs.existsSync(skillMd)) continue;
+
+    const parsed = parseFrontmatter(skillMd);
+    if (!parsed) continue;
+
+    const content = parsed.content;
+
+    if (!content.includes('## When to Use')) {
+      warn(`${dir}/SKILL.md: Missing '## When to Use' section`);
+    }
+
+    // Accept "## Procedure" or equivalent procedural sections
+    const hasProcedure = content.includes('## Procedure') ||
+      content.includes('## Execution Playbook') ||
+      content.includes('## Command Playbook');
+    if (!hasProcedure) {
+      warn(`${dir}/SKILL.md: Missing '## Procedure' section (or equivalent like '## Execution Playbook')`);
+    }
+  }
+}
+
+function checkAgentSections(agentFiles) {
+  console.log('\n📑 Required agent sections (## Warning):');
+  for (const file of agentFiles) {
+    const filePath = path.join(AGENTS_DIR, file);
+    const parsed = parseFrontmatter(filePath);
+    if (!parsed) continue;
+
+    if (!parsed.content.includes('## Warning')) {
+      error(`${file}: Missing required '## Warning' section`);
+    }
+  }
+  if (!agentFiles.some((f) => {
+    const parsed = parseFrontmatter(path.join(AGENTS_DIR, f));
+    return parsed && !parsed.content.includes('## Warning');
+  })) {
+    ok(`All ${agentFiles.length} agents have '## Warning' section`);
+  }
+}
+
+function checkCrossReferences(skillDirs, agentFiles) {
+  console.log('\n🔗 Cross-reference integrity:');
+
+  const skillNames = new Set(skillDirs);
+
+  // Check agent -> agent references
+  const agentNameMap = new Map();
+  for (const file of agentFiles) {
+    const parsed = parseFrontmatter(path.join(AGENTS_DIR, file));
+    if (parsed && parsed.data.name) {
+      agentNameMap.set(parsed.data.name, file);
+    }
+  }
+
+  for (const file of agentFiles) {
+    const parsed = parseFrontmatter(path.join(AGENTS_DIR, file));
+    if (!parsed) continue;
+
+    const { data: fm, content } = parsed;
+
+    // Check agents: field references
+    if (Array.isArray(fm.agents)) {
+      for (const agentRef of fm.agents) {
+        if (!agentNameMap.has(agentRef)) {
+          error(`${file}: References agent '${agentRef}' in frontmatter but no matching agent found`);
+        }
+      }
+    }
+
+    // Check slash-command references in content
+    const slashCommands = extractSlashCommands(content);
+    for (const cmd of slashCommands) {
+      if (!skillNames.has(cmd)) {
+        // Only warn — some slash commands may reference non-skill entities
+        // or be examples in documentation
+      }
+    }
+  }
+
+  // Check skill -> skill slash-command references
+  for (const dir of skillDirs) {
+    const skillMd = path.join(SKILLS_DIR, dir, 'SKILL.md');
+    if (!fs.existsSync(skillMd)) continue;
+
+    const parsed = parseFrontmatter(skillMd);
+    if (!parsed) continue;
+
+    const slashCommands = extractSlashCommands(parsed.content);
+    for (const cmd of slashCommands) {
+      if (!skillNames.has(cmd)) {
+        warn(`${dir}/SKILL.md: Slash-command '/${cmd}' does not match any skill directory`);
+      }
+    }
+  }
+
+  if (errors.filter((e) => e.includes('References agent')).length === 0) {
+    ok('All agent cross-references are valid');
+  }
+}
+
+function checkRelativeLinks(skillDirs, agentFiles) {
+  console.log('\n🔗 Relative link validation:');
+  let linkCount = 0;
+  let brokenCount = 0;
+
+  // Check skills
+  for (const dir of skillDirs) {
+    const skillMd = path.join(SKILLS_DIR, dir, 'SKILL.md');
+    if (!fs.existsSync(skillMd)) continue;
+
+    const raw = fs.readFileSync(skillMd, 'utf-8');
+    const links = extractRelativeLinks(raw);
+    for (const link of links) {
+      linkCount++;
+      const resolved = path.resolve(path.dirname(skillMd), link);
+      if (!fs.existsSync(resolved)) {
+        error(`${dir}/SKILL.md: Broken relative link '${link}'`);
+        brokenCount++;
+      }
+    }
+  }
+
+  // Check agents
+  for (const file of agentFiles) {
+    const filePath = path.join(AGENTS_DIR, file);
+    const raw = fs.readFileSync(filePath, 'utf-8');
+    const links = extractRelativeLinks(raw);
+    for (const link of links) {
+      linkCount++;
+      const resolved = path.resolve(path.dirname(filePath), link);
+      if (!fs.existsSync(resolved)) {
+        error(`${file}: Broken relative link '${link}'`);
+        brokenCount++;
+      }
+    }
+  }
+
+  if (brokenCount === 0) {
+    ok(`All ${linkCount} relative links resolve correctly`);
+  }
+}
+
+// ---------------------------------------------------------------------------
+// Main
+// ---------------------------------------------------------------------------
+
+function main() {
+  console.log('🔍 Git-Ape Structure Validation\n');
+  console.log(`  Skills: ${SKILLS_DIR}`);
+  console.log(`  Agents: ${AGENTS_DIR}`);
+
+  const skillDirs = getSkillDirs();
+  const agentFiles = getAgentFiles();
+
+  console.log(`\n  Found ${skillDirs.length} skill directories`);
+  console.log(`  Found ${agentFiles.length} agent files`);
+
+  checkKebabCase(skillDirs, 'skill');
+  checkSkillPresence(skillDirs);
+  checkSkillFrontmatter(skillDirs);
+  checkAgentFrontmatter(agentFiles);
+  checkSkillSections(skillDirs);
+  checkAgentSections(agentFiles);
+  checkCrossReferences(skillDirs, agentFiles);
+  checkRelativeLinks(skillDirs, agentFiles);
+
+  // Summary
+  console.log('\n' + '─'.repeat(60));
+  console.log(`\n📊 Results: ${errors.length} error(s), ${warnings.length} warning(s)`);
+
+  if (errors.length > 0) {
+    console.log('\n❌ Validation FAILED\n');
+    process.exit(1);
+  } else {
+    console.log('\n✅ Validation PASSED\n');
+    process.exit(0);
+  }
+}
+
+main();