extends infer P
+ ? P extends readonly (string | number)[]
+ ? Join>
+ : never
+ : never;
+
+// Example usage
+interface User {
+ name: string;
+ address: {
+ street: string;
+ city: string;
+ };
+}
+
+type UserPaths = Paths<User>; // "name" | "address" | "address.street" | "address.city"
+```
+
+### 4. Brand Type System Implementation
+
+```typescript
+declare const __brand: unique symbol;
+declare const __validator: unique symbol;
+
+interface Brand<T, B> {
+ readonly [__brand]: B;
+ readonly [__validator]: (value: T) => boolean;
+}
+
+type Branded<T, B> = T & Brand<T, B>;
+
+// Specific branded types
+type PositiveNumber = Branded<number, 'PositiveNumber'>;
+type EmailAddress = Branded<string, 'EmailAddress'>;
+type UserId = Branded<string, 'UserId'>;
+
+// Brand constructors with validation
+function createPositiveNumber(value: number): PositiveNumber {
+ if (value <= 0) {
+ throw new Error('Number must be positive');
+ }
+ return value as PositiveNumber;
+}
+
+function createEmailAddress(value: string): EmailAddress {
+ if (!/^[^\s@]+@[^\s@]+\.[^\s@]+$/.test(value)) {
+ throw new Error('Invalid email format');
+ }
+ return value as EmailAddress;
+}
+
+// Usage prevents mixing of domain types
+function sendEmail(to: EmailAddress, userId: UserId, amount: PositiveNumber) {
+ // All parameters are type-safe and validated
+}
+
+// Error: cannot mix branded types
+// sendEmail('invalid@email', 'user123', -100); // Type errors
+```
+
+## Performance Optimization Strategies
+
+### 1. Type Complexity Analysis
+
+```bash
+# Generate type trace for analysis
+npx tsc --generateTrace trace --incremental false
+
+# Analyze the trace (requires @typescript/analyze-trace)
+npx @typescript/analyze-trace trace
+
+# Check specific type instantiation depth
+npx tsc --extendedDiagnostics | grep -E "Type instantiation|Check time"
+```
+
+### 2. Memory-Efficient Type Patterns
+
+```typescript
+// Prefer interfaces over type intersections for performance
+// Bad: Heavy intersection
+type HeavyType = TypeA & TypeB & TypeC & TypeD & TypeE;
+
+// Good: Interface extension
+interface LightType extends TypeA, TypeB, TypeC, TypeD, TypeE {}
+
+// Use discriminated unions instead of large unions
+// Bad: Large union
+type Status = 'a' | 'b' | 'c' | /* ... 100 more values */;
+
+// Good: Discriminated union
+type Status =
+ | { category: 'loading'; value: 'pending' | 'in-progress' }
+ | { category: 'complete'; value: 'success' | 'error' }
+ | { category: 'cancelled'; value: 'user' | 'timeout' };
+```
+
+## Validation Commands
+
+```bash
+# Type checking validation
+tsc --noEmit --strict
+
+# Performance validation
+tsc --extendedDiagnostics --incremental false | grep "Check time"
+
+# Memory usage validation
+node --max-old-space-size=8192 ./node_modules/typescript/lib/tsc.js --noEmit
+
+# Declaration file validation
+tsc --declaration --emitDeclarationOnly --outDir temp-types
+
+# Type coverage validation
+npx type-coverage --detail --strict
+```
+
+## Expert Resources
+
+### Official Documentation
+- [Conditional Types](https://www.typescriptlang.org/docs/handbook/2/conditional-types.html)
+- [Template Literal Types](https://www.typescriptlang.org/docs/handbook/2/template-literal-types.html)
+- [Mapped Types](https://www.typescriptlang.org/docs/handbook/2/mapped-types.html)
+- [TypeScript Performance](https://github.com/microsoft/TypeScript/wiki/Performance)
+
+### Advanced Learning
+- [Type Challenges](https://github.com/type-challenges/type-challenges) - Progressive type exercises
+- [Type-Level TypeScript](https://type-level-typescript.com) - Advanced patterns course
+- [TypeScript Deep Dive](https://basarat.gitbook.io/typescript/) - Comprehensive guide
+
+### Tools
+- [tsd](https://github.com/SamVerschueren/tsd) - Type definition testing
+- [type-coverage](https://github.com/plantain-00/type-coverage) - Coverage analysis
+- [ts-essentials](https://github.com/ts-essentials/ts-essentials) - Utility types library
+
+Always validate solutions with the provided diagnostic commands and ensure type safety is maintained throughout the implementation.
+
+## Code Review Checklist
+
+When reviewing TypeScript type definitions and usage, focus on:
+
+### Type Safety & Correctness
+- [ ] All function parameters and return types are explicitly typed
+- [ ] Generic constraints are specific enough to prevent invalid usage
+- [ ] Union types include all possible values and are properly discriminated
+- [ ] Optional properties use consistent patterns (undefined vs optional)
+- [ ] Type assertions are avoided unless absolutely necessary
+- [ ] any types are documented with justification and migration plan
+
+### Generic Design & Constraints
+- [ ] Generic type parameters have meaningful constraint boundaries
+- [ ] Variance is handled correctly (covariant, contravariant, invariant)
+- [ ] Generic functions infer types correctly from usage context
+- [ ] Conditional types provide appropriate fallback behaviors
+- [ ] Recursive types include depth limiting to prevent infinite instantiation
+- [ ] Brand types are used appropriately for nominal typing requirements
+
+### Utility Types & Transformations
+- [ ] Built-in utility types (Pick, Omit, Partial) are preferred over custom implementations
+- [ ] Mapped types transform object structures correctly
+- [ ] Template literal types generate expected string patterns
+- [ ] Conditional types distribute properly over union types
+- [ ] Type-level computation is efficient and maintainable
+- [ ] Custom utility types include comprehensive documentation
+
+### Type Inference & Narrowing
+- [ ] Type guards use proper type predicate syntax
+- [ ] Assertion functions are implemented correctly with asserts keyword
+- [ ] Control flow analysis narrows types appropriately
+- [ ] Discriminated unions include all necessary discriminator properties
+- [ ] Type narrowing works correctly with complex nested objects
+- [ ] Unknown types are handled safely without type assertions
+
+### Performance & Complexity
+- [ ] Type instantiation depth remains within reasonable limits
+- [ ] Complex union types are broken into manageable discriminated unions
+- [ ] Type computation complexity is appropriate for usage frequency
+- [ ] Recursive types terminate properly without infinite loops
+- [ ] Large type definitions don't significantly impact compilation time
+- [ ] Type coverage remains high without excessive complexity
+
+### Library & Module Types
+- [ ] Declaration files accurately represent runtime behavior
+- [ ] Module augmentation is used appropriately for extending third-party types
+- [ ] Global types are scoped correctly and don't pollute global namespace
+- [ ] Export/import types work correctly across module boundaries
+- [ ] Ambient declarations match actual runtime interfaces
+- [ ] Type compatibility is maintained across library versions
+
+### Advanced Patterns & Best Practices
+- [ ] Higher-order types are composed logically and reusably
+- [ ] Type-level programming uses appropriate abstractions
+- [ ] Index signatures are used judiciously with proper key types
+- [ ] Function overloads provide clear, unambiguous signatures
+- [ ] Namespace usage is minimal and well-justified
+- [ ] Type definitions support intended usage patterns without friction
\ No newline at end of file
diff --git a/.claude/commands/agents-md/cli.md b/.claude/commands/agents-md/cli.md
new file mode 100644
index 0000000..8391c4a
--- /dev/null
+++ b/.claude/commands/agents-md/cli.md
@@ -0,0 +1,85 @@
+---
+description: Capture CLI tool help documentation and add it to CLAUDE.md for AI assistant reference
+category: claude-setup
+allowed-tools: Bash(*:--help), Bash(*:-h), Bash(*:help), Bash(which:*), Bash(echo:*), Bash(sed:*), Edit, Read
+argument-hint: ""
+---
+
+# Add CLI Tool Documentation to CLAUDE.md
+
+Capture help documentation from CLI tools and add it to CLAUDE.md for future reference.
+
+## Usage
+`/agents-md:cli <tool-name>`
+
+Examples:
+- `/agents-md:cli npm`
+- `/agents-md:cli git`
+- `/agents-md:cli cargo`
+
+## Task
+
+### 1. Check Tool Availability
+First, verify the CLI tool exists:
+!`which $ARGUMENTS 2>/dev/null && echo "✅ $ARGUMENTS is available" || echo "❌ $ARGUMENTS not found"`
+
+### 2. Capture Help Documentation
+If the tool exists, capture its help output. Try different help flags in order:
+
+```bash
+# Try common help flags
+$ARGUMENTS --help 2>&1 || $ARGUMENTS -h 2>&1 || $ARGUMENTS help 2>&1
+```
+
+### 3. Update CLAUDE.md
+Add or update the CLI tool documentation in CLAUDE.md following these steps:
+
+1. **Check for existing CLI Tools Reference section**
+ - If it doesn't exist, create it after the Configuration section
+ - If it exists, add the new tool in alphabetical order
+
+2. **Format the documentation** as a collapsible section:
+ ```markdown
+ ## CLI Tools Reference
+
+ Documentation for CLI tools used in this project.
+
+   <details>
+   <summary>$ARGUMENTS - [Brief description from help output]</summary>
+
+   ```
+   [Help output here, with ANSI codes stripped]
+   ```
+
+   </details>
+ ```
+
+3. **Clean the output**:
+ - Remove ANSI escape codes (color codes, cursor movements)
+ - Preserve the structure and formatting
+ - Keep command examples and options intact
+
+4. **Extract key information**:
+ - Tool version if shown in help output
+ - Primary purpose/description
+ - Most commonly used commands or options
+
+### 4. Provide Summary
+After updating CLAUDE.md, show:
+- ✅ Tool documentation added to CLAUDE.md
+- Location in file where it was added
+- Brief summary of what was captured
+- Suggest reviewing CLAUDE.md to ensure formatting is correct
+
+## Error Handling
+- If tool not found: Suggest checking if it's installed and in PATH
+- If no help output: Try running the tool without arguments
+- If help output is extremely long (>500 lines): Capture key sections only
+- If CLAUDE.md is a symlink: Update the target file (likely AGENTS.md)
+
+## Implementation Notes
+When processing help output:
+1. Strip ANSI codes: `sed 's/\x1b\[[0-9;]*m//g'`
+2. Handle tools that output to stderr by using `2>&1`
+3. Preserve important formatting like tables and lists
+4. Keep code examples and command syntax intact
\ No newline at end of file
diff --git a/.claude/commands/agents-md/init.md b/.claude/commands/agents-md/init.md
new file mode 100644
index 0000000..0d3e202
--- /dev/null
+++ b/.claude/commands/agents-md/init.md
@@ -0,0 +1,434 @@
+---
+description: Initialize project with AGENTS.md and create symlinks for all AI assistants
+category: claude-setup
+allowed-tools: Write, Bash(ln:*), Bash(mkdir:*), Bash(test:*), Bash(echo:*), Read, Glob, Task
+---
+
+# Initialize AGENTS.md for Your Project
+
+Create a comprehensive AGENTS.md file following the universal standard, with symlinks for all AI assistants.
+
+## Current Status
+!`test -f AGENTS.md && echo "⚠️ AGENTS.md already exists" || echo "✅ Ready to create AGENTS.md"`
+
+## Task
+
+Please analyze this codebase and create an AGENTS.md file containing:
+1. Build/lint/test commands - especially for running a single test
+2. Code style guidelines including imports, formatting, types, naming conventions, error handling, etc.
+
+Usage notes:
+- The file you create will be given to agentic coding agents (such as yourself) that operate in this repository
+- If there's already an AGENTS.md, improve it
+- If there are Cursor rules (in .cursor/rules/ or .cursorrules) or Copilot rules (in .github/copilot-instructions.md), make sure to include them
+- Start the file with: "# AGENTS.md\nThis file provides guidance to AI coding assistants working in this repository."
+
+### 1. Gather Repository Information
+Use Task tool with description "Gather repository information" to run these Glob patterns in parallel:
+- `package*.json` - Node.js project files
+- `*.md` - Documentation files
+- `.github/workflows/*.yml` - GitHub Actions workflows
+- `.github/workflows/*.yaml` - GitHub Actions workflows (alternate extension)
+- `.cursor/rules/**` - Cursor rules
+- `.cursorrules` - Cursor rules (alternate location)
+- `.github/copilot-instructions.md` - GitHub Copilot rules
+- `.claude/agents/**/*.md` - Specialized AI subagents
+- `requirements.txt`, `setup.py`, `pyproject.toml` - Python projects
+- `go.mod` - Go projects
+- `Cargo.toml` - Rust projects
+- `Gemfile` - Ruby projects
+- `pom.xml`, `build.gradle` - Java projects
+- `*.csproj` - .NET projects
+- `Makefile` - Build automation
+- `.eslintrc*`, `.prettierrc*` - Code style configs
+- `tsconfig.json` - TypeScript config
+- `.env.example` - Environment configuration
+- `**/*.test.*`, `**/*.spec.*` - Test files (limit to a few)
+- `Dockerfile`, `docker-compose*.yml` - Docker configuration
+
+Also examine:
+- README.md for project overview and command documentation
+- package.json scripts to document all available commands
+- GitHub workflows to identify CI/CD commands
+- A few source files to infer coding conventions
+- Test files to understand testing patterns
+- `.claude/agents/` directory to discover available subagents
+
+**Script Consistency Check**: When documenting npm scripts from package.json, verify they match references in:
+- GitHub Actions workflows (npm run, npm test, etc.)
+- README.md installation and usage sections
+- Docker configuration files
+- Any setup or deployment scripts
+
+### 2. Check for Existing Configs
+- If AGENTS.md exists, improve it based on analysis
+- If .cursorrules or .cursor/rules/* exist, incorporate them
+- If .github/copilot-instructions.md exists, include its content
+- If other AI configs exist (.clinerules, .windsurfrules), merge them
+- If `.claude/agents/` directory exists, document available subagents with their descriptions and usage examples
+
+### 3. Create AGENTS.md
+Based on your analysis, create AGENTS.md with this structure:
+
+```markdown
+# AGENTS.md
+This file provides guidance to AI coding assistants working in this repository.
+
+**Note:** [Document if CLAUDE.md or other AI config files are symlinks to AGENTS.md]
+
+# [Project Name]
+
+[Project Overview: Brief description of the project's purpose and architecture]
+
+## Build & Commands
+
+[Development, testing, and deployment commands with EXACT script names:]
+
+**CRITICAL**: Document the EXACT script names from package.json, not generic placeholders.
+For example:
+- Build: `npm run build` (if package.json has "build": "webpack")
+- Test: `npm test` (if package.json has "test": "jest")
+- Type check: `npm run typecheck` (if package.json has "typecheck": "tsc --noEmit")
+- Lint: `npm run lint` (if package.json has "lint": "eslint .")
+
+If the project uses different names, document those:
+- Type check: `npm run tsc` (if that's what's in package.json)
+- Lint: `npm run eslint` (if that's what's in package.json)
+- Format: `npm run prettier` (if that's what's in package.json)
+
+[Include ALL commands from package.json scripts, even if they have non-standard names]
+
+### Script Command Consistency
+**Important**: When modifying npm scripts in package.json, ensure all references are updated:
+- GitHub Actions workflows (.github/workflows/*.yml)
+- README.md documentation
+- Contributing guides
+- Dockerfile/docker-compose.yml
+- CI/CD configuration files
+- Setup/installation scripts
+
+Common places that reference npm scripts:
+- Build commands → Check: workflows, README, Dockerfile
+- Test commands → Check: workflows, contributing docs
+- Lint commands → Check: pre-commit hooks, workflows
+- Start commands → Check: README, deployment docs
+
+**Note**: Always use the EXACT script names from package.json, not assumed names
+
+## Code Style
+
+[Formatting rules, naming conventions, and best practices:]
+- Language/framework specifics
+- Import conventions
+- Formatting rules
+- Naming conventions
+- Type usage patterns
+- Error handling patterns
+[Be specific based on actual code analysis]
+
+## Testing
+
+[Testing frameworks, conventions, and execution guidelines:]
+- Framework: [Jest/Vitest/Pytest/etc]
+- Test file patterns: [*.test.ts, *.spec.js, etc]
+- Testing conventions
+- Coverage requirements
+- How to run specific test suites
+
+### Testing Philosophy
+**When tests fail, fix the code, not the test.**
+
+Key principles:
+- **Tests should be meaningful** - Avoid tests that always pass regardless of behavior
+- **Test actual functionality** - Call the functions being tested, don't just check side effects
+- **Failing tests are valuable** - They reveal bugs or missing features
+- **Fix the root cause** - When a test fails, fix the underlying issue, don't hide the test
+- **Test edge cases** - Tests that reveal limitations help improve the code
+- **Document test purpose** - Each test should include a comment explaining why it exists and what it validates
+
+## Security
+
+[Security considerations and data protection guidelines:]
+- Authentication/authorization patterns
+- Data validation requirements
+- Secret management
+- Security best practices specific to this project
+
+## Directory Structure & File Organization
+
+### Reports Directory
+ALL project reports and documentation should be saved to the `reports/` directory:
+
+```
+your-project/
+├── reports/ # All project reports and documentation
+│ └── *.md # Various report types
+├── temp/ # Temporary files and debugging
+└── [other directories]
+```
+
+### Report Generation Guidelines
+**Important**: ALL reports should be saved to the `reports/` directory with descriptive names:
+
+**Implementation Reports:**
+- Phase validation: `PHASE_X_VALIDATION_REPORT.md`
+- Implementation summaries: `IMPLEMENTATION_SUMMARY_[FEATURE].md`
+- Feature completion: `FEATURE_[NAME]_REPORT.md`
+
+**Testing & Analysis Reports:**
+- Test results: `TEST_RESULTS_[DATE].md`
+- Coverage reports: `COVERAGE_REPORT_[DATE].md`
+- Performance analysis: `PERFORMANCE_ANALYSIS_[SCENARIO].md`
+- Security scans: `SECURITY_SCAN_[DATE].md`
+
+**Quality & Validation:**
+- Code quality: `CODE_QUALITY_REPORT.md`
+- Dependency analysis: `DEPENDENCY_REPORT.md`
+- API compatibility: `API_COMPATIBILITY_REPORT.md`
+
+**Report Naming Conventions:**
+- Use descriptive names: `[TYPE]_[SCOPE]_[DATE].md`
+- Include dates: `YYYY-MM-DD` format
+- Group with prefixes: `TEST_`, `PERFORMANCE_`, `SECURITY_`
+- Markdown format: All reports end in `.md`
+
+### Temporary Files & Debugging
+All temporary files, debugging scripts, and test artifacts should be organized in a `/temp` folder:
+
+**Temporary File Organization:**
+- **Debug scripts**: `temp/debug-*.js`, `temp/analyze-*.py`
+- **Test artifacts**: `temp/test-results/`, `temp/coverage/`
+- **Generated files**: `temp/generated/`, `temp/build-artifacts/`
+- **Logs**: `temp/logs/debug.log`, `temp/logs/error.log`
+
+**Guidelines:**
+- Never commit files from `/temp` directory
+- Use `/temp` for all debugging and analysis scripts created during development
+- Clean up `/temp` directory regularly or use automated cleanup
+- Include `/temp/` in `.gitignore` to prevent accidental commits
+
+### Example `.gitignore` patterns
+```
+# Temporary files and debugging
+/temp/
+temp/
+**/temp/
+debug-*.js
+test-*.py
+analyze-*.sh
+*-debug.*
+*.debug
+
+# Claude settings
+.claude/settings.local.json
+
+# Don't ignore reports directory
+!reports/
+!reports/**
+```
+
+### Claude Code Settings (.claude Directory)
+
+The `.claude` directory contains Claude Code configuration files with specific version control rules:
+
+#### Version Controlled Files (commit these):
+- `.claude/settings.json` - Shared team settings for hooks, tools, and environment
+- `.claude/commands/*.md` - Custom slash commands available to all team members
+- `.claude/hooks/*.sh` - Hook scripts for automated validations and actions
+
+#### Ignored Files (do NOT commit):
+- `.claude/settings.local.json` - Personal preferences and local overrides
+- Any `*.local.json` files - Personal configuration not meant for sharing
+
+**Important Notes:**
+- Claude Code automatically adds `.claude/settings.local.json` to `.gitignore`
+- The shared `settings.json` should contain team-wide standards (linting, type checking, etc.)
+- Personal preferences or experimental settings belong in `settings.local.json`
+- Hook scripts in `.claude/hooks/` should be executable (`chmod +x`)
+
+## Configuration
+
+[Environment setup and configuration management:]
+- Required environment variables
+- Configuration files and their purposes
+- Development environment setup
+- Dependencies and version requirements
+
+## Agent Delegation & Tool Execution
+
+### ⚠️ MANDATORY: Always Delegate to Specialists & Execute in Parallel
+
+**When specialized agents are available, you MUST use them instead of attempting tasks yourself.**
+
+**When performing multiple operations, send all tool calls (including Task calls for agent delegation) in a single message to execute them concurrently for optimal performance.**
+
+#### Why Agent Delegation Matters:
+- Specialists have deeper, more focused knowledge
+- They're aware of edge cases and subtle bugs
+- They follow established patterns and best practices
+- They can provide more comprehensive solutions
+
+#### Key Principles:
+- **Agent Delegation**: Always check if a specialized agent exists for your task domain
+- **Complex Problems**: Delegate to domain experts, use diagnostic agents when scope is unclear
+- **Multiple Agents**: Send multiple Task tool calls in a single message to delegate to specialists in parallel
+- **DEFAULT TO PARALLEL**: Unless you have a specific reason why operations MUST be sequential (output of A required for input of B), always execute multiple tools simultaneously
+- **Plan Upfront**: Think "What information do I need to fully answer this question?" Then execute all searches together
+
+#### Discovering Available Agents:
+```bash
+# Quick check: List agents if claudekit is installed
+command -v claudekit >/dev/null 2>&1 && claudekit list agents || echo "claudekit not installed"
+
+# If claudekit is installed, you can explore available agents:
+claudekit list agents
+```
+
+#### Critical: Always Use Parallel Tool Calls
+
+**Err on the side of maximizing parallel tool calls rather than running sequentially.**
+
+**IMPORTANT: Send all tool calls in a single message to execute them in parallel.**
+
+**These cases MUST use parallel tool calls:**
+- Searching for different patterns (imports, usage, definitions)
+- Multiple grep searches with different regex patterns
+- Reading multiple files or searching different directories
+- Combining Glob with Grep for comprehensive results
+- Searching for multiple independent concepts with codebase_search_agent
+- Any information gathering where you know upfront what you're looking for
+- Agent delegations with multiple Task calls to different specialists
+
+**Sequential calls ONLY when:**
+You genuinely REQUIRE the output of one tool to determine the usage of the next tool.
+
+**Planning Approach:**
+1. Before making tool calls, think: "What information do I need to fully answer this question?"
+2. Send all tool calls in a single message to execute them in parallel
+3. Execute all those searches together rather than waiting for each result
+4. Most of the time, parallel tool calls can be used rather than sequential
+
+**Performance Impact:** Parallel tool execution is 3-5x faster than sequential calls, significantly improving user experience.
+
+**Remember:** This is not just an optimization—it's the expected behavior. Both delegation and parallel execution are requirements, not suggestions.
+```
+
+Think about what you'd tell a new team member on their first day. Include these key sections:
+
+1. **Project Overview** - Brief description of purpose and architecture
+2. **Build & Commands** - All development, testing, and deployment commands
+3. **Code Style** - Formatting rules, naming conventions, best practices
+4. **Testing** - Testing frameworks, conventions, execution guidelines
+5. **Security** - Security considerations and data protection
+6. **Configuration** - Environment setup and configuration management
+7. **Available AI Subagents** - Document relevant specialized agents for the project
+
+Additional sections based on project needs:
+- Architecture details for complex projects
+- API documentation
+- Database schemas
+- Deployment procedures
+- Contributing guidelines
+
+**Important:**
+- Include content from any existing .cursorrules or copilot-instructions.md files
+- Focus on practical information that helps AI assistants write better code
+- Be specific and concrete based on actual code analysis
+
+### 4. Create Directory Structure
+Create the reports directory and documentation structure:
+
+```bash
+# Create reports directory
+mkdir -p reports
+
+# Create reports README template
+cat > reports/README.md << 'EOF'
+# Reports Directory
+
+This directory contains ALL project reports including validation, testing, analysis, performance benchmarks, and any other documentation generated during development.
+
+## Report Categories
+
+### Implementation Reports
+- Phase/milestone completion reports
+- Feature implementation summaries
+- Technical implementation details
+
+### Testing & Analysis Reports
+- Test execution results
+- Code coverage analysis
+- Performance test results
+- Security analysis reports
+
+### Quality & Validation
+- Code quality metrics
+- Dependency analysis
+- API compatibility reports
+- Build and deployment validation
+
+## Purpose
+
+These reports serve as:
+1. **Progress tracking** - Document completion of development phases
+2. **Quality assurance** - Validate implementations meet requirements
+3. **Knowledge preservation** - Capture decisions and findings
+4. **Audit trail** - Historical record of project evolution
+
+## Naming Conventions
+
+- Use descriptive names: `[TYPE]_[SCOPE]_[DATE].md`
+- Include dates: `YYYY-MM-DD` format
+- Group with prefixes: `TEST_`, `PERFORMANCE_`, `SECURITY_`
+- Markdown format: All reports end in `.md`
+
+## Version Control
+
+All reports are tracked in git to maintain historical records.
+EOF
+```
+
+### 5. Create Symlinks
+After creating AGENTS.md and directory structure, create symlinks for all AI assistants and document this in AGENTS.md:
+
+```bash
+# Claude Code
+ln -sf AGENTS.md CLAUDE.md
+
+# Cline
+ln -sf AGENTS.md .clinerules
+
+# Cursor
+ln -sf AGENTS.md .cursorrules
+
+# Windsurf
+ln -sf AGENTS.md .windsurfrules
+
+# Replit
+ln -sf AGENTS.md .replit.md
+
+# Gemini CLI, OpenAI Codex, OpenCode
+ln -sf AGENTS.md GEMINI.md
+
+# GitHub Copilot (needs directory)
+mkdir -p .github
+ln -sf ../AGENTS.md .github/copilot-instructions.md
+
+# Firebase Studio (needs directory)
+mkdir -p .idx
+ln -sf ../AGENTS.md .idx/airules.md
+```
+
+### 6. Show Results
+Display:
+- Created/updated AGENTS.md
+- Created reports directory structure
+- List of symlinks created
+- Key information included in the file
+- Suggest reviewing and customizing if needed
+
+**Important:** Make sure to add a note at the top of AGENTS.md documenting which files are symlinks to AGENTS.md. For example:
+```markdown
+**Note:** CLAUDE.md, .clinerules, .cursorrules, and other AI config files are symlinks to AGENTS.md in this project.
+```
+
diff --git a/.claude/commands/agents-md/migration.md b/.claude/commands/agents-md/migration.md
new file mode 100644
index 0000000..465a9af
--- /dev/null
+++ b/.claude/commands/agents-md/migration.md
@@ -0,0 +1,175 @@
+---
+description: Migrate AI assistant configuration to AGENTS.md standard with universal compatibility
+category: claude-setup
+allowed-tools: Bash(mv:*), Bash(ln:*), Bash(ls:*), Bash(test:*), Bash(grep:*), Bash(echo:*), Read
+---
+
+# Convert to Universal AGENTS.md Format
+
+This command helps you adopt the AGENTS.md standard by converting your existing CLAUDE.md file and creating symlinks for compatibility with various AI assistants.
+
+## Current Project State
+!`ls -la CLAUDE.md AGENTS.md AGENT.md GEMINI.md .cursorrules .clinerules .windsurfrules .replit.md .github/copilot-instructions.md 2>/dev/null | grep -E "(CLAUDE|AGENT|AGENTS|GEMINI|cursor|cline|windsurf|replit|copilot)" || echo "Checking for AI configuration files..."`
+
+## Task
+
+Convert this project to use the AGENTS.md standard following these steps:
+
+### 1. Pre-flight Checks
+Check for existing AI configuration files:
+- CLAUDE.md (Claude Code)
+- .clinerules (Cline)
+- .cursorrules (Cursor)
+- .windsurfrules (Windsurf)
+- .replit.md (Replit)
+- .github/copilot-instructions.md (GitHub Copilot)
+- GEMINI.md (Gemini CLI)
+- AGENTS.md (if already exists)
+- AGENT.md (legacy, to be symlinked)
+
+### 2. Analyze Existing Files
+Check all AI config files and their content to determine migration strategy:
+
+**Priority order for analysis:**
+1. CLAUDE.md (Claude Code)
+2. .clinerules (Cline)
+3. .cursorrules (Cursor)
+4. .windsurfrules (Windsurf)
+5. .github/copilot-instructions.md (GitHub Copilot)
+6. .replit.md (Replit)
+7. GEMINI.md (Gemini CLI)
+
+**Content Analysis:**
+- Compare file sizes and content
+- Identify identical files (can be safely symlinked)
+- Detect different content (needs merging or user decision)
+
+### 3. Perform Smart Migration
+
+**Scenario A: Single file found**
+```bash
+# Simple case - move to AGENTS.md
+mv CLAUDE.md AGENTS.md # or whichever file exists
+```
+
+**Scenario B: Multiple identical files**
+```bash
+# Keep the priority file, symlink others
+mv CLAUDE.md AGENTS.md
+ln -sf AGENTS.md .cursorrules # if .cursorrules was identical
+```
+
+**Scenario C: Multiple files with different content**
+1. **Automatic merging** (when possible):
+ - Different sections can be combined
+ - No conflicting information
+ - Clear structure boundaries
+
+2. **User guidance** (when conflicts exist):
+ - Show content differences
+ - Provide merge recommendations
+ - Offer options:
+ - Keep primary file, backup others
+ - Manual merge with assistance
+ - Selective migration
+
+### 4. Handle Conflicts Intelligently
+
+**When conflicts detected:**
+1. **Display differences:**
+ ```
+ ⚠️ Multiple AI config files with different content found:
+
+ 📄 CLAUDE.md (1,234 bytes)
+ - Build commands: npm run build
+ - Testing: vitest
+
+ 📄 .cursorrules (856 bytes)
+ - Code style: Prettier + ESLint
+ - TypeScript: strict mode
+
+ 📄 .github/copilot-instructions.md (567 bytes)
+ - Security guidelines
+ - No secrets in code
+ ```
+
+2. **Provide merge options:**
+ ```
+ Choose migration approach:
+ 1. 🔄 Auto-merge (recommended) - Combine all unique content
+ 2. 📋 Keep CLAUDE.md, backup others (.cursorrules.bak, copilot-instructions.md.bak)
+ 3. 🎯 Selective - Choose which sections to include
+ 4. 🛠️ Manual - Guide me through merging step-by-step
+ ```
+
+3. **Execute chosen strategy:**
+ - **Auto-merge**: Combine sections intelligently
+ - **Backup**: Keep primary, rename others with .bak extension
+ - **Selective**: Interactive selection of content blocks
+ - **Manual**: Step-by-step merge assistance
+
+### 5. Create AGENTS.md and Symlinks
+After handling content merging, create the final structure:
+```bash
+# Claude Code
+ln -s AGENTS.md CLAUDE.md
+
+# Cline
+ln -s AGENTS.md .clinerules
+
+# Cursor
+ln -s AGENTS.md .cursorrules
+
+# Windsurf
+ln -s AGENTS.md .windsurfrules
+
+# Replit
+ln -s AGENTS.md .replit.md
+
+# Gemini CLI, OpenAI Codex, OpenCode
+ln -s AGENTS.md GEMINI.md
+
+# Legacy AGENT.md symlink for backward compatibility
+ln -s AGENTS.md AGENT.md
+
+# GitHub Copilot (special case - needs directory)
+mkdir -p .github
+ln -s ../AGENTS.md .github/copilot-instructions.md
+
+# Firebase Studio (special case - needs .idx directory)
+mkdir -p .idx
+ln -s ../AGENTS.md .idx/airules.md
+```
+
+### 6. Verify Results
+- Use `ls -la` to show all created symlinks
+- Display which AI assistants are now configured
+- Show any backup files created (.bak extensions)
+- Confirm that AGENTS.md includes the symlink documentation note
+- Verify content completeness (all important sections included)
+
+### 7. Git Guidance
+If in a git repository:
+- Show git status (including new AGENTS.md and any .bak files)
+- Suggest adding AGENTS.md and symlinks to git
+- Recommend reviewing .bak files before deleting them
+- Remind to update .gitignore if needed (some teams ignore certain config files)
+
+### 8. Post-Migration Cleanup
+After successful migration and git commit:
+1. **Review backup files** (.bak extensions) to ensure nothing important was missed
+2. **Delete backup files** once satisfied with AGENTS.md content
+3. **Test with different AI assistants** to ensure all symlinks work correctly
+4. **Run `/agents-md:init`** if you want to add directory structure and latest best practices
+
+## Why AGENTS.md?
+
+AGENTS.md is becoming the standard for AI assistant configuration because:
+- Single source of truth for all AI tools
+- No more duplicating content across multiple files
+- Consistent experience across Claude Code, Cursor, Windsurf, and other tools
+- Future-proof as new AI tools emerge
+
+AGENTS.md emerged from collaborative efforts across the AI software development ecosystem, including OpenAI Codex, Amp, Jules from Google, Cursor, and Factory.
+
+Learn more at https://agents.md
\ No newline at end of file
diff --git a/.claude/commands/checkpoint/create.md b/.claude/commands/checkpoint/create.md
new file mode 100644
index 0000000..f8355e5
--- /dev/null
+++ b/.claude/commands/checkpoint/create.md
@@ -0,0 +1,31 @@
+---
+description: Create a git stash checkpoint with optional description
+category: workflow
+allowed-tools: Bash(git stash:*), Bash(git add:*), Bash(git status:*)
+argument-hint: "[optional description]"
+---
+
+## Create a checkpoint
+
+Create a git stash checkpoint to save your current working state.
+
+## Current status
+!`git status --short`
+
+## Task
+
+Create a git stash checkpoint while keeping all current changes in the working directory. Steps:
+
+1. If no description provided in $ARGUMENTS, use current timestamp as "YYYY-MM-DD HH:MM:SS"
+2. Create a stash object without modifying the working directory:
+ - First add all files temporarily: `git add -A`
+ - Create the stash object: `git stash create "claude-checkpoint: $ARGUMENTS"`
+ - This returns a commit SHA that we need to capture
+3. Store the stash object in the stash list:
+ - `git stash store -m "claude-checkpoint: $ARGUMENTS" <SHA>` (using the SHA captured in the previous step)
+4. Reset the index to unstage files: `git reset`
+5. Confirm the checkpoint was created and show what was saved
+
+Note: Using `git stash create` + `git stash store` creates a checkpoint without touching your working directory.
+
+Example: If user runs `/checkpoint before major refactor`, it creates a stash checkpoint while leaving all your files exactly as they are.
\ No newline at end of file
diff --git a/.claude/commands/checkpoint/list.md b/.claude/commands/checkpoint/list.md
new file mode 100644
index 0000000..5aaf6a7
--- /dev/null
+++ b/.claude/commands/checkpoint/list.md
@@ -0,0 +1,34 @@
+---
+description: List all Claude Code checkpoints with time and description
+category: workflow
+allowed-tools: Bash(git stash:*)
+---
+
+## List Claude Code checkpoints
+
+Display all checkpoints created by Claude Code during this and previous sessions.
+
+## Task
+
+List all Claude Code checkpoints. Steps:
+
+1. Run `git stash list` to get all stashes
+2. Filter for lines containing "claude-checkpoint:" using grep or by parsing the output
+3. For each matching stash line (format: `stash@{n}: On branch: message`):
+ - Extract the stash number from `stash@{n}`
+ - Extract the branch name after "On "
+ - Extract the checkpoint description after "claude-checkpoint: "
+ - Use `git log -1 --format="%ai" stash@{n}` to get the timestamp for each stash
+
+4. Format and display as:
+ ```
+ Claude Code Checkpoints:
+ [n] YYYY-MM-DD HH:MM:SS - Description (branch)
+ ```
+ Where n is the stash index number
+
+5. If `git stash list | grep "claude-checkpoint:"` returns nothing, display:
+ "No checkpoints found. Use /checkpoint [description] to create one."
+
+Example: A stash line like `stash@{2}: On main: claude-checkpoint: before auth refactor`
+Should display as: `[2] 2025-01-15 10:30:45 - before auth refactor (main)`
\ No newline at end of file
diff --git a/.claude/commands/checkpoint/restore.md b/.claude/commands/checkpoint/restore.md
new file mode 100644
index 0000000..fb2fb24
--- /dev/null
+++ b/.claude/commands/checkpoint/restore.md
@@ -0,0 +1,42 @@
+---
+description: Restore project to a previous checkpoint
+category: workflow
+allowed-tools: Bash(git stash:*), Bash(git status:*), Bash(git reset:*), Bash(grep:*), Bash(head:*)
+argument-hint: "<checkpoint-number|latest>"
+---
+
+## Restore to checkpoint
+
+Restore your project files to a previous checkpoint created with /checkpoint.
+
+## Available checkpoints
+!`git stash list | grep "claude-checkpoint" | head -10`
+
+## Current status
+!`git status --short`
+
+## Task
+
+Restore the project to a previous checkpoint. Based on $ARGUMENTS:
+
+1. Parse the argument:
+ - If empty or "latest": Find the most recent claude-checkpoint stash
+ - If a number (e.g. "2"): Use stash@{2} if it's a claude-checkpoint
+ - Otherwise: Show error and list available checkpoints
+
+2. Check for uncommitted changes with `git status --porcelain`. If any exist:
+ - Create a temporary backup stash: `git stash push -m "claude-restore-backup: $(date +%Y-%m-%d_%H:%M:%S)"`
+ - Note the stash reference for potential recovery
+
+3. Apply the checkpoint:
+ - Use `git stash apply stash@{n}` (not pop, to preserve the checkpoint)
+ - If there's a conflict due to uncommitted changes that were stashed, handle gracefully
+
+4. Show what was restored:
+ - Display which checkpoint was applied
+ - If uncommitted changes were backed up, inform user how to recover them
+
+Example outputs:
+- For `/restore`: "Restored to checkpoint: before major refactor (stash@{0})"
+- For `/restore 3`: "Restored to checkpoint: working OAuth implementation (stash@{3})"
+- With uncommitted changes: "Backed up current changes to stash@{0}. Restored to checkpoint: before major refactor (stash@{1})"
\ No newline at end of file
diff --git a/.claude/commands/code-review.md b/.claude/commands/code-review.md
new file mode 100644
index 0000000..e2d5b9f
--- /dev/null
+++ b/.claude/commands/code-review.md
@@ -0,0 +1,222 @@
+---
+description: Multi-aspect code review using parallel code-review-expert agents
+allowed-tools: Task, Bash(git status:*), Bash(git diff:*), Bash(git log:*)
+argument-hint: '[what to review] - e.g., "recent changes", "src/components", "*.ts files", "PR #123"'
+---
+
+# Code Review
+
+## Current Repository State
+!`git status --short && echo "---" && git diff --stat && echo "---" && git log --oneline -5`
+
+## Pre-Review Analysis: Think This Through End-to-End
+
+Before launching review agents, analyze the complete impact and context:
+
+### Impact Assessment
+- **System Impact**: What systems, services, or components could be affected by these changes?
+- **Deployment Context**: What's the risk level and timeline for these changes?
+- **Integration Points**: Are there external dependencies, APIs, or team workflows involved?
+- **Stakeholder Impact**: Who depends on the code being reviewed?
+
+### Review Strategy Coordination
+Based on impact assessment and **$ARGUMENTS**, determine:
+- **Critical vs. Nice-to-Have**: Which review aspects are CRITICAL vs. optional for this change?
+- **Potential Conflicts**: Could findings from different review areas suggest competing solutions?
+- **Shared Context**: What context should all review agents be aware of?
+- **Appropriate Rigor**: What level of analysis matches the change scope and risk?
+
+## Review Strategy
+
+Based on **$ARGUMENTS** and the impact assessment above, determine which review agents are needed:
+
+If reviewing "changes" or recent modifications:
+1. Analyze the file types that have been modified
+2. Launch only relevant review agents:
+ - **Documentation files only** (*.md, *.txt, README): Launch only Documentation & API Review agent
+ - **Test files only** (*test.*, *.spec.*, tests/): Launch Testing Quality Review and Code Quality Review agents
+ - **Config files only** (*.json, *.yaml, *.toml, .*rc): Launch Security & Dependencies Review and Architecture Review agents
+ - **Source code files** (*.ts, *.js, *.py, etc.): Launch all 6 review agents
+ - **Mixed changes**: Launch agents relevant to each file type present
+
+If reviewing a specific directory or broad scope:
+- Launch all 6 review agents for comprehensive coverage
+
+Use the Task tool to invoke the appropriate code-review-expert agents concurrently with enhanced thinking trigger instructions:
+
+## 1. Architecture & Design Review
+```
+Subagent: code-review-expert
+Description: Architecture review with end-to-end analysis
+Prompt: Review the architecture and design patterns in: $ARGUMENTS
+
+CONTEXT: [Include findings from Pre-Review Analysis above - system impact, deployment context, integration points]
+
+Primary Focus: module organization, separation of concerns, dependency management, abstraction levels, design pattern usage, and architectural consistency. Check available experts with claudekit for domain-specific patterns.
+
+THINK THIS THROUGH END-TO-END:
+- Trace architectural impacts: How does this change affect all dependent systems?
+- Map the complete data/control flow through the architecture
+- Identify what breaks when components fail or change
+- Consider the full deployment and integration pipeline
+- Analyze how this fits into the broader system architecture
+
+Check available experts with claudekit for domain-specific patterns.
+```
+
+## 2. Code Quality Review
+```
+Subagent: code-review-expert
+Description: Code quality review
+Prompt: Review code quality and maintainability in: $ARGUMENTS
+Focus on: readability, naming conventions, code complexity, DRY principles, code smells, refactoring opportunities, and consistent coding patterns. Pull domain-specific quality metrics from available experts.
+```
+
+## 3. Security & Dependencies Review
+```
+Subagent: code-review-expert
+Description: Security and dependencies review with alternative hypothesis analysis
+Prompt: Perform security and dependency analysis of: $ARGUMENTS
+
+CONTEXT: [Include findings from Pre-Review Analysis above - system impact, deployment context, integration points]
+
+Primary Focus: input validation, injection vulnerabilities, authentication/authorization, secrets management, dependency vulnerabilities, license compliance, version pinning, and supply chain security. Use security insights from domain experts if available.
+
+CONSIDER ALTERNATIVE HYPOTHESES:
+- Beyond obvious vulnerabilities, what other attack vectors exist?
+- How else could these security controls be bypassed or exploited?
+- What assumptions about user behavior, data flow, or system boundaries could an attacker violate?
+- Are there alternative explanations for apparent security measures?
+- What if the current security model is fundamentally flawed?
+
+Use security insights from domain experts if available.
+```
+
+## 4. Performance & Scalability Review
+```
+Subagent: code-review-expert
+Description: Performance and scalability review
+Prompt: Analyze performance and scalability in: $ARGUMENTS
+Focus on: algorithm complexity, memory usage, database queries, caching strategies, async patterns, resource management, load handling, and horizontal scaling considerations. Get performance patterns from relevant experts.
+```
+
+## 5. Testing Quality Review
+```
+Subagent: code-review-expert
+Description: Testing quality review
+Prompt: Review test quality and effectiveness for: $ARGUMENTS
+Focus on: meaningful assertions, test isolation, edge case handling, failure scenario coverage, mock vs real dependencies balance, test maintainability, clear test names, and actual behavior verification (not just coverage metrics). Check for testing-expert insights if available.
+```
+
+## 6. Documentation & API Review
+```
+Subagent: code-review-expert
+Description: Documentation and API review
+Prompt: Review documentation and API design for: $ARGUMENTS
+
+Focus on: README completeness, API documentation, breaking changes, code comments, JSDoc/TypeDoc coverage, usage examples, migration guides, and developer experience. Evaluate API consistency and contract clarity.
+
+Documentation Review Guidelines:
+- Consider purpose and audience: Who needs this information and why?
+- Evaluate effectiveness: Does the documentation achieve its goals?
+- Focus on clarity: Can users understand and apply the information?
+- Identify real issues: Missing information, errors, contradictions, outdated content
+- Respect intentional variation: Multiple examples may show different valid approaches
+```
+
+## Post-Review Consolidation: Consider Alternative Hypotheses
+
+After all agents complete, apply alternative hypothesis thinking before consolidating:
+
+### Cross-Pattern Analysis
+- **Competing Solutions**: Do findings from different review areas suggest conflicting solutions or approaches?
+- **Alternative Explanations**: Are there alternative explanations for patterns seen across multiple review areas?
+- **Root Cause Investigation**: Could the same underlying issue be manifesting in multiple review aspects?
+- **Intentional Trade-offs**: What if apparent "problems" are actually intentional design decisions with valid reasoning?
+
+### Prioritization with Context
+- **Real vs. Theoretical Issues**: Which issues matter given the actual deployment context and timeline?
+- **Conflicting Recommendations**: How do we sequence fixes that might conflict with each other?
+- **Alternative Approaches**: If obvious fixes prove problematic, what are the alternative solutions?
+
+Then consolidate findings into this structured format:
+
+```
+🗂 Consolidated Code Review Report - [Target]
+
+📋 Review Scope
+Target: [directory/files reviewed] ([X files, Y lines])
+Focus: Architecture, Security, Performance, Testing, Documentation
+
+📊 Executive Summary
+Brief overview of code quality, key strengths, and critical issues requiring attention.
+
+🔴 CRITICAL Issues (Must Fix Immediately)
+1. 🔒 [Security/🏗️ Architecture/⚡ Performance/🧪 Testing/📝 Documentation/💥 Breaking] [Issue Name]
+ File: [path:line]
+ Impact: [description]
+ Solution:
+ ```[code example]```
+
+2. [Additional critical issues with type icons...]
+
+🟠 HIGH Priority Issues
+1. [Type icon] [Issue name]
+ File: [path:line]
+ Impact: [description]
+ Solution: [recommendation]
+
+2. [Additional high priority issues...]
+
+🟡 MEDIUM Priority Issues
+1. [Type icon] [Issue name] - [file:line]
+ Extract into: [suggested refactoring]
+
+2. [Additional medium priority issues...]
+
+✅ Quality Metrics
+Include only aspects that were actually reviewed based on the file types and agents launched:
+┌─────────────────┬───────┬────────────────────────────────────┐
+│ Aspect │ Score │ Notes │
+├─────────────────┼───────┼────────────────────────────────────┤
+│ [Only include relevant aspects based on what was reviewed] │
+│ Architecture │ X/10 │ [Clean separation, coupling issues]│
+│ Code Quality │ X/10 │ [Readability, consistency, patterns]│
+│ Security │ X/10 │ [Critical vulnerabilities, if any] │
+│ Performance │ X/10 │ [Bottlenecks, scalability concerns]│
+│ Testing │ X/10 │ [Coverage percentage, test quality]│
+│ Documentation │ X/10 │ [API docs, comments, examples] │
+└─────────────────┴───────┴────────────────────────────────────┘
+
+For example:
+- Documentation-only review: Show only Documentation row
+- Test file review: Show Testing and Code Quality rows
+- Config file review: Show Security and Architecture rows
+- Full code review: Show all relevant aspects
+
+✨ Strengths to Preserve
+- [Key strength with evidence]
+- [Additional strengths...]
+
+🚀 Proactive Improvements
+1. [Pattern/Practice Name]
+ ```[code example]```
+
+2. [Additional improvements...]
+
+📊 Issue Distribution
+- Architecture: [X critical, Y high, Z medium]
+- Security: [X critical, Y high, Z medium]
+- Performance: [X critical, Y high, Z medium]
+- Testing: [X critical, Y high, Z medium]
+- Documentation: [X critical, Y high, Z medium]
+
+⚠️ Systemic Issues
+Repeated problems that need addressing:
+- [Problem pattern] (X occurrences)
+ → [Actionable fix/next step]
+- [Additional problems with solutions...]
+```
+
+After all agents complete, consolidate findings into this format. Focus on actionable feedback with specific file locations and code examples. Use type icons:
+🔒 Security | 🏗️ Architecture | ⚡ Performance | 🧪 Testing | 📝 Documentation | 💥 Breaking Change
\ No newline at end of file
diff --git a/.claude/commands/config/bash-timeout.md b/.claude/commands/config/bash-timeout.md
new file mode 100644
index 0000000..684167f
--- /dev/null
+++ b/.claude/commands/config/bash-timeout.md
@@ -0,0 +1,87 @@
+---
+description: Configure bash timeout values in Claude Code settings
+category: claude-setup
+allowed-tools: Read, Edit, Write
+argument-hint: "<duration> [scope]"
+---
+
+# Configure Bash Timeout Settings
+
+Configure the bash command timeout values in your Claude Code settings.json file. The default timeout is 2 minutes (120000ms), which is often insufficient for long-running operations like builds, tests, or deployments.
+
+## Current Settings
+
+User settings: !if [ -f ~/.claude/settings.json ]; then if command -v jq &>/dev/null; then cat ~/.claude/settings.json | jq '.env // {}' 2>/dev/null; else cat ~/.claude/settings.json | grep -A 10 '"env"' 2>/dev/null || echo "No env settings found"; fi; else echo "No user settings file"; fi
+Project settings: !if [ -f .claude/settings.json ]; then if command -v jq &>/dev/null; then cat .claude/settings.json | jq '.env // {}' 2>/dev/null; else cat .claude/settings.json | grep -A 10 '"env"' 2>/dev/null || echo "No env settings found"; fi; else echo "No project settings file"; fi
+
+## Available Timeout Settings
+
+- **BASH_DEFAULT_TIMEOUT_MS**: The default timeout for bash commands (in milliseconds)
+- **BASH_MAX_TIMEOUT_MS**: The maximum timeout that can be set for bash commands (in milliseconds)
+
+## Common Timeout Values
+
+- 2 minutes: 120000 (default)
+- 5 minutes: 300000
+- 10 minutes: 600000
+- 15 minutes: 900000
+- 20 minutes: 1200000
+- 30 minutes: 1800000
+
+## Configure Settings
+
+1. First, check if settings.json exists in the appropriate location
+2. Read the current settings to preserve existing configuration
+3. Add or update the `env` section with the desired timeout values
+4. Maintain all existing settings (hooks, etc.)
+
+### For User-Level Settings (~/.claude/settings.json)
+- Applies to all projects for the current user
+- Location: `~/.claude/settings.json`
+
+### For Project-Level Settings (.claude/settings.json)
+- Applies only to the current project
+- Location: `.claude/settings.json`
+- Project settings override user settings
+
+## Arguments
+
+Specify the timeout duration (e.g., "10min", "20min", "5m", "600s") and optionally the scope:
+- `$ARGUMENTS` format: `[duration] [scope]`
+- Duration: Required (e.g., "10min", "20min", "300s")
+- Scope: Optional - "user" (default) or "project"
+
+Examples:
+- `/bash-timeout 10min` - Set user-level timeout to 10 minutes
+- `/bash-timeout 20min project` - Set project-level timeout to 20 minutes
+- `/bash-timeout 600s user` - Set user-level timeout to 600 seconds
+
+## Implementation Steps
+
+1. Parse the arguments to extract duration and scope
+2. Convert duration to milliseconds
+3. Determine the settings file path based on scope
+4. Read existing settings if the file exists
+5. Update or add the env section with new timeout values
+6. Set BASH_DEFAULT_TIMEOUT_MS to the specified value
+7. Set BASH_MAX_TIMEOUT_MS to 2x the default value (or at least 20 minutes)
+8. Write the updated settings back to the file
+9. Confirm the changes to the user
+
+## Example Configuration
+
+```json
+{
+ "env": {
+ "BASH_DEFAULT_TIMEOUT_MS": "600000",
+ "BASH_MAX_TIMEOUT_MS": "1200000"
+ },
+ "hooks": {
+ // existing hooks configuration...
+ }
+}
+```
+
+This sets:
+- Default timeout: 10 minutes (600000ms)
+- Maximum timeout: 20 minutes (1200000ms)
\ No newline at end of file
diff --git a/.claude/commands/create-command.md b/.claude/commands/create-command.md
new file mode 100644
index 0000000..96500b8
--- /dev/null
+++ b/.claude/commands/create-command.md
@@ -0,0 +1,129 @@
+---
+description: Create a new Claude Code slash command with full feature support
+category: claude-setup
+allowed-tools: Write, Read, Bash(mkdir:*)
+argument-hint: "[command-name] [description]"
+---
+
+Create a new Claude Code slash command based on the user's requirements: $ARGUMENTS
+
+For complete slash command documentation, see: https://docs.claude.com/en/docs/claude-code/slash-commands
+
+First, ask the user to specify the command type:
+- **project** - Add to current project's `.claude/commands/` directory (shared with team)
+- **personal** - Add to user's `~/.claude/commands/` directory (personal use only)
+
+If the user doesn't specify, ask which type to create.
+
+Then gather the following information from the user:
+- Command name
+- Description
+- Command content/template
+- Any required tools (for frontmatter)
+- Whether to use arguments, bash commands, or file references
+
+## Command Template Structure
+
+### YAML Frontmatter
+Commands use standardized frontmatter that follows Claude Code's official schema:
+
+```yaml
+---
+# Required field:
+description: Brief description of what the command does
+
+# Security control (highly recommended):
+allowed-tools: Read, Write, Bash(git:*) # Specify allowed tools
+
+# Optional fields:
+argument-hint: "<expected-arguments>" # Help text for expected arguments
+model: sonnet # opus, sonnet, haiku, or specific model
+category: workflow # workflow, ai-assistant, or validation
+---
+```
+
+### Security with allowed-tools
+The `allowed-tools` field provides granular security control:
+- Basic: `allowed-tools: Read, Write, Edit`
+- Restricted bash: `allowed-tools: Bash(git:*), Read` # Only git commands
+- Multiple restrictions: `allowed-tools: Read, Write, Bash(npm:*, git:*)`
+
+## Features to Support
+
+When creating the command, support these Claude Code features if requested:
+
+**Arguments:** If the user wants dynamic input, use `$ARGUMENTS` placeholder
+- Example: `/deploy $ARGUMENTS` where user types `/deploy production`
+
+**Bash Execution:** If the user wants command output, use exclamation mark (!) prefix
+- Example: `!pwd > /dev/null 2>&1` or `!ls -la > /dev/null 2>&1` to include command output
+- **Performance tip**: Combine related commands with `&&` for faster execution
+- Example: `!pwd > /dev/null 2>&1 && ls -la 2>/dev/null | head -5 > /dev/null`
+
+**File References:** If the user wants file contents, use `@` prefix
+- Example: `@package.json` to include package.json contents
+
+**Namespacing:** If the command name contains `:`, create subdirectories
+- Example: `/api:create` → `.claude/commands/api/create.md`
+
+
+## Implementation Steps
+
+1. **Determine Location**
+ - If command type not specified, ask the user (project vs personal)
+ - For project commands: create `.claude/commands/` directory if needed
+ - For personal commands: create `~/.claude/commands/` directory if needed
+ - Create subdirectories for namespaced commands (e.g., `api/` for `/api:create`)
+
+2. **Create Command File**
+ - Generate `{{COMMAND_NAME}}.md` file in the appropriate directory
+ - Include YAML frontmatter if the command needs specific tools
+ - Add the command content with any placeholders, bash commands, or file references
+ - Ensure proper markdown formatting
+
+3. **Show the User**
+ - Display the created command file path
+ - Show how to invoke it with `/{{COMMAND_NAME}}`
+ - Explain any argument usage if `$ARGUMENTS` is included
+ - Provide a brief example of using the command
+
+## Command Content Guidelines
+
+Key principle: Write instructions TO the AI agent, not as the AI agent. Use imperative, instructional language rather than first-person descriptions of what the agent will do.
+
+### Example Command Templates
+
+**Simple Command:**
+```markdown
+---
+description: Create a React component
+allowed-tools: Write
+---
+
+Create a new React component named $ARGUMENTS
+
+Component template:
+\```tsx
+import React from 'react';
+
+export const $ARGUMENTS: React.FC = () => {
+  return <div>$ARGUMENTS Component</div>;
+};
+\```
+```
+
+**Command with Bash and File Analysis:**
+```markdown
+---
+description: Analyze dependencies
+allowed-tools: Read, Bash(npm:*, yarn:*, pnpm:*)
+---
+
+Current dependencies:
+@package.json
+
+Outdated packages:
+!npm outdated 2>/dev/null || echo "No outdated packages"
+
+Suggest which packages to update based on the above information.
+```
\ No newline at end of file
diff --git a/.claude/commands/create-subagent.md b/.claude/commands/create-subagent.md
new file mode 100644
index 0000000..efaf2f3
--- /dev/null
+++ b/.claude/commands/create-subagent.md
@@ -0,0 +1,248 @@
+---
+description: Create a specialized AI subagent following domain expert principles
+category: claude-setup
+allowed-tools: Write, Bash(mkdir:*), Read
+---
+
+# Create Domain Expert Subagent
+
+Create a specialized AI subagent following the domain expert principles. This command helps you build concentrated domain expertise rather than single-task agents.
+
+## Setup
+
+First, specify the subagent location:
+- **project** - Add to `.claude/agents/` (shared with team, higher priority)
+- **user** - Add to `~/.claude/agents/` (personal use across projects)
+
+If not specified, ask which type to create.
+
+## Required Information
+
+Gather the following from the user:
+
+### 1. Domain Identification
+- **Domain name**: The expertise area (e.g., typescript, testing, database)
+- **Sub-domain (optional)**: Specific area within domain (e.g., typescript-type, test-jest)
+- **Hierarchical placement**: Is this a broad expert or sub-domain specialist?
+
+### 2. Domain Coverage Assessment
+Ask the user to identify 5-15 related problems this expert will handle. Examples:
+- TypeScript type expert: generics, conditionals, mapped types, declarations, performance
+- Database performance expert: query optimization, indexing, execution plans, partitioning
+- Testing expert: structure, patterns, fixtures, debugging, coverage
+
+If they list fewer than 5 problems, suggest expanding scope or reconsidering as a slash command instead.
+
+### 3. Tool Requirements
+- Leave blank to inherit all tools (recommended for broad experts)
+- Specify specific tools for focused permissions (e.g., Read, Grep, Glob for analysis-only)
+- Common patterns:
+ - Analysis experts: `Read, Grep, Glob, Bash`
+ - Fix experts: `Read, Edit, MultiEdit, Bash, Grep`
+ - Architecture experts: `Read, Write, Edit, Bash, Grep`
+
+**Tip**: Use `/agents` to adjust tool permissions interactively later.
+
+### 4. Environmental Adaptation
+Help define how the agent detects and adapts to project context:
+- Framework/library detection (prefer config reads over heavy commands)
+- Configuration file checks using internal tools first
+- Project structure analysis
+- Available tool discovery
+
+**Note**: Prefer internal tools (Read, Grep, Glob) over shell commands for better performance.
+
+## Subagent Template Structure
+
+### YAML Frontmatter
+```yaml
+---
+# REQUIRED FIELDS
+name: domain-expert # Unique identifier (lowercase, hyphens only)
+description: Expert in {domain} handling {problem-list}. Use PROACTIVELY for {trigger-conditions}.
+
+# OPTIONAL FIELDS
+tools: Read, Grep, Bash # If omitted, inherits ALL tools
+model: opus # opus, sonnet, or haiku
+category: general # For UI grouping
+color: indigo # Visual color in UI
+displayName: Domain Expert # Human-readable name
+bundle: ["related-expert"] # Related agents to install together
+---
+```
+
+**Important**: Omitting the `tools` field grants ALL tools. An empty `tools:` field grants NO tools.
+
+### Content Template
+```markdown
+# {Domain} Expert
+
+You are a {domain} expert with deep knowledge of {specific-areas}.
+
+## Delegation First
+0. **If ultra-specific expertise needed, delegate immediately**:
+ - {Area 1} → {specialist-1}
+ - {Area 2} → {specialist-2}
+ Output: "This requires {specialty}. Use {expert-name}. Stopping here."
+
+## Core Process
+1. **Environment Detection** (Use Read/Grep before shell):
+ - Check configuration files
+ - Detect framework/tools
+ - Analyze project structure
+
+2. **Problem Analysis** (4-6 categories):
+ - {Category 1}: {Description}
+ - {Category 2}: {Description}
+ - {Category 3-6}: {Description}
+
+3. **Solution Implementation**:
+ - Apply domain best practices
+ - Use progressive solutions (quick/proper/best)
+ - Validate with established workflows
+```
+
+## Delegation Patterns
+
+### Broad Domain Experts
+- Include step 0 delegation to specialists
+- Reference related domain experts
+- Clear "stopping here" language
+- Example: `typescript-expert` delegates to `typescript-type-expert`
+
+### Sub-Domain Experts
+- Reference parent domain expert
+- Define specialization boundaries
+- Provide escalation paths
+- Example: `typescript-type-expert` references `typescript-expert`
+
+## Quality Checks
+
+Before creating, verify:
+
+### Domain Expert Criteria
+- [ ] Covers 5-15 related problems (not just 1-2)
+- [ ] Has concentrated, non-obvious knowledge
+- [ ] Detects and adapts to environment
+- [ ] Integrates with specific tools
+- [ ] Would pass the "Would I pay $5/month for this?" test
+
+### Boundary Check
+Ask: "Would someone put '{{Domain}} Expert' on their resume?"
+- Yes → Good domain boundary
+- No → Too narrow, consider broader scope
+
+### Naming Check
+- ✅ Good: `typescript-expert`, `database-performance-expert`
+- ❌ Avoid: `fix-circular-deps`, `enhanced-typescript-helper`
+
+## Proactive Triggers
+
+For agents that should be used automatically, include trigger phrases:
+- "Use PROACTIVELY when {{condition}}"
+- "MUST BE USED for {{scenario}}"
+- "Automatically handles {{problem-type}}"
+
+## Implementation Steps
+
+1. **Create Directory Structure**
+ ```bash
+ # For project subagent
+ mkdir -p .claude/agents
+
+ # For user subagent
+ mkdir -p ~/.claude/agents
+ ```
+
+2. **Generate Agent File**
+ First, convert agent name to kebab-case filename:
+ - "TypeScript Expert" → `typescript-expert.md`
+ - "Database Performance" → `database-performance.md`
+
+ Check if file exists before writing:
+ ```bash
+ # Check for existing file
+ if [[ -f "{{path}}/{{kebab-name}}.md" ]]; then
+ # Ask user: overwrite or create {{kebab-name}}-new.md?
+ fi
+ ```
+
+ Create `{{kebab-name}}.md` with the populated template
+
+3. **Validate Structure**
+ - Ensure YAML frontmatter is valid
+ - Check name follows kebab-case convention
+ - Verify description is clear and actionable
+
+4. **Show Usage Examples**
+ ```
+ # Automatic invocation based on description
+ > Fix the TypeScript type errors in my code
+
+ # Explicit invocation
+ > Use the {{agent-name}} to analyze this issue
+ ```
+
+## Common Domain Expert Examples
+
+### Complete Example: TypeScript Type Expert
+```markdown
+---
+name: typescript-type-expert
+description: Advanced TypeScript type system specialist for complex generics, conditional types, and type-level programming. Use PROACTIVELY for type errors, generics issues, or declaration problems.
+tools: Read, Edit, MultiEdit, Grep, Glob
+category: general
+---
+
+# TypeScript Type System Expert
+
+You are a TypeScript type system specialist with deep knowledge of advanced type features.
+
+## Delegation First
+0. **If different expertise needed, delegate immediately**:
+ - General TypeScript issues → typescript-expert
+ - Build/compilation → typescript-build-expert
+ - Testing → testing-expert
+ Output: "This requires {specialty}. Use {expert-name}. Stopping here."
+
+## Core Process
+1. **Environment Detection**:
+ - Check tsconfig.json for strict mode settings
+ - Detect TypeScript version
+ - Analyze type complexity in codebase
+
+2. **Problem Analysis**:
+ - Generic constraints and inference
+ - Conditional types and mapped types
+ - Template literal types
+ - Type-level programming
+
+3. **Solution Implementation**:
+ - Apply progressive fixes (quick/proper/best)
+ - Ensure type safety without runtime overhead
+ - Validate with tsc --noEmit
+```
+
+### Other Language Experts
+- `typescript-type-expert`: Type system, generics, conditionals, declarations
+- `python-async-expert`: asyncio, concurrency, event loops
+- `rust-ownership-expert`: Lifetimes, borrowing, memory safety
+
+### Infrastructure Experts
+- `database-performance-expert`: Query optimization, indexing, execution plans
+- `container-optimization-expert`: Docker, image size, security
+- `kubernetes-expert`: Deployments, networking, scaling
+
+### Quality Experts
+- `test-architecture-expert`: Test structure, fixtures, patterns
+- `webapp-security-expert`: XSS, CSRF, authentication
+- `frontend-performance-expert`: Bundle size, lazy loading, caching
+
+## Notes
+
+- Start with Claude-generated agents, then customize to your needs
+- Design focused agents with single, clear responsibilities
+- Check project agents into version control for team sharing
+- Limit tool access to what's necessary for the agent's purpose
+
+Remember: The goal is concentrated domain expertise that handles multiple related problems, not single-task agents. When in doubt, expand the scope to cover more related problems within the domain.
\ No newline at end of file
diff --git a/.claude/commands/dev/cleanup.md b/.claude/commands/dev/cleanup.md
new file mode 100644
index 0000000..2126e73
--- /dev/null
+++ b/.claude/commands/dev/cleanup.md
@@ -0,0 +1,206 @@
+---
+description: Clean up debug files, test artifacts, and status reports created during development
+category: workflow
+allowed-tools: Task, Bash(git:*), Bash(echo:*), Bash(grep:*), Bash(ls:*), Bash(pwd:*), Bash(head:*), Bash(wc:*), Bash(test:*)
+---
+
+## Purpose
+
+Clean up temporary files and debug artifacts that Claude Code commonly creates during development sessions. These files clutter the workspace and should not be committed to version control.
+
+## Context
+
+\!`git status --porcelain && git status --ignored --porcelain | grep "^!!" && echo "--- PWD: $(pwd) ---" && ls -la && if [ -z "$(git status --porcelain)" ]; then echo "WORKING_DIR_CLEAN=true" && git ls-files | grep -E "(analyze-.*\.(js|ts)|debug-.*\.(js|ts)|test-.*\.(js|ts|sh)|.*-test\.(js|ts|sh)|quick-test\.(js|ts|sh)|.*-poc\..*|poc-.*\..*|.*_poc\..*|proof-of-concept-.*\..*|verify-.*\.md|research-.*\.(js|ts)|temp-.*/|test-.*/|.*_SUMMARY\.md|.*_REPORT\.md|.*_CHECKLIST\.md|.*_COMPLETE\.md|.*_GUIDE\.md|.*_ANALYSIS\.md|.*-analysis\.md|.*-examples\.(js|ts))$" | head -20 && echo "--- Found $(git ls-files | grep -E "(analyze-.*\.(js|ts)|debug-.*\.(js|ts)|test-.*\.(js|ts|sh)|.*-test\.(js|ts|sh)|quick-test\.(js|ts|sh)|.*-poc\..*|poc-.*\..*|.*_poc\..*|proof-of-concept-.*\..*|verify-.*\.md|research-.*\.(js|ts)|temp-.*/|test-.*/|.*_SUMMARY\.md|.*_REPORT\.md|.*_CHECKLIST\.md|.*_COMPLETE\.md|.*_GUIDE\.md|.*_ANALYSIS\.md|.*-analysis\.md|.*-examples\.(js|ts))$" | wc -l) committed cleanup candidates ---"; else echo "WORKING_DIR_CLEAN=false"; fi`
+
+Launch ONE subagent to analyze the git status (including ignored files) and propose files for deletion. If the working directory is clean, also check for committed files that match cleanup patterns.
+
+## Target Files for Cleanup
+
+**Debug & Analysis Files:**
+- `analyze-*.js`, `analyze-*.ts` - Analysis scripts (e.g., `analyze-race-condition.js`)
+- `debug-*.js`, `debug-*.ts` - Debug scripts (e.g., `debug-detailed.js`, `debug-race-condition.js`)
+- `research-*.js`, `research-*.ts` - Research scripts (e.g., `research-frontmatter-libs.js`)
+- `*-analysis.md` - Analysis documents (e.g., `eslint-manual-analysis.md`)
+
+**Test Files (temporary/experimental):**
+- `test-*.js`, `test-*.ts`, `test-*.sh` - Test scripts (e.g., `test-race-condition.js`, `test-basic-add.js`, `test-poc.sh`)
+- `*-test.js`, `*-test.ts`, `*-test.sh` - Test scripts with suffix
+- `quick-test.js`, `quick-test.ts`, `quick-test.sh` - Quick test files
+- `verify-*.md` - Verification documents (e.g., `verify-migration.md`)
+- `*-examples.js`, `*-examples.ts` - Example files (e.g., `frontmatter-replacement-examples.ts`)
+
+**Proof of Concept (POC) Files:**
+- `*-poc.*` - POC files in any language (e.g., `test-poc.sh`, `auth-poc.js`)
+- `poc-*.*` - POC files with prefix (e.g., `poc-validation.ts`)
+- `*_poc.*` - POC files with underscore (e.g., `feature_poc.js`)
+- `proof-of-concept-*.*` - Verbose POC naming
+
+**Temporary Directories:**
+- `temp-*` - Temporary directories (e.g., `temp-debug/`, `temp-test/`, `temp-test-fix/`)
+- `test-*` - Temporary test directories (e.g., `test-integration/`, `test-2-concurrent/`)
+- NOTE: These are different from standard `test/` or `tests/` directories which should be preserved
+
+**Reports & Summaries:**
+- `*_SUMMARY.md` - Summary reports (e.g., `TEST_SUMMARY.md`, `ESLINT_FIXES_SUMMARY.md`)
+- `*_REPORT.md` - Various reports (e.g., `QUALITY_VALIDATION_REPORT.md`, `RELEASE_READINESS_REPORT.md`)
+- `*_CHECKLIST.md` - Checklist documents (e.g., `MIGRATION_CHECKLIST.md`)
+- `*_COMPLETE.md` - Completion markers (e.g., `MIGRATION_COMPLETE.md`)
+- `*_GUIDE.md` - Temporary guides (e.g., `MIGRATION_GUIDE.md`)
+- `*_ANALYSIS.md` - Analysis reports (e.g., `FRONTMATTER_ANALYSIS.md`)
+
+## Safety Rules
+
+**Files safe to propose for deletion:**
+- Must be untracked (?? in git status) OR ignored (!! in git status)
+- Should match or be similar to cleanup patterns above
+- Must be clearly temporary/debug files
+
+**Never propose these files:**
+- Any committed files (not marked ?? or !!) unless working directory is clean
+- CHANGELOG.md, README.md, AGENTS.md, CLAUDE.md (even if untracked)
+- Core project directories: src/, dist/, scripts/, node_modules/, etc.
+- Standard test directories: `test/`, `tests/`, `__tests__/` (without hyphens)
+- Any files you're uncertain about
+
+## Instructions
+
+Launch ONE subagent to:
+
+1. **Analyze the git status output** provided in the context above
+2. **Check if WORKING_DIR_CLEAN=true**: If so, also analyze committed files that match cleanup patterns
+3. **Identify cleanup candidates**:
+ - For dirty working directory: Focus on untracked (??) and ignored (!!) files
+ - For clean working directory: Also include committed files matching cleanup patterns
+4. **Create a proposal list** of files and directories to delete
+5. **Present the list to the user** for approval before any deletion
+6. **Do NOT delete anything** - only propose what should be deleted
+
+The agent should provide:
+- Clear list of proposed deletions with reasons
+- For untracked files: Confirmation they are marked (??) or (!!)
+- For committed files: Clear indication they are committed and match debug/temp patterns
+- Ask user for explicit approval before proceeding
+
+**IMPORTANT**: The agent cannot delete files directly. It must present a proposal and wait for user confirmation.
+
+## After User Approval
+
+Once the user approves the proposed deletions:
+
+1. **Delete the approved files** using appropriate commands:
+ - For untracked/ignored files: `rm -f` or `rm -rf` for directories
+ - For committed files: `git rm` to properly remove from git tracking
+2. **Analyze the target cleanup patterns** and approved files to identify common types
+3. **Propose .gitignore patterns** based on the cleanup patterns to prevent future accumulation:
+ ```
+ # Debug and analysis files
+ analyze-*.js
+ analyze-*.ts
+ debug-*.js
+ debug-*.ts
+ research-*.js
+ research-*.ts
+ *-analysis.md
+
+ # Temporary test files
+ test-*.js
+ test-*.ts
+ *-test.js
+ *-test.ts
+ quick-test.js
+ quick-test.ts
+ verify-*.md
+ *-examples.js
+ *-examples.ts
+
+ # Temporary directories
+ temp-*/
+ test-*/
+
+ # Reports and summaries
+ *_SUMMARY.md
+ *_REPORT.md
+ *_CHECKLIST.md
+ *_COMPLETE.md
+ *_GUIDE.md
+ *_ANALYSIS.md
+ ```
+4. **Add suggested patterns to .gitignore** if user agrees
+
+This prevents the same types of files from cluttering the workspace in future development sessions.
+
+**Note**: When removing committed files, the agent should use `git rm` to ensure proper removal from git tracking, and remind the user to commit these removals.
+
+## Example Output
+
+Here's what a typical cleanup analysis looks like:
+
+```
+⏺ Based on my analysis, I've identified 17 files that can be cleaned up from your project. Here's what I found:
+
+🗑️ Untracked Files to Remove (8 files)
+
+Research/Debug files:
+rm -f research-frontmatter-libs.js
+rm -f eslint-manual-analysis.md
+rm -f frontmatter-replacement-examples.ts
+rm -f test-content-preservation.ts
+rm -f test-migration.ts
+rm -f verify-migration.md
+
+Dated reports:
+rm -f reports/RELEASE_READINESS_REPORT_2025-07-18.md
+rm -f reports/TEST_FAILURE_ANALYSIS_2025-07-18.md
+
+📁 Committed Files to Move or Remove (9 files)
+
+These report files are in the root but should be in reports/:
+
+# Move to reports/ (recommended):
+git mv FRONTMATTER_ANALYSIS.md reports/
+git mv MIGRATION_CHECKLIST.md reports/
+git mv MIGRATION_COMPLETE.md reports/
+git mv MIGRATION_GUIDE.md reports/
+git mv PERFORMANCE_TEST_FIX_SUMMARY.md reports/
+git mv QUALITY_VALIDATION_REPORT.md reports/
+git mv RELEASE_READINESS_REPORT.md reports/
+git mv TEST_FIXES_SUMMARY.md reports/
+git mv VALIDATION_REPORT.md reports/
+
+📝 Update .gitignore
+
+Add these patterns to prevent future accumulation:
+
+# Research and debug files
+research-*.js
+research-*.ts
+*-manual-analysis.md
+*-examples.ts
+test-*.ts
+!test/ # Exclude the test directory itself
+verify-*.md
+
+# Reports in root directory (should be in reports/)
+/*_ANALYSIS.md
+/*_CHECKLIST.md
+/*_COMPLETE.md
+/*_GUIDE.md
+/*_SUMMARY.md
+/*_REPORT.md
+# Preserve important documentation
+!CHANGELOG.md
+!README.md
+!AGENTS.md
+
+# Dated reports
+reports/*_[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9].md
+
+Would you like me to proceed with the cleanup?
+```
+
+The command analyzes your project and categorizes cleanup items:
+- **Untracked files**: Temporary debug/test files that can be deleted
+- **Committed files**: Often reports that should be moved to the reports/ directory
+- **.gitignore updates**: Patterns to prevent future accumulation
+
+The agent will always ask for confirmation before making any changes.
\ No newline at end of file
diff --git a/.claude/commands/gh/repo-init.md b/.claude/commands/gh/repo-init.md
new file mode 100644
index 0000000..7042b45
--- /dev/null
+++ b/.claude/commands/gh/repo-init.md
@@ -0,0 +1,54 @@
+---
+allowed-tools: Bash, Write, TodoWrite
+description: Create a new GitHub repository with proper setup including directory creation, git initialization, and remote configuration
+category: workflow
+argument-hint: "<repository-name>"
+---
+
+# GitHub Repository Setup
+
+Create a new GitHub repository named "$ARGUMENTS" with proper directory structure and git setup.
+
+**Note:** The repository will be created as **private** by default for security. If you need a public repository, please specify "public" in your request.
+
+## Steps to execute:
+
+1. Create a new directory named "$ARGUMENTS"
+2. Initialize a git repository in that directory
+3. Create the GitHub repository using gh CLI
+4. Create a basic README.md file
+5. Make an initial commit
+6. Set up the remote origin
+7. Push to GitHub
+
+## Commands:
+
+```bash
+# Create the directory
+mkdir "$ARGUMENTS"
+cd "$ARGUMENTS"
+
+# Initialize git repository
+git init
+
+# Create GitHub repository using gh CLI (private by default)
+gh repo create "$ARGUMENTS" --private
+
+# Create README.md
+echo "# $ARGUMENTS" > README.md
+echo "" >> README.md
+echo "A new repository created with GitHub CLI." >> README.md
+
+# Initial commit
+git add README.md
+git commit -m "Initial commit"
+
+# Add remote origin (using SSH)
+git remote add origin "git@github.com:$(gh api user --jq .login)/$ARGUMENTS.git"
+
+# Push to GitHub
+git branch -M main
+git push -u origin main
+```
+
+Execute these commands to create the repository.
\ No newline at end of file
diff --git a/.claude/commands/git/checkout.md b/.claude/commands/git/checkout.md
new file mode 100644
index 0000000..1d7977b
--- /dev/null
+++ b/.claude/commands/git/checkout.md
@@ -0,0 +1,108 @@
+---
+description: Smart branch creation and switching with conventional naming
+allowed-tools: Bash(git:*), Read
+category: workflow
+argument-hint: "<branch-name> or <type>/<name>"
+---
+
+# Git Checkout: Smart Branch Management
+
+Create or switch to branches with intelligent naming conventions and setup.
+
+## Current Branch Status
+
+!`git branch --show-current 2>/dev/null || echo "(no branch)"`
+
+## Available Branches
+
+!`git branch -a 2>/dev/null | head -20`
+
+## Branch Creation Task
+
+Based on the arguments provided: `$ARGUMENTS`
+
+Parse the branch specification and create/switch to the appropriate branch.
+
+### Supported Branch Types
+- `feature/` - New features and enhancements
+- `bugfix/` - Bug fixes (non-critical)
+- `hotfix/` - Urgent production fixes
+- `release/` - Release preparation branches
+- `chore/` - Maintenance and cleanup tasks
+- `experiment/` - Experimental features
+- `docs/` - Documentation updates
+- `test/` - Test-related changes
+- `refactor/` - Code refactoring
+
+### Branch Naming Rules
+1. If argument contains `/`, use as-is (e.g., `feature/user-auth`)
+2. If argument is single word, suggest adding a prefix
+3. Convert spaces to hyphens
+4. Lowercase all characters
+5. Remove special characters except hyphens and slashes
+6. Validate branch name is git-compatible
+
+### Workflow
+
+1. **Parse the branch argument**:
+ - If empty, show current branch and available branches
+ - If contains `/`, treat as type/name format
+ - If single word without `/`, ask for branch type or suggest `feature/`
+
+2. **Validate branch name**:
+ - Check if branch already exists locally
+ - Check if branch exists on remote
+ - Ensure name follows git conventions
+ - Warn if name is too long (>50 chars)
+
+3. **Create or switch branch**:
+   - If branch exists locally: `git checkout <branch-name>`
+   - If branch exists only on remote: `git checkout -b <branch-name> origin/<branch-name>`
+   - If new branch: `git checkout -b <branch-name>`
+
+4. **Set up branch configuration**:
+ - For hotfix branches: Base off main/master
+ - For feature branches: Base off current branch or develop
+ - For release branches: Base off develop or main
+
+5. **Report status**:
+ - Confirm branch switch/creation
+ - Show upstream tracking status
+ - Suggest next steps (e.g., "Ready to start working. Use /git:push to set upstream when ready to push.")
+
+### Examples
+
+```bash
+# Create feature branch
+/git:checkout feature/user-authentication
+
+# Create hotfix from main
+/git:checkout hotfix/security-patch
+
+# Switch to existing branch
+/git:checkout develop
+
+# Create branch without prefix (will prompt)
+/git:checkout payment-integration
+```
+
+### Special Handling
+
+For hotfix branches:
+- Automatically checkout from main/master first
+- Set high priority indicator
+- Suggest immediate push after fix
+
+For feature branches:
+- Check if develop branch exists, use as base
+- Otherwise use current branch as base
+
+For release branches:
+- Validate version format if provided (e.g., release/v1.2.0)
+- Set up from develop or main
+
+### Error Handling
+
+- If branch name is invalid, suggest corrections
+- If checkout fails, show git error and provide guidance
+- If working directory is dirty, warn and suggest stashing or committing
\ No newline at end of file
diff --git a/.claude/commands/git/commit.md b/.claude/commands/git/commit.md
new file mode 100644
index 0000000..8eb09f1
--- /dev/null
+++ b/.claude/commands/git/commit.md
@@ -0,0 +1,76 @@
+---
+description: Create a git commit following the project's established style
+category: workflow
+allowed-tools: Bash(git:*), Bash(echo:*), Bash(head:*), Bash(wc:*), Bash(test:*), Bash([:[*), Bash(grep:*), Read, Edit, Task
+---
+
+Create a git commit following the project's established style
+
+## Git Expert Integration
+For complex commit scenarios (merge commits, conflict resolution, commit history issues, interactive rebasing), consider using the Task tool with `git-expert` subagent for specialized git expertise.
+
+## Efficiency Note:
+This command intelligently reuses recent git:status results when available to avoid redundant operations. If you just ran /git:status, the commit process will be faster.
+
+When git conventions are already documented in CLAUDE.md/AGENTS.md, use them directly without verbose explanation.
+
+All git commands are combined into a single bash call for maximum speed.
+
+## Steps:
+1. Check if the previous message contains git:status results:
+ - Look for patterns like "Git Status Analysis", "Modified Files:", "Uncommitted Changes:"
+ - If found and recent (within last 2-3 messages): Reuse those results
+ - If not found or stale: Run a single combined git command:
+ !git --no-pager status --porcelain=v1 && echo "---STAT---" && git --no-pager diff --stat 2>/dev/null && echo "---DIFF---" && git --no-pager diff 2>/dev/null | head -2000 && echo "---LOG---" && git --no-pager log --oneline -5
+ - Note: Only skip git status if you're confident the working directory hasn't changed
+ - Note: Full diff is capped at 2000 lines to prevent context flooding. The stat summary above shows all changed files
+2. Review the diff output to verify:
+ - No sensitive information (passwords, API keys, tokens) in the changes
+ - No debugging code or console.log statements left in production code
+ - No temporary debugging scripts (test-*.js, debug-*.py, etc.) created by Claude Code
+ - No temporary files or outputs in inappropriate locations (move to project's temp directory or delete)
+ - All TODO/FIXME comments are addressed or intentionally left
+3. Use documented git commit conventions from CLAUDE.md/AGENTS.md
+ - If conventions are not documented, analyze recent commits and document them
+4. If the project uses ticket/task codes, ask the user for the relevant code if not clear from context
+5. Check if README.md or other documentation needs updating to reflect the changes (see "Documentation Updates" section below)
+6. Run tests and lint commands to ensure code quality (unless just ran before this command)
+7. Stage all relevant files (including any updated documentation)
+8. Create commit with appropriate message matching the project's conventions
+9. Verify commit succeeded - Report with ✅ success indicator
+10. Check if any post-commit hooks need to be considered (e.g., pushing to remote, creating PR)
+
+## Documentation Updates:
+Consider updating relevant documentation when committing changes:
+- README.md: New features, API changes, installation steps, usage examples
+- CHANGELOG.md: Notable changes, bug fixes, new features
+- API documentation: New endpoints, changed parameters, deprecated features
+- User guides: New workflows, updated procedures
+- Configuration docs: New settings, changed defaults
+
+## Commit Convention Documentation:
+Only when conventions are NOT already documented: Analyze the commit history and document the observed conventions in CLAUDE.md under a "Git Commit Conventions" section. Once documented, use them without verbose explanation.
+
+The documentation should capture whatever style the project uses, for example:
+- Simple descriptive messages: "Fix navigation bug"
+- Conventional commits: "feat(auth): add OAuth support"
+- Prefixed style: "[BUGFIX] Resolve memory leak in parser"
+- Task/ticket codes: "PROJ-123: Add user authentication"
+- JIRA integration: "ABC-456 Fix memory leak in parser"
+- GitHub issues: "#42 Update documentation"
+- Imperative mood: "Add user authentication"
+- Past tense: "Added user authentication"
+- Or any other project-specific convention
+
+Example CLAUDE.md section:
+```markdown
+## Git Commit Conventions
+Based on analysis of this project's git history:
+- Format: [observed format pattern]
+- Tense: [imperative/past/present]
+- Length: [typical subject line length]
+- Ticket codes: [if used, note the pattern like "PROJ-123:" or "ABC-456 "]
+- Other patterns: [any other observed conventions]
+
+Note: If ticket/task codes are used, always ask the user for the specific code rather than inventing one.
+```
\ No newline at end of file
diff --git a/.claude/commands/git/ignore-init.md b/.claude/commands/git/ignore-init.md
new file mode 100644
index 0000000..5eaf142
--- /dev/null
+++ b/.claude/commands/git/ignore-init.md
@@ -0,0 +1,62 @@
+---
+description: Initialize .gitignore with Claude Code specific patterns
+allowed-tools: Read, Edit, Write, Bash(echo:*), Bash(cat:*), Bash(test:*)
+category: workflow
+---
+
+# Initialize .gitignore for Claude Code
+
+Set up or update the project's .gitignore file with Claude Code specific patterns.
+
+## Core Claude Code Files to Ignore
+
+Ensure these Claude Code local configuration files are ignored:
+- `CLAUDE.local.md` - Local AI assistant instructions (root)
+- `.claude/settings.local.json` - Personal Claude Code settings
+- `.mcp.local.json` - Local MCP server configuration (root)
+
+## Development Patterns
+
+These common development artifacts will also be added:
+- `temp/` - Temporary working directory
+- `temp-*/` - Temporary directories with prefix
+- `test-*/` - Test directories with prefix
+- `debug-*.js` - Debug scripts
+- `test-*.js` - Test scripts
+- `*-test.js` - Test files with suffix
+- `*-debug.js` - Debug files with suffix
+
+## Current .gitignore Status
+
+!`[ -f .gitignore ] && echo "EXISTS: .gitignore found" && echo "---CONTENTS---" && cat .gitignore || echo "MISSING: No .gitignore file found"`
+
+## Task
+
+Based on the above status:
+1. Create `.gitignore` if it doesn't exist
+2. Add all patterns that aren't already present
+3. Preserve existing entries and comments
+4. Report what was added
+
+## Patterns to Add
+
+```gitignore
+# Claude Code local files
+CLAUDE.local.md
+.claude/settings.local.json
+.mcp.local.json
+
+# Temporary and debug files
+temp/
+temp-*/
+test-*/
+debug-*.js
+test-*.js
+*-test.js
+*-debug.js
+```
+
+Implement this by:
+1. Using the gitignore status above to determine what's missing
+2. Adding missing patterns with appropriate comments
+3. Preserving the existing file structure and entries
\ No newline at end of file
diff --git a/.claude/commands/git/push.md b/.claude/commands/git/push.md
new file mode 100644
index 0000000..5d6c133
--- /dev/null
+++ b/.claude/commands/git/push.md
@@ -0,0 +1,47 @@
+---
+description: Intelligently push commits to remote with safety checks and insights
+category: workflow
+allowed-tools: Bash(git:*), Task
+---
+
+Push commits to remote repository with appropriate safety checks and branch management.
+
+## Git Expert Integration
+For complex push scenarios (force push requirements, diverged branches, upstream conflicts, protected branch workflows), consider using the Task tool with `git-expert` subagent for specialized git expertise.
+
+## Efficiency Note:
+Be concise. Use single bash calls where possible. Skip verbose explanations and intermediate status messages. Execute the push directly if safe, show only the result.
+
+## Instructions for Claude:
+
+1. Run safety checks in a single bash call:
+!git status --porcelain=v1 && echo "---" && git branch -vv | grep "^\*" && echo "---" && git remote -v | head -2 && echo "---" && git log --oneline @{u}..HEAD 2>/dev/null
+
+Parse output to check:
+- Any uncommitted changes (warn if present)
+- Current branch and tracking info
+- Remote repository URL
+- Commits to be pushed
+
+2. If safe to push (no uncommitted changes), execute push immediately:
+ - For tracked branch: `git push`
+ - For new branch: `git push -u origin [branch-name]`
+ - If behind remote: Stop and suggest `git pull --rebase`
+
+3. Show only the final result:
+ - If successful: Show the push output with ✅ emoji and success message
+ - If failed: Show error and suggest fix
+ - If unsafe: Show what needs to be done first
+
+4. Special cases to handle:
+ - Diverged branches: Suggest rebase or merge strategy
+ - No upstream branch: Use -u flag
+ - Force push needed: Warn strongly, require confirmation
+ - Protected branch: Remind about PR workflow
+
+Example concise output:
+- Skip: "Let me check if it's safe to push"
+- Skip: "I'll analyze your branch status"
+- Skip: "Ready to push X commits"
+- Skip: "Executing push..."
+- Just show the push result directly
\ No newline at end of file
diff --git a/.claude/commands/git/status.md b/.claude/commands/git/status.md
new file mode 100644
index 0000000..9970b9f
--- /dev/null
+++ b/.claude/commands/git/status.md
@@ -0,0 +1,42 @@
+---
+description: Intelligently analyze git status and provide insights about current project state
+category: workflow
+allowed-tools: Bash(git:*), Task
+---
+
+Analyze the current git status and provide an intelligent summary of what's happening in the project.
+
+## Git Expert Integration
+For complex git analysis scenarios (merge conflicts, complex branch states, repository issues), consider using the Task tool with `git-expert` subagent for specialized git expertise.
+
+## Efficiency Note:
+Be concise. Skip verbose explanations of what commands you're running. Focus on the actual status results.
+
+## Instructions for Claude:
+
+1. Run all git commands in a single bash call for speed:
+!git status --porcelain=v1 && echo "---" && git diff --stat 2>/dev/null && echo "---" && git branch -vv | grep "^\*" && echo "---" && git log --oneline -1 && echo "---" && git diff --cached --stat 2>/dev/null
+
+Note: The output will be separated by "---" markers. Parse each section accordingly.
+
+2. Provide results directly without explaining the process:
+ - **Summary**: Brief overview of the current state
+ - **Modified Files**: Group by type (docs, code, tests, config)
+ - **Uncommitted Changes**: What's been changed and why it might matter
+ - **Branch Status**: Relationship to remote branch
+ - **Suggestions**: What actions might be appropriate
+
+Provide insights about:
+- Whether changes appear related or should be separate commits
+- If any critical files are modified (package.json, config files, etc.)
+- Whether the working directory is clean for operations like rebasing
+- Any patterns in the modifications (e.g., all test files, all docs, etc.)
+- If there are stashed changes that might be forgotten
+
+Make the output concise but informative, focusing on what matters most to the developer.
+
+Example of concise output:
+- Skip: "I'll analyze the current git status for you."
+- Skip: "Let me gather the details efficiently:"
+- Skip: "I see there are changes. Let me gather the details:"
+- Just show the results directly
\ No newline at end of file
diff --git a/.claude/commands/research.md b/.claude/commands/research.md
new file mode 100644
index 0000000..62c7f15
--- /dev/null
+++ b/.claude/commands/research.md
@@ -0,0 +1,199 @@
+---
+description: Deep research with parallel subagents and automatic citations
+argument-hint: "<research topic or question>"
+allowed-tools: Task, Read, Write, Edit, Grep, Glob
+category: workflow
+model: sonnet
+---
+
+# 🔬 Research Command
+
+Conduct deep, parallel research on any topic using multiple specialized subagents.
+
+## Research Query
+$ARGUMENTS
+
+## Research Process
+
+### Phase 1: Query Classification (CRITICAL FIRST STEP)
+
+**PRIMARY DECISION: Classify the query type to determine research strategy**
+
+#### Query Types:
+
+1. **BREADTH-FIRST QUERIES** (Wide exploration)
+ - Characteristics: Multiple independent aspects, survey questions, comparisons
+ - Examples: "Compare all major cloud providers", "List board members of S&P 500 tech companies"
+ - Strategy: 5-10 parallel subagents, each exploring different aspects
+ - Each subagent gets narrow, specific tasks
+
+2. **DEPTH-FIRST QUERIES** (Deep investigation)
+ - Characteristics: Single topic requiring thorough understanding, technical deep-dives
+ - Examples: "How does transformer architecture work?", "Explain quantum entanglement"
+ - Strategy: 2-4 subagents with overlapping but complementary angles
+ - Each subagent explores the same topic from different perspectives
+
+3. **SIMPLE FACTUAL QUERIES** (Quick lookup)
+ - Characteristics: Single fact, recent event, specific data point
+ - Examples: "When was GPT-4 released?", "Current CEO of Microsoft"
+ - Strategy: 1-2 subagents for verification
+ - Focus on authoritative sources
+
+#### After Classification, Determine:
+- **Resource Allocation**: Based on query type (1-10 subagents)
+- **Search Domains**: Academic, technical, news, or general web
+- **Depth vs Coverage**: How deep vs how wide to search
+
+### Phase 2: Parallel Research Execution
+
+Based on the query classification, spawn appropriate research subagents IN A SINGLE MESSAGE for true parallelization.
+
+**CRITICAL: Parallel Execution Pattern**
+Use multiple Task tool invocations in ONE message, ALL with subagent_type="research-expert".
+
+**MANDATORY: Start Each Task Prompt with Mode Indicator**
+You MUST begin each task prompt with one of these trigger phrases to control subagent behavior:
+
+- **Quick Verification (3-5 searches)**: Start with "Quick check:", "Verify:", or "Confirm:"
+- **Focused Investigation (5-10 searches)**: Start with "Investigate:", "Explore:", or "Find details about:"
+- **Deep Research (10-15 searches)**: Start with "Deep dive:", "Comprehensive:", "Thorough research:", or "Exhaustive:"
+
+Example Task invocations:
+```
+Task(description="Academic research", prompt="Deep dive: Find all academic papers on transformer architectures from 2017-2024", subagent_type="research-expert")
+Task(description="Quick fact check", prompt="Quick check: Verify the release date of GPT-4", subagent_type="research-expert")
+Task(description="Company research", prompt="Investigate: OpenAI's current product offerings and pricing", subagent_type="research-expert")
+```
+
+This ensures all subagents work simultaneously AND understand the expected search depth through these trigger words.
+
+**Filesystem Artifact Pattern**:
+Each subagent saves full report to `/tmp/research_[timestamp]_[topic].md` and returns only:
+- File path to the full report
+- Brief 2-3 sentence summary
+- Key topics covered
+- Number of sources found
+
+### Phase 3: Synthesis from Filesystem Artifacts
+
+**CRITICAL: Subagents Return File References, Not Full Reports**
+
+Each subagent will:
+1. Write their full report to `/tmp/research_*.md`
+2. Return only a summary with the file path
+
+Synthesis Process:
+1. **Collect File References**: Gather all `/tmp/research_*.md` paths from subagent responses
+2. **Read Reports**: Use Read tool to access each research artifact
+3. **Merge Findings**:
+ - Identify common themes across reports
+ - Deduplicate overlapping information
+ - Preserve unique insights from each report
+4. **Consolidate Sources**:
+ - Merge all cited sources
+ - Remove duplicate URLs
+ - Organize by relevance and credibility
+5. **Write Final Report**: Save synthesized report to `/tmp/research_final_[timestamp].md`
+
+### Phase 4: Final Report Structure
+
+The synthesized report (written to file) must include:
+
+# Research Report: [Query Topic]
+
+## Executive Summary
+[3-5 paragraph overview synthesizing all findings]
+
+## Key Findings
+1. **[Major Finding 1]** - Synthesized from multiple subagent reports
+2. **[Major Finding 2]** - Cross-referenced and verified
+3. **[Major Finding 3]** - With supporting evidence from multiple sources
+
+## Detailed Analysis
+
+### [Theme 1 - Merged from Multiple Reports]
+[Comprehensive synthesis integrating all relevant subagent findings]
+
+### [Theme 2 - Merged from Multiple Reports]
+[Comprehensive synthesis integrating all relevant subagent findings]
+
+## Sources & References
+[Consolidated list of all sources from all subagents, organized by type]
+
+## Research Methodology
+- Query Classification: [Breadth/Depth/Simple]
+- Subagents Deployed: [Number and focus areas]
+- Total Sources Analyzed: [Combined count]
+- Research Artifacts: [List of all /tmp/research_*.md files]
+
+## Research Principles
+
+### Quality Heuristics
+- Start with broad searches, then narrow based on findings
+- Prefer authoritative sources (academic papers, official docs, primary sources)
+- Cross-reference claims across multiple sources
+- Identify gaps and contradictions in available information
+
+### Effort Scaling by Query Type
+- **Simple Factual**: 1-2 subagents, 3-5 searches each (verification focus)
+- **Depth-First**: 2-4 subagents, 10-15 searches each (deep understanding)
+- **Breadth-First**: 5-10 subagents, 5-10 searches each (wide coverage)
+- **Maximum Complexity**: 10 subagents (Claude Code limit)
+
+### Parallelization Strategy
+- Spawn all initial subagents simultaneously for speed
+- Each subagent performs multiple parallel searches
+- 90% time reduction compared to sequential searching
+- Independent exploration prevents bias and groupthink
+
+## Execution
+
+**Step 1: CLASSIFY THE QUERY** (Breadth-first, Depth-first, or Simple factual)
+
+**Step 2: LAUNCH APPROPRIATE SUBAGENT CONFIGURATION**
+
+### Example Execution Patterns:
+
+**BREADTH-FIRST Example:** "Compare AI capabilities of Google, OpenAI, and Anthropic"
+- Classification: Breadth-first (multiple independent comparisons)
+- Launch 6 subagents in ONE message with focused investigation mode:
+ - Task 1: "Investigate: Google's current AI products, models, and capabilities"
+ - Task 2: "Investigate: OpenAI's current AI products, models, and capabilities"
+ - Task 3: "Investigate: Anthropic's current AI products, models, and capabilities"
+ - Task 4: "Explore: Performance benchmarks comparing models from all three companies"
+ - Task 5: "Investigate: Business models, pricing, and market positioning for each"
+ - Task 6: "Quick check: Latest announcements and news from each company (2024)"
+
+**DEPTH-FIRST Example:** "How do transformer models achieve attention?"
+- Classification: Depth-first (single topic, deep understanding)
+- Launch 3 subagents in ONE message with deep research mode:
+ - Task 1: "Deep dive: Mathematical foundations and formulas behind attention mechanisms"
+ - Task 2: "Comprehensive: Visual diagrams and step-by-step walkthrough of self-attention"
+ - Task 3: "Thorough research: Seminal papers including 'Attention is All You Need' and subsequent improvements"
+
+**SIMPLE FACTUAL Example:** "When was Claude 3 released?"
+- Classification: Simple factual query
+- Launch 1 subagent with verification mode:
+ - Task 1: "Quick check: Verify the official release date of Claude 3 from Anthropic"
+
+Each subagent works independently, writes findings to `/tmp/research_*.md`, and returns a lightweight summary.
+
+### Step 3: SYNTHESIZE AND DELIVER
+
+After all subagents complete:
+1. Read all research artifact files from `/tmp/research_*.md`
+2. Synthesize findings into comprehensive report
+3. Write final report to `/tmp/research_final_[timestamp].md`
+4. Provide user with:
+ - Executive summary (displayed directly)
+ - Path to full report file
+ - Key insights and recommendations
+
+**Benefits of Filesystem Artifacts**:
+- 90% reduction in token usage (passing paths vs full reports)
+- No information loss during synthesis
+- Preserves formatting and structure
+- Enables selective reading of sections
+- Allows user to access individual subagent reports if needed
+
+Now executing query classification and multi-agent research...
\ No newline at end of file
diff --git a/.claude/commands/spec/create.md b/.claude/commands/spec/create.md
new file mode 100644
index 0000000..bf80000
--- /dev/null
+++ b/.claude/commands/spec/create.md
@@ -0,0 +1,202 @@
+---
+allowed-tools: Read, Write, Grep, Glob, TodoWrite, Task, mcp__context7__resolve-library-id, mcp__context7__get-library-docs, Bash(ls:*), Bash(echo:*), Bash(command:*), Bash(npm:*), Bash(claude:*)
+description: Generate a spec file for a new feature or bugfix
+category: validation
+argument-hint: "<feature or bugfix description>"
+---
+
+## Context
+- Existing specs: !`ls -la specs/ 2>/dev/null || echo "No specs directory found"`
+
+## Optional: Enhanced Library Documentation Support
+
+Context7 MCP server provides up-to-date library documentation for better spec creation.
+
+Check if Context7 is available: !`command -v context7-mcp || echo "NOT_INSTALLED"`
+
+If NOT_INSTALLED and the feature involves external libraries, offer to enable Context7:
+```
+████ Optional: Enable Context7 for Enhanced Documentation ████
+
+Context7 provides up-to-date library documentation to improve spec quality.
+This is optional but recommended when working with external libraries.
+
+Would you like me to install Context7 for you? I can:
+ 1. Install globally: npm install -g @upstash/context7-mcp
+ 2. Add to Claude Code: claude mcp add context7 context7-mcp
+
+Or you can install it manually later if you prefer.
+```
+
+If user agrees to installation:
+- Run: `npm install -g @upstash/context7-mcp`
+- Then run: `claude mcp add context7 context7-mcp`
+- Verify installation and proceed with enhanced documentation support
+
+If user declines or wants to continue without it:
+- Proceed with spec creation using existing knowledge
+
+## FIRST PRINCIPLES PROBLEM ANALYSIS
+
+Before defining any solution, validate the problem from first principles:
+
+### Core Problem Investigation
+- **Strip Away Solution Assumptions**: What is the core problem, completely separate from any proposed solution?
+- **Root Cause Analysis**: Why does this problem exist? What created this need?
+- **Goal Decomposition**: What are we fundamentally trying to achieve for users/business?
+- **Success Definition**: What would success look like if we had unlimited resources and no constraints?
+- **Alternative Approaches**: Could we achieve the underlying goal without building anything? Are there simpler approaches?
+
+### Problem Validation Questions
+- **Real vs. Perceived**: Is this solving a real problem that users actually have?
+- **Assumption Audit**: What assumptions about user needs, technical constraints, or business requirements might be wrong?
+- **Value Proposition**: What is the minimum viable solution that delivers core value?
+- **Scope Validation**: Are we solving the right problem, or treating symptoms of a deeper issue?
+
+**CRITICAL: Only proceed if the core problem is clearly defined and validated. If uncertain, request additional context.**
+
+## MANDATORY PRE-CREATION VERIFICATION
+
+After validating the problem from first principles, complete these technical checks:
+
+### 1. Context Discovery Phase
+- Search existing codebase for similar features/specs using the Task tool
+- **Use specialized subagents** when research involves specific domains (TypeScript, React, testing, databases, etc.)
+- Run `claudekit list agents` to see available specialized experts
+- Match research requirements to expert domains for optimal analysis
+- Use general-purpose approach only when no specialized expert fits
+- Identify potential conflicts or duplicates
+- Verify feature request is technically feasible
+- Document any missing prerequisites
+
+### 2. Request Validation
+- Confirm request is well-defined and actionable
+- If vague or incomplete, STOP and ask clarifying questions
+- Validate scope is appropriate (not too broad/narrow)
+
+### 3. Quality Gate
+- Only proceed if you have 80%+ confidence in implementation approach
+- If uncertain, request additional context before continuing
+- Document any assumptions being made
+
+**CRITICAL: If any validation fails, STOP immediately and request clarification.**
+
+## Your task
+
+Create a comprehensive specification document in the `specs/` folder for the following feature/bugfix: $ARGUMENTS
+
+First, analyze the request to understand:
+1. Whether this is a feature or bugfix
+2. The scope and complexity
+3. Related existing code/features
+4. External libraries/frameworks involved
+
+If the feature involves external libraries or frameworks AND Context7 is available:
+- Use `mcp__context7__resolve-library-id` to find the library
+- Use `mcp__context7__get-library-docs` to get up-to-date documentation
+- Reference official patterns and best practices from the docs
+
+## END-TO-END INTEGRATION ANALYSIS
+
+Before writing the detailed specification, map the complete system impact:
+
+### System Integration Mapping
+- **Data Flow Tracing**: Trace data flow from user action → processing → storage → response
+- **Service Dependencies**: Identify all affected services, APIs, databases, and external systems
+- **Integration Points**: Map every place this feature touches existing functionality
+- **Cross-System Impact**: How does this change affect other teams, services, or user workflows?
+
+### Complete User Journey Analysis
+- **Entry Points**: How do users discover and access this feature?
+- **Step-by-Step Flow**: What is the complete sequence from start to finish?
+- **Error Scenarios**: What happens when things go wrong at each step?
+- **Exit Points**: How does this connect to what users do next?
+
+### Deployment and Rollback Considerations
+- **Migration Path**: How do we get from current state to new state?
+- **Rollback Strategy**: What if we need to undo this feature?
+- **Deployment Dependencies**: What must be deployed together vs. independently?
+- **Data Migration**: How do we handle existing data during the transition?
+
+**VERIFICATION: Ensure you can trace the complete end-to-end flow before proceeding to detailed specification.**
+
+Then create a spec document that includes:
+
+1. **Title**: Clear, descriptive title of the feature/bugfix
+2. **Status**: Draft/Under Review/Approved/Implemented
+3. **Authors**: Your name and date
+4. **Overview**: Brief description and purpose
+5. **Background/Problem Statement**: Why this feature is needed or what problem it solves
+6. **Goals**: What we aim to achieve (bullet points)
+7. **Non-Goals**: What is explicitly out of scope (bullet points)
+8. **Technical Dependencies**:
+ - External libraries/frameworks used
+ - Version requirements
+ - Links to relevant documentation
+9. **Detailed Design**:
+ - Architecture changes
+ - Implementation approach
+ - Code structure and file organization
+ - API changes (if any)
+ - Data model changes (if any)
+ - Integration with external libraries (with examples from docs)
+10. **User Experience**: How users will interact with this feature
+11. **Testing Strategy**:
+ - Unit tests
+ - Integration tests
+ - E2E tests (if needed)
+ - Mocking strategies for external dependencies
+ - **Test documentation**: Each test should include a purpose comment explaining why it exists and what it validates
+ - **Meaningful tests**: Avoid tests that always pass regardless of behavior
+ - **Edge case testing**: Include tests that can fail to reveal real issues
+12. **Performance Considerations**: Impact on performance and mitigation strategies
+13. **Security Considerations**: Security implications and safeguards
+14. **Documentation**: What documentation needs to be created/updated
+15. **Implementation Phases**:
+ - Phase 1: MVP/Core functionality
+ - Phase 2: Enhanced features (if applicable)
+ - Phase 3: Polish and optimization (if applicable)
+16. **Open Questions**: Any unresolved questions or decisions
+17. **References**:
+ - Links to related issues, PRs, or documentation
+ - External library documentation links
+ - Relevant design patterns or architectural decisions
+
+Follow these guidelines:
+- Use Markdown format similar to existing specs
+- Be thorough and technical but also accessible
+- Include code examples where helpful (especially from library docs)
+- Consider edge cases and error scenarios
+- Reference existing project patterns and conventions
+- Use diagrams if they would clarify complex flows (using ASCII art or mermaid)
+- When referencing external libraries, include version-specific information
+- Do NOT include time or effort estimations (no "X days", "Y hours", or complexity estimates)
+
+Name the spec file descriptively based on the feature:
+- Features: `feat-{kebab-case-name}.md`
+- Bugfixes: `fix-{issue-number}-{brief-description}.md`
+
+## PROGRESSIVE VALIDATION CHECKPOINTS
+
+After completing each major section:
+
+- **Problem Statement**: Verify it's specific and measurable
+- **Technical Requirements**: Confirm all dependencies are available
+- **Implementation Plan**: Validate approach is technically sound
+- **Testing Strategy**: Ensure testability of all requirements
+
+At each checkpoint, if quality is insufficient, revise before proceeding.
+
+## FINAL SPECIFICATION VALIDATION
+
+Before marking complete:
+1. **Completeness Check**: All 17 sections meaningfully filled
+2. **Consistency Check**: No contradictions between sections
+3. **Implementability Check**: Someone could build this from the spec
+4. **Quality Score**: Rate spec 1-10, only accept 8+
+
+Before writing, use the Task tool to search for:
+- Related existing features or code
+- Similar patterns in the codebase
+- Potential conflicts or dependencies
+- Current library versions in package.json or equivalent
\ No newline at end of file
diff --git a/.claude/commands/spec/decompose.md b/.claude/commands/spec/decompose.md
new file mode 100644
index 0000000..878b8ca
--- /dev/null
+++ b/.claude/commands/spec/decompose.md
@@ -0,0 +1,535 @@
+---
+description: Break down a validated specification into actionable implementation tasks
+category: validation
+allowed-tools: Read, Task, Write, TodoWrite, Bash(mkdir:*), Bash(cat:*), Bash(grep:*), Bash(echo:*), Bash(basename:*), Bash(date:*), Bash(claudekit:status stm), Bash(stm:*)
+argument-hint: "<spec-file-path>"
+---
+
+# Decompose Specification into Tasks
+
+Decompose the specification at: $ARGUMENTS
+
+## Process Overview
+
+This command takes a validated specification and breaks it down into:
+1. Clear, actionable tasks with dependencies
+2. Implementation phases and milestones
+3. Testing and validation requirements
+4. Documentation needs
+
+STM status: !`claudekit status stm`
+
+## ⚠️ CRITICAL: Content Preservation Requirements
+
+**THIS IS THE MOST IMPORTANT PART**: When creating STM tasks, you MUST copy ALL content from the task breakdown into the STM tasks. Do NOT summarize or reference the spec - include the ACTUAL CODE and details.
+
+## Pre-Flight Checklist
+
+Before creating any STM tasks, confirm your understanding:
+- [ ] I will NOT write summaries like "Create X as specified in spec"
+- [ ] I will COPY all code blocks from the task breakdown into STM --details
+- [ ] I will USE heredocs or temp files for multi-line content
+- [ ] I will INCLUDE complete implementations, not references
+- [ ] Each STM task will be self-contained with ALL details from the breakdown
+
+**If you find yourself typing phrases like "as specified", "from spec", or "see specification" - STOP and copy the actual content instead!**
+
+## Instructions for Claude:
+
+0. **Task Management System**:
+ - Check the STM_STATUS output above
+ - If status is "Available but not initialized", run: `stm init`
+ - If status is "Available and initialized", use STM for task management
+ - If status is "Not installed", fall back to TodoWrite
+
+1. **Read and Validate Specification**:
+ - Read the specified spec file
+ - Verify it's a valid specification (has expected sections)
+ - Extract implementation phases and technical details
+
+2. **Analyze Specification Components**:
+ - Identify major features and components
+ - Extract technical requirements
+ - Note dependencies between components
+ - Identify testing requirements
+ - Document success criteria
+
+3. **Create Task Breakdown**:
+
+ Break down the specification into concrete, actionable tasks.
+
+ Key principles:
+ - Each task should have a single, clear objective
+ - **PRESERVE ALL CONTENT**: Copy implementation details, code blocks, and examples verbatim from the spec
+ - Define clear acceptance criteria with specific test scenarios
+ - Include tests as part of each task
+ - Document dependencies between tasks
+ * Write meaningful tests that can fail to reveal real issues
+ * Follow project principle: "When tests fail, fix the code, not the test"
+ - Create foundation tasks first, then build features on top
+ - Each task should be self-contained with all necessary details
+
+ **CRITICAL REQUIREMENT**: When creating tasks, you MUST preserve:
+ - Complete code examples (including full functions, not just snippets)
+ - All technical requirements and specifications
+ - Detailed implementation steps
+ - Configuration examples
+ - Error handling requirements
+ - All acceptance criteria and test scenarios
+
+ Think of each task as a complete mini-specification that contains everything needed to implement it without referring back to the original spec.
+
+ ## 📋 THE TWO-STEP PROCESS YOU MUST FOLLOW:
+
+ **Step 1**: Create the task breakdown DOCUMENT with all details
+ **Step 2**: Copy those SAME details into STM tasks
+
+ The task breakdown document is NOT just for reference - it's the SOURCE for your STM task content!
+
+ Task structure:
+ - Foundation tasks: Core infrastructure (database, frameworks, testing setup)
+ - Feature tasks: Complete vertical slices including all layers
+ - Testing tasks: Unit, integration, and E2E tests
+ - Documentation tasks: API docs, user guides, code comments
+
+4. **Generate Task Document**:
+
+ Create a comprehensive task breakdown document:
+
+ ```markdown
+ # Task Breakdown: [Specification Name]
+ Generated: [Date]
+ Source: [spec-file]
+
+ ## Overview
+ [Brief summary of what's being built]
+
+ ## Phase 1: Foundation
+
+ ### Task 1.1: [Task Title]
+ **Description**: One-line summary of what needs to be done
+ **Size**: Small/Medium/Large
+ **Priority**: High/Medium/Low
+ **Dependencies**: None
+ **Can run parallel with**: Task 1.2, 1.3
+
+ **Technical Requirements**:
+ - [All technical details from spec]
+ - [Specific library versions]
+ - [Code examples from spec]
+
+ **Implementation Steps**:
+ 1. [Detailed step from spec]
+ 2. [Another step with specifics]
+ 3. [Continue with all steps]
+
+ **Acceptance Criteria**:
+ - [ ] [Specific criteria from spec]
+ - [ ] Tests written and passing
+ - [ ] [Additional criteria]
+
+ ## Phase 2: Core Features
+ [Continue pattern...]
+ ```
+
+ Example task breakdown:
+ ```markdown
+ ### Task 2.3: Implement file system operations with backup support
+ **Description**: Build filesystem.ts module with Unix-focused operations and backup support
+ **Size**: Large
+ **Priority**: High
+ **Dependencies**: Task 1.1 (TypeScript setup), Task 1.2 (Project structure)
+ **Can run parallel with**: Task 2.4 (Config module)
+
+ **Source**: specs/feat-modernize-setup-installer.md
+
+ **Technical Requirements**:
+ - Path validation: Basic checks for reasonable paths
+ - Permission checks: Verify write permissions before operations
+ - Backup creation: Simple backup before overwriting files
+ - Error handling: Graceful failure with helpful messages
+ - Unix path handling: Use path.join, os.homedir(), standard Unix permissions
+
+ **Functions to implement**:
+ - validateProjectPath(input: string): boolean - Basic path validation
+ - ensureDirectoryExists(path: string): Promise<void>
+ - copyFileWithBackup(source: string, target: string, backup: boolean): Promise<void>
+ - setExecutablePermission(filePath: string): Promise<void> - chmod 755
+ - needsUpdate(source: string, target: string): Promise<boolean> - SHA-256 comparison
+ - getFileHash(filePath: string): Promise<string> - SHA-256 hash generation
+
+ **Implementation example from spec**:
+ ```typescript
+ async function needsUpdate(source: string, target: string): Promise<boolean> {
+ if (!await fs.pathExists(target)) return true;
+
+ const sourceHash = await getFileHash(source);
+ const targetHash = await getFileHash(target);
+
+ return sourceHash !== targetHash;
+ }
+ ```
+
+ **Acceptance Criteria**:
+ - [ ] All file operations handle Unix paths correctly
+ - [ ] SHA-256 based idempotency checking implemented
+ - [ ] Backup functionality creates timestamped backups
+ - [ ] Executable permissions set correctly for hooks (755)
+ - [ ] Path validation prevents directory traversal
+ - [ ] Tests: All operations work on macOS/Linux with proper error handling
+ ```
+
+5. **Create Task Management Entries**:
+
+ ## 🚨 STOP AND READ: Common Mistake vs Correct Approach
+
+ ❌ **WRONG - What NOT to do**:
+ ```bash
+ stm add "[P1.3] Implement common hook utilities" \
+ --description "Create shared utilities module for all hooks" \
+ --details "Create cli/hooks/utils.ts with readStdin() with 1-second timeout, findProjectRoot() using git rev-parse, detectPackageManager() checking lock files" \
+ --validation "readStdin with timeout. Project root discovery. Package manager detection."
+ ```
+
+ ✅ **CORRECT - What you MUST do**:
+ ```bash
+ # For each task in the breakdown, find the corresponding section and COPY ALL its content
+ # Use temporary files for large content to preserve formatting
+
+ cat > /tmp/task-details.txt << 'EOF'
+ Create cli/hooks/utils.ts with the following implementations:
+
+ ```typescript
+ import { exec } from 'child_process';
+ import { promisify } from 'util';
+ import * as fs from 'fs-extra';
+ import * as path from 'path';
+
+ const execAsync = promisify(exec);
+
+ // Standard input reader
+ export async function readStdin(): Promise<string> {
+ return new Promise((resolve) => {
+ let data = '';
+ process.stdin.on('data', chunk => data += chunk);
+ process.stdin.on('end', () => resolve(data));
+ setTimeout(() => resolve(''), 1000); // Timeout fallback
+ });
+ }
+
+ // Project root discovery
+ export async function findProjectRoot(startDir: string = process.cwd()): Promise<string> {
+ try {
+ const { stdout } = await execAsync('git rev-parse --show-toplevel', { cwd: startDir });
+ return stdout.trim();
+ } catch {
+ return process.cwd();
+ }
+ }
+
+ // [Include ALL other functions from the task breakdown...]
+ ```
+
+ Technical Requirements:
+ - Standard input reader with timeout
+ - Project root discovery using git
+ - Package manager detection (npm/yarn/pnpm)
+ - Command execution wrapper
+ - Error formatting helper
+ - Tool availability checker
+ EOF
+
+ stm add "[P1.3] Implement common hook utilities" \
+ --description "Create shared utilities module for all hooks with stdin reader, project root discovery, package manager detection, command execution wrapper, error formatting, and tool availability checking" \
+ --details "$(cat /tmp/task-details.txt)" \
+ --validation "readStdin with 1-second timeout. Project root discovery via git. Package manager detection for npm/yarn/pnpm. Command execution with timeout and output capture. Error formatting follows BLOCKED: pattern. Tool availability checker works." \
+ --tags "phase1,infrastructure,utilities"
+
+ rm /tmp/task-details.txt
+ ```
+
+ **Remember**: The task breakdown document you created has ALL the implementation details. Your job is to COPY those details into STM, not summarize them!
+
+ ```bash
+ # Example: Creating a task with complete specification details
+
+ # Method 1: Using heredocs for multi-line content
+ stm add "Implement auto-checkpoint hook logic" \
+ --description "Build the complete auto-checkpoint functionality with git integration to create timestamped git stashes on Stop events" \
+ --details "$(cat <<'EOF'
+ Technical Requirements:
+ - Check if current directory is git repository using git status
+ - Detect uncommitted changes using git status --porcelain
+ - Create timestamped stash with configurable prefix from config
+ - Apply stash to restore working directory after creation
+ - Handle exit codes properly (0 for success, 1 for errors)
+
+ Implementation from specification:
+ ```typescript
+ const hookName = process.argv[2];
+ if (hookName !== 'auto-checkpoint') {
+ console.error(`Unknown hook: ${hookName}`);
+ process.exit(1);
+ }
+
+ const hookConfig = config.hooks?.['auto-checkpoint'] || {};
+ const prefix = hookConfig.prefix || 'claude';
+
+ const gitStatus = spawn('git', ['status', '--porcelain'], {
+ stdio: ['ignore', 'pipe', 'pipe']
+ });
+
+ let stdout = '';
+ gitStatus.stdout.on('data', (data) => stdout += data);
+
+ gitStatus.on('close', (code) => {
+ if (code !== 0) {
+ console.log('Not a git repository, skipping checkpoint');
+ process.exit(0);
+ }
+
+ if (!stdout.trim()) {
+ console.log('No changes to checkpoint');
+ process.exit(0);
+ }
+
+ const timestamp = new Date().toISOString().replace(/[:.]/g, '-');
+ const message = `${prefix}-checkpoint-${timestamp}`;
+
+ const stash = spawn('git', ['stash', 'push', '-m', message], {
+ stdio: ['ignore', 'pipe', 'pipe']
+ });
+
+ stash.on('close', (stashCode) => {
+ if (stashCode !== 0) {
+ console.error('Failed to create checkpoint');
+ process.exit(1);
+ }
+
+ spawn('git', ['stash', 'apply'], {
+ stdio: 'ignore'
+ }).on('close', () => {
+ console.log(`✅ Checkpoint created: ${message}`);
+ process.exit(0);
+ });
+ });
+ });
+ ```
+
+ Key implementation notes:
+ - Use child_process.spawn for git commands
+ - Capture stdout to check for changes
+ - Generate ISO timestamp and sanitize for git message
+ - Chain git stash push and apply operations
+ EOF
+ )" \
+ --validation "$(cat <<'EOF'
+ - [ ] Correctly identifies git repositories
+ - [ ] Detects uncommitted changes using git status --porcelain
+ - [ ] Creates checkpoint with format: ${prefix}-checkpoint-${timestamp}
+ - [ ] Restores working directory after stash
+ - [ ] Exits with code 0 on success, 1 on error
+ - [ ] Respects configured prefix from .claudekit/config.json
+ - [ ] Handles missing config file gracefully
+
+ Test scenarios:
+ 1. Run in non-git directory - should exit 0
+ 2. Run with no changes - should exit 0
+ 3. Run with changes - should create checkpoint
+ 4. Run with custom config - should use custom prefix
+ EOF
+ )" \
+ --tags "phase2,core,high-priority,large" \
+ --status pending \
+ --deps "35,36"
+
+ # Method 2: Using temporary files for very large content
+ cat > /tmp/stm-details.txt << 'EOF'
+ [Full technical requirements and implementation details from spec...]
+ EOF
+
+ cat > /tmp/stm-validation.txt << 'EOF'
+ [Complete acceptance criteria and test scenarios...]
+ EOF
+
+ stm add "Task title" \
+ --description "Brief what and why" \
+ --details "$(cat /tmp/stm-details.txt)" \
+ --validation "$(cat /tmp/stm-validation.txt)" \
+ --tags "appropriate,tags" \
+ --status pending
+
+ rm /tmp/stm-details.txt /tmp/stm-validation.txt
+ ```
+
+ **Important STM field usage**:
+ - `--description`: Brief what & why (1-2 sentences max)
+ - `--details`: Complete technical implementation including:
+ - All technical requirements from spec
+ - Full code examples with proper formatting (COPY from breakdown, don't summarize!)
+ - Implementation steps and notes
+ - Architecture decisions
+ - **MUST be self-contained** - someone should be able to implement the task without seeing the original spec
+ - `--validation`: Complete acceptance criteria including:
+ - All test scenarios
+ - Success/failure conditions
+ - Edge cases to verify
+
+ ## Content Size Guidelines
+
+ - **Small tasks (< 20 lines)**: Can use heredocs directly in command
+ - **Medium tasks (20-200 lines)**: Use temporary files to preserve formatting
+ - **Large tasks (> 200 lines)**: Always use temporary files
+ - **Tasks with code blocks**: MUST use heredocs or files (never inline)
+
+ Example for medium/large content:
+ ```bash
+ # Extract the full implementation from your task breakdown
+ cat > /tmp/stm-task-details.txt << 'EOF'
+ [PASTE THE ENTIRE "Technical Requirements" and "Implementation" sections from the task breakdown]
+ [Include ALL code blocks with proper formatting]
+ [Include ALL technical notes and comments]
+ EOF
+
+ cat > /tmp/stm-task-validation.txt << 'EOF'
+ [PASTE THE ENTIRE "Acceptance Criteria" section]
+ [Include ALL test scenarios]
+ EOF
+
+ stm add "[Task Title]" \
+ --description "[One line summary]" \
+ --details "$(cat /tmp/stm-task-details.txt)" \
+ --validation "$(cat /tmp/stm-task-validation.txt)" \
+ --tags "appropriate,tags" \
+ --deps "1,2,3"
+
+ rm /tmp/stm-task-*.txt
+ ```
+
+ If STM is not available, use TodoWrite:
+ ```javascript
+ [
+ {
+ id: "1",
+ content: "Phase 1: Set up TypeScript project structure",
+ status: "pending",
+ priority: "high"
+ },
+ {
+ id: "2",
+ content: "Phase 1: Configure build system with esbuild",
+ status: "pending",
+ priority: "high"
+ },
+ // ... additional tasks
+ ]
+ ```
+
+6. **Save Task Breakdown**:
+ - Save the detailed task breakdown document to `specs/[spec-name]-tasks.md`
+ - Create tasks in STM or TodoWrite for immediate tracking
+ - Generate a summary report showing:
+ - Total number of tasks
+ - Breakdown by phase
+ - Parallel execution opportunities
+ - Task management system used (STM or TodoWrite)
+
+## Output Format
+
+### Task Breakdown Document
+The generated markdown file includes:
+- Executive summary
+- Phase-by-phase task breakdown
+- Dependency graph
+- Risk assessment
+- Execution strategy
+
+### Task Management Integration
+Tasks are immediately available in STM (if installed) or TodoWrite for:
+- Progress tracking
+- Status updates
+- Blocking issue identification
+- Parallel work coordination
+- Dependency tracking (STM only)
+- Persistent storage across sessions (STM only)
+
+### Summary Report
+Displays:
+- Total tasks created
+- Tasks per phase
+- Critical path identification
+- Recommended execution order
+
+## Usage Examples
+
+```bash
+# Decompose a feature specification
+/spec:decompose specs/feat-user-authentication.md
+
+# Decompose a system enhancement spec
+/spec:decompose specs/feat-api-rate-limiting.md
+```
+
+## Success Criteria
+
+The decomposition is complete when:
+- ✅ Task breakdown document is saved to specs directory
+- ✅ All tasks are created in STM (if available) or TodoWrite for tracking
+- ✅ **Tasks preserve ALL implementation details from the spec including:**
+ - Complete code blocks and examples (not summarized)
+ - Full technical requirements and specifications
+ - Detailed step-by-step implementation instructions
+ - All configuration examples
+ - Complete acceptance criteria with test scenarios
+- ✅ Foundation tasks are identified and prioritized
+- ✅ Dependencies between tasks are clearly documented
+- ✅ All tasks include testing requirements
+- ✅ Parallel execution opportunities are identified
+- ✅ **STM tasks use all three fields properly:**
+ - `--description`: Brief what & why (1-2 sentences)
+ - `--details`: Complete technical implementation from spec (ACTUAL CODE, not references)
+ - `--validation`: Full acceptance criteria and test scenarios
+- ✅ **Quality check passed**: Running `stm show [any-task-id]` displays full code implementations
+- ✅ **No summary phrases**: Tasks don't contain "as specified", "from spec", or similar references
+
+## Post-Creation Validation
+
+After creating STM tasks, perform these checks:
+
+1. **Sample Task Review**:
+ ```bash
+ # Pick a random task and check it has full implementation
+ stm show [task-id] | grep -E "(as specified|from spec|see specification)"
+ # Should return NO matches - if it does, the task is incomplete
+ ```
+
+2. **Content Length Check**:
+ ```bash
+ # Implementation tasks should have substantial details
+ stm list --format json | jq '.[] | select(.details | length < 500) | {id, title}'
+ # Review any tasks with very short details - they likely need more content
+ ```
+
+3. **Code Block Verification**:
+ ```bash
+ # Check that tasks contain actual code blocks
+ stm grep "```" | wc -l
+ # Should show many matches for tasks with code implementations
+ ```
+
+## Integration with Other Commands
+
+- **Prerequisites**: Run `/spec:validate` first to ensure spec quality
+- **Next step**: Use `/spec:execute` to implement the decomposed tasks
+- **Progress tracking**:
+ - With STM: `stm list --pretty` or `stm list --status pending`
+ - With TodoWrite: Monitor task completion in session
+- **Quality checks**: Run `/validate-and-fix` after implementation
+
+## Best Practices
+
+1. **Task Granularity**: Keep tasks focused on single objectives
+2. **Dependencies**: Clearly identify blocking vs parallel work
+3. **Testing**: Include test tasks for each component
+4. **Documentation**: Add documentation tasks alongside implementation
+5. **Phases**: Group related tasks into logical phases
\ No newline at end of file
diff --git a/.claude/commands/spec/execute.md b/.claude/commands/spec/execute.md
new file mode 100644
index 0000000..1e7620c
--- /dev/null
+++ b/.claude/commands/spec/execute.md
@@ -0,0 +1,174 @@
+---
+description: Implement a validated specification by orchestrating concurrent agents
+category: validation
+allowed-tools: Task, Read, TodoWrite, Grep, Glob, Bash(claudekit:status stm), Bash(stm:*), Bash(jq:*)
+argument-hint: "<spec-file-path>"
+---
+
+# Implement Specification
+
+Implement the specification at: $ARGUMENTS
+
+STM status: !`claudekit status stm`
+
+## Pre-Execution Checks
+
+1. **Check Task Management**:
+ - If STM shows "Available but not initialized" → Run `stm init` first, then `/spec:decompose` to create tasks
+ - If STM shows "Available and initialized" → Use STM for tasks
+ - If STM shows "Not installed" → Use TodoWrite instead
+
+2. **Verify Specification**:
+ - Confirm spec file exists and is complete
+ - Check that required tools are available
+ - Stop if anything is missing or unclear
+
+## Implementation Process
+
+### 1. Analyze Specification
+
+Read the specification to understand:
+- What components need to be built
+- Dependencies between components
+- Testing requirements
+- Success criteria
+
+### 2. Load or Create Tasks
+
+**Using STM** (if available):
+```bash
+stm list --status pending -f json
+```
+
+**Using TodoWrite** (fallback):
+Create tasks for each component in the specification
+
+### 3. Implementation Workflow
+
+For each task, follow this cycle:
+
+**Available Agents:**
+!`claudekit list agents`
+
+#### Step 1: Implement
+
+Launch appropriate specialist agent:
+
+```
+Task tool:
+- description: "Implement [component name]"
+- subagent_type: [choose specialist that matches the task]
+- prompt: |
+ First run: stm show [task-id]
+ This will give you the full task details and requirements.
+
+ Then implement the component based on those requirements.
+ Follow project code style and add error handling.
+ Report back when complete.
+```
+
+#### Step 2: Write Tests
+
+Launch testing expert:
+
+```
+Task tool:
+- description: "Write tests for [component]"
+- subagent_type: testing-expert [or jest/vitest-testing-expert]
+- prompt: |
+ First run: stm show [task-id]
+
+ Write comprehensive tests for the implemented component.
+ Cover edge cases and aim for >80% coverage.
+ Report back when complete.
+```
+
+Then run tests to verify they pass.
+
+#### Step 3: Code Review (Required)
+
+**Important:** Always run code review to verify both quality AND completeness. Task cannot be marked done without passing both.
+
+Launch code review expert:
+
+```
+Task tool:
+- description: "Review [component]"
+- subagent_type: code-review-expert
+- prompt: |
+ First run: stm show [task-id]
+
+ Review implementation for BOTH:
+ 1. COMPLETENESS - Are all requirements from the task fully implemented?
+ 2. QUALITY - Code quality, security, error handling, test coverage
+
+ Categorize any issues as: CRITICAL, IMPORTANT, or MINOR.
+ Report if implementation is COMPLETE or INCOMPLETE.
+ Report back with findings.
+```
+
+#### Step 4: Fix Issues & Complete Implementation
+
+If code review found the implementation INCOMPLETE or has CRITICAL issues:
+
+1. Launch specialist to complete/fix:
+ ```
+ Task tool:
+ - description: "Complete/fix [component]"
+ - subagent_type: [specialist matching the task]
+ - prompt: |
+ First run: stm show [task-id]
+
+ Address these items from code review:
+ - Missing requirements: [list any incomplete items]
+ - Critical issues: [list any critical issues]
+
+ Update tests if needed.
+ Report back when complete.
+ ```
+
+2. Re-run tests to verify fixes
+
+3. Re-review to confirm both COMPLETE and quality standards met
+
+4. Only when implementation is COMPLETE and all critical issues fixed:
+ - If using STM: `stm update [task-id] --status done`
+ - If using TodoWrite: Mark task as completed
+
+#### Step 5: Commit Changes
+
+Create atomic commit following project conventions:
+```bash
+git add [files]
+git commit -m "[follow project's commit convention]"
+```
+
+### 4. Track Progress
+
+Monitor implementation progress:
+
+**Using STM:**
+```bash
+stm list --pretty # View all tasks
+stm list --status pending # Pending tasks
+stm list --status in-progress # Active tasks
+stm list --status done # Completed tasks
+```
+
+**Using TodoWrite:**
+Track tasks in the session with status indicators.
+
+### 5. Complete Implementation
+
+Implementation is complete when:
+- All tasks are COMPLETE (all requirements implemented)
+- All tasks pass quality review (no critical issues)
+- All tests passing
+- Documentation updated
+
+## If Issues Arise
+
+If any agent encounters problems:
+1. Identify the specific issue
+2. Launch appropriate specialist to resolve
+3. Or request user assistance if blocked
\ No newline at end of file
diff --git a/.claude/commands/spec/validate.md b/.claude/commands/spec/validate.md
new file mode 100644
index 0000000..727a251
--- /dev/null
+++ b/.claude/commands/spec/validate.md
@@ -0,0 +1,187 @@
+---
+allowed-tools: Task, Read, Grep
+description: Analyzes a specification document to determine if it has enough detail for autonomous implementation
+category: validation
+argument-hint: "<path-to-spec-file>"
+---
+
+# Specification Completeness Check
+
+Analyze the specification at: $ARGUMENTS
+
+## Analysis Framework
+
+This command will analyze the provided specification document to determine if it contains sufficient detail for successful autonomous implementation, while also identifying overengineering and non-essential complexity that should be removed or deferred.
+
+### Domain Expert Consultation
+
+When analyzing specifications that involve specific technical domains:
+- **Use specialized subagents** when analysis involves specific domains (TypeScript, React, testing, databases, etc.)
+- Run `claudekit list agents` to see available specialized experts
+- Match specification domains to expert knowledge for thorough validation
+- Use general-purpose approach only when no specialized expert fits
+
+### What This Check Evaluates:
+
+The analysis evaluates three fundamental aspects, each with specific criteria:
+
+#### 1. **WHY - Intent and Purpose**
+- Background/Problem Statement clarity
+- Goals and Non-Goals definition
+- User value/benefit explanation
+- Justification vs alternatives
+- Success criteria
+
+#### 2. **WHAT - Scope and Requirements**
+- Features and functionality definition
+- Expected deliverables
+- API contracts and interfaces
+- Data models and structures
+- Integration requirements:
+ - External system interactions?
+ - Authentication mechanisms?
+ - Communication protocols?
+- Performance requirements
+- Security requirements
+
+#### 3. **HOW - Implementation Details**
+- Architecture and design patterns
+- Implementation phases/roadmap
+- Technical approach:
+ - Core logic and algorithms
+ - All functions and methods fully specified?
+ - Execution flow clearly defined?
+- Error handling:
+ - All failure modes identified?
+ - Recovery behavior specified?
+ - Edge cases documented?
+- Platform considerations:
+ - Cross-platform compatibility?
+ - Platform-specific implementations?
+ - Required dependencies per platform?
+- Resource management:
+ - Performance constraints defined?
+ - Resource limits specified?
+ - Cleanup procedures documented?
+- Testing strategy:
+ - Test purpose documentation (each test explains why it exists)
+ - Meaningful tests that can fail to reveal real issues
+ - Edge case coverage and failure scenarios
+ - Follows project testing philosophy: "When tests fail, fix the code, not the test"
+- Deployment considerations
+
+### Additional Quality Checks:
+
+**Completeness Assessment**
+- Missing critical sections
+- Unresolved decisions
+- Open questions
+
+**Clarity Assessment**
+- Ambiguous statements
+- Assumed knowledge
+- Inconsistencies
+
+**Overengineering Assessment**
+- Features not aligned with core user needs
+- Premature optimizations
+- Unnecessary complexity patterns
+
+### Overengineering Detection:
+
+**Core Value Alignment Analysis**
+Evaluate whether features directly serve the core user need:
+- Does this feature solve a real, immediate problem?
+- Is it being used frequently enough to justify complexity?
+- Would a simpler solution work for 80% of use cases?
+
+**YAGNI Principle (You Aren't Gonna Need It)**
+Be aggressive about cutting features:
+- If unsure whether it's needed → Cut it
+- If it's for "future flexibility" → Cut it
+- If only 20% of users need it → Cut it
+- If it adds any complexity → Question it, probably cut it
+
+**Common Overengineering Patterns to Detect:**
+
+1. **Premature Optimization**
+ - Caching for rarely accessed data
+ - Performance optimizations without benchmarks
+ - Complex algorithms for small datasets
+ - Micro-optimizations before profiling
+
+2. **Feature Creep**
+ - "Nice to have" features (cut them)
+ - Edge case handling for unlikely scenarios (cut them)
+ - Multiple ways to do the same thing (keep only one)
+ - Features that "might be useful someday" (definitely cut)
+
+3. **Over-abstraction**
+ - Generic solutions for specific problems
+ - Too many configuration options
+ - Unnecessary plugin/extension systems
+ - Abstract classes with single implementations
+
+4. **Infrastructure Overhead**
+ - Complex build pipelines for simple tools
+ - Multiple deployment environments for internal tools
+ - Extensive monitoring for non-critical features
+ - Database clustering for low-traffic applications
+
+5. **Testing Extremism**
+ - 100% coverage requirements
+ - Testing implementation details
+ - Mocking everything
+ - Edge case tests for prototype features
+
+**Simplification Recommendations:**
+- Identify features to cut from the spec entirely
+- Suggest simpler alternatives
+- Highlight unnecessary complexity
+- Recommend aggressive scope reduction to core essentials
+
+### Output Format:
+
+The analysis will provide:
+- **Summary**: Overall readiness assessment (Ready/Not Ready)
+- **Critical Gaps**: Must-fix issues blocking implementation
+- **Missing Details**: Specific areas needing clarification
+- **Risk Areas**: Potential implementation challenges
+- **Overengineering Analysis**:
+ - Non-core features that should be removed entirely
+ - Complexity that doesn't align with usage patterns
+ - Suggested simplifications or complete removal
+- **Features to Cut**: Specific items to remove from the spec
+- **Essential Scope**: Absolute minimum needed to solve the core problem
+- **Recommendations**: Next steps to improve the spec
+
+### Example Overengineering Detection:
+
+When analyzing a specification, the validator might identify patterns like:
+
+**Example 1: Unnecessary Caching**
+- Spec includes: "Cache user preferences with Redis"
+- Analysis: User preferences accessed once per session
+- Recommendation: Use in-memory storage or browser localStorage for MVP
+
+**Example 2: Premature Edge Cases**
+- Spec includes: "Handle 10,000+ concurrent connections"
+- Analysis: Expected usage is <100 concurrent users
+- Recommendation: Cut this entirely - let it fail at scale if needed
+
+**Example 3: Over-abstracted Architecture**
+- Spec includes: "Plugin system for custom validators"
+- Analysis: Only 3 validators needed, all known upfront
+- Recommendation: Implement validators directly, no plugin system needed
+
+**Example 4: Excessive Testing Requirements**
+- Spec includes: "100% code coverage with mutation testing"
+- Analysis: Tool used occasionally, not mission-critical
+- Recommendation: Focus on core functionality tests (70% coverage)
+
+**Example 5: Feature Creep**
+- Spec includes: "Support 5 export formats (JSON, CSV, XML, YAML, TOML)"
+- Analysis: 95% of users only need JSON
+- Recommendation: Cut all formats except JSON - YAGNI (You Aren't Gonna Need It)
+
+This comprehensive analysis helps ensure specifications are implementation-ready while keeping scope focused on core user needs, reducing both ambiguity and unnecessary complexity.
\ No newline at end of file
diff --git a/.claude/commands/validate-and-fix.md b/.claude/commands/validate-and-fix.md
new file mode 100644
index 0000000..8f1d891
--- /dev/null
+++ b/.claude/commands/validate-and-fix.md
@@ -0,0 +1,110 @@
+---
+description: Run quality checks and automatically fix issues using concurrent agents
+category: workflow
+allowed-tools: Bash, Task, TodoWrite, Read, Edit, MultiEdit
+---
+
+# Validate and Fix
+
+Run quality checks and automatically fix discovered issues using parallel execution.
+
+## Process
+
+### 1. SYSTEMATIC PRIORITY-BASED ANALYSIS
+
+#### Command Discovery
+First, discover what validation commands are available:
+1. Check AGENTS.md/CLAUDE.md for documented build/test/lint commands
+2. Examine package.json scripts section for available commands
+3. Look for common patterns in scripts:
+ - Linting: "lint", "eslint", "lint:fix", "check:lint", "lint:js"
+ - Type checking: "typecheck", "type-check", "tsc", "check:types", "types"
+ - Testing: "test", "test:unit", "jest", "check:test", "test:all"
+ - Formatting: "format", "prettier", "fmt", "format:fix"
+ - Build: "build", "compile", "build:prod"
+4. Check README.md for any additional validation instructions
+
+#### Discovery with Immediate Categorization
+Run all discovered quality checks in parallel using Bash. Capture full output including file paths, line numbers, and error messages:
+- Linting (ESLint, Prettier, Ruff, etc.)
+- Type checking (TypeScript, mypy, etc.)
+- Tests (Jest, pytest, go test, etc.)
+- Build verification
+- Custom project checks
+
+Immediately categorize findings by:
+- **CRITICAL**: Security issues, breaking changes, data loss risk
+- **HIGH**: Functionality bugs, test failures, build breaks
+- **MEDIUM**: Code quality, style violations, documentation gaps
+- **LOW**: Formatting, minor optimizations
+
+#### Risk Assessment Before Action
+- Identify "quick wins" vs. complex fixes
+- Map dependencies between issues (fix A before B)
+- Flag issues that require manual intervention
+
+### 2. STRATEGIC FIX EXECUTION
+
+#### Phase 1 - Safe Quick Wins
+- Start with LOW and MEDIUM priority fixes that can't break anything
+- Verify each fix immediately before proceeding
+
+#### Phase 2 - Functionality Fixes
+- Address HIGH priority issues one at a time
+- Run tests after each fix to ensure no regressions
+
+#### Phase 3 - Critical Issues
+- Handle CRITICAL issues with explicit user confirmation
+- Provide detailed plan before executing
+
+#### Phase 4 - Verification
+- Re-run ALL checks to confirm fixes were successful
+- Provide summary of what was fixed vs. what remains
+
+### 3. COMPREHENSIVE ERROR HANDLING
+
+#### Rollback Capability
+- Create git stash checkpoint before ANY changes
+- Provide instant rollback procedure if fixes cause issues
+
+#### Partial Success Handling
+- Continue execution even if some fixes fail
+- Clearly separate successful fixes from failures
+- Provide manual fix instructions for unfixable issues
+
+#### Quality Validation
+- Require 100% success in each phase before proceeding
+- If a phase fails, diagnose and provide specific next steps
+
+#### Task Distribution
+Create detailed task plans where each agent gets:
+- A specific, focused objective (e.g., "Fix all TypeScript errors in src/components/")
+- Exact file paths and line numbers to modify
+- Clear success criteria (e.g., "Ensure the project's type checking command passes for these files")
+- Any relevant context about dependencies or patterns to follow
+
+### 4. Parallel Execution
+Launch multiple agents concurrently for independent, parallelizable tasks:
+- **CRITICAL**: Include multiple Task tool calls in a SINGLE message ONLY when tasks can be done in parallel
+- Tasks that depend on each other should be executed sequentially (separate messages)
+- Parallelizable tasks: Different file fixes, independent test suites, non-overlapping components
+- Sequential tasks: Tasks with dependencies, shared state modifications, ordered phases
+- **Use specialized subagents** when tasks match expert domains (TypeScript, React, testing, databases, etc.)
+- Run `claudekit list agents` to see available specialized experts
+- Match task requirements to expert domains for optimal results
+- Use general-purpose approach only when no specialized expert fits
+- Each parallel agent should have non-overlapping responsibilities to avoid conflicts
+- Agents working on related files must understand the shared interfaces
+- Each agent verifies their fixes work before completing
+- Track progress with TodoWrite
+- Execute phases sequentially: complete Phase 1 before Phase 2, etc.
+- Create checkpoint after each successful phase
+
+### 5. Final Verification
+After all agents complete:
+- Re-run all checks to confirm 100% of fixable issues are resolved
+- Confirm no new issues were introduced by fixes
+- Report any remaining manual fixes needed with specific instructions
+- Provide summary: "Fixed X/Y issues, Z require manual intervention"
+
+This approach maximizes efficiency through parallel discovery and fixing while ensuring coordinated, conflict-free changes.
\ No newline at end of file
diff --git a/.claude/settings.json b/.claude/settings.json
new file mode 100644
index 0000000..a16a78b
--- /dev/null
+++ b/.claude/settings.json
@@ -0,0 +1,119 @@
+{
+ "hooks": {
+ "PreToolUse": [
+ {
+ "matcher": "Read|Edit|MultiEdit|Write|Bash",
+ "hooks": [
+ {
+ "type": "command",
+ "command": "claudekit-hooks run file-guard"
+ }
+ ]
+ }
+ ],
+ "PostToolUse": [
+ {
+ "matcher": "Write|Edit|MultiEdit",
+ "hooks": [
+ {
+ "type": "command",
+ "command": "claudekit-hooks run lint-changed"
+ }
+ ]
+ },
+ {
+ "matcher": "Write|Edit|MultiEdit",
+ "hooks": [
+ {
+ "type": "command",
+ "command": "claudekit-hooks run typecheck-changed"
+ }
+ ]
+ },
+ {
+ "matcher": "Write|Edit|MultiEdit",
+ "hooks": [
+ {
+ "type": "command",
+ "command": "claudekit-hooks run check-any-changed"
+ }
+ ]
+ },
+ {
+ "matcher": "Write|Edit|MultiEdit",
+ "hooks": [
+ {
+ "type": "command",
+ "command": "claudekit-hooks run test-changed"
+ }
+ ]
+ },
+ {
+ "matcher": "Edit|MultiEdit",
+ "hooks": [
+ {
+ "type": "command",
+ "command": "claudekit-hooks run check-comment-replacement"
+ }
+ ]
+ },
+ {
+ "matcher": "Edit|MultiEdit",
+ "hooks": [
+ {
+ "type": "command",
+ "command": "claudekit-hooks run check-unused-parameters"
+ }
+ ]
+ }
+ ],
+ "Stop": [
+ {
+ "matcher": "*",
+ "hooks": [
+ {
+ "type": "command",
+ "command": "claudekit-hooks run typecheck-project"
+ },
+ {
+ "type": "command",
+ "command": "claudekit-hooks run lint-project"
+ },
+ {
+ "type": "command",
+ "command": "claudekit-hooks run test-project"
+ },
+ {
+ "type": "command",
+ "command": "claudekit-hooks run check-todos"
+ },
+ {
+ "type": "command",
+ "command": "claudekit-hooks run self-review"
+ },
+ {
+ "type": "command",
+ "command": "claudekit-hooks run create-checkpoint"
+ }
+ ]
+ }
+ ],
+ "SubagentStop": [],
+ "SessionStart": [],
+ "UserPromptSubmit": [
+ {
+ "matcher": "*",
+ "hooks": [
+ {
+ "type": "command",
+ "command": "claudekit-hooks run codebase-map"
+ },
+ {
+ "type": "command",
+ "command": "claudekit-hooks run thinking-level"
+ }
+ ]
+ }
+ ]
+ }
+}
\ No newline at end of file