Initial commit
skills/cli-testing-patterns/SKILL.md
---
|
||||
name: cli-testing-patterns
|
||||
description: CLI testing strategies and patterns for Node.js (Jest) and Python (pytest, Click.testing.CliRunner). Use when writing tests for CLI tools, testing command execution, validating exit codes, testing output, implementing CLI test suites, or when user mentions CLI testing, Jest CLI tests, pytest CLI, Click.testing.CliRunner, command testing, or exit code validation.
|
||||
allowed-tools: Read, Write, Bash
|
||||
---
|
||||
|
||||
# CLI Testing Patterns
|
||||
|
||||
Comprehensive testing strategies for CLI applications using industry-standard testing frameworks. Covers command execution testing, exit code validation, output verification, interactive prompt testing, and integration testing patterns.
|
||||
|
||||
## Instructions
|
||||
|
||||
### When Testing Node.js CLI Tools
|
||||
|
||||
1. **Use Jest for testing CLI commands**
|
||||
- Import `child_process.execSync` for command execution
|
||||
- Create helper function to run CLI and capture output
|
||||
- Test exit codes, stdout, stderr separately
|
||||
- Handle both success and error cases
|
||||
|
||||
2. **Test Structure**
|
||||
- Set up CLI path relative to test location
|
||||
- Create `runCLI()` helper that returns `{stdout, stderr, code}` (see the sketch after this list)
|
||||
- Use try-catch to handle non-zero exit codes
|
||||
- Test common scenarios: version, help, unknown commands
|
||||
|
||||
3. **What to Test**
|
||||
- Command execution with various argument combinations
|
||||
- Exit code validation (0 for success, non-zero for errors)
|
||||
- Output content (stdout) validation
|
||||
- Error messages (stderr) validation
|
||||
- Configuration file handling
|
||||
- Interactive prompts (with mocked input)
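A minimal sketch of that setup, assuming the CLI under test is built to `bin/mycli` relative to the test file (adjust the path and the expected strings for your project):

```typescript
import { execSync } from 'child_process';
import path from 'path';

// Hypothetical location of the CLI under test
const CLI_PATH = path.join(__dirname, '../bin/mycli');

interface CLIResult {
  stdout: string;
  stderr: string;
  code: number;
}

// Normalize success and failure into one shape: execSync throws on
// non-zero exit, so the catch branch recovers the streams and status.
function runCLI(args: string): CLIResult {
  try {
    const stdout = execSync(`${CLI_PATH} ${args}`, { encoding: 'utf8', stdio: 'pipe' });
    return { stdout, stderr: '', code: 0 };
  } catch (error: any) {
    return {
      stdout: error.stdout || '',
      stderr: error.stderr || '',
      code: error.status ?? 1,
    };
  }
}

describe('basic commands', () => {
  test('--help exits 0 and prints usage', () => {
    const { stdout, code } = runCLI('--help');
    expect(code).toBe(0);
    expect(stdout).toContain('Usage:');
  });

  test('unknown command exits non-zero and reports an error', () => {
    const { stderr, code } = runCLI('does-not-exist');
    expect(code).not.toBe(0);
    expect(stderr.length).toBeGreaterThan(0);
  });
});
```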
|
||||
|
||||
### When Testing Python CLI Tools
|
||||
|
||||
1. **Use pytest with Click.testing.CliRunner**
|
||||
- Import `CliRunner` from `click.testing`
|
||||
- Create runner fixture for reusable test setup (see the sketch after this list)
|
||||
- Invoke commands with `runner.invoke(cli, ['args'])`
|
||||
- Check `result.exit_code` and `result.output`
|
||||
|
||||
2. **Test Structure**
|
||||
- Create pytest fixture for CliRunner instance
|
||||
- Use `runner.invoke()` to execute CLI commands
|
||||
- Access results through `result` object
|
||||
- Simulate interactive input with `input='responses\n'`
|
||||
|
||||
3. **What to Test**
|
||||
- Command invocation with various arguments
|
||||
- Exit code validation
|
||||
- Output content verification
|
||||
- Error handling and messages
|
||||
- Interactive prompt responses
|
||||
- Configuration handling
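A minimal sketch, assuming the Click entry point is importable as `cli` from a hypothetical `mycli.cli` module:

```python
import pytest
from click.testing import CliRunner

from mycli.cli import cli  # hypothetical Click entry point


@pytest.fixture
def runner():
    """Reusable CliRunner instance shared across tests."""
    return CliRunner()


def test_help_exits_zero(runner):
    result = runner.invoke(cli, ['--help'])
    assert result.exit_code == 0
    assert 'Usage:' in result.output


def test_interactive_prompt(runner):
    # Simulates a user typing "my-project" followed by Enter
    result = runner.invoke(cli, ['init'], input='my-project\n')
    assert result.exit_code == 0
    assert 'my-project' in result.output
```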
|
||||
|
||||
### Exit Code Testing Patterns
|
||||
|
||||
**Standard Exit Codes:**
|
||||
- `0` - Success
|
||||
- `1` - General error
|
||||
- `2` - Misuse of command (invalid arguments)
|
||||
- `126` - Command cannot execute
|
||||
- `127` - Command not found
|
||||
- `128+N` - Fatal error signal N
|
||||
|
||||
**Testing Strategy:**
|
||||
- Always test both success (0) and failure (non-zero) cases
|
||||
- Verify specific exit codes for different error conditions
|
||||
- Test argument validation returns appropriate codes
|
||||
- Ensure help/version return 0 (success)
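As a sketch of that strategy, a parametrized test can pin the expected code for each scenario; this reuses the `runner` fixture and `cli` entry point from the sketch above, and the commands and codes are illustrative:

```python
import pytest


@pytest.mark.parametrize(
    'args, expected_code',
    [
        (['--help'], 0),           # help must succeed
        (['--version'], 0),        # version must succeed
        (['deploy'], 2),           # missing required argument -> usage error
        (['no-such-command'], 2),  # Click reports unknown commands as usage errors
    ],
)
def test_exit_codes(runner, args, expected_code):
    result = runner.invoke(cli, args)
    assert result.exit_code == expected_code
```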
|
||||
|
||||
### Output Validation Patterns
|
||||
|
||||
**Content Testing:**
|
||||
- Check for presence of key text in output
|
||||
- Validate format (JSON, YAML, tables)
|
||||
- Test color/formatting codes (if applicable)
|
||||
- Verify error messages are user-friendly
|
||||
|
||||
**Best Practices:**
|
||||
- Use `.toContain()` for flexible matching (Jest)
|
||||
- Use `in result.output` for Python tests
|
||||
- Test both positive and negative cases
|
||||
- Validate complete workflows (multi-command)
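For example, with the `runCLI()` helper sketched earlier (the `--format json` flag and the `my-resource` name are hypothetical):

```typescript
test('list output mentions the resource', () => {
  const { stdout, code } = runCLI('list');
  expect(code).toBe(0);
  expect(stdout).toContain('my-resource'); // flexible substring match
});

test('json output is well-formed', () => {
  const { stdout, code } = runCLI('list --format json');
  expect(code).toBe(0);
  // Parsing validates the format without pinning the exact layout
  const parsed = JSON.parse(stdout);
  expect(Array.isArray(parsed)).toBe(true);
});
```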
|
||||
|
||||
## Templates
|
||||
|
||||
Use these templates for CLI testing:
|
||||
|
||||
### Node.js/Jest Templates
|
||||
- `templates/jest-cli-test.ts` - Complete Jest test suite with execSync
|
||||
- `templates/jest-config-test.ts` - Configuration file testing
|
||||
- `templates/jest-integration-test.ts` - Multi-command integration tests
|
||||
|
||||
### Python/Pytest Templates
|
||||
- `templates/pytest-click-test.py` - Click.testing.CliRunner tests
|
||||
- `templates/pytest-fixtures.py` - Reusable pytest fixtures
|
||||
- `templates/pytest-integration-test.py` - Integration test patterns
|
||||
|
||||
### Test Utilities
|
||||
- `templates/test-helpers.ts` - Node.js test helper functions
|
||||
- `templates/test-helpers.py` - Python test helper functions
|
||||
|
||||
## Scripts
|
||||
|
||||
Use these scripts for test setup and execution:
|
||||
|
||||
- `scripts/setup-jest-testing.sh` - Install Jest and configure for CLI testing
|
||||
- `scripts/setup-pytest-testing.sh` - Install pytest and Click testing dependencies
|
||||
- `scripts/run-cli-tests.sh` - Execute all CLI tests with coverage
|
||||
- `scripts/validate-test-coverage.sh` - Check test coverage thresholds
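For example, a typical sequence for a Node.js project (the paths assume the skill is checked out under `skills/` in the repository being tested):

```bash
# One-time setup, then run the suite with coverage
bash skills/cli-testing-patterns/scripts/setup-jest-testing.sh
bash skills/cli-testing-patterns/scripts/run-cli-tests.sh
```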
|
||||
|
||||
## Examples
|
||||
|
||||
See complete examples in the `examples/` directory:
|
||||
|
||||
- `examples/jest-basic/` - Basic Jest CLI testing setup
|
||||
- `examples/jest-advanced/` - Advanced Jest patterns with mocking
|
||||
- `examples/pytest-click/` - Click.testing.CliRunner examples
|
||||
- `examples/integration-testing/` - Full integration test suites
|
||||
- `examples/exit-code-testing/` - Exit code validation patterns
|
||||
|
||||
## Requirements
|
||||
|
||||
**Node.js Testing:**
|
||||
- Jest 29.x or later
|
||||
- TypeScript support (ts-jest)
|
||||
- Node.js 16+
|
||||
|
||||
**Python Testing:**
|
||||
- pytest 7.x or later
|
||||
- Click 8.x or later
|
||||
- Python 3.8+
|
||||
|
||||
**Both:**
|
||||
- Test coverage reporting tools
|
||||
- CI/CD integration support
|
||||
- Mock/stub capabilities for external dependencies
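A minimal install sketch matching those version floors; the exact package set (for example `pytest-cov` for coverage reporting) is an assumption:

```bash
# Node.js test dependencies
npm install --save-dev jest@^29 ts-jest @types/jest @types/node

# Python test dependencies
pip install "pytest>=7" "click>=8" pytest-cov
```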
|
||||
|
||||
## Best Practices
|
||||
|
||||
1. **Test in Isolation** - Each test should be independent
|
||||
2. **Mock External Dependencies** - Don't make real API calls or file system changes
|
||||
3. **Test Error Paths** - Test failures as thoroughly as successes
|
||||
4. **Use Fixtures** - Share setup code across tests
|
||||
5. **Clear Test Names** - Name tests to describe what they validate
|
||||
6. **Fast Execution** - Keep tests fast for rapid feedback
|
||||
7. **Coverage Goals** - Aim for 80%+ code coverage
|
||||
8. **Integration Tests** - Test complete workflows, not just units
|
||||
|
||||
---
|
||||
|
||||
**Purpose**: Standardize CLI testing across Node.js and Python projects
|
||||
**Load when**: Writing tests for CLI tools, validating command execution, testing exit codes
|
||||
skills/cli-testing-patterns/examples/exit-code-testing/README.md
# Exit Code Testing Patterns
|
||||
|
||||
Comprehensive guide to testing CLI exit codes correctly.
|
||||
|
||||
## Standard Exit Codes
|
||||
|
||||
### POSIX Standard Exit Codes
|
||||
|
||||
| Code | Meaning | When to Use |
|
||||
|------|---------|-------------|
|
||||
| 0 | Success | Command completed successfully |
|
||||
| 1 | General Error | Catchall for general errors |
|
||||
| 2 | Misuse of Command | Invalid arguments or options |
|
||||
| 126 | Command Cannot Execute | Permission problem or not executable |
|
||||
| 127 | Command Not Found | Command not found in PATH |
|
||||
| 128+N | Fatal Error Signal N | Process terminated by signal N |
|
||||
| 130 | Ctrl+C Termination | Process terminated by SIGINT |
|
||||
|
||||
### Custom Application Exit Codes
|
||||
|
||||
```typescript
|
||||
// Define custom exit codes
|
||||
enum ExitCode {
|
||||
SUCCESS = 0,
|
||||
GENERAL_ERROR = 1,
|
||||
INVALID_ARGUMENT = 2,
|
||||
CONFIG_ERROR = 3,
|
||||
NETWORK_ERROR = 4,
|
||||
AUTH_ERROR = 5,
|
||||
NOT_FOUND = 6,
|
||||
ALREADY_EXISTS = 7,
|
||||
PERMISSION_DENIED = 8,
|
||||
}
|
||||
```
|
||||
|
||||
## Node.js Exit Code Testing
|
||||
|
||||
### Basic Exit Code Testing
|
||||
|
||||
```typescript
|
||||
describe('Exit Code Tests', () => {
|
||||
test('success returns 0', () => {
|
||||
const { code } = runCLI('status');
|
||||
expect(code).toBe(0);
|
||||
});
|
||||
|
||||
test('general error returns 1', () => {
|
||||
const { code } = runCLI('fail-command');
|
||||
expect(code).toBe(1);
|
||||
});
|
||||
|
||||
test('invalid argument returns 2', () => {
|
||||
const { code } = runCLI('deploy --invalid-env unknown');
|
||||
expect(code).toBe(2);
|
||||
});
|
||||
|
||||
test('command not found returns 127', () => {
|
||||
const { code } = runCLI('nonexistent-command');
|
||||
expect(code).toBe(127);
|
||||
});
|
||||
});
|
||||
```
|
||||
|
||||
### Specific Error Conditions
|
||||
|
||||
```typescript
|
||||
describe('Specific Exit Codes', () => {
|
||||
test('configuration error', () => {
|
||||
const { code, stderr } = runCLI('deploy production');
|
||||
expect(code).toBe(3); // CONFIG_ERROR
|
||||
expect(stderr).toContain('configuration');
|
||||
});
|
||||
|
||||
test('network error', () => {
|
||||
// Mock network failure
|
||||
const { code, stderr } = runCLI('fetch --url https://unreachable.example.com');
|
||||
expect(code).toBe(4); // NETWORK_ERROR
|
||||
expect(stderr).toContain('network');
|
||||
});
|
||||
|
||||
test('authentication error', () => {
|
||||
const { code, stderr } = runCLI('login --token invalid');
|
||||
expect(code).toBe(5); // AUTH_ERROR
|
||||
expect(stderr).toContain('authentication');
|
||||
});
|
||||
|
||||
test('resource not found', () => {
|
||||
const { code, stderr } = runCLI('get resource-123');
|
||||
expect(code).toBe(6); // NOT_FOUND
|
||||
expect(stderr).toContain('not found');
|
||||
});
|
||||
|
||||
test('resource already exists', () => {
|
||||
runCLI('create my-resource');
|
||||
const { code, stderr } = runCLI('create my-resource');
|
||||
expect(code).toBe(7); // ALREADY_EXISTS
|
||||
expect(stderr).toContain('already exists');
|
||||
});
|
||||
});
|
||||
```
|
||||
|
||||
### Testing Exit Code Consistency
|
||||
|
||||
```typescript
|
||||
describe('Exit Code Consistency', () => {
|
||||
const errorScenarios = [
|
||||
{ args: 'deploy', expectedCode: 2, reason: 'missing required argument' },
|
||||
{ args: 'deploy --env invalid', expectedCode: 2, reason: 'invalid environment' },
|
||||
{ args: 'config get missing', expectedCode: 6, reason: 'config key not found' },
|
||||
{ args: 'unknown-cmd', expectedCode: 127, reason: 'command not found' },
|
||||
];
|
||||
|
||||
test.each(errorScenarios)(
|
||||
'should return exit code $expectedCode for $reason',
|
||||
({ args, expectedCode }) => {
|
||||
const { code } = runCLI(args);
|
||||
expect(code).toBe(expectedCode);
|
||||
}
|
||||
);
|
||||
});
|
||||
```
|
||||
|
||||
## Python Exit Code Testing
|
||||
|
||||
### Basic Exit Code Testing
|
||||
|
||||
```python
|
||||
class TestExitCodes:
|
||||
"""Test CLI exit codes"""
|
||||
|
||||
def test_success_exit_code(self, runner):
|
||||
"""Success should return 0"""
|
||||
result = runner.invoke(cli, ['status'])
|
||||
assert result.exit_code == 0
|
||||
|
||||
def test_general_error_exit_code(self, runner):
|
||||
"""General error should return 1"""
|
||||
result = runner.invoke(cli, ['fail-command'])
|
||||
assert result.exit_code == 1
|
||||
|
||||
def test_usage_error_exit_code(self, runner):
|
||||
"""Usage error should return 2"""
|
||||
result = runner.invoke(cli, ['deploy']) # Missing required arg
|
||||
assert result.exit_code == 2
|
||||
|
||||
def test_unknown_command_exit_code(self, runner):
|
||||
"""Unknown command handling"""
|
||||
result = runner.invoke(cli, ['nonexistent'])
|
||||
assert result.exit_code != 0
|
||||
```
|
||||
|
||||
### Custom Exit Codes with Click
|
||||
|
||||
```python
|
||||
import click
|
||||
import sys
|
||||
|
||||
# Define custom exit codes
|
||||
class ExitCode:
|
||||
SUCCESS = 0
|
||||
GENERAL_ERROR = 1
|
||||
INVALID_ARGUMENT = 2
|
||||
CONFIG_ERROR = 3
|
||||
NETWORK_ERROR = 4
|
||||
AUTH_ERROR = 5
|
||||
|
||||
|
||||
@click.command()
|
||||
def deploy():
|
||||
"""Deploy command with custom exit codes"""
|
||||
try:
|
||||
# Check configuration
|
||||
if not has_valid_config():
|
||||
click.echo("Configuration error", err=True)
|
||||
sys.exit(ExitCode.CONFIG_ERROR)
|
||||
|
||||
# Check authentication
|
||||
if not is_authenticated():
|
||||
click.echo("Authentication failed", err=True)
|
||||
sys.exit(ExitCode.AUTH_ERROR)
|
||||
|
||||
# Deploy
|
||||
deploy_application()
|
||||
click.echo("Deployment successful")
|
||||
sys.exit(ExitCode.SUCCESS)
|
||||
|
||||
except NetworkError:
|
||||
click.echo("Network error", err=True)
|
||||
sys.exit(ExitCode.NETWORK_ERROR)
|
||||
except Exception as e:
|
||||
click.echo(f"Error: {e}", err=True)
|
||||
sys.exit(ExitCode.GENERAL_ERROR)
|
||||
```
|
||||
|
||||
### Testing Custom Exit Codes
|
||||
|
||||
```python
|
||||
class TestCustomExitCodes:
|
||||
"""Test custom exit codes"""
|
||||
|
||||
def test_config_error_exit_code(self, runner):
"""Configuration error should return 3"""
# No config file is present in the test environment
|
||||
result = runner.invoke(cli, ['deploy', 'production'])
|
||||
assert result.exit_code == 3
|
||||
assert 'configuration' in result.output.lower()
|
||||
|
||||
def test_network_error_exit_code(self, runner, monkeypatch):
|
||||
"""Network error should return 4"""
|
||||
def mock_request(*args, **kwargs):
|
||||
raise NetworkError("Connection failed")
|
||||
|
||||
monkeypatch.setattr('requests.post', mock_request)
|
||||
result = runner.invoke(cli, ['deploy', 'production'])
|
||||
assert result.exit_code == 4
|
||||
assert 'network' in result.output.lower()
|
||||
|
||||
def test_auth_error_exit_code(self, runner):
|
||||
"""Authentication error should return 5"""
|
||||
result = runner.invoke(cli, ['deploy', 'production', '--token', 'invalid'])
|
||||
assert result.exit_code == 5
|
||||
assert 'authentication' in result.output.lower()
|
||||
```
|
||||
|
||||
## Testing Exit Codes in Scripts
|
||||
|
||||
### Bash Script Exit Code Testing
|
||||
|
||||
```typescript
|
||||
describe('Script Exit Codes', () => {
|
||||
  test('should respect shell exit codes', () => {
    // execSync throws when the script exits non-zero, so reaching the
    // assertion below implies an overall exit code of 0
    const script = `
      ${CLI_PATH} deploy staging
      if [ $? -ne 0 ]; then
        echo "Deployment failed"
        exit 1
      fi
      echo "Deployment succeeded"
    `;

    const stdout = execSync(script, { encoding: 'utf8', shell: '/bin/bash' });
    expect(stdout).toContain('Deployment succeeded');
  });

  test('should propagate errors in pipelines', () => {
    // With pipefail enabled, a failing CLI makes the whole pipeline fail,
    // which makes execSync throw
    expect(() =>
      execSync(`set -o pipefail; ${CLI_PATH} invalid | tee output.log`, {
        encoding: 'utf8',
        shell: '/bin/bash',
      })
    ).toThrow();
  });
|
||||
});
|
||||
```
|
||||
|
||||
## Exit Code Best Practices
|
||||
|
||||
### 1. Document Exit Codes
|
||||
|
||||
```typescript
|
||||
/**
|
||||
* CLI Exit Codes
|
||||
*
|
||||
* 0 - Success
|
||||
* 1 - General error
|
||||
* 2 - Invalid arguments
|
||||
* 3 - Configuration error
|
||||
* 4 - Network error
|
||||
* 5 - Authentication error
|
||||
* 6 - Resource not found
|
||||
* 7 - Resource already exists
|
||||
* 8 - Permission denied
|
||||
*/
|
||||
```
|
||||
|
||||
### 2. Consistent Error Handling
|
||||
|
||||
```python
|
||||
def handle_error(error: Exception) -> int:
|
||||
"""
|
||||
Handle errors and return appropriate exit code
|
||||
|
||||
Returns:
|
||||
Appropriate exit code for the error type
|
||||
"""
|
||||
if isinstance(error, ConfigurationError):
|
||||
click.echo(f"Configuration error: {error}", err=True)
|
||||
return ExitCode.CONFIG_ERROR
|
||||
elif isinstance(error, NetworkError):
|
||||
click.echo(f"Network error: {error}", err=True)
|
||||
return ExitCode.NETWORK_ERROR
|
||||
elif isinstance(error, AuthenticationError):
|
||||
click.echo(f"Authentication failed: {error}", err=True)
|
||||
return ExitCode.AUTH_ERROR
|
||||
else:
|
||||
click.echo(f"Error: {error}", err=True)
|
||||
return ExitCode.GENERAL_ERROR
|
||||
```
|
||||
|
||||
### 3. Test Exit Codes with Error Messages
|
||||
|
||||
```typescript
|
||||
test('exit code matches error type', () => {
|
||||
const errorCases = [
|
||||
{ args: 'deploy', expectedCode: 2, expectedMsg: 'missing required argument' },
|
||||
{ args: 'login --token bad', expectedCode: 5, expectedMsg: 'authentication failed' },
|
||||
{ args: 'get missing-id', expectedCode: 6, expectedMsg: 'not found' },
|
||||
];
|
||||
|
||||
errorCases.forEach(({ args, expectedCode, expectedMsg }) => {
|
||||
const { code, stderr } = runCLI(args);
|
||||
expect(code).toBe(expectedCode);
|
||||
expect(stderr.toLowerCase()).toContain(expectedMsg);
|
||||
});
|
||||
});
|
||||
```
|
||||
|
||||
### 4. Test Help and Version Return 0
|
||||
|
||||
```python
|
||||
def test_help_returns_success(runner):
|
||||
"""Help should return 0"""
|
||||
result = runner.invoke(cli, ['--help'])
|
||||
assert result.exit_code == 0
|
||||
|
||||
def test_version_returns_success(runner):
|
||||
"""Version should return 0"""
|
||||
result = runner.invoke(cli, ['--version'])
|
||||
assert result.exit_code == 0
|
||||
```
|
||||
|
||||
## Common Pitfalls
|
||||
|
||||
### 1. Don't Use Exit Code 0 for Errors
|
||||
|
||||
```typescript
|
||||
// ❌ Wrong - using 0 for errors
|
||||
if (error) {
|
||||
console.error('Error occurred');
|
||||
process.exit(0); // Should be non-zero!
|
||||
}
|
||||
|
||||
// ✅ Correct - using non-zero for errors
|
||||
if (error) {
|
||||
console.error('Error occurred');
|
||||
process.exit(1);
|
||||
}
|
||||
```
|
||||
|
||||
### 2. Don't Ignore Exit Codes in Tests
|
||||
|
||||
```python
|
||||
# ❌ Wrong - not checking exit code
|
||||
def test_deploy(runner):
|
||||
result = runner.invoke(cli, ['deploy', 'production'])
|
||||
assert 'deployed' in result.output # What if it failed?
|
||||
|
||||
# ✅ Correct - always check exit code
|
||||
def test_deploy(runner):
|
||||
result = runner.invoke(cli, ['deploy', 'production'])
|
||||
assert result.exit_code == 0
|
||||
assert 'deployed' in result.output
|
||||
```
|
||||
|
||||
### 3. Use Specific Exit Codes
|
||||
|
||||
```typescript
|
||||
// ❌ Wrong - using 1 for everything
|
||||
if (configError) process.exit(1);
|
||||
if (networkError) process.exit(1);
|
||||
if (authError) process.exit(1);
|
||||
|
||||
// ✅ Correct - using specific codes
|
||||
if (configError) process.exit(ExitCode.CONFIG_ERROR);
|
||||
if (networkError) process.exit(ExitCode.NETWORK_ERROR);
|
||||
if (authError) process.exit(ExitCode.AUTH_ERROR);
|
||||
```
|
||||
|
||||
## Testing Exit Codes in CI/CD
|
||||
|
||||
```yaml
|
||||
# GitHub Actions example
|
||||
- name: Test CLI Exit Codes
|
||||
run: |
|
||||
# Should succeed
|
||||
./cli status && echo "Status check passed" || exit 1
|
||||
|
||||
# Should fail
|
||||
./cli invalid-command && exit 1 || echo "Error handling works"
|
||||
|
||||
# Check a specific exit code (capture it so the step's fail-fast shell does not abort)
code=0
./cli deploy --missing-arg || code=$?
if [ "$code" -eq 2 ]; then
  echo "Correct exit code for invalid argument"
else
  echo "Wrong exit code: $code"
  exit 1
fi
|
||||
```
|
||||
|
||||
## Resources
|
||||
|
||||
- [Exit Codes on Linux](https://tldp.org/LDP/abs/html/exitcodes.html)
|
||||
- [POSIX Exit Codes](https://pubs.opengroup.org/onlinepubs/9699919799/)
|
||||
- [GNU Exit Codes](https://www.gnu.org/software/libc/manual/html_node/Exit-Status.html)
|
||||
skills/cli-testing-patterns/examples/integration-testing/README.md
# Integration Testing for CLI Applications
|
||||
|
||||
Complete workflows and integration testing patterns for CLI applications.
|
||||
|
||||
## Overview
|
||||
|
||||
Integration tests verify that multiple CLI commands work together correctly, testing complete user workflows rather than individual commands in isolation.
|
||||
|
||||
## Key Differences from Unit Tests
|
||||
|
||||
| Unit Tests | Integration Tests |
|
||||
|------------|-------------------|
|
||||
| Test individual commands | Test command sequences |
|
||||
| Mock external dependencies | May use real dependencies |
|
||||
| Fast execution | Slower execution |
|
||||
| Isolated state | Shared state across commands |
|
||||
|
||||
## Node.js Integration Testing
|
||||
|
||||
### Multi-Command Workflow
|
||||
|
||||
```typescript
|
||||
describe('Complete Deployment Workflow', () => {
|
||||
let tempDir: string;
|
||||
|
||||
beforeEach(() => {
|
||||
tempDir = fs.mkdtempSync(path.join(os.tmpdir(), 'cli-integration-'));
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
fs.rmSync(tempDir, { recursive: true, force: true });
|
||||
});
|
||||
|
||||
test('full deployment workflow', () => {
|
||||
// Step 1: Initialize project
|
||||
let result = runCLI(`init my-project --cwd ${tempDir}`);
|
||||
expect(result.code).toBe(0);
|
||||
expect(fs.existsSync(path.join(tempDir, 'my-project'))).toBe(true);
|
||||
|
||||
// Step 2: Configure
|
||||
const projectDir = path.join(tempDir, 'my-project');
|
||||
result = runCLI(`config set api_key test_key --cwd ${projectDir}`);
|
||||
expect(result.code).toBe(0);
|
||||
|
||||
// Step 3: Build
|
||||
result = runCLI(`build --production --cwd ${projectDir}`);
|
||||
expect(result.code).toBe(0);
|
||||
expect(fs.existsSync(path.join(projectDir, 'dist'))).toBe(true);
|
||||
|
||||
// Step 4: Deploy
|
||||
result = runCLI(`deploy staging --cwd ${projectDir}`);
|
||||
expect(result.code).toBe(0);
|
||||
expect(result.stdout).toContain('Deployed successfully');
|
||||
|
||||
// Step 5: Verify
|
||||
result = runCLI(`status --cwd ${projectDir}`);
|
||||
expect(result.code).toBe(0);
|
||||
expect(result.stdout).toContain('staging');
|
||||
});
|
||||
});
|
||||
```
|
||||
|
||||
### State Persistence Testing
|
||||
|
||||
```typescript
|
||||
describe('State Persistence', () => {
|
||||
test('state persists across commands', () => {
|
||||
const workspace = createTempWorkspace();
|
||||
|
||||
try {
|
||||
// Create initial state
|
||||
runCLI(`init --cwd ${workspace}`);
|
||||
runCLI(`config set key1 value1 --cwd ${workspace}`);
|
||||
runCLI(`config set key2 value2 --cwd ${workspace}`);
|
||||
|
||||
// Verify state persists
|
||||
let result = runCLI(`config get key1 --cwd ${workspace}`);
|
||||
expect(result.stdout).toContain('value1');
|
||||
|
||||
// Modify state
|
||||
runCLI(`config set key1 updated --cwd ${workspace}`);
|
||||
|
||||
// Verify modification
|
||||
result = runCLI(`config get key1 --cwd ${workspace}`);
|
||||
expect(result.stdout).toContain('updated');
|
||||
|
||||
// Verify other keys unchanged
|
||||
result = runCLI(`config get key2 --cwd ${workspace}`);
|
||||
expect(result.stdout).toContain('value2');
|
||||
} finally {
|
||||
cleanupWorkspace(workspace);
|
||||
}
|
||||
});
|
||||
});
|
||||
```
|
||||
|
||||
## Python Integration Testing
|
||||
|
||||
### Complete Workflow Testing
|
||||
|
||||
```python
|
||||
class TestCompleteWorkflow:
|
||||
"""Test complete CLI workflows"""
|
||||
|
||||
def test_project_lifecycle(self, runner):
|
||||
"""Test complete project lifecycle"""
|
||||
with runner.isolated_filesystem():
|
||||
# Initialize
|
||||
result = runner.invoke(cli, ['create', 'test-project'])
|
||||
assert result.exit_code == 0
|
||||
|
||||
# Enter project directory
|
||||
os.chdir('test-project')
|
||||
|
||||
# Configure
|
||||
result = runner.invoke(cli, ['config', 'set', 'api_key', 'test_key'])
|
||||
assert result.exit_code == 0
|
||||
|
||||
# Add dependencies
|
||||
result = runner.invoke(cli, ['add', 'dependency', 'requests'])
|
||||
assert result.exit_code == 0
|
||||
|
||||
# Build
|
||||
result = runner.invoke(cli, ['build'])
|
||||
assert result.exit_code == 0
|
||||
assert os.path.exists('dist')
|
||||
|
||||
# Test
|
||||
result = runner.invoke(cli, ['test'])
|
||||
assert result.exit_code == 0
|
||||
|
||||
# Deploy
|
||||
result = runner.invoke(cli, ['deploy', 'staging'])
|
||||
assert result.exit_code == 0
|
||||
|
||||
# Verify
|
||||
result = runner.invoke(cli, ['status'])
|
||||
assert result.exit_code == 0
|
||||
assert 'staging' in result.output
|
||||
|
||||
def test_multi_environment_workflow(self, runner):
|
||||
"""Test workflow across multiple environments"""
|
||||
with runner.isolated_filesystem():
|
||||
# Setup
|
||||
runner.invoke(cli, ['init', 'multi-env-app'])
|
||||
os.chdir('multi-env-app')
|
||||
|
||||
# Configure environments
|
||||
environments = ['development', 'staging', 'production']
|
||||
|
||||
for env in environments:
|
||||
result = runner.invoke(
|
||||
cli,
|
||||
['config', 'set', 'api_key', f'{env}_key', '--env', env]
|
||||
)
|
||||
assert result.exit_code == 0
|
||||
|
||||
# Deploy to each environment
|
||||
for env in environments:
|
||||
result = runner.invoke(cli, ['deploy', env])
|
||||
assert result.exit_code == 0
|
||||
assert env in result.output
|
||||
```
|
||||
|
||||
### Error Recovery Testing
|
||||
|
||||
```python
|
||||
class TestErrorRecovery:
|
||||
"""Test error recovery workflows"""
|
||||
|
||||
def test_rollback_on_failure(self, runner):
|
||||
"""Test rollback after failed deployment"""
|
||||
with runner.isolated_filesystem():
|
||||
# Setup
|
||||
runner.invoke(cli, ['init', 'rollback-test'])
|
||||
os.chdir('rollback-test')
|
||||
runner.invoke(cli, ['config', 'set', 'api_key', 'test_key'])
|
||||
|
||||
# Successful deployment
|
||||
result = runner.invoke(cli, ['deploy', 'staging'])
|
||||
assert result.exit_code == 0
|
||||
|
||||
# Failed deployment (simulate)
|
||||
result = runner.invoke(cli, ['deploy', 'staging', '--force-fail'])
|
||||
assert result.exit_code != 0
|
||||
|
||||
# Rollback
|
||||
result = runner.invoke(cli, ['rollback'])
|
||||
assert result.exit_code == 0
|
||||
assert 'rollback successful' in result.output.lower()
|
||||
|
||||
def test_recovery_from_corruption(self, runner):
|
||||
"""Test recovery from corrupted state"""
|
||||
with runner.isolated_filesystem():
|
||||
# Create valid state
|
||||
runner.invoke(cli, ['init', 'corrupt-test'])
|
||||
os.chdir('corrupt-test')
|
||||
runner.invoke(cli, ['config', 'set', 'key', 'value'])
|
||||
|
||||
# Corrupt state file
|
||||
with open('.cli-state', 'w') as f:
|
||||
f.write('invalid json {[}')
|
||||
|
||||
# Should detect and recover
|
||||
result = runner.invoke(cli, ['config', 'get', 'key'])
|
||||
assert result.exit_code != 0
|
||||
assert 'corrupt' in result.output.lower()
|
||||
|
||||
# Reset state
|
||||
result = runner.invoke(cli, ['reset', '--force'])
|
||||
assert result.exit_code == 0
|
||||
|
||||
# Should work after reset
|
||||
result = runner.invoke(cli, ['config', 'set', 'key', 'new_value'])
|
||||
assert result.exit_code == 0
|
||||
```
|
||||
|
||||
## Integration Test Patterns
|
||||
|
||||
### 1. Sequential Command Testing
|
||||
|
||||
Test commands that must run in a specific order:
|
||||
|
||||
```python
|
||||
def test_sequential_workflow(runner):
|
||||
"""Test commands that depend on each other"""
|
||||
with runner.isolated_filesystem():
|
||||
# Each command depends on the previous
|
||||
commands = [
|
||||
['init', 'project'],
|
||||
['config', 'set', 'key', 'value'],
|
||||
['build'],
|
||||
['test'],
|
||||
['deploy', 'staging']
|
||||
]
|
||||
|
||||
for cmd in commands:
|
||||
result = runner.invoke(cli, cmd)
|
||||
assert result.exit_code == 0, \
|
||||
f"Command {' '.join(cmd)} failed: {result.output}"
|
||||
```
|
||||
|
||||
### 2. Concurrent Operation Testing
|
||||
|
||||
Test that concurrent operations are handled correctly:
|
||||
|
||||
```python
|
||||
def test_concurrent_operations(runner):
|
||||
"""Test handling of concurrent operations"""
|
||||
import threading
|
||||
|
||||
results = []
|
||||
|
||||
def run_command():
|
||||
result = runner.invoke(cli, ['deploy', 'staging'])
|
||||
results.append(result)
|
||||
|
||||
# Start multiple deployments
|
||||
threads = [threading.Thread(target=run_command) for _ in range(3)]
|
||||
for thread in threads:
|
||||
thread.start()
|
||||
for thread in threads:
|
||||
thread.join()
|
||||
|
||||
# Only one should succeed, others should detect lock
|
||||
successful = sum(1 for r in results if r.exit_code == 0)
|
||||
assert successful == 1
|
||||
assert any('locked' in r.output.lower() for r in results if r.exit_code != 0)
|
||||
```
|
||||
|
||||
### 3. Data Migration Testing
|
||||
|
||||
Test data migration between versions:
|
||||
|
||||
```python
|
||||
def test_data_migration(runner):
|
||||
"""Test data migration workflow"""
|
||||
with runner.isolated_filesystem():
|
||||
# Create old version data
|
||||
old_data = {'version': 1, 'data': {'key': 'value'}}
|
||||
with open('data.json', 'w') as f:
|
||||
json.dump(old_data, f)
|
||||
|
||||
# Run migration
|
||||
result = runner.invoke(cli, ['migrate', '--to', '2.0'])
|
||||
assert result.exit_code == 0
|
||||
|
||||
# Verify new format
|
||||
with open('data.json', 'r') as f:
|
||||
new_data = json.load(f)
|
||||
assert new_data['version'] == 2
|
||||
assert new_data['data']['key'] == 'value'
|
||||
|
||||
# Verify backup created
|
||||
assert os.path.exists('data.json.backup')
|
||||
```
|
||||
|
||||
## Best Practices
|
||||
|
||||
1. **Use Isolated Environments**: Each test should run in a clean environment
|
||||
2. **Test Real Workflows**: Test actual user scenarios, not artificial sequences
|
||||
3. **Include Error Paths**: Test recovery from failures
|
||||
4. **Test State Persistence**: Verify data persists correctly across commands
|
||||
5. **Use Realistic Data**: Test with data similar to production use cases
|
||||
6. **Clean Up Resources**: Always cleanup temp files and resources
|
||||
7. **Document Workflows**: Clearly document what workflow each test verifies
|
||||
8. **Set Appropriate Timeouts**: Integration tests may take longer
|
||||
9. **Mark Slow Tests**: Use test markers for slow-running integration tests
|
||||
10. **Test Concurrency**: Verify handling of simultaneous operations
|
||||
|
||||
## Running Integration Tests
|
||||
|
||||
### Node.js/Jest
|
||||
|
||||
```bash
|
||||
# Run all integration tests
|
||||
npm test -- --testPathPattern=integration
|
||||
|
||||
# Run specific integration test
|
||||
npm test -- integration/deployment.test.ts
|
||||
|
||||
# Run with extended timeout
|
||||
npm test -- --testTimeout=30000
|
||||
```
|
||||
|
||||
### Python/pytest
|
||||
|
||||
```bash
|
||||
# Run all integration tests
|
||||
pytest tests/integration
|
||||
|
||||
# Run specific test
|
||||
pytest tests/integration/test_workflow.py
|
||||
|
||||
# Run marked integration tests
|
||||
pytest -m integration
|
||||
|
||||
# Run with verbose output
|
||||
pytest tests/integration -v
|
||||
|
||||
# Skip slow tests
|
||||
pytest -m "not slow"
|
||||
```
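These `-m` filters only select tests that actually carry the markers; a minimal sketch of registering and applying them (the file layout is illustrative):

```python
# conftest.py -- register the custom markers so pytest does not warn about them
def pytest_configure(config):
    config.addinivalue_line('markers', 'integration: full CLI workflow tests')
    config.addinivalue_line('markers', 'slow: long-running tests')


# tests/integration/test_workflow.py -- tag every test in the module
import pytest

pytestmark = [pytest.mark.integration, pytest.mark.slow]
```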
|
||||
|
||||
## Resources
|
||||
|
||||
- [Integration Testing Best Practices](https://martinfowler.com/bliki/IntegrationTest.html)
|
||||
- [Testing Strategies](https://testing.googleblog.com/)
|
||||
- [CLI Testing Patterns](https://clig.dev/#testing)
|
||||
skills/cli-testing-patterns/examples/jest-advanced/README.md
# Jest Advanced CLI Testing Example
|
||||
|
||||
Advanced testing patterns for CLI applications including mocking, fixtures, and integration tests.
|
||||
|
||||
## Advanced Patterns
|
||||
|
||||
### 1. Async Command Testing
|
||||
|
||||
```typescript
|
||||
import { spawn } from 'child_process';
|
||||
|
||||
async function runCLIAsync(args: string[]): Promise<CLIResult> {
|
||||
return new Promise((resolve) => {
|
||||
const child = spawn(CLI_PATH, args, { stdio: 'pipe' });
|
||||
|
||||
let stdout = '';
|
||||
let stderr = '';
|
||||
|
||||
child.stdout?.on('data', (data) => {
|
||||
stdout += data.toString();
|
||||
});
|
||||
|
||||
child.stderr?.on('data', (data) => {
|
||||
stderr += data.toString();
|
||||
});
|
||||
|
||||
child.on('close', (code) => {
|
||||
resolve({ stdout, stderr, code: code || 0 });
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
test('should handle long-running command', async () => {
|
||||
const result = await runCLIAsync(['deploy', 'production']);
|
||||
expect(result.code).toBe(0);
|
||||
}, 30000); // 30 second timeout
|
||||
```
|
||||
|
||||
### 2. Environment Variable Mocking
|
||||
|
||||
```typescript
|
||||
describe('environment configuration', () => {
|
||||
const originalEnv = { ...process.env };
|
||||
|
||||
afterEach(() => {
|
||||
process.env = { ...originalEnv };
|
||||
});
|
||||
|
||||
test('should use API key from environment', () => {
|
||||
process.env.API_KEY = 'test_key_123';
|
||||
const { stdout, code } = runCLI('status');
|
||||
expect(code).toBe(0);
|
||||
expect(stdout).toContain('Authenticated');
|
||||
});
|
||||
|
||||
test('should fail without API key', () => {
|
||||
delete process.env.API_KEY;
|
||||
const { stderr, code } = runCLI('status');
|
||||
expect(code).toBe(1);
|
||||
expect(stderr).toContain('API key not found');
|
||||
});
|
||||
});
|
||||
```
|
||||
|
||||
### 3. File System Fixtures
|
||||
|
||||
```typescript
|
||||
import fs from 'fs';
import os from 'os';
import path from 'path';
|
||||
|
||||
describe('config file handling', () => {
|
||||
let tempDir: string;
|
||||
|
||||
beforeEach(() => {
|
||||
tempDir = fs.mkdtempSync(path.join(os.tmpdir(), 'cli-test-'));
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
fs.rmSync(tempDir, { recursive: true, force: true });
|
||||
});
|
||||
|
||||
test('should create config file', () => {
|
||||
const configFile = path.join(tempDir, '.config');
|
||||
const result = runCLI(`init --config ${configFile}`);
|
||||
|
||||
expect(result.code).toBe(0);
|
||||
expect(fs.existsSync(configFile)).toBe(true);
|
||||
|
||||
const config = JSON.parse(fs.readFileSync(configFile, 'utf8'));
|
||||
expect(config).toHaveProperty('api_key');
|
||||
});
|
||||
});
|
||||
```
|
||||
|
||||
### 4. Mocking External APIs
|
||||
|
||||
```typescript
|
||||
import nock from 'nock';
|
||||
|
||||
describe('API interaction', () => {
|
||||
beforeEach(() => {
|
||||
nock.cleanAll();
|
||||
});
|
||||
|
||||
test('should fetch deployment status', () => {
|
||||
nock('https://api.example.com')
|
||||
.get('/deployments/123')
|
||||
.reply(200, { status: 'success', environment: 'production' });
|
||||
|
||||
const { stdout, code } = runCLI('status --deployment 123');
|
||||
expect(code).toBe(0);
|
||||
expect(stdout).toContain('success');
|
||||
expect(stdout).toContain('production');
|
||||
});
|
||||
|
||||
test('should handle API errors', () => {
|
||||
nock('https://api.example.com')
|
||||
.get('/deployments/123')
|
||||
.reply(500, { error: 'Internal Server Error' });
|
||||
|
||||
const { stderr, code } = runCLI('status --deployment 123');
|
||||
expect(code).toBe(1);
|
||||
expect(stderr).toContain('API error');
|
||||
});
|
||||
});
|
||||
```
|
||||
|
||||
### 5. Test Fixtures
|
||||
|
||||
```typescript
|
||||
// test-fixtures.ts
|
||||
export const createTestFixtures = () => {
|
||||
const tempDir = fs.mkdtempSync(path.join(os.tmpdir(), 'cli-test-'));
|
||||
|
||||
// Create sample project structure
|
||||
fs.mkdirSync(path.join(tempDir, 'src'));
|
||||
fs.writeFileSync(
|
||||
path.join(tempDir, 'package.json'),
|
||||
JSON.stringify({ name: 'test-project', version: '1.0.0' })
|
||||
);
|
||||
|
||||
return {
|
||||
tempDir,
|
||||
cleanup: () => fs.rmSync(tempDir, { recursive: true, force: true }),
|
||||
};
|
||||
};
|
||||
|
||||
// Usage in tests
|
||||
test('should build project', () => {
|
||||
const fixtures = createTestFixtures();
|
||||
|
||||
try {
|
||||
const result = runCLI(`build --cwd ${fixtures.tempDir}`);
|
||||
expect(result.code).toBe(0);
|
||||
expect(fs.existsSync(path.join(fixtures.tempDir, 'dist'))).toBe(true);
|
||||
} finally {
|
||||
fixtures.cleanup();
|
||||
}
|
||||
});
|
||||
```
|
||||
|
||||
### 6. Snapshot Testing
|
||||
|
||||
```typescript
|
||||
test('help output matches snapshot', () => {
|
||||
const { stdout } = runCLI('--help');
|
||||
expect(stdout).toMatchSnapshot();
|
||||
});
|
||||
|
||||
test('version format matches snapshot', () => {
|
||||
const { stdout } = runCLI('--version');
|
||||
expect(stdout).toMatchSnapshot();
|
||||
});
|
||||
```
|
||||
|
||||
### 7. Parameterized Tests
|
||||
|
||||
```typescript
|
||||
describe.each([
|
||||
['development', 'dev.example.com'],
|
||||
['staging', 'staging.example.com'],
|
||||
['production', 'api.example.com'],
|
||||
])('deploy to %s', (environment, expectedUrl) => {
|
||||
test(`should deploy to ${environment}`, () => {
|
||||
const { stdout, code } = runCLI(`deploy ${environment}`);
|
||||
expect(code).toBe(0);
|
||||
expect(stdout).toContain(expectedUrl);
|
||||
});
|
||||
});
|
||||
```
|
||||
|
||||
### 8. Interactive Command Testing
|
||||
|
||||
```typescript
|
||||
import { spawn } from 'child_process';
|
||||
|
||||
test('should handle interactive prompts', (done) => {
|
||||
const child = spawn(CLI_PATH, ['init'], { stdio: 'pipe' });
|
||||
|
||||
const inputs = ['my-project', 'John Doe', 'john@example.com'];
|
||||
let inputIndex = 0;
|
||||
|
||||
child.stdout?.on('data', (data) => {
|
||||
const output = data.toString();
|
||||
if (output.includes('?') && inputIndex < inputs.length) {
|
||||
child.stdin?.write(inputs[inputIndex] + '\n');
|
||||
inputIndex++;
|
||||
}
|
||||
});
|
||||
|
||||
child.on('close', (code) => {
|
||||
expect(code).toBe(0);
|
||||
done();
|
||||
});
|
||||
});
|
||||
```
|
||||
|
||||
### 9. Coverage-Driven Testing
|
||||
|
||||
```typescript
|
||||
// Ensure all CLI commands are tested
|
||||
describe('CLI command coverage', () => {
|
||||
const commands = ['init', 'build', 'deploy', 'status', 'config'];
|
||||
|
||||
commands.forEach((command) => {
|
||||
test(`${command} command exists`, () => {
|
||||
const { stdout } = runCLI('--help');
|
||||
expect(stdout).toContain(command);
|
||||
});
|
||||
|
||||
test(`${command} has help text`, () => {
|
||||
const { stdout, code } = runCLI(`${command} --help`);
|
||||
expect(code).toBe(0);
|
||||
expect(stdout).toContain('Usage:');
|
||||
});
|
||||
});
|
||||
});
|
||||
```
|
||||
|
||||
### 10. Performance Testing
|
||||
|
||||
```typescript
|
||||
test('command executes within time limit', () => {
|
||||
const startTime = Date.now();
|
||||
const { code } = runCLI('status');
|
||||
const duration = Date.now() - startTime;
|
||||
|
||||
expect(code).toBe(0);
|
||||
expect(duration).toBeLessThan(2000); // Should complete within 2 seconds
|
||||
});
|
||||
```
|
||||
|
||||
## Best Practices
|
||||
|
||||
1. **Use Test Fixtures**: Create reusable test data and cleanup functions
|
||||
2. **Mock External Dependencies**: Never make real API calls or database connections
|
||||
3. **Test Edge Cases**: Test boundary conditions, empty inputs, special characters
|
||||
4. **Async Handling**: Use proper async/await or promises for async operations
|
||||
5. **Cleanup**: Always cleanup temp files, reset mocks, restore environment
|
||||
6. **Isolation**: Tests should not depend on execution order
|
||||
7. **Clear Error Messages**: Write assertions with helpful failure messages
|
||||
|
||||
## Common Advanced Patterns
|
||||
|
||||
- Concurrent execution testing
|
||||
- File locking and race conditions
|
||||
- Signal handling (SIGTERM, SIGINT)
|
||||
- Large file processing
|
||||
- Streaming output
|
||||
- Progress indicators
|
||||
- Error recovery and retry logic
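For instance, signal handling can be exercised by spawning the CLI and interrupting it. A sketch, assuming the `CLI_PATH` and `spawn` import used above and a `deploy` command that runs long enough to be interrupted; whether the process exits with 130 or is reported as killed by the signal depends on how the CLI traps SIGINT:

```typescript
test('should stop cleanly on SIGINT', (done) => {
  const child = spawn(CLI_PATH, ['deploy', 'staging'], { stdio: 'pipe' });

  // Give the process a moment to start before interrupting it
  setTimeout(() => child.kill('SIGINT'), 500);

  child.on('close', (code, signal) => {
    // 130 = 128 + SIGINT when the CLI traps the signal and exits itself
    expect(code === 130 || signal === 'SIGINT').toBe(true);
    done();
  });
}, 10000);
```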
|
||||
|
||||
## Resources
|
||||
|
||||
- [Jest Advanced Features](https://jestjs.io/docs/advanced)
|
||||
- [Mocking with Jest](https://jestjs.io/docs/mock-functions)
|
||||
- [Snapshot Testing](https://jestjs.io/docs/snapshot-testing)
|
||||
skills/cli-testing-patterns/examples/jest-basic/README.md
# Jest Basic CLI Testing Example
|
||||
|
||||
This example demonstrates basic CLI testing patterns using Jest for Node.js/TypeScript projects.
|
||||
|
||||
## Setup
|
||||
|
||||
```bash
|
||||
npm install --save-dev jest @types/jest ts-jest @types/node
|
||||
```
|
||||
|
||||
## Test Structure
|
||||
|
||||
```typescript
|
||||
import { execSync } from 'child_process';
|
||||
import path from 'path';
|
||||
|
||||
describe('CLI Tool Tests', () => {
|
||||
const CLI_PATH = path.join(__dirname, '../bin/mycli');
|
||||
|
||||
function runCLI(args: string) {
|
||||
try {
|
||||
const stdout = execSync(`${CLI_PATH} ${args}`, {
|
||||
encoding: 'utf8',
|
||||
stdio: 'pipe',
|
||||
});
|
||||
return { stdout, stderr: '', code: 0 };
|
||||
} catch (error: any) {
|
||||
return {
|
||||
stdout: error.stdout || '',
|
||||
stderr: error.stderr || '',
|
||||
code: error.status || 1,
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
test('should display version', () => {
|
||||
const { stdout, code } = runCLI('--version');
|
||||
expect(code).toBe(0);
|
||||
expect(stdout).toContain('1.0.0');
|
||||
});
|
||||
|
||||
test('should display help', () => {
|
||||
const { stdout, code } = runCLI('--help');
|
||||
expect(code).toBe(0);
|
||||
expect(stdout).toContain('Usage:');
|
||||
});
|
||||
|
||||
test('should handle unknown command', () => {
|
||||
const { stderr, code } = runCLI('unknown-command');
|
||||
expect(code).toBe(1);
|
||||
expect(stderr).toContain('unknown command');
|
||||
});
|
||||
});
|
||||
```
|
||||
|
||||
## Running Tests
|
||||
|
||||
```bash
|
||||
# Run all tests
|
||||
npm test
|
||||
|
||||
# Run with coverage
|
||||
npm run test:coverage
|
||||
|
||||
# Run in watch mode
|
||||
npm run test:watch
|
||||
```
|
||||
|
||||
## Key Patterns
|
||||
|
||||
### 1. Command Execution Helper
|
||||
|
||||
Create a reusable `runCLI()` function that:
|
||||
- Executes CLI commands using `execSync`
|
||||
- Captures stdout, stderr, and exit codes
|
||||
- Handles both success and failure cases
|
||||
|
||||
### 2. Exit Code Testing
|
||||
|
||||
Always test exit codes:
|
||||
- `0` for success
|
||||
- Non-zero for errors
|
||||
- Specific codes for different error types
|
||||
|
||||
### 3. Output Validation
|
||||
|
||||
Test output content using Jest matchers:
|
||||
- `.toContain()` for substring matching
|
||||
- `.toMatch()` for regex patterns
|
||||
- `.toBe()` for exact matches
|
||||
|
||||
### 4. Error Handling
|
||||
|
||||
Test error scenarios:
|
||||
- Unknown commands
|
||||
- Invalid options
|
||||
- Missing required arguments
|
||||
- Invalid argument types
|
||||
|
||||
## Example Test Cases
|
||||
|
||||
```typescript
|
||||
describe('deploy command', () => {
|
||||
test('should deploy with valid arguments', () => {
|
||||
const { stdout, code } = runCLI('deploy production --force');
|
||||
expect(code).toBe(0);
|
||||
expect(stdout).toContain('Deploying to production');
|
||||
});
|
||||
|
||||
test('should fail without required arguments', () => {
|
||||
const { stderr, code } = runCLI('deploy');
|
||||
expect(code).toBe(1);
|
||||
expect(stderr).toContain('missing required argument');
|
||||
});
|
||||
|
||||
test('should validate environment names', () => {
|
||||
const { stderr, code } = runCLI('deploy invalid-env');
|
||||
expect(code).toBe(1);
|
||||
expect(stderr).toContain('invalid environment');
|
||||
});
|
||||
});
|
||||
```
|
||||
|
||||
## Best Practices
|
||||
|
||||
1. **Isolate Tests**: Each test should be independent
|
||||
2. **Use Descriptive Names**: Test names should describe what they validate
|
||||
3. **Test Both Success and Failure**: Cover happy path and error cases
|
||||
4. **Mock External Dependencies**: Don't make real API calls or file system changes
|
||||
5. **Use Type Safety**: Leverage TypeScript for better test reliability
|
||||
6. **Keep Tests Fast**: Fast tests encourage frequent running
|
||||
|
||||
## Common Pitfalls
|
||||
|
||||
- ❌ Not testing exit codes
|
||||
- ❌ Only testing success cases
|
||||
- ❌ Hardcoding paths instead of using `path.join()`
|
||||
- ❌ Not handling async operations properly
|
||||
- ❌ Testing implementation details instead of behavior
|
||||
|
||||
## Resources
|
||||
|
||||
- [Jest Documentation](https://jestjs.io/docs/getting-started)
|
||||
- [Testing CLI Applications](https://jestjs.io/docs/cli)
|
||||
- [TypeScript with Jest](https://jestjs.io/docs/getting-started#using-typescript)
|
||||
skills/cli-testing-patterns/examples/pytest-click/README.md
# Pytest Click Testing Example
|
||||
|
||||
Comprehensive examples for testing Click-based CLI applications using pytest and CliRunner.
|
||||
|
||||
## Basic Setup
|
||||
|
||||
```python
|
||||
import pytest
|
||||
from click.testing import CliRunner
|
||||
from mycli.cli import cli
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def runner():
|
||||
return CliRunner()
|
||||
```
|
||||
|
||||
## Basic Command Testing
|
||||
|
||||
```python
|
||||
class TestBasicCommands:
|
||||
"""Test basic CLI commands"""
|
||||
|
||||
def test_version(self, runner):
|
||||
"""Test version command"""
|
||||
result = runner.invoke(cli, ['--version'])
|
||||
assert result.exit_code == 0
|
||||
assert '1.0.0' in result.output
|
||||
|
||||
def test_help(self, runner):
|
||||
"""Test help command"""
|
||||
result = runner.invoke(cli, ['--help'])
|
||||
assert result.exit_code == 0
|
||||
assert 'Usage:' in result.output
|
||||
|
||||
def test_unknown_command(self, runner):
|
||||
"""Test unknown command handling"""
|
||||
result = runner.invoke(cli, ['unknown'])
|
||||
assert result.exit_code != 0
|
||||
assert 'no such command' in result.output.lower()
|
||||
```
|
||||
|
||||
## Testing with Arguments
|
||||
|
||||
```python
|
||||
class TestArgumentParsing:
|
||||
"""Test argument parsing"""
|
||||
|
||||
def test_required_argument(self, runner):
|
||||
"""Test command with required argument"""
|
||||
result = runner.invoke(cli, ['deploy', 'production'])
|
||||
assert result.exit_code == 0
|
||||
assert 'production' in result.output
|
||||
|
||||
def test_missing_required_argument(self, runner):
|
||||
"""Test missing required argument"""
|
||||
result = runner.invoke(cli, ['deploy'])
|
||||
assert result.exit_code != 0
|
||||
assert 'missing argument' in result.output.lower()
|
||||
|
||||
def test_optional_argument(self, runner):
|
||||
"""Test optional argument"""
|
||||
result = runner.invoke(cli, ['build', '--output', 'dist'])
|
||||
assert result.exit_code == 0
|
||||
assert 'dist' in result.output
|
||||
```
|
||||
|
||||
## Testing with Options
|
||||
|
||||
```python
|
||||
class TestOptionParsing:
|
||||
"""Test option parsing"""
|
||||
|
||||
def test_boolean_flag(self, runner):
|
||||
"""Test boolean flag option"""
|
||||
result = runner.invoke(cli, ['deploy', 'staging', '--force'])
|
||||
assert result.exit_code == 0
|
||||
assert 'force' in result.output.lower()
|
||||
|
||||
def test_option_with_value(self, runner):
|
||||
"""Test option with value"""
|
||||
result = runner.invoke(cli, ['config', 'set', '--key', 'api_key', '--value', 'test'])
|
||||
assert result.exit_code == 0
|
||||
|
||||
def test_multiple_options(self, runner):
|
||||
"""Test multiple options"""
|
||||
result = runner.invoke(
|
||||
cli,
|
||||
['deploy', 'production', '--verbose', '--dry-run', '--timeout', '60']
|
||||
)
|
||||
assert result.exit_code == 0
|
||||
```
|
||||
|
||||
## Testing Interactive Prompts
|
||||
|
||||
```python
|
||||
class TestInteractivePrompts:
|
||||
"""Test interactive prompt handling"""
|
||||
|
||||
def test_simple_prompt(self, runner):
|
||||
"""Test simple text prompt"""
|
||||
result = runner.invoke(cli, ['init'], input='my-project\n')
|
||||
assert result.exit_code == 0
|
||||
assert 'my-project' in result.output
|
||||
|
||||
def test_confirmation_prompt(self, runner):
|
||||
"""Test confirmation prompt (yes)"""
|
||||
result = runner.invoke(cli, ['delete', 'resource-id'], input='y\n')
|
||||
assert result.exit_code == 0
|
||||
assert 'deleted' in result.output.lower()
|
||||
|
||||
def test_confirmation_prompt_no(self, runner):
|
||||
"""Test confirmation prompt (no)"""
|
||||
result = runner.invoke(cli, ['delete', 'resource-id'], input='n\n')
|
||||
assert result.exit_code == 1
|
||||
assert 'cancelled' in result.output.lower()
|
||||
|
||||
def test_multiple_prompts(self, runner):
|
||||
"""Test multiple prompts in sequence"""
|
||||
inputs = 'my-project\nJohn Doe\njohn@example.com\n'
|
||||
result = runner.invoke(cli, ['init', '--interactive'], input=inputs)
|
||||
assert result.exit_code == 0
|
||||
assert 'my-project' in result.output
|
||||
assert 'John Doe' in result.output
|
||||
|
||||
def test_choice_prompt(self, runner):
|
||||
"""Test choice prompt"""
|
||||
result = runner.invoke(cli, ['deploy'], input='1\n') # Select option 1
|
||||
assert result.exit_code == 0
|
||||
```
|
||||
|
||||
## Testing with Isolated Filesystem
|
||||
|
||||
```python
|
||||
class TestFileOperations:
|
||||
"""Test file operations with isolated filesystem"""
|
||||
|
||||
def test_create_file(self, runner):
|
||||
"""Test file creation"""
|
||||
with runner.isolated_filesystem():
|
||||
result = runner.invoke(cli, ['init', 'test-project'])
|
||||
assert result.exit_code == 0
|
||||
|
||||
import os
|
||||
assert os.path.exists('test-project')
|
||||
|
||||
def test_read_file(self, runner):
|
||||
"""Test reading from file"""
|
||||
with runner.isolated_filesystem():
|
||||
# Create test file
|
||||
with open('input.txt', 'w') as f:
|
||||
f.write('test data')
|
||||
|
||||
result = runner.invoke(cli, ['process', '--input', 'input.txt'])
|
||||
assert result.exit_code == 0
|
||||
|
||||
def test_write_file(self, runner):
|
||||
"""Test writing to file"""
|
||||
with runner.isolated_filesystem():
|
||||
result = runner.invoke(cli, ['export', '--output', 'output.txt'])
|
||||
assert result.exit_code == 0
|
||||
|
||||
import os
|
||||
assert os.path.exists('output.txt')
|
||||
with open('output.txt', 'r') as f:
|
||||
content = f.read()
|
||||
assert len(content) > 0
|
||||
```
|
||||
|
||||
## Testing Environment Variables
|
||||
|
||||
```python
|
||||
class TestEnvironmentVariables:
|
||||
"""Test environment variable handling"""
|
||||
|
||||
def test_with_env_var(self, runner):
|
||||
"""Test command with environment variable"""
|
||||
result = runner.invoke(
|
||||
cli,
|
||||
['status'],
|
||||
env={'API_KEY': 'test_key_123'}
|
||||
)
|
||||
assert result.exit_code == 0
|
||||
|
||||
def test_without_env_var(self, runner):
|
||||
"""Test command without required environment variable"""
|
||||
# API_KEY is assumed to be required, so without it the command should fail
result = runner.invoke(cli, ['status'], env={'API_KEY': None})
assert result.exit_code != 0
|
||||
|
||||
def test_env_var_override(self, runner, monkeypatch):
|
||||
"""Test environment variable override"""
|
||||
monkeypatch.setenv('API_KEY', 'overridden_key')
|
||||
result = runner.invoke(cli, ['status'])
|
||||
assert result.exit_code == 0
|
||||
```
|
||||
|
||||
## Testing Output Formats
|
||||
|
||||
```python
|
||||
class TestOutputFormats:
|
||||
"""Test different output formats"""
|
||||
|
||||
def test_json_output(self, runner):
|
||||
"""Test JSON output format"""
|
||||
result = runner.invoke(cli, ['status', '--format', 'json'])
|
||||
assert result.exit_code == 0
|
||||
|
||||
import json
|
||||
try:
|
||||
data = json.loads(result.output)
|
||||
assert isinstance(data, dict)
|
||||
except json.JSONDecodeError:
|
||||
pytest.fail("Output is not valid JSON")
|
||||
|
||||
def test_yaml_output(self, runner):
|
||||
"""Test YAML output format"""
|
||||
result = runner.invoke(cli, ['status', '--format', 'yaml'])
|
||||
assert result.exit_code == 0
|
||||
assert ':' in result.output
|
||||
|
||||
def test_table_output(self, runner):
|
||||
"""Test table output format"""
|
||||
result = runner.invoke(cli, ['list'])
|
||||
assert result.exit_code == 0
|
||||
assert '│' in result.output or '|' in result.output
|
||||
```
|
||||
|
||||
## Testing Exit Codes
|
||||
|
||||
```python
|
||||
class TestExitCodes:
|
||||
"""Test exit codes"""
|
||||
|
||||
def test_success_exit_code(self, runner):
|
||||
"""Test success returns 0"""
|
||||
result = runner.invoke(cli, ['status'])
|
||||
assert result.exit_code == 0
|
||||
|
||||
def test_error_exit_code(self, runner):
|
||||
"""Test error returns non-zero"""
|
||||
result = runner.invoke(cli, ['invalid-command'])
|
||||
assert result.exit_code != 0
|
||||
|
||||
def test_validation_error_exit_code(self, runner):
|
||||
"""Test validation error returns 2"""
|
||||
result = runner.invoke(cli, ['deploy', '--invalid-option'])
|
||||
assert result.exit_code == 2 # Click uses 2 for usage errors
|
||||
|
||||
def test_exception_exit_code(self, runner):
|
||||
"""Test uncaught exception returns 1"""
|
||||
result = runner.invoke(cli, ['command-that-throws'])
|
||||
assert result.exit_code == 1
|
||||
```
|
||||
|
||||
## Testing with Fixtures
|
||||
|
||||
```python
|
||||
@pytest.fixture
|
||||
def sample_config(tmp_path):
|
||||
"""Create sample config file"""
|
||||
config_file = tmp_path / '.myclirc'
|
||||
config_file.write_text('''
|
||||
api_key: your_test_key_here
|
||||
environment: development
|
||||
verbose: false
|
||||
''')
|
||||
return config_file
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def mock_api(monkeypatch):
|
||||
"""Mock external API calls"""
|
||||
class MockAPI:
|
||||
def __init__(self):
|
||||
self.calls = []
|
||||
|
||||
def get(self, endpoint):
|
||||
self.calls.append(('GET', endpoint))
|
||||
return {'status': 'success'}
|
||||
|
||||
mock = MockAPI()
|
||||
monkeypatch.setattr('mycli.api.client', mock)
|
||||
return mock
|
||||
|
||||
|
||||
class TestWithFixtures:
|
||||
"""Test using fixtures"""
|
||||
|
||||
def test_with_config_file(self, runner, sample_config):
|
||||
"""Test with config file"""
|
||||
result = runner.invoke(
|
||||
cli,
|
||||
['status', '--config', str(sample_config)]
|
||||
)
|
||||
assert result.exit_code == 0
|
||||
|
||||
def test_with_mock_api(self, runner, mock_api):
|
||||
"""Test with mocked API"""
|
||||
result = runner.invoke(cli, ['deploy', 'production'])
|
||||
assert result.exit_code == 0
|
||||
assert len(mock_api.calls) > 0
|
||||
```
|
||||
|
||||
## Testing Error Handling
|
||||
|
||||
```python
|
||||
class TestErrorHandling:
|
||||
"""Test error handling"""
|
||||
|
||||
def test_network_error(self, runner, monkeypatch):
|
||||
"""Test network error handling"""
|
||||
def mock_request(*args, **kwargs):
|
||||
raise ConnectionError("Network unreachable")
|
||||
|
||||
monkeypatch.setattr('requests.get', mock_request)
|
||||
result = runner.invoke(cli, ['status'])
|
||||
assert result.exit_code != 0
|
||||
assert 'network' in result.output.lower()
|
||||
|
||||
def test_file_not_found(self, runner):
|
||||
"""Test file not found error"""
|
||||
result = runner.invoke(cli, ['process', '--input', 'nonexistent.txt'])
|
||||
assert result.exit_code != 0
|
||||
assert 'not found' in result.output.lower()
|
||||
|
||||
def test_invalid_json(self, runner):
|
||||
"""Test invalid JSON handling"""
|
||||
with runner.isolated_filesystem():
|
||||
with open('config.json', 'w') as f:
|
||||
f.write('invalid json {[}')
|
||||
|
||||
result = runner.invoke(cli, ['config', 'load', 'config.json'])
|
||||
assert result.exit_code != 0
|
||||
assert 'invalid' in result.output.lower()
|
||||
```
|
||||
|
||||
## Best Practices
|
||||
|
||||
1. **Use Fixtures**: Share common setup across tests
|
||||
2. **Isolated Filesystem**: Use `runner.isolated_filesystem()` for file operations
|
||||
3. **Test Exit Codes**: Always check exit codes
|
||||
4. **Clear Test Names**: Use descriptive test method names
|
||||
5. **Test Edge Cases**: Test boundary conditions and error cases
|
||||
6. **Mock External Dependencies**: Don't make real API calls
|
||||
7. **Use Markers**: Mark tests as unit, integration, slow, etc.
|
||||
|
||||
## Resources
|
||||
|
||||
- [Click Testing Documentation](https://click.palletsprojects.com/en/8.1.x/testing/)
|
||||
- [Pytest Documentation](https://docs.pytest.org/)
|
||||
- [CliRunner API](https://click.palletsprojects.com/en/8.1.x/api/#click.testing.CliRunner)
|
||||
skills/cli-testing-patterns/scripts/run-cli-tests.sh
#!/bin/bash
|
||||
#
|
||||
# Run CLI Tests
|
||||
#
|
||||
# Detects the project type and runs appropriate tests with coverage
|
||||
|
||||
set -e
|
||||
|
||||
echo "🧪 Running CLI tests..."
|
||||
|
||||
# Detect project type
|
||||
if [ -f "package.json" ]; then
|
||||
PROJECT_TYPE="node"
|
||||
elif [ -f "setup.py" ] || [ -f "pyproject.toml" ]; then
|
||||
PROJECT_TYPE="python"
|
||||
else
|
||||
echo "❌ Error: Could not detect project type"
|
||||
echo " Expected package.json (Node.js) or setup.py/pyproject.toml (Python)"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Run tests based on project type
|
||||
if [ "$PROJECT_TYPE" == "node" ]; then
|
||||
echo "📦 Node.js project detected"
|
||||
|
||||
# Check if npm test is configured
|
||||
if ! grep -q '"test"' package.json 2>/dev/null; then
|
||||
echo "❌ Error: No test script found in package.json"
|
||||
echo " Run setup-jest-testing.sh first"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Install dependencies if needed
|
||||
if [ ! -d "node_modules" ]; then
|
||||
echo "📦 Installing dependencies..."
|
||||
npm install
|
||||
fi
|
||||
|
||||
# Run tests with coverage
|
||||
echo "🧪 Running Jest tests..."
|
||||
npm run test:coverage
|
||||
|
||||
# Display coverage summary
|
||||
if [ -f "coverage/lcov-report/index.html" ]; then
|
||||
echo ""
|
||||
echo "✅ Tests complete!"
|
||||
echo "📊 Coverage report: coverage/lcov-report/index.html"
|
||||
fi
|
||||
|
||||
elif [ "$PROJECT_TYPE" == "python" ]; then
|
||||
echo "🐍 Python project detected"
|
||||
|
||||
# Check if pytest is installed
|
||||
if ! command -v pytest &> /dev/null; then
|
||||
echo "❌ Error: pytest is not installed"
|
||||
echo " Run setup-pytest-testing.sh first"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Activate virtual environment if one exists
|
||||
if [ -d "venv" ]; then
|
||||
echo "🔧 Activating virtual environment..."
|
||||
source venv/bin/activate
|
||||
elif [ -d ".venv" ]; then
|
||||
echo "🔧 Activating virtual environment..."
|
||||
source .venv/bin/activate
|
||||
fi
|
||||
|
||||
# Run tests with coverage
|
||||
echo "🧪 Running pytest tests..."
|
||||
pytest --cov --cov-report=term-missing --cov-report=html
|
||||
|
||||
# Display coverage summary
|
||||
if [ -d "htmlcov" ]; then
|
||||
echo ""
|
||||
echo "✅ Tests complete!"
|
||||
echo "📊 Coverage report: htmlcov/index.html"
|
||||
fi
|
||||
fi
|
||||
|
||||
echo ""
|
||||
echo "🎉 All tests passed!"
|
||||
235
skills/cli-testing-patterns/scripts/setup-jest-testing.sh
Executable file
@@ -0,0 +1,235 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# Setup Jest for CLI Testing (Node.js/TypeScript)
|
||||
#
|
||||
# This script installs and configures Jest for testing CLI applications
|
||||
# Includes TypeScript support, coverage reporting, and CLI testing utilities
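#
# Usage (assumed invocation; run from the root of the Node.js CLI project, where package.json lives):
#   ./skills/cli-testing-patterns/scripts/setup-jest-testing.sh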
|
||||
|
||||
set -e
|
||||
|
||||
echo "🔧 Setting up Jest for CLI testing..."
|
||||
|
||||
# Check if npm is available
|
||||
if ! command -v npm &> /dev/null; then
|
||||
echo "❌ Error: npm is not installed"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Install Jest and related dependencies
|
||||
echo "📦 Installing Jest and dependencies..."
|
||||
npm install --save-dev \
|
||||
jest \
|
||||
@types/jest \
|
||||
ts-jest \
|
||||
@types/node
|
||||
|
||||
# Create Jest configuration
|
||||
echo "⚙️ Creating Jest configuration..."
|
||||
cat > jest.config.js << 'EOF'
|
||||
module.exports = {
|
||||
preset: 'ts-jest',
|
||||
testEnvironment: 'node',
|
||||
roots: ['<rootDir>/tests'],
|
||||
testMatch: [
|
||||
'**/__tests__/**/*.ts',
|
||||
'**/?(*.)+(spec|test).ts'
|
||||
],
|
||||
collectCoverageFrom: [
|
||||
'src/**/*.{ts,js}',
|
||||
'!src/**/*.d.ts',
|
||||
'!src/**/*.test.ts',
|
||||
'!src/**/__tests__/**'
|
||||
],
|
||||
coverageDirectory: 'coverage',
|
||||
coverageReporters: ['text', 'lcov', 'html', 'json-summary'], // json-summary is read by validate-test-coverage.sh
|
||||
coverageThreshold: {
|
||||
global: {
|
||||
branches: 70,
|
||||
functions: 70,
|
||||
lines: 70,
|
||||
statements: 70
|
||||
}
|
||||
},
|
||||
verbose: true,
|
||||
testTimeout: 10000
|
||||
};
|
||||
EOF
|
||||
|
||||
# Create tests directory structure
|
||||
echo "📁 Creating test directory structure..."
|
||||
mkdir -p tests/{unit,integration,helpers}
|
||||
|
||||
# Create test helper file
|
||||
echo "📝 Creating test helpers..."
|
||||
cat > tests/helpers/cli-helpers.ts << 'EOF'
|
||||
import { execSync } from 'child_process';
|
||||
import path from 'path';
|
||||
|
||||
export interface CLIResult {
|
||||
stdout: string;
|
||||
stderr: string;
|
||||
code: number;
|
||||
}
|
||||
|
||||
export const CLI_PATH = path.join(__dirname, '../../bin/cli');
|
||||
|
||||
export function runCLI(args: string): CLIResult {
|
||||
try {
|
||||
const stdout = execSync(`${CLI_PATH} ${args}`, {
|
||||
encoding: 'utf8',
|
||||
stdio: 'pipe',
|
||||
});
|
||||
return { stdout, stderr: '', code: 0 };
|
||||
} catch (error: any) {
|
||||
return {
|
||||
stdout: error.stdout || '',
|
||||
stderr: error.stderr || '',
|
||||
code: error.status || 1,
|
||||
};
|
||||
}
|
||||
}
|
||||
EOF
|
||||
|
||||
# Create sample test file
|
||||
echo "📝 Creating sample test file..."
|
||||
cat > tests/unit/cli.test.ts << 'EOF'
|
||||
import { runCLI } from '../helpers/cli-helpers';
|
||||
|
||||
describe('CLI Tests', () => {
|
||||
test('should display version', () => {
|
||||
const { stdout, code } = runCLI('--version');
|
||||
expect(code).toBe(0);
|
||||
expect(stdout).toMatch(/\d+\.\d+\.\d+/);
|
||||
});
|
||||
|
||||
test('should display help', () => {
|
||||
const { stdout, code } = runCLI('--help');
|
||||
expect(code).toBe(0);
|
||||
expect(stdout).toContain('Usage:');
|
||||
});
|
||||
});
|
||||
EOF
|
||||
|
||||
# Create TypeScript configuration for tests
|
||||
echo "⚙️ Creating TypeScript configuration..."
|
||||
if [ ! -f tsconfig.json ]; then
|
||||
cat > tsconfig.json << 'EOF'
|
||||
{
|
||||
"compilerOptions": {
|
||||
"target": "ES2020",
|
||||
"module": "commonjs",
|
||||
"lib": ["ES2020"],
|
||||
"outDir": "./dist",
|
||||
"rootDir": "./src",
|
||||
"strict": true,
|
||||
"esModuleInterop": true,
|
||||
"skipLibCheck": true,
|
||||
"forceConsistentCasingInFileNames": true,
|
||||
"resolveJsonModule": true,
|
||||
"declaration": true,
|
||||
"declarationMap": true,
|
||||
"sourceMap": true
|
||||
},
|
||||
"include": ["src/**/*"],
|
||||
"exclude": ["node_modules", "dist", "tests"]
|
||||
}
|
||||
EOF
|
||||
fi
|
||||
|
||||
# Update package.json scripts
|
||||
echo "⚙️ Updating package.json scripts..."
|
||||
if [ -f package.json ]; then
|
||||
# Check if jq is available for JSON manipulation
|
||||
if command -v jq &> /dev/null; then
|
||||
# Add test scripts using jq
|
||||
tmp=$(mktemp)
|
||||
jq '.scripts.test = "jest" |
|
||||
.scripts["test:watch"] = "jest --watch" |
|
||||
.scripts["test:coverage"] = "jest --coverage" |
|
||||
.scripts["test:ci"] = "jest --ci --coverage --maxWorkers=2"' \
|
||||
package.json > "$tmp"
|
||||
mv "$tmp" package.json
|
||||
else
|
||||
echo "⚠️ jq not found. Please manually add test scripts to package.json:"
|
||||
echo ' "test": "jest"'
|
||||
echo ' "test:watch": "jest --watch"'
|
||||
echo ' "test:coverage": "jest --coverage"'
|
||||
echo ' "test:ci": "jest --ci --coverage --maxWorkers=2"'
|
||||
fi
|
||||
fi
|
||||
|
||||
# Create .gitignore entries
|
||||
echo "📝 Updating .gitignore..."
|
||||
if [ -f .gitignore ]; then
|
||||
grep -qxF 'coverage/' .gitignore || echo 'coverage/' >> .gitignore
|
||||
grep -qxF '*.log' .gitignore || echo '*.log' >> .gitignore
|
||||
else
|
||||
cat > .gitignore << 'EOF'
|
||||
node_modules/
|
||||
dist/
|
||||
coverage/
|
||||
*.log
|
||||
.env
|
||||
.env.local
|
||||
EOF
|
||||
fi
|
||||
|
||||
# Create README for tests
|
||||
echo "📝 Creating test documentation..."
|
||||
cat > tests/README.md << 'EOF'
|
||||
# CLI Tests
|
||||
|
||||
## Running Tests
|
||||
|
||||
```bash
|
||||
# Run all tests
|
||||
npm test
|
||||
|
||||
# Run tests in watch mode
|
||||
npm run test:watch
|
||||
|
||||
# Run tests with coverage
|
||||
npm run test:coverage
|
||||
|
||||
# Run tests in CI mode
|
||||
npm run test:ci
|
||||
```
|
||||
|
||||
## Test Structure
|
||||
|
||||
- `unit/` - Unit tests for individual functions
|
||||
- `integration/` - Integration tests for complete workflows
|
||||
- `helpers/` - Test helper functions and utilities
|
||||
|
||||
## Writing Tests
|
||||
|
||||
Use the `runCLI` helper to execute CLI commands:
|
||||
|
||||
```typescript
|
||||
import { runCLI } from '../helpers/cli-helpers';
|
||||
|
||||
test('should execute command', () => {
|
||||
const { stdout, stderr, code } = runCLI('command --flag');
|
||||
expect(code).toBe(0);
|
||||
expect(stdout).toContain('expected output');
|
||||
});
|
||||
```
|
||||
|
||||
## Coverage
|
||||
|
||||
Coverage reports are generated in the `coverage/` directory.
|
||||
Target: 70% coverage for branches, functions, lines, and statements.
|
||||
EOF
|
||||
|
||||
echo "✅ Jest setup complete!"
|
||||
echo ""
|
||||
echo "Next steps:"
|
||||
echo " 1. Run 'npm test' to execute tests"
|
||||
echo " 2. Add more tests in tests/unit/ and tests/integration/"
|
||||
echo " 3. Run 'npm run test:coverage' to see coverage report"
|
||||
echo ""
|
||||
echo "📚 Test files created:"
|
||||
echo " - jest.config.js"
|
||||
echo " - tests/helpers/cli-helpers.ts"
|
||||
echo " - tests/unit/cli.test.ts"
|
||||
echo " - tests/README.md"
|
||||
448
skills/cli-testing-patterns/scripts/setup-pytest-testing.sh
Executable file
@@ -0,0 +1,448 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# Setup pytest for CLI Testing (Python)
|
||||
#
|
||||
# This script installs and configures pytest for testing Click-based CLI applications
|
||||
# Includes coverage reporting, fixtures, and CLI testing utilities
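#
# Usage (assumed invocation; run from the root of the Python CLI project):
#   ./skills/cli-testing-patterns/scripts/setup-pytest-testing.sh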
|
||||
|
||||
set -e
|
||||
|
||||
echo "🔧 Setting up pytest for CLI testing..."
|
||||
|
||||
# Check if Python is available
|
||||
if ! command -v python3 &> /dev/null; then
|
||||
echo "❌ Error: python3 is not installed"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Check if pip is available
|
||||
if ! command -v pip3 &> /dev/null; then
|
||||
echo "❌ Error: pip3 is not installed"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Install pytest and related dependencies
|
||||
echo "📦 Installing pytest and dependencies..."
|
||||
pip3 install --upgrade \
|
||||
pytest \
|
||||
pytest-cov \
|
||||
pytest-mock \
|
||||
click
|
||||
|
||||
# Create pytest configuration
|
||||
echo "⚙️ Creating pytest configuration..."
|
||||
cat > pytest.ini << 'EOF'
|
||||
[pytest]
|
||||
testpaths = tests
|
||||
python_files = test_*.py
|
||||
python_classes = Test*
|
||||
python_functions = test_*
|
||||
addopts =
|
||||
-v
|
||||
--strict-markers
|
||||
--tb=short
|
||||
--cov=src
|
||||
--cov-report=term-missing
|
||||
--cov-report=html
|
||||
--cov-report=xml
|
||||
markers =
|
||||
unit: Unit tests
|
||||
integration: Integration tests
|
||||
slow: Slow running tests
|
||||
cli: CLI command tests
|
||||
filterwarnings =
|
||||
ignore::DeprecationWarning
|
||||
EOF
|
||||
|
||||
# Create tests directory structure
|
||||
echo "📁 Creating test directory structure..."
|
||||
mkdir -p tests/{unit,integration,fixtures}
|
||||
|
||||
# Create conftest.py with common fixtures
|
||||
echo "📝 Creating pytest fixtures..."
|
||||
cat > tests/conftest.py << 'EOF'
|
||||
"""
|
||||
Pytest configuration and fixtures for CLI testing
|
||||
"""
|
||||
|
||||
import pytest
|
||||
import tempfile
|
||||
import shutil
|
||||
from pathlib import Path
|
||||
from click.testing import CliRunner
|
||||
from src.cli import cli # Adjust import based on your CLI module
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def runner():
|
||||
"""Create a CliRunner instance for testing"""
|
||||
return CliRunner()
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def isolated_runner():
|
||||
"""Create a CliRunner with isolated filesystem"""
|
||||
runner = CliRunner()
|
||||
with runner.isolated_filesystem():
|
||||
yield runner
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def temp_workspace(tmp_path):
|
||||
"""Create a temporary workspace directory"""
|
||||
workspace = tmp_path / 'workspace'
|
||||
workspace.mkdir()
|
||||
yield workspace
|
||||
# Cleanup handled by tmp_path fixture
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def mock_config(temp_workspace):
|
||||
"""Create a mock configuration file"""
|
||||
config_file = temp_workspace / '.clirc'
|
||||
config_content = """
|
||||
api_key: your_test_key_here
|
||||
environment: development
|
||||
verbose: false
|
||||
"""
|
||||
config_file.write_text(config_content)
|
||||
return config_file
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def cli_harness(runner):
|
||||
"""Create CLI test harness with helper methods"""
|
||||
class CLIHarness:
|
||||
def __init__(self, runner):
|
||||
self.runner = runner
|
||||
|
||||
def run(self, args, input_data=None):
|
||||
"""Run CLI command and return result"""
|
||||
return self.runner.invoke(cli, args, input=input_data)
|
||||
|
||||
def assert_success(self, args, expected_in_output=None):
|
||||
"""Assert command succeeds"""
|
||||
result = self.run(args)
|
||||
assert result.exit_code == 0, f"Command failed: {result.output}"
|
||||
if expected_in_output:
|
||||
assert expected_in_output in result.output
|
||||
return result
|
||||
|
||||
def assert_failure(self, args, expected_in_output=None):
|
||||
"""Assert command fails"""
|
||||
result = self.run(args)
|
||||
assert result.exit_code != 0, f"Command should have failed: {result.output}"
|
||||
if expected_in_output:
|
||||
assert expected_in_output in result.output
|
||||
return result
|
||||
|
||||
return CLIHarness(runner)
|
||||
EOF
|
||||
|
||||
# Create __init__.py files
|
||||
touch tests/__init__.py
|
||||
touch tests/unit/__init__.py
|
||||
touch tests/integration/__init__.py
|
||||
touch tests/fixtures/__init__.py
|
||||
|
||||
# Create sample test file
|
||||
echo "📝 Creating sample test file..."
|
||||
cat > tests/unit/test_cli.py << 'EOF'
|
||||
"""
|
||||
Unit tests for CLI commands
|
||||
"""
|
||||
|
||||
import pytest
|
||||
from click.testing import CliRunner
|
||||
from src.cli import cli # Adjust import based on your CLI module
|
||||
|
||||
|
||||
class TestVersionCommand:
|
||||
"""Test version command"""
|
||||
|
||||
def test_version_flag(self, runner):
|
||||
"""Should display version with --version"""
|
||||
result = runner.invoke(cli, ['--version'])
|
||||
assert result.exit_code == 0
|
||||
# Adjust assertion based on your version format
|
||||
|
||||
def test_version_output_format(self, runner):
|
||||
"""Should display version in correct format"""
|
||||
result = runner.invoke(cli, ['--version'])
|
||||
assert result.output.count('.') >= 2 # X.Y.Z format
|
||||
|
||||
|
||||
class TestHelpCommand:
|
||||
"""Test help command"""
|
||||
|
||||
def test_help_flag(self, runner):
|
||||
"""Should display help with --help"""
|
||||
result = runner.invoke(cli, ['--help'])
|
||||
assert result.exit_code == 0
|
||||
assert 'Usage:' in result.output
|
||||
|
||||
def test_help_shows_commands(self, runner):
|
||||
"""Should list available commands"""
|
||||
result = runner.invoke(cli, ['--help'])
|
||||
assert 'Commands:' in result.output
|
||||
|
||||
|
||||
class TestErrorHandling:
|
||||
"""Test error handling"""
|
||||
|
||||
def test_unknown_command(self, runner):
|
||||
"""Should handle unknown commands gracefully"""
|
||||
result = runner.invoke(cli, ['unknown-command'])
|
||||
assert result.exit_code != 0
|
||||
assert 'no such command' in result.output.lower()
|
||||
|
||||
def test_invalid_option(self, runner):
|
||||
"""Should handle invalid options"""
|
||||
result = runner.invoke(cli, ['--invalid-option'])
|
||||
assert result.exit_code != 0
|
||||
EOF
|
||||
|
||||
# Create sample integration test
|
||||
echo "📝 Creating sample integration test..."
|
||||
cat > tests/integration/test_workflow.py << 'EOF'
|
||||
"""
|
||||
Integration tests for CLI workflows
|
||||
"""
|
||||
|
||||
import pytest
|
||||
from click.testing import CliRunner
|
||||
from src.cli import cli # Adjust import based on your CLI module
|
||||
|
||||
|
||||
@pytest.mark.integration
|
||||
class TestCompleteWorkflow:
|
||||
"""Test complete CLI workflows"""
|
||||
|
||||
def test_init_and_config_workflow(self, isolated_runner):
|
||||
"""Should complete init -> config workflow"""
|
||||
runner = isolated_runner
|
||||
|
||||
# Initialize project
|
||||
result = runner.invoke(cli, ['init', 'test-project'])
|
||||
assert result.exit_code == 0
|
||||
|
||||
# Configure project
|
||||
result = runner.invoke(cli, ['config', 'set', 'key', 'value'])
|
||||
assert result.exit_code == 0
|
||||
|
||||
# Verify configuration
|
||||
result = runner.invoke(cli, ['config', 'get', 'key'])
|
||||
assert result.exit_code == 0
|
||||
assert 'value' in result.output
|
||||
EOF
|
||||
|
||||
# Create requirements file for testing
|
||||
echo "📝 Creating requirements-test.txt..."
|
||||
cat > requirements-test.txt << 'EOF'
|
||||
pytest>=7.0.0
|
||||
pytest-cov>=4.0.0
|
||||
pytest-mock>=3.10.0
|
||||
click>=8.0.0
|
||||
EOF
|
||||
|
||||
# Create .coveragerc for coverage configuration
|
||||
echo "⚙️ Creating coverage configuration..."
|
||||
cat > .coveragerc << 'EOF'
|
||||
[run]
|
||||
source = src
|
||||
omit =
|
||||
tests/*
|
||||
*/venv/*
|
||||
*/virtualenv/*
|
||||
*/__pycache__/*
|
||||
|
||||
[report]
|
||||
exclude_lines =
|
||||
pragma: no cover
|
||||
def __repr__
|
||||
raise AssertionError
|
||||
raise NotImplementedError
|
||||
if __name__ == .__main__.:
|
||||
if TYPE_CHECKING:
|
||||
@abstractmethod
|
||||
|
||||
precision = 2
|
||||
show_missing = True
|
||||
|
||||
[html]
|
||||
directory = htmlcov
|
||||
EOF
|
||||
|
||||
# Update .gitignore
|
||||
echo "📝 Updating .gitignore..."
|
||||
if [ -f .gitignore ]; then
|
||||
grep -qxF '__pycache__/' .gitignore || echo '__pycache__/' >> .gitignore
|
||||
grep -qxF '*.pyc' .gitignore || echo '*.pyc' >> .gitignore
|
||||
grep -qxF '.pytest_cache/' .gitignore || echo '.pytest_cache/' >> .gitignore
|
||||
grep -qxF 'htmlcov/' .gitignore || echo 'htmlcov/' >> .gitignore
|
||||
grep -qxF '.coverage' .gitignore || echo '.coverage' >> .gitignore
|
||||
grep -qxF 'coverage.xml' .gitignore || echo 'coverage.xml' >> .gitignore
|
||||
else
|
||||
cat > .gitignore << 'EOF'
|
||||
__pycache__/
|
||||
*.pyc
|
||||
*.pyo
|
||||
*.pyd
|
||||
.pytest_cache/
|
||||
htmlcov/
|
||||
.coverage
|
||||
coverage.xml
|
||||
*.log
|
||||
.env
|
||||
.env.local
|
||||
EOF
|
||||
fi
|
||||
|
||||
# Create Makefile for convenient test commands
|
||||
echo "📝 Creating Makefile..."
|
||||
cat > Makefile << 'EOF'
|
||||
.PHONY: test test-unit test-integration test-cov test-watch clean
|
||||
|
||||
test:
|
||||
pytest
|
||||
|
||||
test-unit:
|
||||
pytest tests/unit -v
|
||||
|
||||
test-integration:
|
||||
pytest tests/integration -v
|
||||
|
||||
test-cov:
|
||||
pytest --cov --cov-report=html --cov-report=term
|
||||
|
||||
test-watch:
	ptw  # requires pytest-watch (pip install pytest-watch); plain pytest has no --watch flag
|
||||
|
||||
clean:
|
||||
rm -rf .pytest_cache htmlcov .coverage coverage.xml
|
||||
find . -type d -name __pycache__ -exec rm -rf {} +
|
||||
find . -type f -name "*.pyc" -delete
|
||||
EOF
|
||||
|
||||
# Create README for tests
|
||||
echo "📝 Creating test documentation..."
|
||||
cat > tests/README.md << 'EOF'
|
||||
# CLI Tests
|
||||
|
||||
## Running Tests
|
||||
|
||||
```bash
|
||||
# Run all tests
|
||||
pytest
|
||||
|
||||
# Run unit tests only
|
||||
pytest tests/unit
|
||||
|
||||
# Run integration tests only
|
||||
pytest tests/integration
|
||||
|
||||
# Run with coverage
|
||||
pytest --cov --cov-report=html
|
||||
|
||||
# Run specific test file
|
||||
pytest tests/unit/test_cli.py
|
||||
|
||||
# Run specific test function
|
||||
pytest tests/unit/test_cli.py::test_version_flag
|
||||
|
||||
# Run with verbose output
|
||||
pytest -v
|
||||
|
||||
# Run and show print statements
|
||||
pytest -s
|
||||
```
|
||||
|
||||
## Using Makefile
|
||||
|
||||
```bash
|
||||
# Run all tests
|
||||
make test
|
||||
|
||||
# Run unit tests
|
||||
make test-unit
|
||||
|
||||
# Run integration tests
|
||||
make test-integration
|
||||
|
||||
# Run with coverage report
|
||||
make test-cov
|
||||
|
||||
# Clean test artifacts
|
||||
make clean
|
||||
```
|
||||
|
||||
## Test Structure
|
||||
|
||||
- `unit/` - Unit tests for individual functions and commands
|
||||
- `integration/` - Integration tests for complete workflows
|
||||
- `fixtures/` - Shared test fixtures and utilities
|
||||
- `conftest.py` - Pytest configuration and common fixtures
|
||||
|
||||
## Writing Tests
|
||||
|
||||
Use the fixtures from `conftest.py`:
|
||||
|
||||
```python
|
||||
def test_example(runner):
|
||||
"""Test using CliRunner fixture"""
|
||||
result = runner.invoke(cli, ['command', '--flag'])
|
||||
assert result.exit_code == 0
|
||||
assert 'expected' in result.output
|
||||
|
||||
def test_with_harness(cli_harness):
|
||||
"""Test using CLI harness"""
|
||||
result = cli_harness.assert_success(['command'], 'expected output')
|
||||
```
|
||||
|
||||
## Test Markers
|
||||
|
||||
Use markers to categorize tests:
|
||||
|
||||
```python
|
||||
@pytest.mark.unit
|
||||
def test_unit_example():
|
||||
pass
|
||||
|
||||
@pytest.mark.integration
|
||||
def test_integration_example():
|
||||
pass
|
||||
|
||||
@pytest.mark.slow
|
||||
def test_slow_operation():
|
||||
pass
|
||||
```
|
||||
|
||||
Run specific markers:
|
||||
```bash
|
||||
pytest -m unit
|
||||
pytest -m "not slow"
|
||||
```
|
||||
|
||||
## Coverage
|
||||
|
||||
Coverage reports are generated in `htmlcov/` directory.
|
||||
Open `htmlcov/index.html` to view detailed coverage report.
|
||||
|
||||
Target: 80%+ coverage for all modules.
|
||||
EOF
|
||||
|
||||
echo "✅ pytest setup complete!"
|
||||
echo ""
|
||||
echo "Next steps:"
|
||||
echo " 1. Run 'pytest' to execute tests"
|
||||
echo " 2. Run 'make test-cov' to see coverage report"
|
||||
echo " 3. Add more tests in tests/unit/ and tests/integration/"
|
||||
echo ""
|
||||
echo "📚 Test files created:"
|
||||
echo " - pytest.ini"
|
||||
echo " - .coveragerc"
|
||||
echo " - tests/conftest.py"
|
||||
echo " - tests/unit/test_cli.py"
|
||||
echo " - tests/integration/test_workflow.py"
|
||||
echo " - tests/README.md"
|
||||
echo " - Makefile"
|
||||
127
skills/cli-testing-patterns/scripts/validate-test-coverage.sh
Executable file
@@ -0,0 +1,127 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# Validate Test Coverage
|
||||
#
|
||||
# Checks that test coverage meets minimum thresholds
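#
# Usage (assumed invocation; MIN_COVERAGE is optional and defaults to 70):
#   MIN_COVERAGE=80 ./skills/cli-testing-patterns/scripts/validate-test-coverage.sh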
|
||||
|
||||
set -e
|
||||
|
||||
# Default thresholds
|
||||
MIN_COVERAGE=${MIN_COVERAGE:-70}
|
||||
|
||||
echo "📊 Validating test coverage..."
|
||||
|
||||
# Detect project type
|
||||
if [ -f "package.json" ]; then
|
||||
PROJECT_TYPE="node"
|
||||
elif [ -f "setup.py" ] || [ -f "pyproject.toml" ]; then
|
||||
PROJECT_TYPE="python"
|
||||
else
|
||||
echo "❌ Error: Could not detect project type"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Check coverage for Node.js projects
|
||||
if [ "$PROJECT_TYPE" == "node" ]; then
|
||||
echo "📦 Node.js project detected"
|
||||
|
||||
# Check if coverage data exists
|
||||
if [ ! -d "coverage" ]; then
|
||||
echo "❌ Error: No coverage data found"
|
||||
echo " Run 'npm run test:coverage' first"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Check if coverage summary exists
|
||||
if [ ! -f "coverage/coverage-summary.json" ]; then
|
||||
echo "❌ Error: coverage-summary.json not found"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Extract coverage percentages using jq if available
|
||||
if command -v jq &> /dev/null; then
|
||||
LINES=$(jq '.total.lines.pct' coverage/coverage-summary.json)
|
||||
STATEMENTS=$(jq '.total.statements.pct' coverage/coverage-summary.json)
|
||||
FUNCTIONS=$(jq '.total.functions.pct' coverage/coverage-summary.json)
|
||||
BRANCHES=$(jq '.total.branches.pct' coverage/coverage-summary.json)
|
||||
|
||||
echo ""
|
||||
echo "Coverage Summary:"
|
||||
echo " Lines: ${LINES}%"
|
||||
echo " Statements: ${STATEMENTS}%"
|
||||
echo " Functions: ${FUNCTIONS}%"
|
||||
echo " Branches: ${BRANCHES}%"
|
||||
echo ""
|
||||
|
||||
# Check thresholds
|
||||
FAILED=0
|
||||
if (( $(echo "$LINES < $MIN_COVERAGE" | bc -l) )); then
|
||||
echo "❌ Lines coverage (${LINES}%) below threshold (${MIN_COVERAGE}%)"
|
||||
FAILED=1
|
||||
fi
|
||||
if (( $(echo "$STATEMENTS < $MIN_COVERAGE" | bc -l) )); then
|
||||
echo "❌ Statements coverage (${STATEMENTS}%) below threshold (${MIN_COVERAGE}%)"
|
||||
FAILED=1
|
||||
fi
|
||||
if (( $(echo "$FUNCTIONS < $MIN_COVERAGE" | bc -l) )); then
|
||||
echo "❌ Functions coverage (${FUNCTIONS}%) below threshold (${MIN_COVERAGE}%)"
|
||||
FAILED=1
|
||||
fi
|
||||
if (( $(echo "$BRANCHES < $MIN_COVERAGE" | bc -l) )); then
|
||||
echo "❌ Branches coverage (${BRANCHES}%) below threshold (${MIN_COVERAGE}%)"
|
||||
FAILED=1
|
||||
fi
|
||||
|
||||
if [ $FAILED -eq 1 ]; then
|
||||
echo ""
|
||||
echo "❌ Coverage validation failed"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "✅ Coverage thresholds met!"
|
||||
else
|
||||
echo "⚠️ jq not installed, skipping detailed validation"
|
||||
echo " Install jq for detailed coverage validation"
|
||||
fi
|
||||
|
||||
# Check coverage for Python projects
|
||||
elif [ "$PROJECT_TYPE" == "python" ]; then
|
||||
echo "🐍 Python project detected"
|
||||
|
||||
# Check if coverage data exists
|
||||
if [ ! -f ".coverage" ]; then
|
||||
echo "❌ Error: No coverage data found"
|
||||
echo " Run 'pytest --cov' first"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Generate coverage report
|
||||
if command -v coverage &> /dev/null; then
|
||||
echo ""
|
||||
coverage report
|
||||
|
||||
# Get total coverage percentage
|
||||
TOTAL_COVERAGE=$(coverage report | tail -1 | awk '{print $NF}' | sed 's/%//')
|
||||
|
||||
echo ""
|
||||
echo "Total Coverage: ${TOTAL_COVERAGE}%"
|
||||
echo "Minimum Required: ${MIN_COVERAGE}%"
|
||||
|
||||
# Compare coverage
|
||||
if (( $(echo "$TOTAL_COVERAGE < $MIN_COVERAGE" | bc -l) )); then
|
||||
echo ""
|
||||
echo "❌ Coverage (${TOTAL_COVERAGE}%) below threshold (${MIN_COVERAGE}%)"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo ""
|
||||
echo "✅ Coverage thresholds met!"
|
||||
else
|
||||
echo "❌ Error: coverage tool not installed"
|
||||
echo " Install with: pip install coverage"
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
echo ""
|
||||
echo "🎉 Coverage validation passed!"
|
||||
175
skills/cli-testing-patterns/templates/jest-cli-test.ts
Normal file
@@ -0,0 +1,175 @@
|
||||
/**
|
||||
* Jest CLI Test Template
|
||||
*
|
||||
* Complete test suite for CLI tools using Jest and child_process.execSync
|
||||
* Tests command execution, exit codes, stdout/stderr output
|
||||
*/
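// Usage note (assumptions): the binary at ../bin/mycli and the expected strings
// ("Deploying to production", version "1.0.0", etc.) are placeholders for this
// template; adapt the path and assertions to your own CLI.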
|
||||
|
||||
import { execSync } from 'child_process';
|
||||
import fs from 'fs';
|
||||
import path from 'path';
|
||||
|
||||
describe('CLI Tool Tests', () => {
|
||||
const CLI_PATH = path.join(__dirname, '../bin/mycli');
|
||||
|
||||
/**
|
||||
* Helper function to execute CLI commands and capture output
|
||||
* @param args - Command line arguments as string
|
||||
* @returns Object with stdout, stderr, and exit code
|
||||
*/
|
||||
function runCLI(args: string): {
|
||||
stdout: string;
|
||||
stderr: string;
|
||||
code: number;
|
||||
} {
|
||||
try {
|
||||
const stdout = execSync(`${CLI_PATH} ${args}`, {
|
||||
encoding: 'utf8',
|
||||
stdio: 'pipe',
|
||||
});
|
||||
return { stdout, stderr: '', code: 0 };
|
||||
} catch (error: any) {
|
||||
return {
|
||||
stdout: error.stdout || '',
|
||||
stderr: error.stderr || '',
|
||||
code: error.status || 1,
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
// Version Testing
|
||||
describe('version command', () => {
|
||||
test('should display version with --version', () => {
|
||||
const { stdout, code } = runCLI('--version');
|
||||
expect(code).toBe(0);
|
||||
expect(stdout).toContain('1.0.0');
|
||||
});
|
||||
|
||||
test('should display version with -v', () => {
|
||||
const { stdout, code } = runCLI('-v');
|
||||
expect(code).toBe(0);
|
||||
expect(stdout).toMatch(/\d+\.\d+\.\d+/);
|
||||
});
|
||||
});
|
||||
|
||||
// Help Testing
|
||||
describe('help command', () => {
|
||||
test('should display help with --help', () => {
|
||||
const { stdout, code } = runCLI('--help');
|
||||
expect(code).toBe(0);
|
||||
expect(stdout).toContain('Usage:');
|
||||
expect(stdout).toContain('Commands:');
|
||||
expect(stdout).toContain('Options:');
|
||||
});
|
||||
|
||||
test('should display help with -h', () => {
|
||||
const { stdout, code } = runCLI('-h');
|
||||
expect(code).toBe(0);
|
||||
expect(stdout).toContain('Usage:');
|
||||
});
|
||||
});
|
||||
|
||||
// Error Handling
|
||||
describe('error handling', () => {
|
||||
test('should handle unknown command', () => {
|
||||
const { stderr, code } = runCLI('unknown-command');
|
||||
expect(code).toBe(1);
|
||||
expect(stderr).toContain('unknown command');
|
||||
});
|
||||
|
||||
test('should handle invalid options', () => {
|
||||
const { stderr, code } = runCLI('--invalid-option');
|
||||
expect(code).toBe(1);
|
||||
expect(stderr).toContain('unknown option');
|
||||
});
|
||||
|
||||
test('should validate required arguments', () => {
|
||||
const { stderr, code } = runCLI('deploy');
|
||||
expect(code).toBe(1);
|
||||
expect(stderr).toContain('missing required argument');
|
||||
});
|
||||
});
|
||||
|
||||
// Command Execution
|
||||
describe('command execution', () => {
|
||||
test('should execute deploy command', () => {
|
||||
const { stdout, code } = runCLI('deploy production --force');
|
||||
expect(code).toBe(0);
|
||||
expect(stdout).toContain('Deploying to production');
|
||||
expect(stdout).toContain('Force mode enabled');
|
||||
});
|
||||
|
||||
test('should execute with flags', () => {
|
||||
const { stdout, code } = runCLI('build --verbose --output dist');
|
||||
expect(code).toBe(0);
|
||||
expect(stdout).toContain('Building project');
|
||||
expect(stdout).toContain('Output: dist');
|
||||
});
|
||||
});
|
||||
|
||||
// Configuration Testing
|
||||
describe('configuration', () => {
|
||||
test('should set configuration value', () => {
|
||||
const { stdout, code } = runCLI('config set key value');
|
||||
expect(code).toBe(0);
|
||||
expect(stdout).toContain('Configuration updated');
|
||||
});
|
||||
|
||||
test('should get configuration value', () => {
|
||||
runCLI('config set api_key your_key_here');
|
||||
const { stdout, code } = runCLI('config get api_key');
|
||||
expect(code).toBe(0);
|
||||
expect(stdout).toContain('your_key_here');
|
||||
});
|
||||
|
||||
test('should list all configuration', () => {
|
||||
const { stdout, code } = runCLI('config list');
|
||||
expect(code).toBe(0);
|
||||
expect(stdout).toContain('Configuration:');
|
||||
});
|
||||
});
|
||||
|
||||
// Exit Code Validation
|
||||
describe('exit codes', () => {
|
||||
test('should return 0 on success', () => {
|
||||
const { code } = runCLI('status');
|
||||
expect(code).toBe(0);
|
||||
});
|
||||
|
||||
test('should return 1 on general error', () => {
|
||||
const { code } = runCLI('invalid-command');
|
||||
expect(code).toBe(1);
|
||||
});
|
||||
|
||||
test('should return 2 on invalid arguments', () => {
|
||||
const { code } = runCLI('deploy --invalid-flag');
|
||||
expect(code).toBe(2);
|
||||
});
|
||||
});
|
||||
|
||||
// Output Format Testing
|
||||
describe('output formatting', () => {
|
||||
test('should output JSON when requested', () => {
|
||||
const { stdout, code } = runCLI('status --format json');
|
||||
expect(code).toBe(0);
|
||||
expect(() => JSON.parse(stdout)).not.toThrow();
|
||||
});
|
||||
|
||||
test('should output YAML when requested', () => {
|
||||
const { stdout, code } = runCLI('status --format yaml');
|
||||
expect(code).toBe(0);
|
||||
expect(stdout).toContain(':');
|
||||
});
|
||||
|
||||
test('should output table by default', () => {
|
||||
const { stdout, code } = runCLI('status');
|
||||
expect(code).toBe(0);
|
||||
expect(stdout).toMatch(/[─┼│]/); // Table characters
|
||||
});
|
||||
});
|
||||
|
||||
// Cleanup
|
||||
afterAll(() => {
|
||||
// Clean up any test artifacts
|
||||
});
|
||||
});
|
||||
198
skills/cli-testing-patterns/templates/jest-config-test.ts
Normal file
@@ -0,0 +1,198 @@
|
||||
/**
|
||||
* Jest Configuration Testing Template
|
||||
*
|
||||
* Test CLI configuration file handling, validation, and persistence
|
||||
*/
|
||||
|
||||
import { execSync } from 'child_process';
|
||||
import fs from 'fs';
|
||||
import path from 'path';
|
||||
import os from 'os';
|
||||
|
||||
describe('CLI Configuration Tests', () => {
|
||||
const CLI_PATH = path.join(__dirname, '../bin/mycli');
|
||||
const TEST_CONFIG_DIR = path.join(os.tmpdir(), 'cli-test-config');
|
||||
const TEST_CONFIG_FILE = path.join(TEST_CONFIG_DIR, '.myclirc');
|
||||
|
||||
function runCLI(args: string, env: Record<string, string> = {}): {
|
||||
stdout: string;
|
||||
stderr: string;
|
||||
code: number;
|
||||
} {
|
||||
try {
|
||||
const stdout = execSync(`${CLI_PATH} ${args}`, {
|
||||
encoding: 'utf8',
|
||||
stdio: 'pipe',
|
||||
env: {
|
||||
...process.env,
|
||||
HOME: TEST_CONFIG_DIR,
|
||||
...env,
|
||||
},
|
||||
});
|
||||
return { stdout, stderr: '', code: 0 };
|
||||
} catch (error: any) {
|
||||
return {
|
||||
stdout: error.stdout || '',
|
||||
stderr: error.stderr || '',
|
||||
code: error.status || 1,
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
beforeEach(() => {
|
||||
// Create temporary config directory
|
||||
if (!fs.existsSync(TEST_CONFIG_DIR)) {
|
||||
fs.mkdirSync(TEST_CONFIG_DIR, { recursive: true });
|
||||
}
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
// Clean up test config directory
|
||||
if (fs.existsSync(TEST_CONFIG_DIR)) {
|
||||
fs.rmSync(TEST_CONFIG_DIR, { recursive: true, force: true });
|
||||
}
|
||||
});
|
||||
|
||||
describe('config initialization', () => {
|
||||
test('should create config file on first run', () => {
|
||||
runCLI('config init');
|
||||
expect(fs.existsSync(TEST_CONFIG_FILE)).toBe(true);
|
||||
});
|
||||
|
||||
test('should not overwrite existing config', () => {
|
||||
fs.writeFileSync(TEST_CONFIG_FILE, 'existing: data\n');
|
||||
const { stderr, code } = runCLI('config init');
|
||||
expect(code).toBe(1);
|
||||
expect(stderr).toContain('Config file already exists');
|
||||
});
|
||||
|
||||
test('should create config with default values', () => {
|
||||
runCLI('config init');
|
||||
const config = fs.readFileSync(TEST_CONFIG_FILE, 'utf8');
|
||||
expect(config).toContain('api_key: your_api_key_here');
|
||||
expect(config).toContain('environment: development');
|
||||
});
|
||||
});
|
||||
|
||||
describe('config set operations', () => {
|
||||
beforeEach(() => {
|
||||
runCLI('config init');
|
||||
});
|
||||
|
||||
test('should set string value', () => {
|
||||
const { code } = runCLI('config set api_key test_key_123');
|
||||
expect(code).toBe(0);
|
||||
|
||||
const config = fs.readFileSync(TEST_CONFIG_FILE, 'utf8');
|
||||
expect(config).toContain('api_key: test_key_123');
|
||||
});
|
||||
|
||||
test('should set boolean value', () => {
|
||||
const { code } = runCLI('config set verbose true');
|
||||
expect(code).toBe(0);
|
||||
|
||||
const config = fs.readFileSync(TEST_CONFIG_FILE, 'utf8');
|
||||
expect(config).toContain('verbose: true');
|
||||
});
|
||||
|
||||
test('should set nested value', () => {
|
||||
const { code } = runCLI('config set logging.level debug');
|
||||
expect(code).toBe(0);
|
||||
|
||||
const config = fs.readFileSync(TEST_CONFIG_FILE, 'utf8');
|
||||
expect(config).toContain('level: debug');
|
||||
});
|
||||
|
||||
test('should handle invalid key names', () => {
|
||||
const { stderr, code } = runCLI('config set invalid..key value');
|
||||
expect(code).toBe(1);
|
||||
expect(stderr).toContain('Invalid key name');
|
||||
});
|
||||
});
|
||||
|
||||
describe('config get operations', () => {
|
||||
beforeEach(() => {
|
||||
runCLI('config init');
|
||||
runCLI('config set api_key test_key_123');
|
||||
runCLI('config set environment production');
|
||||
});
|
||||
|
||||
test('should get existing value', () => {
|
||||
const { stdout, code } = runCLI('config get api_key');
|
||||
expect(code).toBe(0);
|
||||
expect(stdout).toContain('test_key_123');
|
||||
});
|
||||
|
||||
test('should handle non-existent key', () => {
|
||||
const { stderr, code } = runCLI('config get nonexistent');
|
||||
expect(code).toBe(1);
|
||||
expect(stderr).toContain('Key not found');
|
||||
});
|
||||
|
||||
test('should get nested value', () => {
|
||||
runCLI('config set database.host localhost');
|
||||
const { stdout, code } = runCLI('config get database.host');
|
||||
expect(code).toBe(0);
|
||||
expect(stdout).toContain('localhost');
|
||||
});
|
||||
});
|
||||
|
||||
describe('config list operations', () => {
|
||||
beforeEach(() => {
|
||||
runCLI('config init');
|
||||
runCLI('config set api_key test_key_123');
|
||||
runCLI('config set verbose true');
|
||||
});
|
||||
|
||||
test('should list all configuration', () => {
|
||||
const { stdout, code } = runCLI('config list');
|
||||
expect(code).toBe(0);
|
||||
expect(stdout).toContain('api_key');
|
||||
expect(stdout).toContain('verbose');
|
||||
});
|
||||
|
||||
test('should format list output', () => {
|
||||
const { stdout, code } = runCLI('config list --format json');
|
||||
expect(code).toBe(0);
|
||||
const config = JSON.parse(stdout);
|
||||
expect(config.api_key).toBe('test_key_123');
|
||||
expect(config.verbose).toBe(true);
|
||||
});
|
||||
});
|
||||
|
||||
describe('config validation', () => {
|
||||
test('should validate config file on load', () => {
|
||||
fs.writeFileSync(TEST_CONFIG_FILE, 'invalid yaml: [}');
|
||||
const { stderr, code } = runCLI('config list');
|
||||
expect(code).toBe(1);
|
||||
expect(stderr).toContain('Invalid configuration file');
|
||||
});
|
||||
|
||||
test('should validate required fields', () => {
|
||||
runCLI('config init');
|
||||
fs.writeFileSync(TEST_CONFIG_FILE, 'optional: value\n');
|
||||
const { stderr, code } = runCLI('deploy production');
|
||||
expect(code).toBe(1);
|
||||
expect(stderr).toContain('api_key is required');
|
||||
});
|
||||
});
|
||||
|
||||
describe('environment variable overrides', () => {
|
||||
beforeEach(() => {
|
||||
runCLI('config init');
|
||||
runCLI('config set api_key file_key_123');
|
||||
});
|
||||
|
||||
test('should override with environment variable', () => {
|
||||
const { stdout } = runCLI('config get api_key', {
|
||||
MYCLI_API_KEY: 'env_key_123',
|
||||
});
|
||||
expect(stdout).toContain('env_key_123');
|
||||
});
|
||||
|
||||
test('should use file value when env var not set', () => {
|
||||
const { stdout } = runCLI('config get api_key');
|
||||
expect(stdout).toContain('file_key_123');
|
||||
});
|
||||
});
|
||||
});
|
||||
223
skills/cli-testing-patterns/templates/jest-integration-test.ts
Normal file
@@ -0,0 +1,223 @@
|
||||
/**
|
||||
* Jest Integration Test Template
|
||||
*
|
||||
* Test complete CLI workflows with multiple commands and state persistence
|
||||
*/
|
||||
|
||||
import { execSync, spawn } from 'child_process';
|
||||
import fs from 'fs';
|
||||
import path from 'path';
|
||||
import os from 'os';
|
||||
|
||||
describe('CLI Integration Tests', () => {
|
||||
const CLI_PATH = path.join(__dirname, '../bin/mycli');
|
||||
const TEST_WORKSPACE = path.join(os.tmpdir(), 'cli-integration-test');
|
||||
|
||||
function runCLI(args: string, cwd: string = TEST_WORKSPACE): {
|
||||
stdout: string;
|
||||
stderr: string;
|
||||
code: number;
|
||||
} {
|
||||
try {
|
||||
const stdout = execSync(`${CLI_PATH} ${args}`, {
|
||||
encoding: 'utf8',
|
||||
stdio: 'pipe',
|
||||
cwd,
|
||||
});
|
||||
return { stdout, stderr: '', code: 0 };
|
||||
} catch (error: any) {
|
||||
return {
|
||||
stdout: error.stdout || '',
|
||||
stderr: error.stderr || '',
|
||||
code: error.status || 1,
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
beforeEach(() => {
|
||||
// Create clean test workspace
|
||||
if (fs.existsSync(TEST_WORKSPACE)) {
|
||||
fs.rmSync(TEST_WORKSPACE, { recursive: true, force: true });
|
||||
}
|
||||
fs.mkdirSync(TEST_WORKSPACE, { recursive: true });
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
// Clean up test workspace
|
||||
if (fs.existsSync(TEST_WORKSPACE)) {
|
||||
fs.rmSync(TEST_WORKSPACE, { recursive: true, force: true });
|
||||
}
|
||||
});
|
||||
|
||||
describe('complete deployment workflow', () => {
|
||||
test('should initialize, configure, and deploy', () => {
|
||||
// Step 1: Initialize project
|
||||
const init = runCLI('init my-project');
|
||||
expect(init.code).toBe(0);
|
||||
expect(init.stdout).toContain('Project initialized');
|
||||
|
||||
// Step 2: Configure deployment
|
||||
const config = runCLI('config set api_key test_key_123');
|
||||
expect(config.code).toBe(0);
|
||||
|
||||
// Step 3: Build project
|
||||
const build = runCLI('build --production');
|
||||
expect(build.code).toBe(0);
|
||||
expect(build.stdout).toContain('Build successful');
|
||||
|
||||
// Step 4: Deploy
|
||||
const deploy = runCLI('deploy production');
|
||||
expect(deploy.code).toBe(0);
|
||||
expect(deploy.stdout).toContain('Deployed successfully');
|
||||
|
||||
// Verify deployment artifacts
|
||||
const deployFile = path.join(TEST_WORKSPACE, '.deploy');
|
||||
expect(fs.existsSync(deployFile)).toBe(true);
|
||||
});
|
||||
|
||||
test('should fail deployment without configuration', () => {
|
||||
runCLI('init my-project');
|
||||
|
||||
// Try to deploy without configuring API key
|
||||
const { stderr, code } = runCLI('deploy production');
|
||||
expect(code).toBe(1);
|
||||
expect(stderr).toContain('API key not configured');
|
||||
});
|
||||
});
|
||||
|
||||
describe('multi-environment workflow', () => {
|
||||
test('should manage multiple environments', () => {
|
||||
// Initialize project
|
||||
runCLI('init my-project');
|
||||
|
||||
// Configure development environment
|
||||
runCLI('config set api_key dev_key_123 --env development');
|
||||
runCLI('config set base_url https://dev.example.com --env development');
|
||||
|
||||
// Configure production environment
|
||||
runCLI('config set api_key prod_key_123 --env production');
|
||||
runCLI('config set base_url https://api.example.com --env production');
|
||||
|
||||
// Deploy to development
|
||||
const devDeploy = runCLI('deploy development');
|
||||
expect(devDeploy.code).toBe(0);
|
||||
expect(devDeploy.stdout).toContain('dev.example.com');
|
||||
|
||||
// Deploy to production
|
||||
const prodDeploy = runCLI('deploy production');
|
||||
expect(prodDeploy.code).toBe(0);
|
||||
expect(prodDeploy.stdout).toContain('api.example.com');
|
||||
});
|
||||
});
|
||||
|
||||
describe('state persistence workflow', () => {
|
||||
test('should persist and restore state', () => {
|
||||
// Create initial state
|
||||
runCLI('state set counter 0');
|
||||
|
||||
// Increment counter multiple times
|
||||
runCLI('increment');
|
||||
runCLI('increment');
|
||||
runCLI('increment');
|
||||
|
||||
// Verify final state
|
||||
const { stdout } = runCLI('state get counter');
|
||||
expect(stdout).toContain('3');
|
||||
});
|
||||
|
||||
test('should handle state file corruption', () => {
|
||||
runCLI('state set key value');
|
||||
|
||||
// Corrupt state file
|
||||
const stateFile = path.join(TEST_WORKSPACE, '.state');
|
||||
fs.writeFileSync(stateFile, 'invalid json {[}');
|
||||
|
||||
// Should recover gracefully
|
||||
const { stderr, code } = runCLI('state get key');
|
||||
expect(code).toBe(1);
|
||||
expect(stderr).toContain('Corrupted state file');
|
||||
});
|
||||
});
|
||||
|
||||
describe('plugin workflow', () => {
|
||||
test('should install and use plugins', () => {
|
||||
// Initialize project
|
||||
runCLI('init my-project');
|
||||
|
||||
// Install plugin
|
||||
const install = runCLI('plugin install my-plugin');
|
||||
expect(install.code).toBe(0);
|
||||
|
||||
// Verify plugin is listed
|
||||
const list = runCLI('plugin list');
|
||||
expect(list.stdout).toContain('my-plugin');
|
||||
|
||||
// Use plugin command
|
||||
const usePlugin = runCLI('my-plugin:command');
|
||||
expect(usePlugin.code).toBe(0);
|
||||
|
||||
// Uninstall plugin
|
||||
const uninstall = runCLI('plugin uninstall my-plugin');
|
||||
expect(uninstall.code).toBe(0);
|
||||
|
||||
// Verify plugin is removed
|
||||
const listAfter = runCLI('plugin list');
|
||||
expect(listAfter.stdout).not.toContain('my-plugin');
|
||||
});
|
||||
});
|
||||
|
||||
describe('error recovery workflow', () => {
|
||||
test('should recover from partial failure', () => {
|
||||
runCLI('init my-project');
|
||||
|
||||
// Simulate partial deployment failure
|
||||
runCLI('deploy staging --force');
|
||||
|
||||
// Should be able to rollback
|
||||
const rollback = runCLI('rollback');
|
||||
expect(rollback.code).toBe(0);
|
||||
expect(rollback.stdout).toContain('Rollback successful');
|
||||
|
||||
// Should be able to retry
|
||||
const retry = runCLI('deploy staging --retry');
|
||||
expect(retry.code).toBe(0);
|
||||
});
|
||||
});
|
||||
|
||||
describe('concurrent operations', () => {
|
||||
test('should handle file locking', () => {
runCLI('init my-project');

// Start the long-running operation in the background.
// spawn with detached/ignored stdio is used here because execSync would block until the task exits.
const longOp = spawn(CLI_PATH, ['long-running-task'], {
cwd: TEST_WORKSPACE,
detached: true,
stdio: 'ignore',
});
longOp.unref();

// Try to run another operation that needs the lock
const { stderr, code } = runCLI('another-task');
expect(code).toBe(1);
expect(stderr).toContain('Another operation in progress');
|
||||
});
|
||||
});
|
||||
|
||||
describe('data migration workflow', () => {
|
||||
test('should migrate data between versions', () => {
|
||||
// Create old version data
|
||||
const oldData = { version: 1, data: 'legacy format' };
|
||||
fs.writeFileSync(
|
||||
path.join(TEST_WORKSPACE, 'data.json'),
|
||||
JSON.stringify(oldData)
|
||||
);
|
||||
|
||||
// Run migration
|
||||
const migrate = runCLI('migrate --to 2.0');
|
||||
expect(migrate.code).toBe(0);
|
||||
|
||||
// Verify new format
|
||||
const newData = JSON.parse(
|
||||
fs.readFileSync(path.join(TEST_WORKSPACE, 'data.json'), 'utf8')
|
||||
);
|
||||
expect(newData.version).toBe(2);
|
||||
});
|
||||
});
|
||||
});
|
||||
270
skills/cli-testing-patterns/templates/pytest-click-test.py
Normal file
@@ -0,0 +1,270 @@
|
||||
"""
|
||||
Pytest Click Testing Template
|
||||
|
||||
Complete test suite for Click-based CLI applications using CliRunner
|
||||
Tests command execution, exit codes, output validation, and interactive prompts
|
||||
"""
|
||||
|
||||
import pytest
|
||||
from click.testing import CliRunner
|
||||
from mycli.cli import cli
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def runner():
|
||||
"""Create a CliRunner instance for testing"""
|
||||
return CliRunner()
|
||||
|
||||
|
||||
class TestVersionCommand:
|
||||
"""Test version display"""
|
||||
|
||||
def test_version_flag(self, runner):
|
||||
"""Should display version with --version"""
|
||||
result = runner.invoke(cli, ['--version'])
|
||||
assert result.exit_code == 0
|
||||
assert '1.0.0' in result.output
|
||||
|
||||
def test_version_short_flag(self, runner):
|
||||
"""Should display version with -v"""
|
||||
result = runner.invoke(cli, ['-v'])
|
||||
assert result.exit_code == 0
|
||||
assert result.output.count('.') == 2 # Version format X.Y.Z
|
||||
|
||||
|
||||
class TestHelpCommand:
|
||||
"""Test help display"""
|
||||
|
||||
def test_help_flag(self, runner):
|
||||
"""Should display help with --help"""
|
||||
result = runner.invoke(cli, ['--help'])
|
||||
assert result.exit_code == 0
|
||||
assert 'Usage:' in result.output
|
||||
assert 'Commands:' in result.output
|
||||
assert 'Options:' in result.output
|
||||
|
||||
def test_help_short_flag(self, runner):
|
||||
"""Should display help with -h"""
|
||||
result = runner.invoke(cli, ['-h'])
|
||||
assert result.exit_code == 0
|
||||
assert 'Usage:' in result.output
|
||||
|
||||
def test_command_help(self, runner):
|
||||
"""Should display help for specific command"""
|
||||
result = runner.invoke(cli, ['deploy', '--help'])
|
||||
assert result.exit_code == 0
|
||||
assert 'deploy' in result.output.lower()
|
||||
|
||||
|
||||
class TestErrorHandling:
|
||||
"""Test error handling and validation"""
|
||||
|
||||
def test_unknown_command(self, runner):
|
||||
"""Should handle unknown commands"""
|
||||
result = runner.invoke(cli, ['unknown-command'])
|
||||
assert result.exit_code != 0
|
||||
assert 'no such command' in result.output.lower()
|
||||
|
||||
def test_invalid_option(self, runner):
|
||||
"""Should handle invalid options"""
|
||||
result = runner.invoke(cli, ['--invalid-option'])
|
||||
assert result.exit_code != 0
|
||||
assert 'no such option' in result.output.lower()
|
||||
|
||||
def test_missing_required_argument(self, runner):
|
||||
"""Should validate required arguments"""
|
||||
result = runner.invoke(cli, ['deploy'])
|
||||
assert result.exit_code != 0
|
||||
assert 'missing argument' in result.output.lower()
|
||||
|
||||
def test_invalid_argument_type(self, runner):
|
||||
"""Should validate argument types"""
|
||||
result = runner.invoke(cli, ['retry', '--count', 'invalid'])
|
||||
assert result.exit_code != 0
|
||||
assert 'invalid' in result.output.lower()
|
||||
|
||||
|
||||
class TestCommandExecution:
|
||||
"""Test command execution with various arguments"""
|
||||
|
||||
def test_deploy_command(self, runner):
|
||||
"""Should execute deploy command"""
|
||||
result = runner.invoke(cli, ['deploy', 'production', '--force'])
|
||||
assert result.exit_code == 0
|
||||
assert 'Deploying to production' in result.output
|
||||
assert 'Force mode enabled' in result.output
|
||||
|
||||
def test_deploy_with_flags(self, runner):
|
||||
"""Should handle multiple flags"""
|
||||
result = runner.invoke(cli, ['deploy', 'staging', '--verbose', '--dry-run'])
|
||||
assert result.exit_code == 0
|
||||
assert 'staging' in result.output
|
||||
assert 'dry run' in result.output.lower()
|
||||
|
||||
def test_build_command(self, runner):
|
||||
"""Should execute build command"""
|
||||
result = runner.invoke(cli, ['build', '--output', 'dist'])
|
||||
assert result.exit_code == 0
|
||||
assert 'Building project' in result.output
|
||||
assert 'dist' in result.output
|
||||
|
||||
|
||||
class TestConfiguration:
|
||||
"""Test configuration management"""
|
||||
|
||||
def test_config_set(self, runner):
|
||||
"""Should set configuration value"""
|
||||
result = runner.invoke(cli, ['config', 'set', 'api_key', 'your_key_here'])
|
||||
assert result.exit_code == 0
|
||||
assert 'Configuration updated' in result.output
|
||||
|
||||
def test_config_get(self, runner):
|
||||
"""Should get configuration value"""
|
||||
runner.invoke(cli, ['config', 'set', 'api_key', 'your_key_here'])
|
||||
result = runner.invoke(cli, ['config', 'get', 'api_key'])
|
||||
assert result.exit_code == 0
|
||||
assert 'your_key_here' in result.output
|
||||
|
||||
def test_config_list(self, runner):
|
||||
"""Should list all configuration"""
|
||||
result = runner.invoke(cli, ['config', 'list'])
|
||||
assert result.exit_code == 0
|
||||
assert 'Configuration:' in result.output
|
||||
|
||||
def test_config_delete(self, runner):
|
||||
"""Should delete configuration value"""
|
||||
runner.invoke(cli, ['config', 'set', 'temp_key', 'temp_value'])
|
||||
result = runner.invoke(cli, ['config', 'delete', 'temp_key'])
|
||||
assert result.exit_code == 0
|
||||
assert 'deleted' in result.output.lower()
|
||||
|
||||
|
||||
class TestExitCodes:
|
||||
"""Test exit code validation"""
|
||||
|
||||
def test_success_exit_code(self, runner):
|
||||
"""Should return 0 on success"""
|
||||
result = runner.invoke(cli, ['status'])
|
||||
assert result.exit_code == 0
|
||||
|
||||
def test_error_exit_code(self, runner):
|
||||
"""Should return non-zero on error"""
|
||||
result = runner.invoke(cli, ['invalid-command'])
|
||||
assert result.exit_code != 0
|
||||
|
||||
def test_validation_error_exit_code(self, runner):
|
||||
"""Should return specific code for validation errors"""
|
||||
result = runner.invoke(cli, ['deploy', '--invalid-flag'])
|
||||
assert result.exit_code == 2 # Click uses 2 for usage errors
|
||||
|
||||
|
||||
class TestInteractivePrompts:
|
||||
"""Test interactive prompt handling"""
|
||||
|
||||
def test_interactive_deploy_wizard(self, runner):
|
||||
"""Should handle interactive prompts"""
|
||||
result = runner.invoke(
|
||||
cli,
|
||||
['deploy-wizard'],
|
||||
input='my-app\n1\nyes\n'
|
||||
)
|
||||
assert result.exit_code == 0
|
||||
assert 'my-app' in result.output
|
||||
|
||||
def test_confirmation_prompt(self, runner):
|
||||
"""Should handle confirmation prompts"""
|
||||
result = runner.invoke(
|
||||
cli,
|
||||
['delete', 'resource-id'],
|
||||
input='y\n'
|
||||
)
|
||||
assert result.exit_code == 0
|
||||
assert 'deleted' in result.output.lower()
|
||||
|
||||
def test_confirmation_prompt_denied(self, runner):
|
||||
"""Should handle denied confirmation"""
|
||||
result = runner.invoke(
|
||||
cli,
|
||||
['delete', 'resource-id'],
|
||||
input='n\n'
|
||||
)
|
||||
assert result.exit_code == 1
|
||||
assert 'cancelled' in result.output.lower()
|
||||
|
||||
def test_multiple_prompts(self, runner):
|
||||
"""Should handle multiple prompts in sequence"""
|
||||
result = runner.invoke(
|
||||
cli,
|
||||
['init'],
|
||||
input='my-project\nJohn Doe\njohn@example.com\n'
|
||||
)
|
||||
assert result.exit_code == 0
|
||||
assert 'my-project' in result.output
|
||||
assert 'John Doe' in result.output
|
||||
|
||||
|
||||
class TestOutputFormatting:
|
||||
"""Test output formatting options"""
|
||||
|
||||
def test_json_output(self, runner):
|
||||
"""Should output JSON format"""
|
||||
result = runner.invoke(cli, ['status', '--format', 'json'])
|
||||
assert result.exit_code == 0
|
||||
import json
|
||||
try:
|
||||
json.loads(result.output)
|
||||
except json.JSONDecodeError:
|
||||
pytest.fail("Output is not valid JSON")
|
||||
|
||||
def test_yaml_output(self, runner):
|
||||
"""Should output YAML format"""
|
||||
result = runner.invoke(cli, ['status', '--format', 'yaml'])
|
||||
assert result.exit_code == 0
|
||||
assert ':' in result.output
|
||||
|
||||
def test_table_output(self, runner):
|
||||
"""Should output table format by default"""
|
||||
result = runner.invoke(cli, ['list'])
|
||||
assert result.exit_code == 0
|
||||
assert '│' in result.output or '|' in result.output
|
||||
|
||||
def test_quiet_mode(self, runner):
|
||||
"""Should suppress output in quiet mode"""
|
||||
result = runner.invoke(cli, ['deploy', 'production', '--quiet'])
|
||||
assert result.exit_code == 0
|
||||
assert len(result.output.strip()) == 0
|
||||
|
||||
|
||||
class TestFileOperations:
|
||||
"""Test file-based operations"""
|
||||
|
||||
def test_file_input(self, runner):
|
||||
"""Should read from file"""
|
||||
with runner.isolated_filesystem():
|
||||
with open('input.txt', 'w') as f:
|
||||
f.write('test data\n')
|
||||
|
||||
result = runner.invoke(cli, ['process', '--input', 'input.txt'])
|
||||
assert result.exit_code == 0
|
||||
|
||||
def test_file_output(self, runner):
|
||||
"""Should write to file"""
|
||||
with runner.isolated_filesystem():
|
||||
result = runner.invoke(cli, ['export', '--output', 'output.txt'])
|
||||
assert result.exit_code == 0
|
||||
with open('output.txt', 'r') as f:
|
||||
content = f.read()
|
||||
assert len(content) > 0
|
||||
|
||||
|
||||
class TestIsolation:
|
||||
"""Test isolated filesystem operations"""
|
||||
|
||||
def test_isolated_filesystem(self, runner):
|
||||
"""Should work in isolated filesystem"""
|
||||
with runner.isolated_filesystem():
|
||||
result = runner.invoke(cli, ['init', 'test-project'])
|
||||
assert result.exit_code == 0
|
||||
|
||||
import os
|
||||
assert os.path.exists('test-project')
|
||||
346
skills/cli-testing-patterns/templates/pytest-fixtures.py
Normal file
@@ -0,0 +1,346 @@
|
||||
"""
|
||||
Pytest Fixtures Template
|
||||
|
||||
Reusable pytest fixtures for CLI testing with Click.testing.CliRunner
|
||||
Provides common setup, teardown, and test utilities
|
||||
"""
|
||||
|
||||
import pytest
|
||||
import os
|
||||
import tempfile
|
||||
import shutil
|
||||
from pathlib import Path
|
||||
from click.testing import CliRunner
|
||||
from mycli.cli import cli
|
||||
|
||||
|
||||
# Basic Fixtures
|
||||
|
||||
@pytest.fixture
|
||||
def runner():
|
||||
"""Create a CliRunner instance for testing"""
|
||||
return CliRunner()
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def isolated_runner():
|
||||
"""Create a CliRunner with isolated filesystem"""
|
||||
runner = CliRunner()
|
||||
with runner.isolated_filesystem():
|
||||
yield runner
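
# Usage sketch (illustrative, not part of the template): tests that receive
# these fixtures -- e.g. via a conftest.py that re-exports them -- could look
# like the following. The `--version` flag is an assumption about the CLI.
#
#   def test_version(runner):
#       result = runner.invoke(cli, ['--version'])
#       assert result.exit_code == 0
#
#   def test_init_in_isolation(isolated_runner):
#       result = isolated_runner.invoke(cli, ['init', 'demo'])
#       assert result.exit_code == 0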
|
||||
|
||||
|
||||
# Configuration Fixtures
|
||||
|
||||
@pytest.fixture
|
||||
def temp_config_dir(tmp_path):
|
||||
"""Create a temporary configuration directory"""
|
||||
config_dir = tmp_path / '.mycli'
|
||||
config_dir.mkdir()
|
||||
return config_dir
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def config_file(temp_config_dir):
|
||||
"""Create a temporary configuration file"""
|
||||
config_path = temp_config_dir / 'config.yaml'
|
||||
config_content = """
|
||||
api_key: your_test_key_here
|
||||
environment: development
|
||||
verbose: false
|
||||
timeout: 30
|
||||
"""
|
||||
config_path.write_text(config_content)
|
||||
return config_path
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def env_with_config(temp_config_dir, monkeypatch):
|
||||
"""Set up environment with config directory"""
|
||||
monkeypatch.setenv('MYCLI_CONFIG_DIR', str(temp_config_dir))
|
||||
return temp_config_dir
|
||||
|
||||
|
||||
# File System Fixtures
|
||||
|
||||
@pytest.fixture
|
||||
def temp_workspace(tmp_path):
|
||||
"""Create a temporary workspace directory"""
|
||||
workspace = tmp_path / 'workspace'
|
||||
workspace.mkdir()
|
||||
return workspace
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def sample_project(temp_workspace):
|
||||
"""Create a sample project structure"""
|
||||
project = temp_workspace / 'sample-project'
|
||||
project.mkdir()
|
||||
|
||||
# Create sample files
|
||||
(project / 'package.json').write_text('{"name": "sample", "version": "1.0.0"}')
|
||||
(project / 'README.md').write_text('# Sample Project')
|
||||
|
||||
src_dir = project / 'src'
|
||||
src_dir.mkdir()
|
||||
(src_dir / 'index.js').write_text('console.log("Hello, World!");')
|
||||
|
||||
return project
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def sample_files(temp_workspace):
|
||||
"""Create sample files for testing"""
|
||||
files = {
|
||||
'input.txt': 'test input data\n',
|
||||
'config.yaml': 'key: value\n',
|
||||
'data.json': '{"id": 1, "name": "test"}\n'
|
||||
}
|
||||
|
||||
created_files = {}
|
||||
for filename, content in files.items():
|
||||
file_path = temp_workspace / filename
|
||||
file_path.write_text(content)
|
||||
created_files[filename] = file_path
|
||||
|
||||
return created_files
|
||||
|
||||
|
||||
# Mock Fixtures
|
||||
|
||||
@pytest.fixture
|
||||
def mock_api_key(monkeypatch):
|
||||
"""Mock API key environment variable"""
|
||||
monkeypatch.setenv('MYCLI_API_KEY', 'test_api_key_123')
|
||||
return 'test_api_key_123'
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def mock_home_dir(tmp_path, monkeypatch):
|
||||
"""Mock home directory"""
|
||||
home = tmp_path / 'home'
|
||||
home.mkdir()
|
||||
monkeypatch.setenv('HOME', str(home))
|
||||
return home
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def mock_no_config(monkeypatch):
|
||||
"""Remove all configuration environment variables"""
|
||||
vars_to_remove = [
|
||||
'MYCLI_CONFIG_DIR',
|
||||
'MYCLI_API_KEY',
|
||||
'MYCLI_ENVIRONMENT',
|
||||
]
|
||||
for var in vars_to_remove:
|
||||
monkeypatch.delenv(var, raising=False)
|
||||
|
||||
|
||||
# State Management Fixtures
|
||||
|
||||
@pytest.fixture
|
||||
def cli_state(temp_workspace):
|
||||
"""Create a CLI state file"""
|
||||
state_file = temp_workspace / '.mycli-state'
|
||||
state = {
|
||||
'initialized': True,
|
||||
'last_command': None,
|
||||
'history': []
|
||||
}
|
||||
import json
|
||||
state_file.write_text(json.dumps(state, indent=2))
|
||||
return state_file
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def clean_state(temp_workspace):
|
||||
"""Ensure no state file exists"""
|
||||
state_file = temp_workspace / '.mycli-state'
|
||||
if state_file.exists():
|
||||
state_file.unlink()
|
||||
return temp_workspace
|
||||
|
||||
|
||||
# Helper Function Fixtures
|
||||
|
||||
@pytest.fixture
|
||||
def run_cli_command(runner):
|
||||
"""Helper function to run CLI commands and return parsed results"""
|
||||
def _run(args, input_data=None, env=None):
|
||||
"""
|
||||
Run a CLI command and return structured results
|
||||
|
||||
Args:
|
||||
args: List of command arguments
|
||||
input_data: Optional input for interactive prompts
|
||||
env: Optional environment variables dict
|
||||
|
||||
Returns:
|
||||
dict with keys: exit_code, output, lines, success
|
||||
"""
|
||||
result = runner.invoke(cli, args, input=input_data, env=env)
|
||||
return {
|
||||
'exit_code': result.exit_code,
|
||||
'output': result.output,
|
||||
'lines': result.output.splitlines(),
|
||||
'success': result.exit_code == 0
|
||||
}
|
||||
return _run
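
# Usage sketch: a test consuming the helper above. The `status` command is an
# assumption about the CLI under test.
#
#   def test_status(run_cli_command):
#       result = run_cli_command(['status'])
#       assert result['success']
#       assert result['lines']  # at least one line of output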
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def assert_cli_success(runner):
|
||||
"""Helper to assert successful CLI execution"""
|
||||
def _assert(args, expected_in_output=None):
|
||||
"""
|
||||
Run CLI command and assert success
|
||||
|
||||
Args:
|
||||
args: List of command arguments
|
||||
expected_in_output: Optional string expected in output
|
||||
"""
|
||||
result = runner.invoke(cli, args)
|
||||
assert result.exit_code == 0, f"Command failed: {result.output}"
|
||||
if expected_in_output:
|
||||
assert expected_in_output in result.output
|
||||
return result
|
||||
return _assert
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def assert_cli_failure(runner):
|
||||
"""Helper to assert CLI command failure"""
|
||||
def _assert(args, expected_in_output=None):
|
||||
"""
|
||||
Run CLI command and assert failure
|
||||
|
||||
Args:
|
||||
args: List of command arguments
|
||||
expected_in_output: Optional string expected in output
|
||||
"""
|
||||
result = runner.invoke(cli, args)
|
||||
assert result.exit_code != 0, f"Command should have failed: {result.output}"
|
||||
if expected_in_output:
|
||||
assert expected_in_output in result.output
|
||||
return result
|
||||
return _assert
|
||||
|
||||
|
||||
# Cleanup Fixtures
|
||||
|
||||
@pytest.fixture(autouse=True)
|
||||
def cleanup_temp_files(request):
|
||||
"""Automatically clean up temporary files after tests"""
|
||||
temp_files = []
|
||||
|
||||
def _register(filepath):
|
||||
temp_files.append(filepath)
|
||||
|
||||
    def _cleanup():
        # Remove any files registered during the test
        for f in temp_files:
            if os.path.exists(f):
                os.remove(f)

    request.addfinalizer(_cleanup)
|
||||
|
||||
return _register
|
||||
|
||||
|
||||
@pytest.fixture(scope='session')
|
||||
def test_data_dir():
|
||||
"""Provide path to test data directory"""
|
||||
return Path(__file__).parent / 'test_data'
|
||||
|
||||
|
||||
# Parametrized Fixtures
|
||||
|
||||
@pytest.fixture(params=['json', 'yaml', 'table'])
|
||||
def output_format(request):
|
||||
"""Parametrize tests across different output formats"""
|
||||
return request.param
|
||||
|
||||
|
||||
@pytest.fixture(params=[True, False])
|
||||
def verbose_mode(request):
|
||||
"""Parametrize tests with and without verbose mode"""
|
||||
return request.param
|
||||
|
||||
|
||||
@pytest.fixture(params=['development', 'staging', 'production'])
|
||||
def environment(request):
|
||||
"""Parametrize tests across different environments"""
|
||||
return request.param
|
||||
|
||||
|
||||
# Integration Test Fixtures
|
||||
|
||||
@pytest.fixture
|
||||
def integration_workspace(tmp_path):
|
||||
"""
|
||||
Create a complete integration test workspace with all necessary files
|
||||
"""
|
||||
workspace = tmp_path / 'integration'
|
||||
workspace.mkdir()
|
||||
|
||||
# Create directory structure
|
||||
(workspace / 'src').mkdir()
|
||||
(workspace / 'tests').mkdir()
|
||||
(workspace / 'config').mkdir()
|
||||
(workspace / 'data').mkdir()
|
||||
|
||||
# Create config files
|
||||
(workspace / 'config' / 'dev.yaml').write_text('env: development\n')
|
||||
(workspace / 'config' / 'prod.yaml').write_text('env: production\n')
|
||||
|
||||
# Initialize CLI
|
||||
runner = CliRunner()
|
||||
with runner.isolated_filesystem(temp_dir=workspace):
|
||||
runner.invoke(cli, ['init'])
|
||||
|
||||
return workspace
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def mock_external_service(monkeypatch):
|
||||
"""Mock external service API calls"""
|
||||
class MockService:
|
||||
def __init__(self):
|
||||
self.calls = []
|
||||
|
||||
def call_api(self, endpoint, method='GET', data=None):
|
||||
self.calls.append({
|
||||
'endpoint': endpoint,
|
||||
'method': method,
|
||||
'data': data
|
||||
})
|
||||
return {'status': 'success', 'data': 'mock response'}
|
||||
|
||||
mock = MockService()
|
||||
# Replace actual service with mock
|
||||
monkeypatch.setattr('mycli.services.api', mock)
|
||||
return mock
|
||||
|
||||
|
||||
# Snapshot Testing Fixtures
|
||||
|
||||
@pytest.fixture
|
||||
def snapshot_dir(tmp_path):
|
||||
"""Create directory for snapshot testing"""
|
||||
snapshot = tmp_path / 'snapshots'
|
||||
snapshot.mkdir()
|
||||
return snapshot
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def compare_output(snapshot_dir):
|
||||
"""Compare CLI output with saved snapshot"""
|
||||
def _compare(output, snapshot_name):
|
||||
snapshot_file = snapshot_dir / f'{snapshot_name}.txt'
|
||||
|
||||
if not snapshot_file.exists():
|
||||
# Create snapshot
|
||||
snapshot_file.write_text(output)
|
||||
return True
|
||||
|
||||
# Compare with existing snapshot
|
||||
expected = snapshot_file.read_text()
|
||||
return output == expected
|
||||
|
||||
return _compare
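
# Usage sketch: snapshot a command whose output should stay stable between
# releases. Click adds `--help` automatically, so it is a safe target here.
#
#   def test_help_snapshot(runner, compare_output):
#       result = runner.invoke(cli, ['--help'])
#       assert compare_output(result.output, 'help-output')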
|
||||
378
skills/cli-testing-patterns/templates/pytest-integration-test.py
Normal file
@@ -0,0 +1,378 @@
|
||||
"""
|
||||
Pytest Integration Test Template
|
||||
|
||||
Complete workflow testing for CLI applications using Click.testing.CliRunner
|
||||
Tests multi-command workflows, state persistence, and end-to-end scenarios
|
||||
"""
|
||||
|
||||
import pytest
|
||||
import os
|
||||
import json
|
||||
import yaml
|
||||
from pathlib import Path
|
||||
from click.testing import CliRunner
|
||||
from mycli.cli import cli
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def integration_runner():
|
||||
"""Create runner with isolated filesystem for integration tests"""
|
||||
runner = CliRunner()
|
||||
with runner.isolated_filesystem():
|
||||
yield runner
|
||||
|
||||
|
||||
class TestDeploymentWorkflow:
|
||||
"""Test complete deployment workflow"""
|
||||
|
||||
def test_full_deployment_workflow(self, integration_runner):
|
||||
"""Should complete init -> configure -> build -> deploy workflow"""
|
||||
runner = integration_runner
|
||||
|
||||
# Step 1: Initialize project
|
||||
result = runner.invoke(cli, ['init', 'my-project'])
|
||||
assert result.exit_code == 0
|
||||
assert 'Project initialized' in result.output
|
||||
assert os.path.exists('my-project')
|
||||
|
||||
# Step 2: Configure API key
|
||||
os.chdir('my-project')
|
||||
result = runner.invoke(cli, ['config', 'set', 'api_key', 'your_key_here'])
|
||||
assert result.exit_code == 0
|
||||
|
||||
# Step 3: Build project
|
||||
result = runner.invoke(cli, ['build', '--production'])
|
||||
assert result.exit_code == 0
|
||||
assert 'Build successful' in result.output
|
||||
|
||||
# Step 4: Deploy to production
|
||||
result = runner.invoke(cli, ['deploy', 'production'])
|
||||
assert result.exit_code == 0
|
||||
assert 'Deployed successfully' in result.output
|
||||
|
||||
def test_deployment_without_config_fails(self, integration_runner):
|
||||
"""Should fail deployment without required configuration"""
|
||||
runner = integration_runner
|
||||
|
||||
# Initialize but don't configure
|
||||
runner.invoke(cli, ['init', 'my-project'])
|
||||
os.chdir('my-project')
|
||||
|
||||
# Try to deploy without API key
|
||||
result = runner.invoke(cli, ['deploy', 'production'])
|
||||
assert result.exit_code != 0
|
||||
assert 'api_key' in result.output.lower()
|
||||
|
||||
def test_deployment_rollback(self, integration_runner):
|
||||
"""Should rollback failed deployment"""
|
||||
runner = integration_runner
|
||||
|
||||
# Setup and deploy
|
||||
runner.invoke(cli, ['init', 'my-project'])
|
||||
os.chdir('my-project')
|
||||
runner.invoke(cli, ['config', 'set', 'api_key', 'your_key_here'])
|
||||
runner.invoke(cli, ['deploy', 'staging'])
|
||||
|
||||
# Rollback
|
||||
result = runner.invoke(cli, ['rollback'])
|
||||
assert result.exit_code == 0
|
||||
assert 'Rollback successful' in result.output
|
||||
|
||||
|
||||
class TestMultiEnvironmentWorkflow:
|
||||
"""Test multi-environment configuration and deployment"""
|
||||
|
||||
def test_manage_multiple_environments(self, integration_runner):
|
||||
"""Should manage dev, staging, and production environments"""
|
||||
runner = integration_runner
|
||||
|
||||
runner.invoke(cli, ['init', 'multi-env-project'])
|
||||
os.chdir('multi-env-project')
|
||||
|
||||
# Configure development
|
||||
runner.invoke(cli, ['config', 'set', 'api_key', 'dev_key', '--env', 'development'])
|
||||
runner.invoke(cli, ['config', 'set', 'base_url', 'https://dev.api.example.com', '--env', 'development'])
|
||||
|
||||
# Configure staging
|
||||
runner.invoke(cli, ['config', 'set', 'api_key', 'staging_key', '--env', 'staging'])
|
||||
runner.invoke(cli, ['config', 'set', 'base_url', 'https://staging.api.example.com', '--env', 'staging'])
|
||||
|
||||
# Configure production
|
||||
runner.invoke(cli, ['config', 'set', 'api_key', 'prod_key', '--env', 'production'])
|
||||
runner.invoke(cli, ['config', 'set', 'base_url', 'https://api.example.com', '--env', 'production'])
|
||||
|
||||
# Deploy to each environment
|
||||
dev_result = runner.invoke(cli, ['deploy', 'development'])
|
||||
assert dev_result.exit_code == 0
|
||||
assert 'dev.api.example.com' in dev_result.output
|
||||
|
||||
staging_result = runner.invoke(cli, ['deploy', 'staging'])
|
||||
assert staging_result.exit_code == 0
|
||||
assert 'staging.api.example.com' in staging_result.output
|
||||
|
||||
prod_result = runner.invoke(cli, ['deploy', 'production'])
|
||||
assert prod_result.exit_code == 0
|
||||
assert 'api.example.com' in prod_result.output
|
||||
|
||||
def test_environment_isolation(self, integration_runner):
|
||||
"""Should keep environment configurations isolated"""
|
||||
runner = integration_runner
|
||||
|
||||
runner.invoke(cli, ['init', 'isolated-project'])
|
||||
os.chdir('isolated-project')
|
||||
|
||||
# Set different values for each environment
|
||||
runner.invoke(cli, ['config', 'set', 'timeout', '10', '--env', 'development'])
|
||||
runner.invoke(cli, ['config', 'set', 'timeout', '30', '--env', 'production'])
|
||||
|
||||
# Verify values are isolated
|
||||
dev_result = runner.invoke(cli, ['config', 'get', 'timeout', '--env', 'development'])
|
||||
assert '10' in dev_result.output
|
||||
|
||||
prod_result = runner.invoke(cli, ['config', 'get', 'timeout', '--env', 'production'])
|
||||
assert '30' in prod_result.output
|
||||
|
||||
|
||||
class TestStatePersistence:
|
||||
"""Test state management and persistence"""
|
||||
|
||||
def test_state_persistence_across_commands(self, integration_runner):
|
||||
"""Should maintain state across multiple commands"""
|
||||
runner = integration_runner
|
||||
|
||||
# Initialize state
|
||||
result = runner.invoke(cli, ['state', 'init'])
|
||||
assert result.exit_code == 0
|
||||
|
||||
# Set multiple state values
|
||||
runner.invoke(cli, ['state', 'set', 'counter', '0'])
|
||||
runner.invoke(cli, ['state', 'set', 'user', 'testuser'])
|
||||
|
||||
# Increment counter multiple times
|
||||
for i in range(5):
|
||||
runner.invoke(cli, ['increment'])
|
||||
|
||||
# Verify final state
|
||||
result = runner.invoke(cli, ['state', 'get', 'counter'])
|
||||
assert result.exit_code == 0
|
||||
assert '5' in result.output
|
||||
|
||||
result = runner.invoke(cli, ['state', 'get', 'user'])
|
||||
assert 'testuser' in result.output
|
||||
|
||||
def test_state_recovery_from_corruption(self, integration_runner):
|
||||
"""Should recover from corrupted state file"""
|
||||
runner = integration_runner
|
||||
|
||||
# Create valid state
|
||||
runner.invoke(cli, ['state', 'init'])
|
||||
runner.invoke(cli, ['state', 'set', 'key', 'value'])
|
||||
|
||||
# Corrupt the state file
|
||||
with open('.mycli-state', 'w') as f:
|
||||
f.write('invalid json {[}')
|
||||
|
||||
# Should detect corruption and recover
|
||||
result = runner.invoke(cli, ['state', 'get', 'key'])
|
||||
assert result.exit_code != 0
|
||||
assert 'corrupt' in result.output.lower()
|
||||
|
||||
# Should be able to reset
|
||||
result = runner.invoke(cli, ['state', 'reset'])
|
||||
assert result.exit_code == 0
|
||||
|
||||
|
||||
class TestPluginWorkflow:
|
||||
"""Test plugin installation and usage"""
|
||||
|
||||
def test_plugin_lifecycle(self, integration_runner):
|
||||
"""Should install, use, and uninstall plugins"""
|
||||
runner = integration_runner
|
||||
|
||||
runner.invoke(cli, ['init', 'plugin-project'])
|
||||
os.chdir('plugin-project')
|
||||
|
||||
# Install plugin
|
||||
result = runner.invoke(cli, ['plugin', 'install', 'test-plugin'])
|
||||
assert result.exit_code == 0
|
||||
assert 'installed' in result.output.lower()
|
||||
|
||||
# Verify plugin is listed
|
||||
result = runner.invoke(cli, ['plugin', 'list'])
|
||||
assert 'test-plugin' in result.output
|
||||
|
||||
# Use plugin command
|
||||
result = runner.invoke(cli, ['test-plugin:command', '--arg', 'value'])
|
||||
assert result.exit_code == 0
|
||||
|
||||
# Uninstall plugin
|
||||
result = runner.invoke(cli, ['plugin', 'uninstall', 'test-plugin'])
|
||||
assert result.exit_code == 0
|
||||
|
||||
# Verify plugin is removed
|
||||
result = runner.invoke(cli, ['plugin', 'list'])
|
||||
assert 'test-plugin' not in result.output
|
||||
|
||||
def test_plugin_conflict_detection(self, integration_runner):
|
||||
"""Should detect and handle plugin conflicts"""
|
||||
runner = integration_runner
|
||||
|
||||
runner.invoke(cli, ['init', 'conflict-project'])
|
||||
os.chdir('conflict-project')
|
||||
|
||||
# Install first plugin
|
||||
runner.invoke(cli, ['plugin', 'install', 'plugin-a'])
|
||||
|
||||
# Try to install conflicting plugin
|
||||
result = runner.invoke(cli, ['plugin', 'install', 'plugin-b'])
|
||||
if 'conflict' in result.output.lower():
|
||||
assert result.exit_code != 0
|
||||
|
||||
|
||||
class TestDataMigration:
|
||||
"""Test data migration workflows"""
|
||||
|
||||
def test_version_migration(self, integration_runner):
|
||||
"""Should migrate data between versions"""
|
||||
runner = integration_runner
|
||||
|
||||
# Create old version data
|
||||
old_data = {
|
||||
'version': 1,
|
||||
'format': 'legacy',
|
||||
'data': {'key': 'value'}
|
||||
}
|
||||
with open('data.json', 'w') as f:
|
||||
json.dump(old_data, f)
|
||||
|
||||
# Run migration
|
||||
result = runner.invoke(cli, ['migrate', '--to', '2.0'])
|
||||
assert result.exit_code == 0
|
||||
|
||||
# Verify new format
|
||||
with open('data.json', 'r') as f:
|
||||
new_data = json.load(f)
|
||||
assert new_data['version'] == 2
|
||||
assert 'legacy' not in new_data.get('format', '')
|
||||
|
||||
def test_migration_backup(self, integration_runner):
|
||||
"""Should create backup during migration"""
|
||||
runner = integration_runner
|
||||
|
||||
# Create data
|
||||
data = {'version': 1, 'data': 'important'}
|
||||
with open('data.json', 'w') as f:
|
||||
json.dump(data, f)
|
||||
|
||||
# Migrate with backup
|
||||
result = runner.invoke(cli, ['migrate', '--to', '2.0', '--backup'])
|
||||
assert result.exit_code == 0
|
||||
|
||||
# Verify backup exists
|
||||
assert os.path.exists('data.json.backup')
|
||||
|
||||
|
||||
class TestConcurrentOperations:
|
||||
"""Test handling of concurrent operations"""
|
||||
|
||||
def test_file_locking(self, integration_runner):
|
||||
"""Should prevent concurrent modifications"""
|
||||
runner = integration_runner
|
||||
|
||||
runner.invoke(cli, ['init', 'lock-project'])
|
||||
os.chdir('lock-project')
|
||||
|
||||
# Create lock file
|
||||
with open('.mycli.lock', 'w') as f:
|
||||
f.write('locked')
|
||||
|
||||
# Try to run command that needs lock
|
||||
result = runner.invoke(cli, ['deploy', 'production'])
|
||||
assert result.exit_code != 0
|
||||
assert 'lock' in result.output.lower()
|
||||
|
||||
def test_lock_timeout(self, integration_runner):
|
||||
"""Should timeout waiting for lock"""
|
||||
runner = integration_runner
|
||||
|
||||
runner.invoke(cli, ['init', 'timeout-project'])
|
||||
os.chdir('timeout-project')
|
||||
|
||||
# Create stale lock
|
||||
with open('.mycli.lock', 'w') as f:
|
||||
import time
|
||||
f.write(str(time.time() - 3600)) # 1 hour old
|
||||
|
||||
# Should detect stale lock and continue
|
||||
result = runner.invoke(cli, ['build'])
|
||||
assert result.exit_code == 0
|
||||
|
||||
|
||||
class TestErrorRecovery:
|
||||
"""Test error recovery and retry logic"""
|
||||
|
||||
def test_retry_on_failure(self, integration_runner):
|
||||
"""Should retry failed operations"""
|
||||
runner = integration_runner
|
||||
|
||||
runner.invoke(cli, ['init', 'retry-project'])
|
||||
os.chdir('retry-project')
|
||||
runner.invoke(cli, ['config', 'set', 'api_key', 'your_key_here'])
|
||||
|
||||
# Simulate failure and retry
|
||||
result = runner.invoke(cli, ['deploy', 'staging', '--retry', '3'])
|
||||
        # Retry behaviour depends on the CLI under test; at minimum the
        # command should complete and report an exit code
        assert result.exit_code is not None
|
||||
|
||||
def test_partial_failure_recovery(self, integration_runner):
|
||||
"""Should recover from partial failures"""
|
||||
runner = integration_runner
|
||||
|
||||
runner.invoke(cli, ['init', 'recovery-project'])
|
||||
os.chdir('recovery-project')
|
||||
|
||||
# Create partial state
|
||||
runner.invoke(cli, ['build', '--step', '1'])
|
||||
runner.invoke(cli, ['build', '--step', '2'])
|
||||
|
||||
# Complete from last successful step
|
||||
result = runner.invoke(cli, ['build', '--continue'])
|
||||
assert result.exit_code == 0
|
||||
|
||||
|
||||
class TestCompleteWorkflow:
|
||||
"""Test complete end-to-end workflows"""
|
||||
|
||||
def test_full_project_lifecycle(self, integration_runner):
|
||||
"""Should complete entire project lifecycle"""
|
||||
runner = integration_runner
|
||||
|
||||
# Create project
|
||||
result = runner.invoke(cli, ['create', 'full-project'])
|
||||
assert result.exit_code == 0
|
||||
|
||||
os.chdir('full-project')
|
||||
|
||||
# Configure
|
||||
runner.invoke(cli, ['config', 'set', 'api_key', 'your_key_here'])
|
||||
runner.invoke(cli, ['config', 'set', 'region', 'us-west-1'])
|
||||
|
||||
# Add dependencies
|
||||
result = runner.invoke(cli, ['add', 'dependency', 'package-name'])
|
||||
assert result.exit_code == 0
|
||||
|
||||
# Build
|
||||
result = runner.invoke(cli, ['build', '--production'])
|
||||
assert result.exit_code == 0
|
||||
|
||||
# Test
|
||||
result = runner.invoke(cli, ['test'])
|
||||
assert result.exit_code == 0
|
||||
|
||||
# Deploy
|
||||
result = runner.invoke(cli, ['deploy', 'production'])
|
||||
assert result.exit_code == 0
|
||||
|
||||
# Verify deployment
|
||||
result = runner.invoke(cli, ['status'])
|
||||
assert result.exit_code == 0
|
||||
assert 'deployed' in result.output.lower()
|
||||
509
skills/cli-testing-patterns/templates/test-helpers.py
Normal file
@@ -0,0 +1,509 @@
|
||||
"""
|
||||
Python Test Helper Functions
|
||||
|
||||
Utility functions for CLI testing with pytest and Click.testing.CliRunner
|
||||
"""
|
||||
|
||||
import os
|
||||
import json
|
||||
import tempfile
|
||||
import shutil
|
||||
from pathlib import Path
|
||||
from typing import Any, Dict, List, Optional, Callable
|
||||
from click.testing import CliRunner, Result
|
||||
|
||||
|
||||
class CLITestHarness:
|
||||
"""Test harness for CLI testing with helpful assertion methods"""
|
||||
|
||||
def __init__(self, cli_app):
|
||||
"""
|
||||
Initialize test harness
|
||||
|
||||
Args:
|
||||
cli_app: Click CLI application to test
|
||||
"""
|
||||
self.cli = cli_app
|
||||
self.runner = CliRunner()
|
||||
|
||||
def run(
|
||||
self,
|
||||
args: List[str],
|
||||
input_data: Optional[str] = None,
|
||||
env: Optional[Dict[str, str]] = None
|
||||
) -> Result:
|
||||
"""
|
||||
Run CLI command
|
||||
|
||||
Args:
|
||||
args: Command arguments
|
||||
input_data: Input for interactive prompts
|
||||
env: Environment variables
|
||||
|
||||
Returns:
|
||||
Click Result object
|
||||
"""
|
||||
return self.runner.invoke(self.cli, args, input=input_data, env=env)
|
||||
|
||||
def assert_success(
|
||||
self,
|
||||
args: List[str],
|
||||
expected_in_output: Optional[str] = None
|
||||
) -> Result:
|
||||
"""
|
||||
Run command and assert successful execution
|
||||
|
||||
Args:
|
||||
args: Command arguments
|
||||
expected_in_output: Optional string expected in output
|
||||
|
||||
Returns:
|
||||
Click Result object
|
||||
|
||||
Raises:
|
||||
AssertionError: If command fails or output doesn't match
|
||||
"""
|
||||
result = self.run(args)
|
||||
assert result.exit_code == 0, f"Command failed: {result.output}"
|
||||
|
||||
if expected_in_output:
|
||||
assert expected_in_output in result.output, \
|
||||
f"Expected '{expected_in_output}' in output: {result.output}"
|
||||
|
||||
return result
|
||||
|
||||
def assert_failure(
|
||||
self,
|
||||
args: List[str],
|
||||
expected_in_output: Optional[str] = None
|
||||
) -> Result:
|
||||
"""
|
||||
Run command and assert it fails
|
||||
|
||||
Args:
|
||||
args: Command arguments
|
||||
expected_in_output: Optional string expected in output
|
||||
|
||||
Returns:
|
||||
Click Result object
|
||||
|
||||
Raises:
|
||||
AssertionError: If command succeeds or output doesn't match
|
||||
"""
|
||||
result = self.run(args)
|
||||
assert result.exit_code != 0, f"Command should have failed: {result.output}"
|
||||
|
||||
if expected_in_output:
|
||||
assert expected_in_output in result.output, \
|
||||
f"Expected '{expected_in_output}' in output: {result.output}"
|
||||
|
||||
return result
|
||||
|
||||
def assert_exit_code(self, args: List[str], expected_code: int) -> Result:
|
||||
"""
|
||||
Run command and assert specific exit code
|
||||
|
||||
Args:
|
||||
args: Command arguments
|
||||
expected_code: Expected exit code
|
||||
|
||||
Returns:
|
||||
Click Result object
|
||||
|
||||
Raises:
|
||||
AssertionError: If exit code doesn't match
|
||||
"""
|
||||
result = self.run(args)
|
||||
assert result.exit_code == expected_code, \
|
||||
f"Expected exit code {expected_code}, got {result.exit_code}"
|
||||
return result
|
||||
|
||||
def run_json(self, args: List[str]) -> Dict[str, Any]:
|
||||
"""
|
||||
Run command and parse JSON output
|
||||
|
||||
Args:
|
||||
args: Command arguments
|
||||
|
||||
Returns:
|
||||
Parsed JSON object
|
||||
|
||||
Raises:
|
||||
AssertionError: If command fails
|
||||
json.JSONDecodeError: If output is not valid JSON
|
||||
"""
|
||||
result = self.assert_success(args)
|
||||
return json.loads(result.output)
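
# Usage sketch for the harness above; `mycli.cli.cli` and the `status` command
# are assumptions carried over from the other templates.
#
#   from mycli.cli import cli
#
#   harness = CLITestHarness(cli)
#   harness.assert_success(['--help'])
#   data = harness.run_json(['status', '--format', 'json'])
#   assert isinstance(data, dict)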
|
||||
|
||||
|
||||
def create_temp_workspace() -> Path:
|
||||
"""
|
||||
Create temporary workspace directory
|
||||
|
||||
Returns:
|
||||
Path to temporary workspace
|
||||
"""
|
||||
temp_dir = Path(tempfile.mkdtemp(prefix='cli-test-'))
|
||||
return temp_dir
|
||||
|
||||
|
||||
def cleanup_workspace(workspace: Path) -> None:
|
||||
"""
|
||||
Clean up temporary workspace
|
||||
|
||||
Args:
|
||||
workspace: Path to workspace to remove
|
||||
"""
|
||||
if workspace.exists():
|
||||
shutil.rmtree(workspace)
|
||||
|
||||
|
||||
def create_temp_file(content: str, suffix: str = '.txt') -> Path:
|
||||
"""
|
||||
Create temporary file with content
|
||||
|
||||
Args:
|
||||
content: File content
|
||||
suffix: File extension
|
||||
|
||||
Returns:
|
||||
Path to created file
|
||||
"""
|
||||
fd, path = tempfile.mkstemp(suffix=suffix)
|
||||
with os.fdopen(fd, 'w') as f:
|
||||
f.write(content)
|
||||
return Path(path)
|
||||
|
||||
|
||||
def assert_file_exists(filepath: Path, message: Optional[str] = None) -> None:
|
||||
"""
|
||||
Assert file exists
|
||||
|
||||
Args:
|
||||
filepath: Path to file
|
||||
message: Optional custom error message
|
||||
"""
|
||||
assert filepath.exists(), message or f"File does not exist: {filepath}"
|
||||
|
||||
|
||||
def assert_file_contains(filepath: Path, expected: str) -> None:
|
||||
"""
|
||||
Assert file contains expected text
|
||||
|
||||
Args:
|
||||
filepath: Path to file
|
||||
expected: Expected text
|
||||
"""
|
||||
content = filepath.read_text()
|
||||
assert expected in content, \
|
||||
f"Expected '{expected}' in file {filepath}\nActual content: {content}"
|
||||
|
||||
|
||||
def assert_json_output(result: Result, schema: Dict[str, type]) -> Dict[str, Any]:
|
||||
"""
|
||||
Assert output is valid JSON matching schema
|
||||
|
||||
Args:
|
||||
result: Click Result object
|
||||
schema: Expected schema as dict of {key: expected_type}
|
||||
|
||||
Returns:
|
||||
Parsed JSON object
|
||||
|
||||
Raises:
|
||||
AssertionError: If JSON is invalid or doesn't match schema
|
||||
"""
|
||||
try:
|
||||
data = json.loads(result.output)
|
||||
except json.JSONDecodeError as e:
|
||||
raise AssertionError(f"Invalid JSON output: {e}\nOutput: {result.output}")
|
||||
|
||||
for key, expected_type in schema.items():
|
||||
assert key in data, f"Missing key in JSON output: {key}"
|
||||
assert isinstance(data[key], expected_type), \
|
||||
f"Expected type {expected_type} for key {key}, got {type(data[key])}"
|
||||
|
||||
return data
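
# Usage sketch, where `cli` is the Click application under test (the `status`
# command and the schema keys are illustrative):
#
#   result = CliRunner().invoke(cli, ['status', '--format', 'json'])
#   data = assert_json_output(result, {'status': str})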
|
||||
|
||||
|
||||
def mock_env_vars(vars_dict: Dict[str, str]) -> Callable[[], None]:
|
||||
"""
|
||||
Mock environment variables
|
||||
|
||||
Args:
|
||||
vars_dict: Dictionary of environment variables to set
|
||||
|
||||
Returns:
|
||||
Function to restore original environment
|
||||
|
||||
Example:
|
||||
restore = mock_env_vars({'API_KEY': 'test_key'})
|
||||
# ... run tests ...
|
||||
restore()
|
||||
"""
|
||||
original = {}
|
||||
|
||||
for key, value in vars_dict.items():
|
||||
original[key] = os.environ.get(key)
|
||||
os.environ[key] = value
|
||||
|
||||
def restore():
|
||||
for key, value in original.items():
|
||||
if value is None:
|
||||
os.environ.pop(key, None)
|
||||
else:
|
||||
os.environ[key] = value
|
||||
|
||||
return restore
|
||||
|
||||
|
||||
def compare_output_lines(result: Result, expected_lines: List[str]) -> None:
|
||||
"""
|
||||
Compare output with expected lines
|
||||
|
||||
Args:
|
||||
result: Click Result object
|
||||
expected_lines: List of expected lines in output
|
||||
|
||||
Raises:
|
||||
AssertionError: If any expected line is missing
|
||||
"""
|
||||
output = result.output
|
||||
for expected in expected_lines:
|
||||
assert expected in output, \
|
||||
f"Expected line '{expected}' not found in output:\n{output}"
|
||||
|
||||
|
||||
def parse_table_output(result: Result) -> List[Dict[str, str]]:
|
||||
"""
|
||||
Parse table output into list of dictionaries
|
||||
|
||||
Args:
|
||||
result: Click Result object with table output
|
||||
|
||||
Returns:
|
||||
List of row dictionaries
|
||||
|
||||
Note:
|
||||
Expects table with headers and │ separators
|
||||
"""
|
||||
lines = result.output.strip().split('\n')
|
||||
|
||||
# Find header line
|
||||
header_line = None
|
||||
for i, line in enumerate(lines):
|
||||
        if '│' in line and i > 0:  # skip index 0, which may be a top border row
|
||||
header_line = i
|
||||
break
|
||||
|
||||
if header_line is None:
|
||||
raise ValueError("Could not find table header")
|
||||
|
||||
# Parse headers
|
||||
headers = [h.strip() for h in lines[header_line].split('│') if h.strip()]
|
||||
|
||||
# Parse rows
|
||||
rows = []
|
||||
for line in lines[header_line + 2:]: # Skip separator
|
||||
if '│' in line:
|
||||
values = [v.strip() for v in line.split('│') if v.strip()]
|
||||
if len(values) == len(headers):
|
||||
rows.append(dict(zip(headers, values)))
|
||||
|
||||
return rows
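
# Usage sketch (assumes a `list` command that prints a │-separated table):
#
#   result = CliRunner().invoke(cli, ['list'])
#   rows = parse_table_output(result)
#   assert isinstance(rows, list)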
|
||||
|
||||
|
||||
class SnapshotTester:
|
||||
"""Helper for snapshot testing CLI output"""
|
||||
|
||||
def __init__(self, snapshot_dir: Path):
|
||||
"""
|
||||
Initialize snapshot tester
|
||||
|
||||
Args:
|
||||
snapshot_dir: Directory to store snapshots
|
||||
"""
|
||||
self.snapshot_dir = snapshot_dir
|
||||
self.snapshot_dir.mkdir(exist_ok=True)
|
||||
|
||||
def assert_matches(
|
||||
self,
|
||||
result: Result,
|
||||
snapshot_name: str,
|
||||
update: bool = False
|
||||
) -> None:
|
||||
"""
|
||||
Assert output matches snapshot
|
||||
|
||||
Args:
|
||||
result: Click Result object
|
||||
snapshot_name: Name of snapshot file
|
||||
update: Whether to update snapshot
|
||||
|
||||
Raises:
|
||||
AssertionError: If output doesn't match snapshot
|
||||
"""
|
||||
snapshot_file = self.snapshot_dir / f'{snapshot_name}.txt'
|
||||
|
||||
if update or not snapshot_file.exists():
|
||||
snapshot_file.write_text(result.output)
|
||||
return
|
||||
|
||||
expected = snapshot_file.read_text()
|
||||
assert result.output == expected, \
|
||||
f"Output doesn't match snapshot {snapshot_name}\n" \
|
||||
f"Expected:\n{expected}\n\nActual:\n{result.output}"
|
||||
|
||||
|
||||
class MockConfig:
|
||||
"""Mock configuration file for testing"""
|
||||
|
||||
def __init__(self, workspace: Path, filename: str = '.myclirc'):
|
||||
"""
|
||||
Initialize mock config
|
||||
|
||||
Args:
|
||||
workspace: Workspace directory
|
||||
filename: Config filename
|
||||
"""
|
||||
self.config_path = workspace / filename
|
||||
self.data = {}
|
||||
|
||||
def set(self, key: str, value: Any) -> None:
|
||||
"""Set configuration value"""
|
||||
self.data[key] = value
|
||||
self.save()
|
||||
|
||||
def get(self, key: str, default: Any = None) -> Any:
|
||||
"""Get configuration value"""
|
||||
return self.data.get(key, default)
|
||||
|
||||
def save(self) -> None:
|
||||
"""Save configuration to file"""
|
||||
import yaml
|
||||
with open(self.config_path, 'w') as f:
|
||||
yaml.dump(self.data, f)
|
||||
|
||||
def load(self) -> None:
|
||||
"""Load configuration from file"""
|
||||
if self.config_path.exists():
|
||||
import yaml
|
||||
with open(self.config_path, 'r') as f:
|
||||
self.data = yaml.safe_load(f) or {}
|
||||
|
||||
|
||||
def wait_for_file(filepath: Path, timeout: float = 5.0) -> None:
|
||||
"""
|
||||
Wait for file to exist
|
||||
|
||||
Args:
|
||||
filepath: Path to file
|
||||
timeout: Timeout in seconds
|
||||
|
||||
Raises:
|
||||
TimeoutError: If file doesn't exist within timeout
|
||||
"""
|
||||
import time
|
||||
start = time.time()
|
||||
|
||||
while not filepath.exists():
|
||||
if time.time() - start > timeout:
|
||||
raise TimeoutError(f"Timeout waiting for file: {filepath}")
|
||||
time.sleep(0.1)
|
||||
|
||||
|
||||
def capture_output(func: Callable) -> Dict[str, str]:
|
||||
"""
|
||||
Capture stdout and stderr during function execution
|
||||
|
||||
Args:
|
||||
func: Function to execute
|
||||
|
||||
Returns:
|
||||
Dictionary with 'stdout' and 'stderr' keys
|
||||
"""
|
||||
import sys
|
||||
from io import StringIO
|
||||
|
||||
old_stdout = sys.stdout
|
||||
old_stderr = sys.stderr
|
||||
|
||||
stdout_capture = StringIO()
|
||||
stderr_capture = StringIO()
|
||||
|
||||
sys.stdout = stdout_capture
|
||||
sys.stderr = stderr_capture
|
||||
|
||||
try:
|
||||
func()
|
||||
finally:
|
||||
sys.stdout = old_stdout
|
||||
sys.stderr = old_stderr
|
||||
|
||||
return {
|
||||
'stdout': stdout_capture.getvalue(),
|
||||
'stderr': stderr_capture.getvalue()
|
||||
}
|
||||
|
||||
|
||||
class IntegrationTestHelper:
|
||||
"""Helper for integration testing with state management"""
|
||||
|
||||
def __init__(self, cli_app, workspace: Optional[Path] = None):
|
||||
"""
|
||||
Initialize integration test helper
|
||||
|
||||
Args:
|
||||
cli_app: Click CLI application
|
||||
workspace: Optional workspace directory
|
||||
"""
|
||||
self.harness = CLITestHarness(cli_app)
|
||||
self.workspace = workspace or create_temp_workspace()
|
||||
self.original_cwd = Path.cwd()
|
||||
|
||||
def __enter__(self):
|
||||
"""Enter context - change to workspace"""
|
||||
os.chdir(self.workspace)
|
||||
return self
|
||||
|
||||
def __exit__(self, exc_type, exc_val, exc_tb):
|
||||
"""Exit context - restore cwd and cleanup"""
|
||||
os.chdir(self.original_cwd)
|
||||
cleanup_workspace(self.workspace)
|
||||
|
||||
def run_workflow(self, commands: List[List[str]]) -> List[Result]:
|
||||
"""
|
||||
Run multiple commands in sequence
|
||||
|
||||
Args:
|
||||
commands: List of command argument lists
|
||||
|
||||
Returns:
|
||||
List of Result objects
|
||||
"""
|
||||
results = []
|
||||
for cmd in commands:
|
||||
result = self.harness.run(cmd)
|
||||
results.append(result)
|
||||
if result.exit_code != 0:
|
||||
break
|
||||
return results
|
||||
|
||||
def assert_workflow_success(self, commands: List[List[str]]) -> List[Result]:
|
||||
"""
|
||||
Run workflow and assert all commands succeed
|
||||
|
||||
Args:
|
||||
commands: List of command argument lists
|
||||
|
||||
Returns:
|
||||
List of Result objects
|
||||
|
||||
Raises:
|
||||
AssertionError: If any command fails
|
||||
"""
|
||||
results = []
|
||||
for i, cmd in enumerate(commands):
|
||||
result = self.harness.assert_success(cmd)
|
||||
results.append(result)
|
||||
return results
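
# Usage sketch: the context manager keeps the working directory and temporary
# workspace tidy around a multi-command workflow (commands are illustrative).
#
#   with IntegrationTestHelper(cli) as helper:
#       helper.assert_workflow_success([
#           ['init', 'demo-project'],
#           ['build'],
#       ])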
|
||||
362
skills/cli-testing-patterns/templates/test-helpers.ts
Normal file
@@ -0,0 +1,362 @@
|
||||
/**
|
||||
* Node.js Test Helper Functions
|
||||
*
|
||||
* Utility functions for CLI testing with Jest
|
||||
*/
|
||||
|
||||
import { execSync, spawn, SpawnOptions } from 'child_process';
|
||||
import fs from 'fs';
|
||||
import path from 'path';
|
||||
import os from 'os';
|
||||
|
||||
/**
|
||||
* CLI execution result interface
|
||||
*/
|
||||
export interface CLIResult {
|
||||
stdout: string;
|
||||
stderr: string;
|
||||
code: number;
|
||||
success: boolean;
|
||||
}
|
||||
|
||||
/**
|
||||
* Execute CLI command synchronously
|
||||
* @param cliPath - Path to CLI executable
|
||||
* @param args - Command arguments
|
||||
* @param options - Execution options
|
||||
* @returns CLI execution result
|
||||
*/
|
||||
export function runCLI(
|
||||
cliPath: string,
|
||||
args: string,
|
||||
options: {
|
||||
cwd?: string;
|
||||
env?: Record<string, string>;
|
||||
timeout?: number;
|
||||
} = {}
|
||||
): CLIResult {
|
||||
try {
|
||||
const stdout = execSync(`${cliPath} ${args}`, {
|
||||
encoding: 'utf8',
|
||||
stdio: 'pipe',
|
||||
cwd: options.cwd,
|
||||
env: { ...process.env, ...options.env },
|
||||
timeout: options.timeout,
|
||||
});
|
||||
return {
|
||||
stdout,
|
||||
stderr: '',
|
||||
code: 0,
|
||||
success: true,
|
||||
};
|
||||
} catch (error: any) {
|
||||
return {
|
||||
stdout: error.stdout || '',
|
||||
stderr: error.stderr || '',
|
||||
code: error.status || 1,
|
||||
success: false,
|
||||
};
|
||||
}
|
||||
}
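
/*
 * Usage sketch (Jest). The CLI path and the `--version` flag are assumptions
 * about the project under test, not guarantees of this helper:
 *
 *   const result = runCLI('./bin/mycli', '--version');
 *   expect(result.code).toBe(0);
 *   expect(result.stdout).toMatch(/\d+\.\d+\.\d+/);
 */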
|
||||
|
||||
/**
|
||||
* Execute CLI command asynchronously
|
||||
* @param cliPath - Path to CLI executable
|
||||
* @param args - Command arguments array
|
||||
* @param options - Spawn options
|
||||
* @returns Promise of CLI execution result
|
||||
*/
|
||||
export function runCLIAsync(
|
||||
cliPath: string,
|
||||
args: string[],
|
||||
options: SpawnOptions = {}
|
||||
): Promise<CLIResult> {
|
||||
return new Promise((resolve) => {
|
||||
const child = spawn(cliPath, args, {
|
||||
...options,
|
||||
stdio: 'pipe',
|
||||
});
|
||||
|
||||
let stdout = '';
|
||||
let stderr = '';
|
||||
|
||||
child.stdout?.on('data', (data) => {
|
||||
stdout += data.toString();
|
||||
});
|
||||
|
||||
child.stderr?.on('data', (data) => {
|
||||
stderr += data.toString();
|
||||
});
|
||||
|
||||
child.on('close', (code) => {
|
||||
resolve({
|
||||
stdout,
|
||||
stderr,
|
||||
code: code || 0,
|
||||
success: code === 0,
|
||||
});
|
||||
});
|
||||
|
||||
child.on('error', (error) => {
|
||||
resolve({
|
||||
stdout,
|
||||
stderr: stderr + error.message,
|
||||
code: 1,
|
||||
success: false,
|
||||
});
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Create temporary test directory
|
||||
* @returns Path to temporary directory
|
||||
*/
|
||||
export function createTempDir(): string {
|
||||
const tempDir = path.join(os.tmpdir(), `cli-test-${Date.now()}-${Math.random().toString(36).slice(2)}`);
|
||||
fs.mkdirSync(tempDir, { recursive: true });
|
||||
return tempDir;
|
||||
}
|
||||
|
||||
/**
|
||||
* Clean up temporary directory
|
||||
* @param dirPath - Directory to remove
|
||||
*/
|
||||
export function cleanupTempDir(dirPath: string): void {
|
||||
if (fs.existsSync(dirPath)) {
|
||||
fs.rmSync(dirPath, { recursive: true, force: true });
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Create temporary file with content
|
||||
* @param content - File content
|
||||
* @param extension - File extension
|
||||
* @returns Path to created file
|
||||
*/
|
||||
export function createTempFile(content: string, extension: string = 'txt'): string {
|
||||
const tempFile = path.join(os.tmpdir(), `test-${Date.now()}.${extension}`);
|
||||
fs.writeFileSync(tempFile, content);
|
||||
return tempFile;
|
||||
}
|
||||
|
||||
/**
|
||||
* Assert CLI command succeeds
|
||||
* @param result - CLI execution result
|
||||
* @param expectedOutput - Optional expected output substring
|
||||
*/
|
||||
export function assertSuccess(result: CLIResult, expectedOutput?: string): void {
|
||||
if (!result.success) {
|
||||
throw new Error(`CLI command failed with exit code ${result.code}\nStderr: ${result.stderr}`);
|
||||
}
|
||||
if (expectedOutput && !result.stdout.includes(expectedOutput)) {
|
||||
throw new Error(`Expected output to contain "${expectedOutput}"\nActual: ${result.stdout}`);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Assert CLI command fails
|
||||
* @param result - CLI execution result
|
||||
* @param expectedError - Optional expected error substring
|
||||
*/
|
||||
export function assertFailure(result: CLIResult, expectedError?: string): void {
|
||||
if (result.success) {
|
||||
throw new Error(`CLI command should have failed but succeeded\nStdout: ${result.stdout}`);
|
||||
}
|
||||
if (expectedError && !result.stderr.includes(expectedError) && !result.stdout.includes(expectedError)) {
|
||||
throw new Error(`Expected error to contain "${expectedError}"\nActual stderr: ${result.stderr}\nActual stdout: ${result.stdout}`);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Assert exit code matches expected value
|
||||
* @param result - CLI execution result
|
||||
* @param expectedCode - Expected exit code
|
||||
*/
|
||||
export function assertExitCode(result: CLIResult, expectedCode: number): void {
|
||||
if (result.code !== expectedCode) {
|
||||
throw new Error(`Expected exit code ${expectedCode} but got ${result.code}\nStderr: ${result.stderr}`);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Parse JSON output from CLI
|
||||
* @param result - CLI execution result
|
||||
* @returns Parsed JSON object
|
||||
*/
|
||||
export function parseJSONOutput<T = any>(result: CLIResult): T {
|
||||
try {
|
||||
return JSON.parse(result.stdout);
|
||||
} catch (error) {
|
||||
throw new Error(`Failed to parse JSON output: ${error}\nStdout: ${result.stdout}`);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Mock environment variables for test
|
||||
* @param vars - Environment variables to set
|
||||
* @returns Function to restore original environment
|
||||
*/
|
||||
export function mockEnv(vars: Record<string, string>): () => void {
|
||||
const original = { ...process.env };
|
||||
|
||||
Object.entries(vars).forEach(([key, value]) => {
|
||||
process.env[key] = value;
|
||||
});
|
||||
|
||||
return () => {
|
||||
Object.keys(process.env).forEach((key) => {
|
||||
if (!(key in original)) {
|
||||
delete process.env[key];
|
||||
}
|
||||
});
|
||||
Object.entries(original).forEach(([key, value]) => {
|
||||
process.env[key] = value;
|
||||
});
|
||||
};
|
||||
}
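
/*
 * Usage sketch: pair mockEnv with afterEach so variables never leak between
 * tests (the variable name is illustrative):
 *
 *   let restore: () => void;
 *   beforeEach(() => { restore = mockEnv({ MYCLI_API_KEY: 'test-key' }); });
 *   afterEach(() => restore());
 */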
|
||||
|
||||
/**
|
||||
* Wait for file to exist
|
||||
* @param filePath - Path to file
|
||||
* @param timeout - Timeout in milliseconds
|
||||
* @returns Promise that resolves when file exists
|
||||
*/
|
||||
export async function waitForFile(filePath: string, timeout: number = 5000): Promise<void> {
|
||||
const startTime = Date.now();
|
||||
while (!fs.existsSync(filePath)) {
|
||||
if (Date.now() - startTime > timeout) {
|
||||
throw new Error(`Timeout waiting for file: ${filePath}`);
|
||||
}
|
||||
await new Promise((resolve) => setTimeout(resolve, 100));
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Create CLI test fixture with setup and teardown
|
||||
* @param setup - Setup function
|
||||
* @param teardown - Teardown function
|
||||
* @returns Test fixture object
|
||||
*/
|
||||
export function createFixture<T>(
|
||||
setup: () => T | Promise<T>,
|
||||
teardown: (fixture: T) => void | Promise<void>
|
||||
): {
|
||||
beforeEach: () => Promise<T>;
|
||||
afterEach: (fixture: T) => Promise<void>;
|
||||
} {
|
||||
return {
|
||||
beforeEach: async () => setup(),
|
||||
afterEach: async (fixture: T) => teardown(fixture),
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Capture stdout/stderr during function execution
|
||||
* @param fn - Function to execute
|
||||
* @returns Captured output
|
||||
*/
|
||||
export function captureOutput(fn: () => void): { stdout: string; stderr: string } {
|
||||
const originalStdout = process.stdout.write;
|
||||
const originalStderr = process.stderr.write;
|
||||
|
||||
let stdout = '';
|
||||
let stderr = '';
|
||||
|
||||
process.stdout.write = ((chunk: any) => {
|
||||
stdout += chunk.toString();
|
||||
return true;
|
||||
}) as any;
|
||||
|
||||
process.stderr.write = ((chunk: any) => {
|
||||
stderr += chunk.toString();
|
||||
return true;
|
||||
}) as any;
|
||||
|
||||
try {
|
||||
fn();
|
||||
} finally {
|
||||
process.stdout.write = originalStdout;
|
||||
process.stderr.write = originalStderr;
|
||||
}
|
||||
|
||||
return { stdout, stderr };
|
||||
}
|
||||
|
||||
/**
|
||||
* Test helper for testing CLI with different input combinations
|
||||
*/
|
||||
export class CLITestHarness {
|
||||
constructor(private cliPath: string) {}
|
||||
|
||||
/**
|
||||
* Run command with arguments
|
||||
*/
|
||||
run(args: string, options?: { cwd?: string; env?: Record<string, string> }): CLIResult {
|
||||
return runCLI(this.cliPath, args, options);
|
||||
}
|
||||
|
||||
/**
|
||||
* Run command and assert success
|
||||
*/
|
||||
assertSuccess(args: string, expectedOutput?: string): CLIResult {
|
||||
const result = this.run(args);
|
||||
assertSuccess(result, expectedOutput);
|
||||
return result;
|
||||
}
|
||||
|
||||
/**
|
||||
* Run command and assert failure
|
||||
*/
|
||||
assertFailure(args: string, expectedError?: string): CLIResult {
|
||||
const result = this.run(args);
|
||||
assertFailure(result, expectedError);
|
||||
return result;
|
||||
}
|
||||
|
||||
/**
|
||||
* Run command and parse JSON output
|
||||
*/
|
||||
runJSON<T = any>(args: string): T {
|
||||
const result = this.run(args);
|
||||
assertSuccess(result);
|
||||
return parseJSONOutput<T>(result);
|
||||
}
|
||||
}
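
/*
 * Usage sketch (Jest). The binary path, `status` command, and JSON shape are
 * assumptions for illustration only:
 *
 *   const harness = new CLITestHarness('./bin/mycli');
 *   test('status reports JSON', () => {
 *     const data = harness.runJSON('status --format json');
 *     expect(typeof data).toBe('object');
 *   });
 */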
|
||||
|
||||
/**
|
||||
* Validate JSON schema in CLI output
|
||||
* @param result - CLI execution result
|
||||
* @param schema - Expected schema object
|
||||
*/
|
||||
export function validateJSONSchema(result: CLIResult, schema: Record<string, string>): void {
|
||||
const output = parseJSONOutput(result);
|
||||
|
||||
Object.entries(schema).forEach(([key, expectedType]) => {
|
||||
if (!(key in output)) {
|
||||
throw new Error(`Missing expected key in JSON output: ${key}`);
|
||||
}
|
||||
const actualType = typeof output[key];
|
||||
if (actualType !== expectedType) {
|
||||
throw new Error(`Expected type ${expectedType} for key ${key}, but got ${actualType}`);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Compare CLI output with snapshot
|
||||
* @param result - CLI execution result
|
||||
* @param snapshotPath - Path to snapshot file
|
||||
* @param update - Whether to update snapshot
|
||||
*/
|
||||
export function compareSnapshot(result: CLIResult, snapshotPath: string, update: boolean = false): void {
|
||||
if (update || !fs.existsSync(snapshotPath)) {
|
||||
fs.writeFileSync(snapshotPath, result.stdout);
|
||||
return;
|
||||
}
|
||||
|
||||
const snapshot = fs.readFileSync(snapshotPath, 'utf8');
|
||||
if (result.stdout !== snapshot) {
|
||||
throw new Error(`Output does not match snapshot\nExpected:\n${snapshot}\n\nActual:\n${result.stdout}`);
|
||||
}
|
||||
}
|
||||