Initial commit

Zhongwei Li, 2025-11-30 09:04:14 +08:00, commit 70c36b5eff
248 changed files with 47482 additions and 0 deletions

# Exit Code Testing Patterns
Comprehensive guide to testing CLI exit codes correctly.
## Standard Exit Codes
### POSIX Standard Exit Codes
| Code | Meaning | When to Use |
|------|---------|-------------|
| 0 | Success | Command completed successfully |
| 1 | General Error | Catchall for general errors |
| 2 | Misuse of Command | Invalid arguments or options |
| 126 | Command Cannot Execute | Permission problem or not executable |
| 127 | Command Not Found | Command not found in PATH |
| 128+N | Fatal Error Signal N | Process terminated by signal N |
| 130 | Ctrl+C Termination | Process terminated by SIGINT |
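The 128+N row is why Ctrl+C shows up as 130: SIGINT is signal 2, and 128 + 2 = 130. A small sketch of how a test suite might encode that convention instead of hard-coding magic numbers (assuming Jest; the signal numbers are the common Linux values):
```typescript
// Map a handful of signals to their conventional numbers so tests can
// compute the expected 128+N exit code by name.
const SIGNAL_NUMBERS: Record<string, number> = { SIGHUP: 1, SIGINT: 2, SIGTERM: 15 };

function exitCodeForSignal(signal: keyof typeof SIGNAL_NUMBERS): number {
  return 128 + SIGNAL_NUMBERS[signal];
}

test('Ctrl+C maps to exit code 130', () => {
  expect(exitCodeForSignal('SIGINT')).toBe(130);
});
```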
### Custom Application Exit Codes
```typescript
// Define custom exit codes
enum ExitCode {
  SUCCESS = 0,
  GENERAL_ERROR = 1,
  INVALID_ARGUMENT = 2,
  CONFIG_ERROR = 3,
  NETWORK_ERROR = 4,
  AUTH_ERROR = 5,
  NOT_FOUND = 6,
  ALREADY_EXISTS = 7,
  PERMISSION_DENIED = 8,
}
```
## Node.js Exit Code Testing
### Basic Exit Code Testing
```typescript
describe('Exit Code Tests', () => {
  test('success returns 0', () => {
    const { code } = runCLI('status');
    expect(code).toBe(0);
  });

  test('general error returns 1', () => {
    const { code } = runCLI('fail-command');
    expect(code).toBe(1);
  });

  test('invalid argument returns 2', () => {
    const { code } = runCLI('deploy --invalid-env unknown');
    expect(code).toBe(2);
  });

  test('command not found returns 127', () => {
    const { code } = runCLI('nonexistent-command');
    expect(code).toBe(127);
  });
});
```
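These tests assume a small `runCLI` helper that runs the binary and returns `{ stdout, stderr, code }`; a minimal sketch, assuming a `CLI_PATH` constant that points at the built binary (the full version appears in the Jest basic example):
```typescript
import { execSync } from 'child_process';

// Minimal helper: run the CLI synchronously and capture output plus exit code.
function runCLI(args: string): { stdout: string; stderr: string; code: number } {
  try {
    const stdout = execSync(`${CLI_PATH} ${args}`, { encoding: 'utf8', stdio: 'pipe' });
    return { stdout, stderr: '', code: 0 };
  } catch (error: any) {
    return { stdout: error.stdout || '', stderr: error.stderr || '', code: error.status || 1 };
  }
}
```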
### Specific Error Conditions
```typescript
describe('Specific Exit Codes', () => {
  test('configuration error', () => {
    const { code, stderr } = runCLI('deploy production');
    expect(code).toBe(3); // CONFIG_ERROR
    expect(stderr).toContain('configuration');
  });

  test('network error', () => {
    // Mock network failure
    const { code, stderr } = runCLI('fetch --url https://unreachable.example.com');
    expect(code).toBe(4); // NETWORK_ERROR
    expect(stderr).toContain('network');
  });

  test('authentication error', () => {
    const { code, stderr } = runCLI('login --token invalid');
    expect(code).toBe(5); // AUTH_ERROR
    expect(stderr).toContain('authentication');
  });

  test('resource not found', () => {
    const { code, stderr } = runCLI('get resource-123');
    expect(code).toBe(6); // NOT_FOUND
    expect(stderr).toContain('not found');
  });

  test('resource already exists', () => {
    runCLI('create my-resource');
    const { code, stderr } = runCLI('create my-resource');
    expect(code).toBe(7); // ALREADY_EXISTS
    expect(stderr).toContain('already exists');
  });
});
```
### Testing Exit Code Consistency
```typescript
describe('Exit Code Consistency', () => {
  const errorScenarios = [
    { args: 'deploy', expectedCode: 2, reason: 'missing required argument' },
    { args: 'deploy --env invalid', expectedCode: 2, reason: 'invalid environment' },
    { args: 'config get missing', expectedCode: 6, reason: 'config key not found' },
    { args: 'unknown-cmd', expectedCode: 127, reason: 'command not found' },
  ];

  test.each(errorScenarios)(
    'should return exit code $expectedCode for $reason',
    ({ args, expectedCode }) => {
      const { code } = runCLI(args);
      expect(code).toBe(expectedCode);
    }
  );
});
```
## Python Exit Code Testing
### Basic Exit Code Testing
```python
class TestExitCodes:
    """Test CLI exit codes"""

    def test_success_exit_code(self, runner):
        """Success should return 0"""
        result = runner.invoke(cli, ['status'])
        assert result.exit_code == 0

    def test_general_error_exit_code(self, runner):
        """General error should return 1"""
        result = runner.invoke(cli, ['fail-command'])
        assert result.exit_code == 1

    def test_usage_error_exit_code(self, runner):
        """Usage error should return 2"""
        result = runner.invoke(cli, ['deploy'])  # Missing required arg
        assert result.exit_code == 2

    def test_unknown_command_exit_code(self, runner):
        """Unknown command handling"""
        result = runner.invoke(cli, ['nonexistent'])
        assert result.exit_code != 0
```
### Custom Exit Codes with Click
```python
import click
import sys

# Define custom exit codes
class ExitCode:
    SUCCESS = 0
    GENERAL_ERROR = 1
    INVALID_ARGUMENT = 2
    CONFIG_ERROR = 3
    NETWORK_ERROR = 4
    AUTH_ERROR = 5

@click.command()
def deploy():
    """Deploy command with custom exit codes"""
    try:
        # Check configuration
        if not has_valid_config():
            click.echo("Configuration error", err=True)
            sys.exit(ExitCode.CONFIG_ERROR)

        # Check authentication
        if not is_authenticated():
            click.echo("Authentication failed", err=True)
            sys.exit(ExitCode.AUTH_ERROR)

        # Deploy
        deploy_application()
        click.echo("Deployment successful")
        sys.exit(ExitCode.SUCCESS)
    except NetworkError:
        click.echo("Network error", err=True)
        sys.exit(ExitCode.NETWORK_ERROR)
    except Exception as e:
        click.echo(f"Error: {e}", err=True)
        sys.exit(ExitCode.GENERAL_ERROR)
```
### Testing Custom Exit Codes
```python
class TestCustomExitCodes:
    """Test custom exit codes"""

    def test_config_error_exit_code(self, runner, tmp_path):
        """Configuration error should return 3"""
        # Run in a directory that has no config file
        result = runner.invoke(cli, ['deploy', 'production'])
        assert result.exit_code == 3
        assert 'configuration' in result.output.lower()

    def test_network_error_exit_code(self, runner, monkeypatch):
        """Network error should return 4"""
        def mock_request(*args, **kwargs):
            raise NetworkError("Connection failed")

        monkeypatch.setattr('requests.post', mock_request)
        result = runner.invoke(cli, ['deploy', 'production'])
        assert result.exit_code == 4
        assert 'network' in result.output.lower()

    def test_auth_error_exit_code(self, runner):
        """Authentication error should return 5"""
        result = runner.invoke(cli, ['deploy', 'production', '--token', 'invalid'])
        assert result.exit_code == 5
        assert 'authentication' in result.output.lower()
```
## Testing Exit Codes in Scripts
### Bash Script Exit Code Testing
```typescript
import { spawnSync } from 'child_process';

describe('Script Exit Codes', () => {
  test('should respect shell exit codes', () => {
    // Test that the CLI's exit code drives the calling script
    const script = `
      ${CLI_PATH} deploy staging
      if [ $? -ne 0 ]; then
        echo "Deployment failed"
        exit 1
      fi
      echo "Deployment succeeded"
    `;
    const { status, stdout } = spawnSync(script, { shell: '/bin/bash', encoding: 'utf8' });
    expect(status).toBe(0);
    expect(stdout).toContain('Deployment succeeded');
  });

  test('should propagate errors in pipelines', () => {
    // Without pipefail, the pipeline's exit code would be tee's, not the CLI's
    const { status } = spawnSync(`set -o pipefail; ${CLI_PATH} invalid | tee output.log`, {
      shell: '/bin/bash',
      encoding: 'utf8',
    });
    expect(status).not.toBe(0);
  });
});
```
## Exit Code Best Practices
### 1. Document Exit Codes
```typescript
/**
 * CLI Exit Codes
 *
 * 0 - Success
 * 1 - General error
 * 2 - Invalid arguments
 * 3 - Configuration error
 * 4 - Network error
 * 5 - Authentication error
 * 6 - Resource not found
 * 7 - Resource already exists
 * 8 - Permission denied
 */
```
### 2. Consistent Error Handling
```python
def handle_error(error: Exception) -> int:
    """
    Handle errors and return the appropriate exit code.

    Returns:
        Appropriate exit code for the error type
    """
    if isinstance(error, ConfigurationError):
        click.echo(f"Configuration error: {error}", err=True)
        return ExitCode.CONFIG_ERROR
    elif isinstance(error, NetworkError):
        click.echo(f"Network error: {error}", err=True)
        return ExitCode.NETWORK_ERROR
    elif isinstance(error, AuthenticationError):
        click.echo(f"Authentication failed: {error}", err=True)
        return ExitCode.AUTH_ERROR
    else:
        click.echo(f"Error: {error}", err=True)
        return ExitCode.GENERAL_ERROR
```
### 3. Test Exit Codes with Error Messages
```typescript
test('exit code matches error type', () => {
  const errorCases = [
    { args: 'deploy', expectedCode: 2, expectedMsg: 'missing required argument' },
    { args: 'login --token bad', expectedCode: 5, expectedMsg: 'authentication failed' },
    { args: 'get missing-id', expectedCode: 6, expectedMsg: 'not found' },
  ];

  errorCases.forEach(({ args, expectedCode, expectedMsg }) => {
    const { code, stderr } = runCLI(args);
    expect(code).toBe(expectedCode);
    expect(stderr.toLowerCase()).toContain(expectedMsg);
  });
});
```
### 4. Test Help and Version Return 0
```python
def test_help_returns_success(runner):
    """Help should return 0"""
    result = runner.invoke(cli, ['--help'])
    assert result.exit_code == 0

def test_version_returns_success(runner):
    """Version should return 0"""
    result = runner.invoke(cli, ['--version'])
    assert result.exit_code == 0
```
## Common Pitfalls
### 1. Don't Use Exit Code 0 for Errors
```typescript
// ❌ Wrong - using 0 for errors
if (error) {
  console.error('Error occurred');
  process.exit(0); // Should be non-zero!
}

// ✅ Correct - using non-zero for errors
if (error) {
  console.error('Error occurred');
  process.exit(1);
}
```
### 2. Don't Ignore Exit Codes in Tests
```python
# ❌ Wrong - not checking exit code
def test_deploy(runner):
    result = runner.invoke(cli, ['deploy', 'production'])
    assert 'deployed' in result.output  # What if it failed?

# ✅ Correct - always check exit code
def test_deploy(runner):
    result = runner.invoke(cli, ['deploy', 'production'])
    assert result.exit_code == 0
    assert 'deployed' in result.output
```
### 3. Use Specific Exit Codes
```typescript
// ❌ Wrong - using 1 for everything
if (configError) process.exit(1);
if (networkError) process.exit(1);
if (authError) process.exit(1);
// ✅ Correct - using specific codes
if (configError) process.exit(ExitCode.CONFIG_ERROR);
if (networkError) process.exit(ExitCode.NETWORK_ERROR);
if (authError) process.exit(ExitCode.AUTH_ERROR);
```
## Testing Exit Codes in CI/CD
```yaml
# GitHub Actions example
- name: Test CLI Exit Codes
  run: |
    # Should succeed
    ./cli status && echo "Status check passed" || exit 1
    # Should fail
    ./cli invalid-command && exit 1 || echo "Error handling works"
    # Check a specific exit code (capture it so `bash -e` does not abort the step)
    code=0
    ./cli deploy --missing-arg || code=$?
    if [ "$code" -eq 2 ]; then
      echo "Correct exit code for invalid argument"
    else
      echo "Wrong exit code: $code"
      exit 1
    fi
```
## Resources
- [Exit Codes on Linux](https://tldp.org/LDP/abs/html/exitcodes.html)
- [POSIX Exit Codes](https://pubs.opengroup.org/onlinepubs/9699919799/)
- [GNU Exit Codes](https://www.gnu.org/software/libc/manual/html_node/Exit-Status.html)

# Integration Testing for CLI Applications
Complete workflows and integration testing patterns for CLI applications.
## Overview
Integration tests verify that multiple CLI commands work together correctly, testing complete user workflows rather than individual commands in isolation.
## Key Differences from Unit Tests
| Unit Tests | Integration Tests |
|------------|-------------------|
| Test individual commands | Test command sequences |
| Mock external dependencies | May use real dependencies |
| Fast execution | Slower execution |
| Isolated state | Shared state across commands |
## Node.js Integration Testing
### Multi-Command Workflow
```typescript
describe('Complete Deployment Workflow', () => {
  let tempDir: string;

  beforeEach(() => {
    tempDir = fs.mkdtempSync(path.join(os.tmpdir(), 'cli-integration-'));
  });

  afterEach(() => {
    fs.rmSync(tempDir, { recursive: true, force: true });
  });

  test('full deployment workflow', () => {
    // Step 1: Initialize project
    let result = runCLI(`init my-project --cwd ${tempDir}`);
    expect(result.code).toBe(0);
    expect(fs.existsSync(path.join(tempDir, 'my-project'))).toBe(true);

    // Step 2: Configure
    const projectDir = path.join(tempDir, 'my-project');
    result = runCLI(`config set api_key test_key --cwd ${projectDir}`);
    expect(result.code).toBe(0);

    // Step 3: Build
    result = runCLI(`build --production --cwd ${projectDir}`);
    expect(result.code).toBe(0);
    expect(fs.existsSync(path.join(projectDir, 'dist'))).toBe(true);

    // Step 4: Deploy
    result = runCLI(`deploy staging --cwd ${projectDir}`);
    expect(result.code).toBe(0);
    expect(result.stdout).toContain('Deployed successfully');

    // Step 5: Verify
    result = runCLI(`status --cwd ${projectDir}`);
    expect(result.code).toBe(0);
    expect(result.stdout).toContain('staging');
  });
});
```
### State Persistence Testing
```typescript
describe('State Persistence', () => {
  test('state persists across commands', () => {
    const workspace = createTempWorkspace();
    try {
      // Create initial state
      runCLI(`init --cwd ${workspace}`);
      runCLI(`config set key1 value1 --cwd ${workspace}`);
      runCLI(`config set key2 value2 --cwd ${workspace}`);

      // Verify state persists
      let result = runCLI(`config get key1 --cwd ${workspace}`);
      expect(result.stdout).toContain('value1');

      // Modify state
      runCLI(`config set key1 updated --cwd ${workspace}`);

      // Verify modification
      result = runCLI(`config get key1 --cwd ${workspace}`);
      expect(result.stdout).toContain('updated');

      // Verify other keys unchanged
      result = runCLI(`config get key2 --cwd ${workspace}`);
      expect(result.stdout).toContain('value2');
    } finally {
      cleanupWorkspace(workspace);
    }
  });
});
```
## Python Integration Testing
### Complete Workflow Testing
```python
import os

class TestCompleteWorkflow:
    """Test complete CLI workflows"""

    def test_project_lifecycle(self, runner):
        """Test complete project lifecycle"""
        with runner.isolated_filesystem():
            # Initialize
            result = runner.invoke(cli, ['create', 'test-project'])
            assert result.exit_code == 0

            # Enter project directory
            os.chdir('test-project')

            # Configure
            result = runner.invoke(cli, ['config', 'set', 'api_key', 'test_key'])
            assert result.exit_code == 0

            # Add dependencies
            result = runner.invoke(cli, ['add', 'dependency', 'requests'])
            assert result.exit_code == 0

            # Build
            result = runner.invoke(cli, ['build'])
            assert result.exit_code == 0
            assert os.path.exists('dist')

            # Test
            result = runner.invoke(cli, ['test'])
            assert result.exit_code == 0

            # Deploy
            result = runner.invoke(cli, ['deploy', 'staging'])
            assert result.exit_code == 0

            # Verify
            result = runner.invoke(cli, ['status'])
            assert result.exit_code == 0
            assert 'staging' in result.output

    def test_multi_environment_workflow(self, runner):
        """Test workflow across multiple environments"""
        with runner.isolated_filesystem():
            # Setup
            runner.invoke(cli, ['init', 'multi-env-app'])
            os.chdir('multi-env-app')

            # Configure environments
            environments = ['development', 'staging', 'production']
            for env in environments:
                result = runner.invoke(
                    cli,
                    ['config', 'set', 'api_key', f'{env}_key', '--env', env]
                )
                assert result.exit_code == 0

            # Deploy to each environment
            for env in environments:
                result = runner.invoke(cli, ['deploy', env])
                assert result.exit_code == 0
                assert env in result.output
```
### Error Recovery Testing
```python
import os

class TestErrorRecovery:
    """Test error recovery workflows"""

    def test_rollback_on_failure(self, runner):
        """Test rollback after failed deployment"""
        with runner.isolated_filesystem():
            # Setup
            runner.invoke(cli, ['init', 'rollback-test'])
            os.chdir('rollback-test')
            runner.invoke(cli, ['config', 'set', 'api_key', 'test_key'])

            # Successful deployment
            result = runner.invoke(cli, ['deploy', 'staging'])
            assert result.exit_code == 0

            # Failed deployment (simulated)
            result = runner.invoke(cli, ['deploy', 'staging', '--force-fail'])
            assert result.exit_code != 0

            # Rollback
            result = runner.invoke(cli, ['rollback'])
            assert result.exit_code == 0
            assert 'rollback successful' in result.output.lower()

    def test_recovery_from_corruption(self, runner):
        """Test recovery from corrupted state"""
        with runner.isolated_filesystem():
            # Create valid state
            runner.invoke(cli, ['init', 'corrupt-test'])
            os.chdir('corrupt-test')
            runner.invoke(cli, ['config', 'set', 'key', 'value'])

            # Corrupt state file
            with open('.cli-state', 'w') as f:
                f.write('invalid json {[}')

            # Should detect the corruption
            result = runner.invoke(cli, ['config', 'get', 'key'])
            assert result.exit_code != 0
            assert 'corrupt' in result.output.lower()

            # Reset state
            result = runner.invoke(cli, ['reset', '--force'])
            assert result.exit_code == 0

            # Should work after reset
            result = runner.invoke(cli, ['config', 'set', 'key', 'new_value'])
            assert result.exit_code == 0
```
## Integration Test Patterns
### 1. Sequential Command Testing
Test commands that must run in a specific order:
```python
def test_sequential_workflow(runner):
    """Test commands that depend on each other"""
    with runner.isolated_filesystem():
        # Each command depends on the previous one
        commands = [
            ['init', 'project'],
            ['config', 'set', 'key', 'value'],
            ['build'],
            ['test'],
            ['deploy', 'staging'],
        ]
        for cmd in commands:
            result = runner.invoke(cli, cmd)
            assert result.exit_code == 0, \
                f"Command {' '.join(cmd)} failed: {result.output}"
```
### 2. Concurrent Operation Testing
Test that concurrent operations are handled correctly:
```python
def test_concurrent_operations(runner):
    """Test handling of concurrent operations"""
    import threading

    results = []

    def run_command():
        result = runner.invoke(cli, ['deploy', 'staging'])
        results.append(result)

    # Start multiple deployments
    # Note: CliRunner swaps global stdio during invoke, so for realistic
    # concurrency tests prefer spawning separate processes.
    threads = [threading.Thread(target=run_command) for _ in range(3)]
    for thread in threads:
        thread.start()
    for thread in threads:
        thread.join()

    # Only one should succeed; the others should detect the lock
    successful = sum(1 for r in results if r.exit_code == 0)
    assert successful == 1
    assert any('locked' in r.output.lower() for r in results if r.exit_code != 0)
```
### 3. Data Migration Testing
Test data migration between versions:
```python
import json
import os

def test_data_migration(runner):
    """Test data migration workflow"""
    with runner.isolated_filesystem():
        # Create old version data
        old_data = {'version': 1, 'data': {'key': 'value'}}
        with open('data.json', 'w') as f:
            json.dump(old_data, f)

        # Run migration
        result = runner.invoke(cli, ['migrate', '--to', '2.0'])
        assert result.exit_code == 0

        # Verify new format
        with open('data.json', 'r') as f:
            new_data = json.load(f)
        assert new_data['version'] == 2
        assert new_data['data']['key'] == 'value'

        # Verify backup created
        assert os.path.exists('data.json.backup')
```
## Best Practices
1. **Use Isolated Environments**: Each test should run in a clean environment
2. **Test Real Workflows**: Test actual user scenarios, not artificial sequences
3. **Include Error Paths**: Test recovery from failures
4. **Test State Persistence**: Verify data persists correctly across commands
5. **Use Realistic Data**: Test with data similar to production use cases
6. **Clean Up Resources**: Always clean up temp files and other resources
7. **Document Workflows**: Clearly document what workflow each test verifies
8. **Set Appropriate Timeouts**: Integration tests may take longer than unit tests
9. **Mark Slow Tests**: Tag slow-running integration tests so they can be skipped (see the sketch after this list)
10. **Test Concurrency**: Verify handling of simultaneous operations
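Jest has no built-in marker system, but items 8 and 9 can be approximated with a longer timeout and an environment-gated skip. A minimal sketch (assuming Jest; the `RUN_SLOW` variable name is illustrative):
```typescript
// Opt slow integration suites in or out via an environment variable.
const describeSlow = process.env.RUN_SLOW ? describe : describe.skip;

describeSlow('slow deployment workflows', () => {
  // Integration steps may take far longer than unit tests.
  jest.setTimeout(60_000);

  test('full deploy and verify', () => {
    // ...run the workflow steps shown earlier
  });
});
```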
## Running Integration Tests
### Node.js/Jest
```bash
# Run all integration tests
npm test -- --testPathPattern=integration
# Run specific integration test
npm test -- integration/deployment.test.ts
# Run with extended timeout
npm test -- --testTimeout=30000
```
### Python/pytest
```bash
# Run all integration tests
pytest tests/integration
# Run specific test
pytest tests/integration/test_workflow.py
# Run marked integration tests
pytest -m integration
# Run with verbose output
pytest tests/integration -v
# Skip slow tests
pytest -m "not slow"
```
## Resources
- [Integration Testing Best Practices](https://martinfowler.com/bliki/IntegrationTest.html)
- [Testing Strategies](https://testing.googleblog.com/)
- [CLI Testing Patterns](https://clig.dev/#testing)

# Jest Advanced CLI Testing Example
Advanced testing patterns for CLI applications including mocking, fixtures, and integration tests.
## Advanced Patterns
### 1. Async Command Testing
```typescript
import { spawn } from 'child_process';

async function runCLIAsync(args: string[]): Promise<CLIResult> {
  return new Promise((resolve) => {
    const child = spawn(CLI_PATH, args, { stdio: 'pipe' });
    let stdout = '';
    let stderr = '';

    child.stdout?.on('data', (data) => {
      stdout += data.toString();
    });
    child.stderr?.on('data', (data) => {
      stderr += data.toString();
    });
    child.on('close', (code) => {
      resolve({ stdout, stderr, code: code || 0 });
    });
  });
}

test('should handle long-running command', async () => {
  const result = await runCLIAsync(['deploy', 'production']);
  expect(result.code).toBe(0);
}, 30000); // 30 second timeout
```
### 2. Environment Variable Mocking
```typescript
describe('environment configuration', () => {
  const originalEnv = { ...process.env };

  afterEach(() => {
    process.env = { ...originalEnv };
  });

  test('should use API key from environment', () => {
    process.env.API_KEY = 'test_key_123';
    const { stdout, code } = runCLI('status');
    expect(code).toBe(0);
    expect(stdout).toContain('Authenticated');
  });

  test('should fail without API key', () => {
    delete process.env.API_KEY;
    const { stderr, code } = runCLI('status');
    expect(code).toBe(1);
    expect(stderr).toContain('API key not found');
  });
});
```
### 3. File System Fixtures
```typescript
import fs from 'fs';
import os from 'os';
import path from 'path';

describe('config file handling', () => {
  let tempDir: string;

  beforeEach(() => {
    tempDir = fs.mkdtempSync(path.join(os.tmpdir(), 'cli-test-'));
  });

  afterEach(() => {
    fs.rmSync(tempDir, { recursive: true, force: true });
  });

  test('should create config file', () => {
    const configFile = path.join(tempDir, '.config');
    const result = runCLI(`init --config ${configFile}`);
    expect(result.code).toBe(0);
    expect(fs.existsSync(configFile)).toBe(true);

    const config = JSON.parse(fs.readFileSync(configFile, 'utf8'));
    expect(config).toHaveProperty('api_key');
  });
});
```
### 4. Mocking External APIs
```typescript
import nock from 'nock';

// Note: nock intercepts HTTP inside the current process, so this pattern
// assumes the CLI is invoked in-process (e.g. by importing its entry point)
// rather than spawned as a separate child process.
describe('API interaction', () => {
  beforeEach(() => {
    nock.cleanAll();
  });

  test('should fetch deployment status', () => {
    nock('https://api.example.com')
      .get('/deployments/123')
      .reply(200, { status: 'success', environment: 'production' });

    const { stdout, code } = runCLI('status --deployment 123');
    expect(code).toBe(0);
    expect(stdout).toContain('success');
    expect(stdout).toContain('production');
  });

  test('should handle API errors', () => {
    nock('https://api.example.com')
      .get('/deployments/123')
      .reply(500, { error: 'Internal Server Error' });

    const { stderr, code } = runCLI('status --deployment 123');
    expect(code).toBe(1);
    expect(stderr).toContain('API error');
  });
});
```
### 5. Test Fixtures
```typescript
// test-fixtures.ts
export const createTestFixtures = () => {
  const tempDir = fs.mkdtempSync(path.join(os.tmpdir(), 'cli-test-'));

  // Create sample project structure
  fs.mkdirSync(path.join(tempDir, 'src'));
  fs.writeFileSync(
    path.join(tempDir, 'package.json'),
    JSON.stringify({ name: 'test-project', version: '1.0.0' })
  );

  return {
    tempDir,
    cleanup: () => fs.rmSync(tempDir, { recursive: true, force: true }),
  };
};

// Usage in tests
test('should build project', () => {
  const fixtures = createTestFixtures();
  try {
    const result = runCLI(`build --cwd ${fixtures.tempDir}`);
    expect(result.code).toBe(0);
    expect(fs.existsSync(path.join(fixtures.tempDir, 'dist'))).toBe(true);
  } finally {
    fixtures.cleanup();
  }
});
```
### 6. Snapshot Testing
```typescript
test('help output matches snapshot', () => {
  const { stdout } = runCLI('--help');
  expect(stdout).toMatchSnapshot();
});

test('version format matches snapshot', () => {
  const { stdout } = runCLI('--version');
  expect(stdout).toMatchSnapshot();
});
```
### 7. Parameterized Tests
```typescript
describe.each([
  ['development', 'dev.example.com'],
  ['staging', 'staging.example.com'],
  ['production', 'api.example.com'],
])('deploy to %s', (environment, expectedUrl) => {
  test(`should deploy to ${environment}`, () => {
    const { stdout, code } = runCLI(`deploy ${environment}`);
    expect(code).toBe(0);
    expect(stdout).toContain(expectedUrl);
  });
});
```
### 8. Interactive Command Testing
```typescript
import { spawn } from 'child_process';

test('should handle interactive prompts', (done) => {
  const child = spawn(CLI_PATH, ['init'], { stdio: 'pipe' });
  const inputs = ['my-project', 'John Doe', 'john@example.com'];
  let inputIndex = 0;

  child.stdout?.on('data', (data) => {
    const output = data.toString();
    if (output.includes('?') && inputIndex < inputs.length) {
      child.stdin?.write(inputs[inputIndex] + '\n');
      inputIndex++;
    }
  });

  child.on('close', (code) => {
    expect(code).toBe(0);
    done();
  });
});
```
### 9. Coverage-Driven Testing
```typescript
// Ensure all CLI commands are tested
describe('CLI command coverage', () => {
  const commands = ['init', 'build', 'deploy', 'status', 'config'];

  commands.forEach((command) => {
    test(`${command} command exists`, () => {
      const { stdout } = runCLI('--help');
      expect(stdout).toContain(command);
    });

    test(`${command} has help text`, () => {
      const { stdout, code } = runCLI(`${command} --help`);
      expect(code).toBe(0);
      expect(stdout).toContain('Usage:');
    });
  });
});
```
### 10. Performance Testing
```typescript
test('command executes within time limit', () => {
  const startTime = Date.now();
  const { code } = runCLI('status');
  const duration = Date.now() - startTime;

  expect(code).toBe(0);
  expect(duration).toBeLessThan(2000); // Should complete within 2 seconds
});
```
## Best Practices
1. **Use Test Fixtures**: Create reusable test data and cleanup functions
2. **Mock External Dependencies**: Never make real API calls or database connections
3. **Test Edge Cases**: Test boundary conditions, empty inputs, special characters
4. **Async Handling**: Use proper async/await or promises for async operations
5. **Cleanup**: Always clean up temp files, reset mocks, restore the environment
6. **Isolation**: Tests should not depend on execution order
7. **Clear Error Messages**: Write assertions with helpful failure messages
## Common Advanced Patterns
- Concurrent execution testing
- File locking and race conditions
- Signal handling (SIGTERM, SIGINT), sketched below
- Large file processing
- Streaming output
- Progress indicators
- Error recovery and retry logic
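A minimal sketch of the signal-handling case, assuming Jest, the `CLI_PATH` constant from the earlier examples, and a hypothetical long-running `watch` subcommand:
```typescript
import { spawn } from 'child_process';

test('shuts down cleanly on SIGTERM', (done) => {
  const child = spawn(CLI_PATH, ['watch'], { stdio: 'pipe' });

  // Give the process a moment to start, then ask it to terminate.
  setTimeout(() => child.kill('SIGTERM'), 500);

  child.on('close', (code, signal) => {
    // If the CLI traps SIGTERM and exits itself, Node reports a numeric code;
    // if it does not, Node reports the terminating signal instead.
    expect(code === 0 || signal === 'SIGTERM').toBe(true);
    done();
  });
}, 10000);
```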
## Resources
- [Jest Advanced Features](https://jestjs.io/docs/advanced)
- [Mocking with Jest](https://jestjs.io/docs/mock-functions)
- [Snapshot Testing](https://jestjs.io/docs/snapshot-testing)

# Jest Basic CLI Testing Example
This example demonstrates basic CLI testing patterns using Jest for Node.js/TypeScript projects.
## Setup
```bash
npm install --save-dev jest @types/jest ts-jest @types/node
```
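The install command above does not create a Jest configuration; one possible minimal ts-jest setup (file name and match pattern are illustrative) looks like this:
```typescript
// jest.config.ts
import type { Config } from 'jest';

const config: Config = {
  preset: 'ts-jest',        // compile TypeScript tests on the fly
  testEnvironment: 'node',  // CLI tests run in a Node environment
  testMatch: ['**/tests/**/*.test.ts'],
};

export default config;
```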
## Test Structure
```typescript
import { execSync } from 'child_process';
import path from 'path';

describe('CLI Tool Tests', () => {
  const CLI_PATH = path.join(__dirname, '../bin/mycli');

  function runCLI(args: string) {
    try {
      const stdout = execSync(`${CLI_PATH} ${args}`, {
        encoding: 'utf8',
        stdio: 'pipe',
      });
      return { stdout, stderr: '', code: 0 };
    } catch (error: any) {
      return {
        stdout: error.stdout || '',
        stderr: error.stderr || '',
        code: error.status || 1,
      };
    }
  }

  test('should display version', () => {
    const { stdout, code } = runCLI('--version');
    expect(code).toBe(0);
    expect(stdout).toContain('1.0.0');
  });

  test('should display help', () => {
    const { stdout, code } = runCLI('--help');
    expect(code).toBe(0);
    expect(stdout).toContain('Usage:');
  });

  test('should handle unknown command', () => {
    const { stderr, code } = runCLI('unknown-command');
    expect(code).toBe(1);
    expect(stderr).toContain('unknown command');
  });
});
```
## Running Tests
```bash
# Run all tests
npm test
# Run with coverage
npm run test:coverage
# Run in watch mode
npm run test:watch
```
## Key Patterns
### 1. Command Execution Helper
Create a reusable `runCLI()` function that:
- Executes CLI commands using `execSync`
- Captures stdout, stderr, and exit codes
- Handles both success and failure cases
### 2. Exit Code Testing
Always test exit codes:
- `0` for success
- Non-zero for errors
- Specific codes for different error types
### 3. Output Validation
Test output content using Jest matchers (see the sketch after this list):
- `.toContain()` for substring matching
- `.toMatch()` for regex patterns
- `.toBe()` for exact matches
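For example, a short sketch using the `runCLI` helper defined above (the expected strings and version pattern are illustrative):
```typescript
test('status output is well formed', () => {
  const { stdout, code } = runCLI('status');
  expect(code).toBe(0);                             // exact match
  expect(stdout).toContain('Environment');          // substring match
  expect(stdout).toMatch(/version \d+\.\d+\.\d+/);  // regex pattern
});
```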
### 4. Error Handling
Test error scenarios:
- Unknown commands
- Invalid options
- Missing required arguments
- Invalid argument types
## Example Test Cases
```typescript
describe('deploy command', () => {
  test('should deploy with valid arguments', () => {
    const { stdout, code } = runCLI('deploy production --force');
    expect(code).toBe(0);
    expect(stdout).toContain('Deploying to production');
  });

  test('should fail without required arguments', () => {
    const { stderr, code } = runCLI('deploy');
    expect(code).toBe(1);
    expect(stderr).toContain('missing required argument');
  });

  test('should validate environment names', () => {
    const { stderr, code } = runCLI('deploy invalid-env');
    expect(code).toBe(1);
    expect(stderr).toContain('invalid environment');
  });
});
```
## Best Practices
1. **Isolate Tests**: Each test should be independent
2. **Use Descriptive Names**: Test names should describe what they validate
3. **Test Both Success and Failure**: Cover happy path and error cases
4. **Mock External Dependencies**: Don't make real API calls or file system changes
5. **Use Type Safety**: Leverage TypeScript for better test reliability
6. **Keep Tests Fast**: Fast tests encourage frequent running
## Common Pitfalls
- ❌ Not testing exit codes
- ❌ Only testing success cases
- ❌ Hardcoding paths instead of using `path.join()`
- ❌ Not handling async operations properly
- ❌ Testing implementation details instead of behavior
## Resources
- [Jest Documentation](https://jestjs.io/docs/getting-started)
- [Testing CLI Applications](https://jestjs.io/docs/cli)
- [TypeScript with Jest](https://jestjs.io/docs/getting-started#using-typescript)

# Pytest Click Testing Example
Comprehensive examples for testing Click-based CLI applications using pytest and CliRunner.
## Basic Setup
```python
import pytest
from click.testing import CliRunner
from mycli.cli import cli

@pytest.fixture
def runner():
    return CliRunner()
```
## Basic Command Testing
```python
class TestBasicCommands:
    """Test basic CLI commands"""

    def test_version(self, runner):
        """Test version command"""
        result = runner.invoke(cli, ['--version'])
        assert result.exit_code == 0
        assert '1.0.0' in result.output

    def test_help(self, runner):
        """Test help command"""
        result = runner.invoke(cli, ['--help'])
        assert result.exit_code == 0
        assert 'Usage:' in result.output

    def test_unknown_command(self, runner):
        """Test unknown command handling"""
        result = runner.invoke(cli, ['unknown'])
        assert result.exit_code != 0
        assert 'no such command' in result.output.lower()
```
## Testing with Arguments
```python
class TestArgumentParsing:
    """Test argument parsing"""

    def test_required_argument(self, runner):
        """Test command with required argument"""
        result = runner.invoke(cli, ['deploy', 'production'])
        assert result.exit_code == 0
        assert 'production' in result.output

    def test_missing_required_argument(self, runner):
        """Test missing required argument"""
        result = runner.invoke(cli, ['deploy'])
        assert result.exit_code != 0
        assert 'missing argument' in result.output.lower()

    def test_optional_argument(self, runner):
        """Test optional argument"""
        result = runner.invoke(cli, ['build', '--output', 'dist'])
        assert result.exit_code == 0
        assert 'dist' in result.output
```
## Testing with Options
```python
class TestOptionParsing:
    """Test option parsing"""

    def test_boolean_flag(self, runner):
        """Test boolean flag option"""
        result = runner.invoke(cli, ['deploy', 'staging', '--force'])
        assert result.exit_code == 0
        assert 'force' in result.output.lower()

    def test_option_with_value(self, runner):
        """Test option with value"""
        result = runner.invoke(cli, ['config', 'set', '--key', 'api_key', '--value', 'test'])
        assert result.exit_code == 0

    def test_multiple_options(self, runner):
        """Test multiple options"""
        result = runner.invoke(
            cli,
            ['deploy', 'production', '--verbose', '--dry-run', '--timeout', '60']
        )
        assert result.exit_code == 0
```
## Testing Interactive Prompts
```python
class TestInteractivePrompts:
    """Test interactive prompt handling"""

    def test_simple_prompt(self, runner):
        """Test simple text prompt"""
        result = runner.invoke(cli, ['init'], input='my-project\n')
        assert result.exit_code == 0
        assert 'my-project' in result.output

    def test_confirmation_prompt(self, runner):
        """Test confirmation prompt (yes)"""
        result = runner.invoke(cli, ['delete', 'resource-id'], input='y\n')
        assert result.exit_code == 0
        assert 'deleted' in result.output.lower()

    def test_confirmation_prompt_no(self, runner):
        """Test confirmation prompt (no)"""
        result = runner.invoke(cli, ['delete', 'resource-id'], input='n\n')
        assert result.exit_code == 1
        assert 'cancelled' in result.output.lower()

    def test_multiple_prompts(self, runner):
        """Test multiple prompts in sequence"""
        inputs = 'my-project\nJohn Doe\njohn@example.com\n'
        result = runner.invoke(cli, ['init', '--interactive'], input=inputs)
        assert result.exit_code == 0
        assert 'my-project' in result.output
        assert 'John Doe' in result.output

    def test_choice_prompt(self, runner):
        """Test choice prompt"""
        result = runner.invoke(cli, ['deploy'], input='1\n')  # Select option 1
        assert result.exit_code == 0
```
## Testing with Isolated Filesystem
```python
import os

class TestFileOperations:
    """Test file operations with isolated filesystem"""

    def test_create_file(self, runner):
        """Test file creation"""
        with runner.isolated_filesystem():
            result = runner.invoke(cli, ['init', 'test-project'])
            assert result.exit_code == 0
            assert os.path.exists('test-project')

    def test_read_file(self, runner):
        """Test reading from file"""
        with runner.isolated_filesystem():
            # Create test file
            with open('input.txt', 'w') as f:
                f.write('test data')

            result = runner.invoke(cli, ['process', '--input', 'input.txt'])
            assert result.exit_code == 0

    def test_write_file(self, runner):
        """Test writing to file"""
        with runner.isolated_filesystem():
            result = runner.invoke(cli, ['export', '--output', 'output.txt'])
            assert result.exit_code == 0
            assert os.path.exists('output.txt')

            with open('output.txt', 'r') as f:
                content = f.read()
            assert len(content) > 0
```
## Testing Environment Variables
```python
class TestEnvironmentVariables:
    """Test environment variable handling"""

    def test_with_env_var(self, runner):
        """Test command with environment variable"""
        result = runner.invoke(
            cli,
            ['status'],
            env={'API_KEY': 'test_key_123'}
        )
        assert result.exit_code == 0

    def test_without_env_var(self, runner):
        """Test command without the required environment variable"""
        # Passing None removes the variable for the isolated invocation
        result = runner.invoke(cli, ['status'], env={'API_KEY': None})
        # Assuming API_KEY is required, the command should fail
        assert result.exit_code != 0

    def test_env_var_override(self, runner, monkeypatch):
        """Test environment variable override"""
        monkeypatch.setenv('API_KEY', 'overridden_key')
        result = runner.invoke(cli, ['status'])
        assert result.exit_code == 0
```
## Testing Output Formats
```python
import json

class TestOutputFormats:
    """Test different output formats"""

    def test_json_output(self, runner):
        """Test JSON output format"""
        result = runner.invoke(cli, ['status', '--format', 'json'])
        assert result.exit_code == 0

        try:
            data = json.loads(result.output)
            assert isinstance(data, dict)
        except json.JSONDecodeError:
            pytest.fail("Output is not valid JSON")

    def test_yaml_output(self, runner):
        """Test YAML output format"""
        result = runner.invoke(cli, ['status', '--format', 'yaml'])
        assert result.exit_code == 0
        assert ':' in result.output

    def test_table_output(self, runner):
        """Test table output format"""
        result = runner.invoke(cli, ['list'])
        assert result.exit_code == 0
        # Expect some kind of column separator in the rendered table
        assert '|' in result.output or '+' in result.output
```
## Testing Exit Codes
```python
class TestExitCodes:
    """Test exit codes"""

    def test_success_exit_code(self, runner):
        """Test success returns 0"""
        result = runner.invoke(cli, ['status'])
        assert result.exit_code == 0

    def test_error_exit_code(self, runner):
        """Test error returns non-zero"""
        result = runner.invoke(cli, ['invalid-command'])
        assert result.exit_code != 0

    def test_validation_error_exit_code(self, runner):
        """Test validation error returns 2"""
        result = runner.invoke(cli, ['deploy', '--invalid-option'])
        assert result.exit_code == 2  # Click uses 2 for usage errors

    def test_exception_exit_code(self, runner):
        """Test uncaught exception returns 1"""
        result = runner.invoke(cli, ['command-that-throws'])
        assert result.exit_code == 1
```
## Testing with Fixtures
```python
@pytest.fixture
def sample_config(tmp_path):
    """Create sample config file"""
    config_file = tmp_path / '.myclirc'
    config_file.write_text(
        'api_key: your_test_key_here\n'
        'environment: development\n'
        'verbose: false\n'
    )
    return config_file

@pytest.fixture
def mock_api(monkeypatch):
    """Mock external API calls"""
    class MockAPI:
        def __init__(self):
            self.calls = []

        def get(self, endpoint):
            self.calls.append(('GET', endpoint))
            return {'status': 'success'}

    mock = MockAPI()
    monkeypatch.setattr('mycli.api.client', mock)
    return mock

class TestWithFixtures:
    """Test using fixtures"""

    def test_with_config_file(self, runner, sample_config):
        """Test with config file"""
        result = runner.invoke(
            cli,
            ['status', '--config', str(sample_config)]
        )
        assert result.exit_code == 0

    def test_with_mock_api(self, runner, mock_api):
        """Test with mocked API"""
        result = runner.invoke(cli, ['deploy', 'production'])
        assert result.exit_code == 0
        assert len(mock_api.calls) > 0
```
## Testing Error Handling
```python
class TestErrorHandling:
    """Test error handling"""

    def test_network_error(self, runner, monkeypatch):
        """Test network error handling"""
        def mock_request(*args, **kwargs):
            raise ConnectionError("Network unreachable")

        monkeypatch.setattr('requests.get', mock_request)
        result = runner.invoke(cli, ['status'])
        assert result.exit_code != 0
        assert 'network' in result.output.lower()

    def test_file_not_found(self, runner):
        """Test file not found error"""
        result = runner.invoke(cli, ['process', '--input', 'nonexistent.txt'])
        assert result.exit_code != 0
        assert 'not found' in result.output.lower()

    def test_invalid_json(self, runner):
        """Test invalid JSON handling"""
        with runner.isolated_filesystem():
            with open('config.json', 'w') as f:
                f.write('invalid json {[}')

            result = runner.invoke(cli, ['config', 'load', 'config.json'])
            assert result.exit_code != 0
            assert 'invalid' in result.output.lower()
```
## Best Practices
1. **Use Fixtures**: Share common setup across tests
2. **Isolated Filesystem**: Use `runner.isolated_filesystem()` for file operations
3. **Test Exit Codes**: Always check exit codes
4. **Clear Test Names**: Use descriptive test method names
5. **Test Edge Cases**: Test boundary conditions and error cases
6. **Mock External Dependencies**: Don't make real API calls
7. **Use Markers**: Mark tests as unit, integration, slow, etc.
## Resources
- [Click Testing Documentation](https://click.palletsprojects.com/en/8.1.x/testing/)
- [Pytest Documentation](https://docs.pytest.org/)
- [CliRunner API](https://click.palletsprojects.com/en/8.1.x/api/#click.testing.CliRunner)