# Integration Testing for CLI Applications
Complete workflows and integration testing patterns for CLI applications.
## Overview
Integration tests verify that multiple CLI commands work together correctly, testing complete user workflows rather than individual commands in isolation.
## Key Differences from Unit Tests
| Unit Tests | Integration Tests |
|---|---|
| Test individual commands | Test command sequences |
| Mock external dependencies | May use real dependencies |
| Fast execution | Slower execution |
| Isolated state | Shared state across commands |
## Node.js Integration Testing
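The Node.js examples below shell out to the compiled CLI through a `runCLI` helper, which this guide does not define. A minimal sketch using `child_process.spawnSync` (the `./bin/cli` path and the `CLIResult` shape are assumptions; adjust them to your project):

```typescript
import { spawnSync } from 'node:child_process';

// Hypothetical result shape assumed by the tests below.
interface CLIResult {
  code: number;
  stdout: string;
  stderr: string;
}

// Minimal sketch: run the CLI binary synchronously and capture its output.
// './bin/cli' is a placeholder for your project's entry point; the naive
// space-splitting of arguments is enough for the examples in this guide.
function runCLI(args: string): CLIResult {
  const result = spawnSync('./bin/cli', args.split(' '), { encoding: 'utf-8' });
  return {
    code: result.status ?? 1, // status is null if the process was killed
    stdout: result.stdout,
    stderr: result.stderr,
  };
}
```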
### Multi-Command Workflow
```typescript
import * as fs from 'node:fs';
import * as os from 'node:os';
import * as path from 'node:path';

describe('Complete Deployment Workflow', () => {
  let tempDir: string;

  beforeEach(() => {
    tempDir = fs.mkdtempSync(path.join(os.tmpdir(), 'cli-integration-'));
  });

  afterEach(() => {
    fs.rmSync(tempDir, { recursive: true, force: true });
  });

  test('full deployment workflow', () => {
    // Step 1: Initialize project
    let result = runCLI(`init my-project --cwd ${tempDir}`);
    expect(result.code).toBe(0);
    expect(fs.existsSync(path.join(tempDir, 'my-project'))).toBe(true);

    // Step 2: Configure
    const projectDir = path.join(tempDir, 'my-project');
    result = runCLI(`config set api_key test_key --cwd ${projectDir}`);
    expect(result.code).toBe(0);

    // Step 3: Build
    result = runCLI(`build --production --cwd ${projectDir}`);
    expect(result.code).toBe(0);
    expect(fs.existsSync(path.join(projectDir, 'dist'))).toBe(true);

    // Step 4: Deploy
    result = runCLI(`deploy staging --cwd ${projectDir}`);
    expect(result.code).toBe(0);
    expect(result.stdout).toContain('Deployed successfully');

    // Step 5: Verify
    result = runCLI(`status --cwd ${projectDir}`);
    expect(result.code).toBe(0);
    expect(result.stdout).toContain('staging');
  });
});
```
### State Persistence Testing
```typescript
describe('State Persistence', () => {
  test('state persists across commands', () => {
    const workspace = createTempWorkspace();
    try {
      // Create initial state
      runCLI(`init --cwd ${workspace}`);
      runCLI(`config set key1 value1 --cwd ${workspace}`);
      runCLI(`config set key2 value2 --cwd ${workspace}`);

      // Verify state persists
      let result = runCLI(`config get key1 --cwd ${workspace}`);
      expect(result.stdout).toContain('value1');

      // Modify state
      runCLI(`config set key1 updated --cwd ${workspace}`);

      // Verify modification
      result = runCLI(`config get key1 --cwd ${workspace}`);
      expect(result.stdout).toContain('updated');

      // Verify other keys unchanged
      result = runCLI(`config get key2 --cwd ${workspace}`);
      expect(result.stdout).toContain('value2');
    } finally {
      cleanupWorkspace(workspace);
    }
  });
});
```
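The `createTempWorkspace` and `cleanupWorkspace` helpers above are assumed, not library functions; a minimal sketch:

```typescript
import * as fs from 'node:fs';
import * as os from 'node:os';
import * as path from 'node:path';

// Hypothetical helpers used by the test above.
function createTempWorkspace(): string {
  // Create a unique throwaway directory for one test run.
  return fs.mkdtempSync(path.join(os.tmpdir(), 'cli-workspace-'));
}

function cleanupWorkspace(workspace: string): void {
  // Remove the directory and everything in it, ignoring missing paths.
  fs.rmSync(workspace, { recursive: true, force: true });
}
```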
## Python Integration Testing
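The Python examples assume a Click-based CLI exposed as `cli` and a `runner` fixture wrapping Click's test runner. A minimal `conftest.py` sketch (the import path for `cli` is project-specific and therefore omitted):

```python
# conftest.py — minimal sketch of the `runner` fixture assumed below.
import pytest
from click.testing import CliRunner

@pytest.fixture
def runner():
    # CliRunner invokes Click commands in-process and captures
    # exit codes and output, which the tests assert against.
    return CliRunner()
```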
### Complete Workflow Testing
```python
import os

class TestCompleteWorkflow:
    """Test complete CLI workflows"""

    def test_project_lifecycle(self, runner):
        """Test complete project lifecycle"""
        with runner.isolated_filesystem():
            # Initialize
            result = runner.invoke(cli, ['create', 'test-project'])
            assert result.exit_code == 0

            # Enter project directory
            os.chdir('test-project')

            # Configure
            result = runner.invoke(cli, ['config', 'set', 'api_key', 'test_key'])
            assert result.exit_code == 0

            # Add dependencies
            result = runner.invoke(cli, ['add', 'dependency', 'requests'])
            assert result.exit_code == 0

            # Build
            result = runner.invoke(cli, ['build'])
            assert result.exit_code == 0
            assert os.path.exists('dist')

            # Test
            result = runner.invoke(cli, ['test'])
            assert result.exit_code == 0

            # Deploy
            result = runner.invoke(cli, ['deploy', 'staging'])
            assert result.exit_code == 0

            # Verify
            result = runner.invoke(cli, ['status'])
            assert result.exit_code == 0
            assert 'staging' in result.output

    def test_multi_environment_workflow(self, runner):
        """Test workflow across multiple environments"""
        with runner.isolated_filesystem():
            # Setup
            runner.invoke(cli, ['init', 'multi-env-app'])
            os.chdir('multi-env-app')

            # Configure environments
            environments = ['development', 'staging', 'production']
            for env in environments:
                result = runner.invoke(
                    cli,
                    ['config', 'set', 'api_key', f'{env}_key', '--env', env]
                )
                assert result.exit_code == 0

            # Deploy to each environment
            for env in environments:
                result = runner.invoke(cli, ['deploy', env])
                assert result.exit_code == 0
                assert env in result.output
```
### Error Recovery Testing
```python
import os

class TestErrorRecovery:
    """Test error recovery workflows"""

    def test_rollback_on_failure(self, runner):
        """Test rollback after failed deployment"""
        with runner.isolated_filesystem():
            # Setup
            runner.invoke(cli, ['init', 'rollback-test'])
            os.chdir('rollback-test')
            runner.invoke(cli, ['config', 'set', 'api_key', 'test_key'])

            # Successful deployment
            result = runner.invoke(cli, ['deploy', 'staging'])
            assert result.exit_code == 0

            # Failed deployment (simulated via a test-only flag)
            result = runner.invoke(cli, ['deploy', 'staging', '--force-fail'])
            assert result.exit_code != 0

            # Rollback
            result = runner.invoke(cli, ['rollback'])
            assert result.exit_code == 0
            assert 'rollback successful' in result.output.lower()

    def test_recovery_from_corruption(self, runner):
        """Test recovery from corrupted state"""
        with runner.isolated_filesystem():
            # Create valid state
            runner.invoke(cli, ['init', 'corrupt-test'])
            os.chdir('corrupt-test')
            runner.invoke(cli, ['config', 'set', 'key', 'value'])

            # Corrupt the state file
            with open('.cli-state', 'w') as f:
                f.write('invalid json {[}')

            # The CLI should detect the corruption and report it
            result = runner.invoke(cli, ['config', 'get', 'key'])
            assert result.exit_code != 0
            assert 'corrupt' in result.output.lower()

            # Reset state
            result = runner.invoke(cli, ['reset', '--force'])
            assert result.exit_code == 0

            # Should work after reset
            result = runner.invoke(cli, ['config', 'set', 'key', 'new_value'])
            assert result.exit_code == 0
```
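The corruption test presumes the CLI validates its state file when loading it. A minimal sketch of such a check (the `.cli-state` format and the `load_state` helper are assumptions, not part of any library):

```python
import json

def load_state(path: str = '.cli-state') -> dict:
    """Parse the state file, failing loudly if it is corrupt."""
    try:
        with open(path) as f:
            return json.load(f)
    except json.JSONDecodeError as exc:
        # Surfacing the word 'corrupt' is what the test above asserts on.
        raise SystemExit(f'State file is corrupt: {exc}')
```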
## Integration Test Patterns
### 1. Sequential Command Testing
Test commands that must run in a specific order:
```python
def test_sequential_workflow(runner):
    """Test commands that depend on each other"""
    with runner.isolated_filesystem():
        # Each command depends on the previous one
        commands = [
            ['init', 'project'],
            ['config', 'set', 'key', 'value'],
            ['build'],
            ['test'],
            ['deploy', 'staging'],
        ]
        for cmd in commands:
            result = runner.invoke(cli, cmd)
            assert result.exit_code == 0, \
                f"Command {' '.join(cmd)} failed: {result.output}"
```
### 2. Concurrent Operation Testing
Test that concurrent operations are handled correctly:
```python
import threading

def test_concurrent_operations(runner):
    """Test handling of concurrent operations"""
    results = []

    def run_command():
        result = runner.invoke(cli, ['deploy', 'staging'])
        results.append(result)

    # Start multiple deployments in parallel
    threads = [threading.Thread(target=run_command) for _ in range(3)]
    for thread in threads:
        thread.start()
    for thread in threads:
        thread.join()

    # Only one should succeed; the others should detect the lock
    successful = sum(1 for r in results if r.exit_code == 0)
    assert successful == 1
    assert any('locked' in r.output.lower() for r in results if r.exit_code != 0)
```
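These assertions only hold if the CLI itself enforces mutual exclusion. One common approach is an exclusive lock file created with `O_EXCL`; a minimal sketch (hypothetical helpers, not part of the CLI under test):

```python
import os

LOCK_FILE = '.cli.lock'  # hypothetical lock-file name

def acquire_lock() -> bool:
    """Atomically create the lock file; report failure if it already exists."""
    try:
        fd = os.open(LOCK_FILE, os.O_CREAT | os.O_EXCL | os.O_WRONLY)
        os.close(fd)
        return True
    except FileExistsError:
        return False  # another process holds the lock

def release_lock() -> None:
    os.remove(LOCK_FILE)
```

A deployment command would call `acquire_lock()` first and print a message containing "locked" when it fails, which is what the test detects.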
### 3. Data Migration Testing
Test data migration between versions:
```python
import json
import os

def test_data_migration(runner):
    """Test data migration workflow"""
    with runner.isolated_filesystem():
        # Create data in the old (version 1) format
        old_data = {'version': 1, 'data': {'key': 'value'}}
        with open('data.json', 'w') as f:
            json.dump(old_data, f)

        # Run the migration
        result = runner.invoke(cli, ['migrate', '--to', '2.0'])
        assert result.exit_code == 0

        # Verify the new format
        with open('data.json', 'r') as f:
            new_data = json.load(f)
        assert new_data['version'] == 2
        assert new_data['data']['key'] == 'value'

        # Verify a backup was created
        assert os.path.exists('data.json.backup')
```
## Best Practices
- Use Isolated Environments: Each test should run in a clean environment
- Test Real Workflows: Test actual user scenarios, not artificial sequences
- Include Error Paths: Test recovery from failures
- Test State Persistence: Verify data persists correctly across commands
- Use Realistic Data: Test with data similar to production use cases
- Clean Up Resources: Always clean up temporary files and other resources when tests finish
- Document Workflows: Clearly document what workflow each test verifies
- Set Appropriate Timeouts: Integration tests may take longer
- Mark Slow Tests: Use test markers for slow-running integration tests (see the marker sketch after this list)
- Test Concurrency: Verify handling of simultaneous operations
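Markers let you include or exclude integration tests at run time (see `pytest -m integration` below). A minimal sketch; the `integration` and `slow` marker names are conventions, not built-ins, and custom markers should be registered under pytest's `markers` setting to avoid warnings:

```python
import pytest

# Register these marker names in pytest.ini or pyproject.toml
# to avoid PytestUnknownMarkWarning.
@pytest.mark.integration
@pytest.mark.slow
def test_full_deployment_workflow(runner):
    """Selected by `pytest -m integration`, skipped by `pytest -m "not slow"`."""
    result = runner.invoke(cli, ['deploy', 'staging'])
    assert result.exit_code == 0
```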
## Running Integration Tests
### Node.js/Jest
```bash
# Run all integration tests
npm test -- --testPathPattern=integration

# Run specific integration test
npm test -- integration/deployment.test.ts

# Run with extended timeout
npm test -- --testTimeout=30000
```
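Rather than passing `--testTimeout` on every run, the timeout and the integration test pattern can live in Jest's config; a minimal sketch (the option names are from Jest's documented config; the `integration` directory layout is an assumption):

```typescript
// jest.config.ts — sketch: give integration tests a longer default timeout
import type { Config } from 'jest';

const config: Config = {
  testMatch: ['**/integration/**/*.test.ts'],
  testTimeout: 30_000, // integration workflows are slower than unit tests
};

export default config;
```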
### Python/pytest
```bash
# Run all integration tests
pytest tests/integration

# Run specific test
pytest tests/integration/test_workflow.py

# Run marked integration tests
pytest -m integration

# Run with verbose output
pytest tests/integration -v

# Skip slow tests
pytest -m "not slow"
```