Initial commit

skills/cli-testing-patterns/templates/pytest-integration-test.py (new file, 378 lines)
@@ -0,0 +1,378 @@
"""
Pytest Integration Test Template

Complete workflow testing for CLI applications using click.testing.CliRunner.
Tests multi-command workflows, state persistence, and end-to-end scenarios.
"""

import json
import os
import time

import pytest
from click.testing import CliRunner

# Placeholder import: replace `mycli.cli` with your application's Click
# entry point.
from mycli.cli import cli


@pytest.fixture
def integration_runner():
    """Create a runner with an isolated filesystem for integration tests.

    isolated_filesystem() runs the test in a temporary directory and
    restores the original working directory afterwards, so os.chdir()
    calls inside a test do not leak into other tests.
    """
    runner = CliRunner()
    with runner.isolated_filesystem():
        yield runner


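# A variant fixture, shown as a sketch: `env` is a documented parameter of
# CliRunner.invoke() that temporarily overrides environment variables for a
# single invocation (values must be strings). The MYCLI_API_KEY name used in
# the usage comment below is a hypothetical example.
@pytest.fixture
def env_runner():
    """Runner wrapper that injects environment variables per invocation"""
    runner = CliRunner()
    with runner.isolated_filesystem():
        def invoke(args, **env_vars):
            # e.g. invoke(['deploy', 'staging'], MYCLI_API_KEY='test_key')
            return runner.invoke(cli, args, env=env_vars or None)
        yield invoke

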
class TestDeploymentWorkflow:
    """Test the complete deployment workflow"""

    def test_full_deployment_workflow(self, integration_runner):
        """Should complete the init -> configure -> build -> deploy workflow"""
        runner = integration_runner

        # Step 1: Initialize project
        result = runner.invoke(cli, ['init', 'my-project'])
        assert result.exit_code == 0
        assert 'Project initialized' in result.output
        assert os.path.exists('my-project')

        # Step 2: Configure API key
        os.chdir('my-project')
        result = runner.invoke(cli, ['config', 'set', 'api_key', 'your_key_here'])
        assert result.exit_code == 0

        # Step 3: Build project
        result = runner.invoke(cli, ['build', '--production'])
        assert result.exit_code == 0
        assert 'Build successful' in result.output

        # Step 4: Deploy to production
        result = runner.invoke(cli, ['deploy', 'production'])
        assert result.exit_code == 0
        assert 'Deployed successfully' in result.output

    def test_deployment_without_config_fails(self, integration_runner):
        """Should fail deployment without required configuration"""
        runner = integration_runner

        # Initialize but don't configure
        runner.invoke(cli, ['init', 'my-project'])
        os.chdir('my-project')

        # Try to deploy without an API key
        result = runner.invoke(cli, ['deploy', 'production'])
        assert result.exit_code != 0
        assert 'api_key' in result.output.lower()

    def test_deployment_rollback(self, integration_runner):
        """Should roll back a failed deployment"""
        runner = integration_runner

        # Set up and deploy
        runner.invoke(cli, ['init', 'my-project'])
        os.chdir('my-project')
        runner.invoke(cli, ['config', 'set', 'api_key', 'your_key_here'])
        runner.invoke(cli, ['deploy', 'staging'])

        # Roll back
        result = runner.invoke(cli, ['rollback'])
        assert result.exit_code == 0
        assert 'Rollback successful' in result.output


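# Debugging note (a documented Click feature): CliRunner.invoke() catches
# exceptions by default and stores them on result.exception. While diagnosing
# a failing workflow step, pass catch_exceptions=False so the exception
# propagates and pytest prints the full traceback:
#
#     result = runner.invoke(cli, ['deploy', 'production'],
#                            catch_exceptions=False)

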
class TestMultiEnvironmentWorkflow:
    """Test multi-environment configuration and deployment"""

    def test_manage_multiple_environments(self, integration_runner):
        """Should manage dev, staging, and production environments"""
        runner = integration_runner

        runner.invoke(cli, ['init', 'multi-env-project'])
        os.chdir('multi-env-project')

        # Configure development
        runner.invoke(cli, ['config', 'set', 'api_key', 'dev_key', '--env', 'development'])
        runner.invoke(cli, ['config', 'set', 'base_url', 'https://dev.api.example.com', '--env', 'development'])

        # Configure staging
        runner.invoke(cli, ['config', 'set', 'api_key', 'staging_key', '--env', 'staging'])
        runner.invoke(cli, ['config', 'set', 'base_url', 'https://staging.api.example.com', '--env', 'staging'])

        # Configure production
        runner.invoke(cli, ['config', 'set', 'api_key', 'prod_key', '--env', 'production'])
        runner.invoke(cli, ['config', 'set', 'base_url', 'https://api.example.com', '--env', 'production'])

        # Deploy to each environment
        dev_result = runner.invoke(cli, ['deploy', 'development'])
        assert dev_result.exit_code == 0
        assert 'dev.api.example.com' in dev_result.output

        staging_result = runner.invoke(cli, ['deploy', 'staging'])
        assert staging_result.exit_code == 0
        assert 'staging.api.example.com' in staging_result.output

        prod_result = runner.invoke(cli, ['deploy', 'production'])
        assert prod_result.exit_code == 0
        assert 'api.example.com' in prod_result.output

    def test_environment_isolation(self, integration_runner):
        """Should keep environment configurations isolated"""
        runner = integration_runner

        runner.invoke(cli, ['init', 'isolated-project'])
        os.chdir('isolated-project')

        # Set different values for each environment
        runner.invoke(cli, ['config', 'set', 'timeout', '10', '--env', 'development'])
        runner.invoke(cli, ['config', 'set', 'timeout', '30', '--env', 'production'])

        # Verify the values are isolated
        dev_result = runner.invoke(cli, ['config', 'get', 'timeout', '--env', 'development'])
        assert '10' in dev_result.output

        prod_result = runner.invoke(cli, ['config', 'get', 'timeout', '--env', 'production'])
        assert '30' in prod_result.output


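# The three near-identical deploy blocks above can also be written with
# pytest.mark.parametrize. A sketch, reusing the same hypothetical commands
# and per-environment URLs from the test above:
class TestParametrizedDeploy:
    """Parametrized variant of the multi-environment deploy test"""

    @pytest.mark.parametrize('env,expected_url', [
        ('development', 'dev.api.example.com'),
        ('staging', 'staging.api.example.com'),
        ('production', 'api.example.com'),
    ])
    def test_deploy_reports_environment_url(self, integration_runner, env, expected_url):
        runner = integration_runner
        runner.invoke(cli, ['init', 'param-project'])
        os.chdir('param-project')
        runner.invoke(cli, ['config', 'set', 'api_key', env + '_key', '--env', env])
        runner.invoke(cli, ['config', 'set', 'base_url', 'https://' + expected_url, '--env', env])

        result = runner.invoke(cli, ['deploy', env])
        assert result.exit_code == 0
        assert expected_url in result.output

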
class TestStatePersistence:
    """Test state management and persistence"""

    def test_state_persistence_across_commands(self, integration_runner):
        """Should maintain state across multiple commands"""
        runner = integration_runner

        # Initialize state
        result = runner.invoke(cli, ['state', 'init'])
        assert result.exit_code == 0

        # Set multiple state values
        runner.invoke(cli, ['state', 'set', 'counter', '0'])
        runner.invoke(cli, ['state', 'set', 'user', 'testuser'])

        # Increment the counter multiple times
        for _ in range(5):
            runner.invoke(cli, ['increment'])

        # Verify the final state
        result = runner.invoke(cli, ['state', 'get', 'counter'])
        assert result.exit_code == 0
        assert '5' in result.output

        result = runner.invoke(cli, ['state', 'get', 'user'])
        assert 'testuser' in result.output

    def test_state_recovery_from_corruption(self, integration_runner):
        """Should recover from a corrupted state file"""
        runner = integration_runner

        # Create valid state
        runner.invoke(cli, ['state', 'init'])
        runner.invoke(cli, ['state', 'set', 'key', 'value'])

        # Corrupt the state file
        with open('.mycli-state', 'w') as f:
            f.write('invalid json {[}')

        # Should detect the corruption and report it
        result = runner.invoke(cli, ['state', 'get', 'key'])
        assert result.exit_code != 0
        assert 'corrupt' in result.output.lower()

        # Should be able to reset
        result = runner.invoke(cli, ['state', 'reset'])
        assert result.exit_code == 0


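# When a persistence test fails, asserting on the file the CLI writes can
# localize the bug faster than command output alone. A sketch, assuming the
# state is stored as JSON in the same `.mycli-state` file the corruption
# test above overwrites:
#
#     with open('.mycli-state') as f:
#         state = json.load(f)
#     assert state['counter'] == 5  # or '5', depending on serialization
#     assert state['user'] == 'testuser'

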
class TestPluginWorkflow:
    """Test plugin installation and usage"""

    def test_plugin_lifecycle(self, integration_runner):
        """Should install, use, and uninstall plugins"""
        runner = integration_runner

        runner.invoke(cli, ['init', 'plugin-project'])
        os.chdir('plugin-project')

        # Install plugin
        result = runner.invoke(cli, ['plugin', 'install', 'test-plugin'])
        assert result.exit_code == 0
        assert 'installed' in result.output.lower()

        # Verify the plugin is listed
        result = runner.invoke(cli, ['plugin', 'list'])
        assert 'test-plugin' in result.output

        # Use a plugin command
        result = runner.invoke(cli, ['test-plugin:command', '--arg', 'value'])
        assert result.exit_code == 0

        # Uninstall plugin
        result = runner.invoke(cli, ['plugin', 'uninstall', 'test-plugin'])
        assert result.exit_code == 0

        # Verify the plugin is removed
        result = runner.invoke(cli, ['plugin', 'list'])
        assert 'test-plugin' not in result.output

    def test_plugin_conflict_detection(self, integration_runner):
        """Should detect and handle plugin conflicts"""
        runner = integration_runner

        runner.invoke(cli, ['init', 'conflict-project'])
        os.chdir('conflict-project')

        # Install the first plugin
        runner.invoke(cli, ['plugin', 'install', 'plugin-a'])

        # Try to install a conflicting plugin. Whether two plugins conflict
        # is application-specific, so this check is deliberately tolerant:
        # it only requires a nonzero exit code when a conflict is reported.
        result = runner.invoke(cli, ['plugin', 'install', 'plugin-b'])
        if 'conflict' in result.output.lower():
            assert result.exit_code != 0


class TestDataMigration:
    """Test data migration workflows"""

    def test_version_migration(self, integration_runner):
        """Should migrate data between versions"""
        runner = integration_runner

        # Create old-version data
        old_data = {
            'version': 1,
            'format': 'legacy',
            'data': {'key': 'value'}
        }
        with open('data.json', 'w') as f:
            json.dump(old_data, f)

        # Run migration
        result = runner.invoke(cli, ['migrate', '--to', '2.0'])
        assert result.exit_code == 0

        # Verify the new format
        with open('data.json', 'r') as f:
            new_data = json.load(f)
        assert new_data['version'] == 2
        assert 'legacy' not in new_data.get('format', '')

    def test_migration_backup(self, integration_runner):
        """Should create a backup during migration"""
        runner = integration_runner

        # Create data
        data = {'version': 1, 'data': 'important'}
        with open('data.json', 'w') as f:
            json.dump(data, f)

        # Migrate with backup
        result = runner.invoke(cli, ['migrate', '--to', '2.0', '--backup'])
        assert result.exit_code == 0

        # Verify the backup exists
        assert os.path.exists('data.json.backup')


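# A natural extension of the backup test (sketch): assert that the backup's
# contents match the pre-migration data, not merely that the file exists:
#
#     with open('data.json.backup') as f:
#         assert json.load(f) == data

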
class TestConcurrentOperations:
    """Test handling of concurrent operations"""

    def test_file_locking(self, integration_runner):
        """Should prevent concurrent modifications"""
        runner = integration_runner

        runner.invoke(cli, ['init', 'lock-project'])
        os.chdir('lock-project')

        # Simulate another process holding the lock
        with open('.mycli.lock', 'w') as f:
            f.write('locked')

        # Try to run a command that needs the lock
        result = runner.invoke(cli, ['deploy', 'production'])
        assert result.exit_code != 0
        assert 'lock' in result.output.lower()

    def test_lock_timeout(self, integration_runner):
        """Should time out waiting for a lock"""
        runner = integration_runner

        runner.invoke(cli, ['init', 'timeout-project'])
        os.chdir('timeout-project')

        # Create a stale lock, timestamped one hour in the past
        with open('.mycli.lock', 'w') as f:
            f.write(str(time.time() - 3600))

        # Should detect the stale lock and continue
        result = runner.invoke(cli, ['build'])
        assert result.exit_code == 0


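# If the CLI judges lock staleness by reading time.time(), pytest's built-in
# monkeypatch fixture can pin the clock instead of relying on a real offset.
# A sketch; the patch target 'mycli.cli.time.time' assumes the CLI module
# imports `time` and is hypothetical here:
#
#     def test_lock_timeout(self, integration_runner, monkeypatch):
#         monkeypatch.setattr('mycli.cli.time.time', lambda: 1_000_000.0)
#         ...

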
class TestErrorRecovery:
    """Test error recovery and retry logic"""

    def test_retry_on_failure(self, integration_runner):
        """Should retry failed operations"""
        runner = integration_runner

        runner.invoke(cli, ['init', 'retry-project'])
        os.chdir('retry-project')
        runner.invoke(cli, ['config', 'set', 'api_key', 'your_key_here'])

        # Deploy with retries enabled; the command should either succeed or
        # report the retry attempts it made before giving up
        result = runner.invoke(cli, ['deploy', 'staging', '--retry', '3'])
        assert result.exit_code == 0 or 'retry' in result.output.lower()

    def test_partial_failure_recovery(self, integration_runner):
        """Should recover from partial failures"""
        runner = integration_runner

        runner.invoke(cli, ['init', 'recovery-project'])
        os.chdir('recovery-project')

        # Create partial state
        runner.invoke(cli, ['build', '--step', '1'])
        runner.invoke(cli, ['build', '--step', '2'])

        # Resume from the last successful step
        result = runner.invoke(cli, ['build', '--continue'])
        assert result.exit_code == 0


class TestCompleteWorkflow:
    """Test complete end-to-end workflows"""

    def test_full_project_lifecycle(self, integration_runner):
        """Should complete the entire project lifecycle"""
        runner = integration_runner

        # Create project
        result = runner.invoke(cli, ['create', 'full-project'])
        assert result.exit_code == 0

        os.chdir('full-project')

        # Configure
        runner.invoke(cli, ['config', 'set', 'api_key', 'your_key_here'])
        runner.invoke(cli, ['config', 'set', 'region', 'us-west-1'])

        # Add dependencies
        result = runner.invoke(cli, ['add', 'dependency', 'package-name'])
        assert result.exit_code == 0

        # Build
        result = runner.invoke(cli, ['build', '--production'])
        assert result.exit_code == 0

        # Test
        result = runner.invoke(cli, ['test'])
        assert result.exit_code == 0

        # Deploy
        result = runner.invoke(cli, ['deploy', 'production'])
        assert result.exit_code == 0

        # Verify deployment
        result = runner.invoke(cli, ['status'])
        assert result.exit_code == 0
        assert 'deployed' in result.output.lower()
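

# Usage note (sketch): save this template as e.g. tests/test_integration.py,
# replace the `mycli.cli` import with your application's Click entry point,
# adjust the command names and expected output strings to match your CLI,
# then run:
#
#     pytest tests/test_integration.py -v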