Initial commit
skills/cli-testing-patterns/templates/pytest-click-test.py (new file, 270 lines)

"""
|
||||
Pytest Click Testing Template
|
||||
|
||||
Complete test suite for Click-based CLI applications using CliRunner
|
||||
Tests command execution, exit codes, output validation, and interactive prompts
|
||||
"""
|
||||
|
||||
import pytest
|
||||
from click.testing import CliRunner
|
||||
from mycli.cli import cli
|
||||
|
||||
|
||||
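# NOTE: `mycli.cli` and the commands exercised below (deploy, build, config,
# status, list, delete, init, process, export, retry, deploy-wizard) are
# placeholders; point the import and command names at your own application.
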
@pytest.fixture
def runner():
    """Create a CliRunner instance for testing"""
    return CliRunner()

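# A variant worth considering (assumption: your CLI reads environment
# variables such as a hypothetical MYCLI_CONFIG): construct the runner with
# a controlled environment so tests never pick up host-shell settings.
@pytest.fixture
def clean_env_runner():
    """CliRunner with a controlled environment"""
    return CliRunner(env={'MYCLI_CONFIG': '/dev/null'})
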
class TestVersionCommand:
    """Test version display"""

    def test_version_flag(self, runner):
        """Should display version with --version"""
        result = runner.invoke(cli, ['--version'])
        assert result.exit_code == 0
        assert '1.0.0' in result.output

    def test_version_short_flag(self, runner):
        """Should display version with -v"""
        result = runner.invoke(cli, ['-v'])
        assert result.exit_code == 0
        assert result.output.count('.') == 2  # Version format X.Y.Z

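# Note: click.version_option registers only --version by default, so the -v
# alias above assumes the app passes '-v' as an extra flag name. A hedged
# alternative to hardcoding '1.0.0' (assumption: mycli is an installed
# distribution): compare against the package metadata instead.
def test_version_matches_metadata(runner):
    """Version output should match the installed package metadata"""
    from importlib.metadata import version

    result = runner.invoke(cli, ['--version'])
    assert result.exit_code == 0
    assert version('mycli') in result.output
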
class TestHelpCommand:
    """Test help display"""

    def test_help_flag(self, runner):
        """Should display help with --help"""
        result = runner.invoke(cli, ['--help'])
        assert result.exit_code == 0
        assert 'Usage:' in result.output
        assert 'Commands:' in result.output
        assert 'Options:' in result.output

    def test_help_short_flag(self, runner):
        """Should display help with -h"""
        result = runner.invoke(cli, ['-h'])
        assert result.exit_code == 0
        assert 'Usage:' in result.output

    def test_command_help(self, runner):
        """Should display help for specific command"""
        result = runner.invoke(cli, ['deploy', '--help'])
        assert result.exit_code == 0
        assert 'deploy' in result.output.lower()

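# If test_help_short_flag fails with "no such option": Click registers only
# --help by default, so the -h alias must come from the app itself, e.g.
#
#     @click.group(context_settings={'help_option_names': ['-h', '--help']})
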
class TestErrorHandling:
    """Test error handling and validation"""

    def test_unknown_command(self, runner):
        """Should handle unknown commands"""
        result = runner.invoke(cli, ['unknown-command'])
        assert result.exit_code != 0
        assert 'no such command' in result.output.lower()

    def test_invalid_option(self, runner):
        """Should handle invalid options"""
        result = runner.invoke(cli, ['--invalid-option'])
        assert result.exit_code != 0
        assert 'no such option' in result.output.lower()

    def test_missing_required_argument(self, runner):
        """Should validate required arguments"""
        result = runner.invoke(cli, ['deploy'])
        assert result.exit_code != 0
        assert 'missing argument' in result.output.lower()

    def test_invalid_argument_type(self, runner):
        """Should validate argument types"""
        result = runner.invoke(cli, ['retry', '--count', 'invalid'])
        assert result.exit_code != 0
        assert 'invalid' in result.output.lower()

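# Caveat: depending on your Click version, usage-error messages may be
# captured in result.stderr rather than result.output, since newer releases
# separate the streams. A compact variant of the cases above: parametrize
# the invalid invocations (the argument lists are assumptions matching this
# template's hypothetical commands).
@pytest.mark.parametrize('args', [
    ['unknown-command'],
    ['--invalid-option'],
    ['deploy'],
    ['retry', '--count', 'invalid'],
])
def test_usage_errors_exit_nonzero(runner, args):
    """Every usage error should exit non-zero"""
    result = runner.invoke(cli, args)
    assert result.exit_code != 0
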
class TestCommandExecution:
    """Test command execution with various arguments"""

    def test_deploy_command(self, runner):
        """Should execute deploy command"""
        result = runner.invoke(cli, ['deploy', 'production', '--force'])
        assert result.exit_code == 0
        assert 'Deploying to production' in result.output
        assert 'Force mode enabled' in result.output

    def test_deploy_with_flags(self, runner):
        """Should handle multiple flags"""
        result = runner.invoke(cli, ['deploy', 'staging', '--verbose', '--dry-run'])
        assert result.exit_code == 0
        assert 'staging' in result.output
        assert 'dry run' in result.output.lower()

    def test_build_command(self, runner):
        """Should execute build command"""
        result = runner.invoke(cli, ['build', '--output', 'dist'])
        assert result.exit_code == 0
        assert 'Building project' in result.output
        assert 'dist' in result.output

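# Debugging tip: invoke() traps exceptions and stores them on
# result.exception. While diagnosing a failing command, pass
# catch_exceptions=False so the original traceback surfaces in pytest:
#
#     result = runner.invoke(cli, ['deploy', 'production'],
#                            catch_exceptions=False)
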
class TestConfiguration:
    """Test configuration management"""

    def test_config_set(self, runner):
        """Should set configuration value"""
        result = runner.invoke(cli, ['config', 'set', 'api_key', 'your_key_here'])
        assert result.exit_code == 0
        assert 'Configuration updated' in result.output

    def test_config_get(self, runner):
        """Should get configuration value"""
        runner.invoke(cli, ['config', 'set', 'api_key', 'your_key_here'])
        result = runner.invoke(cli, ['config', 'get', 'api_key'])
        assert result.exit_code == 0
        assert 'your_key_here' in result.output

    def test_config_list(self, runner):
        """Should list all configuration"""
        result = runner.invoke(cli, ['config', 'list'])
        assert result.exit_code == 0
        assert 'Configuration:' in result.output

    def test_config_delete(self, runner):
        """Should delete configuration value"""
        runner.invoke(cli, ['config', 'set', 'temp_key', 'temp_value'])
        result = runner.invoke(cli, ['config', 'delete', 'temp_key'])
        assert result.exit_code == 0
        assert 'deleted' in result.output.lower()

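# Isolation note (assumption: `config set` persists to a file): run these
# tests inside runner.isolated_filesystem(), or point the CLI at a temporary
# config path, so the suite never mutates a developer's real configuration:
#
#     def test_config_set(self, runner):
#         with runner.isolated_filesystem():
#             result = runner.invoke(cli, ['config', 'set', 'key', 'value'])
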
class TestExitCodes:
    """Test exit code validation"""

    def test_success_exit_code(self, runner):
        """Should return 0 on success"""
        result = runner.invoke(cli, ['status'])
        assert result.exit_code == 0

    def test_error_exit_code(self, runner):
        """Should return non-zero on error"""
        result = runner.invoke(cli, ['invalid-command'])
        assert result.exit_code != 0

    def test_validation_error_exit_code(self, runner):
        """Should return specific code for validation errors"""
        result = runner.invoke(cli, ['deploy', '--invalid-flag'])
        assert result.exit_code == 2  # Click uses 2 for usage errors

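# When an exit code alone is not informative, result.exception holds the
# exception that ended the run, and result.exc_info carries the traceback
# for unexpected errors.
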
class TestInteractivePrompts:
    """Test interactive prompt handling"""

    def test_interactive_deploy_wizard(self, runner):
        """Should handle interactive prompts"""
        result = runner.invoke(
            cli,
            ['deploy-wizard'],
            input='my-app\n1\nyes\n'
        )
        assert result.exit_code == 0
        assert 'my-app' in result.output

    def test_confirmation_prompt(self, runner):
        """Should handle confirmation prompts"""
        result = runner.invoke(
            cli,
            ['delete', 'resource-id'],
            input='y\n'
        )
        assert result.exit_code == 0
        assert 'deleted' in result.output.lower()

    def test_confirmation_prompt_denied(self, runner):
        """Should handle denied confirmation"""
        result = runner.invoke(
            cli,
            ['delete', 'resource-id'],
            input='n\n'
        )
        assert result.exit_code == 1
        assert 'cancelled' in result.output.lower()

    def test_multiple_prompts(self, runner):
        """Should handle multiple prompts in sequence"""
        result = runner.invoke(
            cli,
            ['init'],
            input='my-project\nJohn Doe\njohn@example.com\n'
        )
        assert result.exit_code == 0
        assert 'my-project' in result.output
        assert 'John Doe' in result.output

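# How the input kwarg works: the string is fed to the command's stdin, so
# each '\n'-terminated segment answers one prompt in order. In
# test_multiple_prompts, 'my-project\nJohn Doe\njohn@example.com\n' answers
# three prompts (project name, author, email); typed responses are echoed
# into result.output, which is why the assertions above can see them.
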
class TestOutputFormatting:
    """Test output formatting options"""

    def test_json_output(self, runner):
        """Should output JSON format"""
        result = runner.invoke(cli, ['status', '--format', 'json'])
        assert result.exit_code == 0
        try:
            json.loads(result.output)
        except json.JSONDecodeError:
            pytest.fail("Output is not valid JSON")

    def test_yaml_output(self, runner):
        """Should output YAML format"""
        result = runner.invoke(cli, ['status', '--format', 'yaml'])
        assert result.exit_code == 0
        assert ':' in result.output  # crude structural check; see note below

    def test_table_output(self, runner):
        """Should output table format by default"""
        result = runner.invoke(cli, ['list'])
        assert result.exit_code == 0
        assert '│' in result.output or '|' in result.output

    def test_quiet_mode(self, runner):
        """Should suppress output in quiet mode"""
        result = runner.invoke(cli, ['deploy', 'production', '--quiet'])
        assert result.exit_code == 0
        assert len(result.output.strip()) == 0

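# Stricter YAML check (assumption: PyYAML is available as a test dependency):
# parse the output instead of grepping for a colon.
#
#     import yaml
#
#     data = yaml.safe_load(result.output)
#     assert isinstance(data, dict)
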
class TestFileOperations:
    """Test file-based operations"""

    def test_file_input(self, runner):
        """Should read from file"""
        with runner.isolated_filesystem():
            with open('input.txt', 'w') as f:
                f.write('test data\n')

            result = runner.invoke(cli, ['process', '--input', 'input.txt'])
            assert result.exit_code == 0

    def test_file_output(self, runner):
        """Should write to file"""
        with runner.isolated_filesystem():
            result = runner.invoke(cli, ['export', '--output', 'output.txt'])
            assert result.exit_code == 0
            with open('output.txt', 'r') as f:
                content = f.read()
            assert len(content) > 0

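# Variant (Click 8.0+): isolated_filesystem() accepts a temp_dir argument,
# which pairs well with pytest's tmp_path fixture when you want artifacts to
# outlive the test for inspection:
#
#     def test_file_output_kept(self, runner, tmp_path):
#         with runner.isolated_filesystem(temp_dir=tmp_path):
#             ...
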
class TestIsolation:
    """Test isolated filesystem operations"""

    def test_isolated_filesystem(self, runner):
        """Should work in isolated filesystem"""
        with runner.isolated_filesystem():
            result = runner.invoke(cli, ['init', 'test-project'])
            assert result.exit_code == 0

            assert os.path.exists('test-project')

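# ---------------------------------------------------------------------------
# A minimal sketch of the application shape this template assumes
# (hypothetical; your real mycli/cli.py will differ). It shows the Click
# wiring the suite relies on: -h/--help and -v/--version aliases, a required
# TARGET argument, and a confirmation flow that prints 'Cancelled' and exits
# with code 1 when denied. Kept commented out so it never shadows the real
# `cli` imported above.
#
#     import click
#
#     @click.group(context_settings={'help_option_names': ['-h', '--help']})
#     @click.version_option('1.0.0', '-v', '--version')
#     def cli():
#         """Example CLI"""
#
#     @cli.command()
#     @click.argument('target')
#     @click.option('--force', is_flag=True)
#     def deploy(target, force):
#         click.echo(f'Deploying to {target}')
#         if force:
#             click.echo('Force mode enabled')
#
#     @cli.command()
#     @click.argument('resource_id')
#     def delete(resource_id):
#         if not click.confirm(f'Delete {resource_id}?'):
#             click.echo('Cancelled')
#             raise SystemExit(1)
#         click.echo(f'{resource_id} deleted')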