# Pytest Click Testing Example

Comprehensive examples for testing Click-based CLI applications using pytest and Click's `CliRunner`.

## Basic Setup

```python
import pytest
from click.testing import CliRunner

from mycli.cli import cli


@pytest.fixture
def runner():
    """Provide a fresh CliRunner for each test."""
    return CliRunner()
```
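
The examples below invoke commands such as `deploy`, `init`, and `config` on the imported `cli` group. For orientation, here is a hypothetical sketch of what such an application might look like; the names and options are illustrative, not the actual `mycli` implementation:

```python
# Hypothetical sketch of the application under test; the real
# mycli/cli.py defines the commands the tests below exercise.
import click


@click.group()
@click.version_option('1.0.0')
def cli():
    """Example CLI."""


@cli.command()
@click.argument('environment')
@click.option('--force', is_flag=True, help='Skip confirmation checks.')
def deploy(environment, force):
    """Deploy to ENVIRONMENT."""
    click.echo(f'Deploying to {environment}' + (' (force)' if force else ''))
```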

## Basic Command Testing

```python
class TestBasicCommands:
    """Test basic CLI commands"""

    def test_version(self, runner):
        """Test version command"""
        result = runner.invoke(cli, ['--version'])
        assert result.exit_code == 0
        assert '1.0.0' in result.output

    def test_help(self, runner):
        """Test help command"""
        result = runner.invoke(cli, ['--help'])
        assert result.exit_code == 0
        assert 'Usage:' in result.output

    def test_unknown_command(self, runner):
        """Test unknown command handling"""
        result = runner.invoke(cli, ['unknown'])
        assert result.exit_code != 0
        assert 'no such command' in result.output.lower()
```
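
By default `CliRunner` mixes stderr into `result.output`. To assert on the streams separately, Click 8.1 accepts `mix_stderr=False` (newer Click releases capture stderr separately by default); a sketch assuming Click 8.1:

```python
def test_error_message_on_stderr():
    # Click 8.1: keep stdout and stderr separate so each can be asserted.
    runner = CliRunner(mix_stderr=False)
    result = runner.invoke(cli, ['unknown'])
    assert result.exit_code != 0
    assert 'No such command' in result.stderr
```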

## Testing with Arguments

```python
class TestArgumentParsing:
    """Test argument parsing"""

    def test_required_argument(self, runner):
        """Test command with required argument"""
        result = runner.invoke(cli, ['deploy', 'production'])
        assert result.exit_code == 0
        assert 'production' in result.output

    def test_missing_required_argument(self, runner):
        """Test missing required argument"""
        result = runner.invoke(cli, ['deploy'])
        assert result.exit_code != 0
        assert 'missing argument' in result.output.lower()

    def test_optional_argument(self, runner):
        """Test optional argument"""
        result = runner.invoke(cli, ['build', '--output', 'dist'])
        assert result.exit_code == 0
        assert 'dist' in result.output
```

## Testing with Options

```python
class TestOptionParsing:
    """Test option parsing"""

    def test_boolean_flag(self, runner):
        """Test boolean flag option"""
        result = runner.invoke(cli, ['deploy', 'staging', '--force'])
        assert result.exit_code == 0
        assert 'force' in result.output.lower()

    def test_option_with_value(self, runner):
        """Test option with value"""
        result = runner.invoke(cli, ['config', 'set', '--key', 'api_key', '--value', 'test'])
        assert result.exit_code == 0

    def test_multiple_options(self, runner):
        """Test multiple options"""
        result = runner.invoke(
            cli,
            ['deploy', 'production', '--verbose', '--dry-run', '--timeout', '60']
        )
        assert result.exit_code == 0
```
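
Click validates typed options before the command body runs. Assuming `--timeout` is declared with `type=int`, a non-numeric value should be rejected as a usage error:

```python
def test_invalid_option_type(runner):
    """A non-integer --timeout (assuming type=int) fails before the command runs."""
    result = runner.invoke(cli, ['deploy', 'production', '--timeout', 'soon'])
    assert result.exit_code == 2  # Click's exit code for usage errors
    assert 'invalid value' in result.output.lower()
```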

## Testing Interactive Prompts

```python
class TestInteractivePrompts:
    """Test interactive prompt handling"""

    def test_simple_prompt(self, runner):
        """Test simple text prompt"""
        result = runner.invoke(cli, ['init'], input='my-project\n')
        assert result.exit_code == 0
        assert 'my-project' in result.output

    def test_confirmation_prompt(self, runner):
        """Test confirmation prompt (yes)"""
        result = runner.invoke(cli, ['delete', 'resource-id'], input='y\n')
        assert result.exit_code == 0
        assert 'deleted' in result.output.lower()

    def test_confirmation_prompt_no(self, runner):
        """Test confirmation prompt (no)"""
        result = runner.invoke(cli, ['delete', 'resource-id'], input='n\n')
        assert result.exit_code == 1
        assert 'cancelled' in result.output.lower()

    def test_multiple_prompts(self, runner):
        """Test multiple prompts in sequence"""
        inputs = 'my-project\nJohn Doe\njohn@example.com\n'
        result = runner.invoke(cli, ['init', '--interactive'], input=inputs)
        assert result.exit_code == 0
        assert 'my-project' in result.output
        assert 'John Doe' in result.output

    def test_choice_prompt(self, runner):
        """Test choice prompt"""
        result = runner.invoke(cli, ['deploy'], input='1\n')  # Select option 1
        assert result.exit_code == 0
```
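
For longer prompt sequences, building the `input` string from a list keeps the answers readable; `CliRunner` feeds the string to stdin exactly as typed:

```python
def test_full_wizard(runner):
    """One answer per prompt, joined with newlines."""
    answers = '\n'.join([
        'my-project',        # project name
        'John Doe',          # author
        'john@example.com',  # email
        'y',                 # final confirmation
    ]) + '\n'
    result = runner.invoke(cli, ['init', '--interactive'], input=answers)
    assert result.exit_code == 0
```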

## Testing with Isolated Filesystem

```python
import os


class TestFileOperations:
    """Test file operations with isolated filesystem"""

    def test_create_file(self, runner):
        """Test file creation"""
        with runner.isolated_filesystem():
            result = runner.invoke(cli, ['init', 'test-project'])
            assert result.exit_code == 0
            assert os.path.exists('test-project')

    def test_read_file(self, runner):
        """Test reading from file"""
        with runner.isolated_filesystem():
            # Create test file
            with open('input.txt', 'w') as f:
                f.write('test data')

            result = runner.invoke(cli, ['process', '--input', 'input.txt'])
            assert result.exit_code == 0

    def test_write_file(self, runner):
        """Test writing to file"""
        with runner.isolated_filesystem():
            result = runner.invoke(cli, ['export', '--output', 'output.txt'])
            assert result.exit_code == 0

            assert os.path.exists('output.txt')
            with open('output.txt') as f:
                content = f.read()
            assert len(content) > 0
```
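
On Click 8.0 and later, `isolated_filesystem()` accepts a `temp_dir` argument, which composes nicely with pytest's `tmp_path` fixture; Click then leaves the directory in place, so a failing test's files survive for inspection:

```python
def test_inspectable_workspace(runner, tmp_path):
    # With temp_dir, Click does not delete the directory afterwards,
    # so failing tests leave their files behind for debugging.
    with runner.isolated_filesystem(temp_dir=tmp_path):
        result = runner.invoke(cli, ['init', 'test-project'])
        assert result.exit_code == 0
```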

## Testing Environment Variables

```python
class TestEnvironmentVariables:
    """Test environment variable handling"""

    def test_with_env_var(self, runner):
        """Test command with environment variable"""
        result = runner.invoke(
            cli,
            ['status'],
            env={'API_KEY': 'test_key_123'}
        )
        assert result.exit_code == 0

    def test_without_env_var(self, runner, monkeypatch):
        """Test command without required environment variable"""
        # Remove the variable so the test does not depend on the outer
        # environment; assuming API_KEY is required, the command should fail.
        monkeypatch.delenv('API_KEY', raising=False)
        result = runner.invoke(cli, ['status'])
        assert result.exit_code != 0

    def test_env_var_override(self, runner, monkeypatch):
        """Test environment variable override"""
        monkeypatch.setenv('API_KEY', 'overridden_key')
        result = runner.invoke(cli, ['status'])
        assert result.exit_code == 0
```
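
`invoke(..., env=...)` can also unset variables: Click's isolation removes any key mapped to `None` for the duration of the call, which avoids the `monkeypatch` fixture entirely:

```python
def test_without_env_var_via_invoke(runner):
    # A value of None tells CliRunner's isolation to delete the
    # variable for this invocation only.
    result = runner.invoke(cli, ['status'], env={'API_KEY': None})
    assert result.exit_code != 0
```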

## Testing Output Formats

```python
class TestOutputFormats:
    """Test different output formats"""

    def test_json_output(self, runner):
        """Test JSON output format"""
        result = runner.invoke(cli, ['status', '--format', 'json'])
        assert result.exit_code == 0

        import json
        try:
            data = json.loads(result.output)
            assert isinstance(data, dict)
        except json.JSONDecodeError:
            pytest.fail("Output is not valid JSON")

    def test_yaml_output(self, runner):
        """Test YAML output format"""
        result = runner.invoke(cli, ['status', '--format', 'yaml'])
        assert result.exit_code == 0

        import yaml  # requires PyYAML
        data = yaml.safe_load(result.output)
        assert isinstance(data, dict)

    def test_table_output(self, runner):
        """Test table output format"""
        result = runner.invoke(cli, ['list'])
        assert result.exit_code == 0
        assert '│' in result.output or '|' in result.output
```
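
Since the format tests only vary in the flag they pass, `pytest.mark.parametrize` can collapse them into one test that still reports each format separately (assuming `status` accepts all three values):

```python
@pytest.mark.parametrize('fmt', ['json', 'yaml', 'table'])
def test_format_accepted(runner, fmt):
    """Every supported --format value should exit cleanly."""
    result = runner.invoke(cli, ['status', '--format', fmt])
    assert result.exit_code == 0
```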

## Testing Exit Codes

```python
class TestExitCodes:
    """Test exit codes"""

    def test_success_exit_code(self, runner):
        """Test success returns 0"""
        result = runner.invoke(cli, ['status'])
        assert result.exit_code == 0

    def test_error_exit_code(self, runner):
        """Test error returns non-zero"""
        result = runner.invoke(cli, ['invalid-command'])
        assert result.exit_code != 0

    def test_validation_error_exit_code(self, runner):
        """Test validation error returns 2"""
        result = runner.invoke(cli, ['deploy', '--invalid-option'])
        assert result.exit_code == 2  # Click uses 2 for usage errors

    def test_exception_exit_code(self, runner):
        """Test uncaught exception returns 1"""
        result = runner.invoke(cli, ['command-that-throws'])
        assert result.exit_code == 1
        assert result.exception is not None  # CliRunner captured the exception
```
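
`CliRunner` swallows exceptions by default and stores them on `result.exception`. When debugging, pass `catch_exceptions=False` so the traceback surfaces in pytest's report instead; a sketch, assuming the hypothetical command raises `RuntimeError`:

```python
def test_exception_propagates(runner):
    # Re-raise instead of recording on result.exception; pytest.raises
    # then asserts the failure mode directly.
    with pytest.raises(RuntimeError):
        runner.invoke(cli, ['command-that-throws'], catch_exceptions=False)
```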

## Testing with Fixtures

```python
@pytest.fixture
def sample_config(tmp_path):
    """Create sample config file"""
    config_file = tmp_path / '.myclirc'
    config_file.write_text('''
api_key: your_test_key_here
environment: development
verbose: false
''')
    return config_file


@pytest.fixture
def mock_api(monkeypatch):
    """Mock external API calls"""
    class MockAPI:
        def __init__(self):
            self.calls = []

        def get(self, endpoint):
            self.calls.append(('GET', endpoint))
            return {'status': 'success'}

    mock = MockAPI()
    monkeypatch.setattr('mycli.api.client', mock)
    return mock


class TestWithFixtures:
    """Test using fixtures"""

    def test_with_config_file(self, runner, sample_config):
        """Test with config file"""
        result = runner.invoke(
            cli,
            ['status', '--config', str(sample_config)]
        )
        assert result.exit_code == 0

    def test_with_mock_api(self, runner, mock_api):
        """Test with mocked API"""
        result = runner.invoke(cli, ['deploy', 'production'])
        assert result.exit_code == 0
        assert len(mock_api.calls) > 0
```

## Testing Error Handling

```python
class TestErrorHandling:
    """Test error handling"""

    def test_network_error(self, runner, monkeypatch):
        """Test network error handling"""
        def mock_request(*args, **kwargs):
            raise ConnectionError("Network unreachable")

        monkeypatch.setattr('requests.get', mock_request)
        result = runner.invoke(cli, ['status'])
        assert result.exit_code != 0
        assert 'network' in result.output.lower()

    def test_file_not_found(self, runner):
        """Test file not found error"""
        result = runner.invoke(cli, ['process', '--input', 'nonexistent.txt'])
        assert result.exit_code != 0
        assert 'not found' in result.output.lower()

    def test_invalid_json(self, runner):
        """Test invalid JSON handling"""
        with runner.isolated_filesystem():
            with open('config.json', 'w') as f:
                f.write('invalid json {[}')

            result = runner.invoke(cli, ['config', 'load', 'config.json'])
            assert result.exit_code != 0
            assert 'invalid' in result.output.lower()
```
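
When a test fails unexpectedly, the `Result` object also carries the captured traceback as `result.exc_info`; printing it is often faster than rerunning under a debugger:

```python
import traceback


def test_debuggable_failure(runner):
    result = runner.invoke(cli, ['status'])
    if result.exit_code != 0 and result.exc_info is not None:
        # Show the captured traceback in the test output before failing.
        traceback.print_exception(*result.exc_info)
    assert result.exit_code == 0
```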

## Best Practices

1. **Use Fixtures**: Share common setup across tests
2. **Isolated Filesystem**: Use `runner.isolated_filesystem()` for file operations
3. **Test Exit Codes**: Always check exit codes
4. **Clear Test Names**: Use descriptive test method names
5. **Test Edge Cases**: Test boundary conditions and error cases
6. **Mock External Dependencies**: Don't make real API calls
7. **Use Markers**: Mark tests as unit, integration, slow, etc. (see the sketch after this list)
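
A minimal marker sketch, assuming names like `integration` and `slow`; register them under `[pytest] markers` in `pytest.ini` (or the equivalent `pyproject.toml` table) so pytest doesn't warn about unknown marks:

```python
import pytest


@pytest.mark.slow
@pytest.mark.integration
def test_full_deploy_cycle(runner):
    """Select with `pytest -m integration`; skip with `-m "not slow"`."""
    result = runner.invoke(cli, ['deploy', 'staging', '--dry-run'])
    assert result.exit_code == 0
```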

## Resources

- [Click Testing Documentation](https://click.palletsprojects.com/en/8.1.x/testing/)
- [Pytest Documentation](https://docs.pytest.org/)
- [CliRunner API](https://click.palletsprojects.com/en/8.1.x/api/#click.testing.CliRunner)