Initial commit

skills/testing-r-packages/references/advanced.md
# Advanced Testing Topics

## Skipping Tests

Skip tests conditionally when requirements aren't met:

### Built-in Skip Functions

```r
test_that("API integration works", {
  skip_if_offline()
  skip_if_not_installed("httr2")
  skip_on_cran()
  skip_on_os("windows")

  result <- call_external_api()
  expect_true(result$success)
})
```

**Common skip functions:**

- `skip()` - Skip unconditionally with message
- `skip_if()` - Skip if condition is TRUE
- `skip_if_not()` - Skip if condition is FALSE
- `skip_if_offline()` - Skip if no internet
- `skip_if_not_installed(pkg)` - Skip if package unavailable
- `skip_on_cran()` - Skip on CRAN checks
- `skip_on_os(os)` - Skip on specific OS
- `skip_on_ci()` - Skip on continuous integration
- `skip_unless_r(version)` - Skip unless R version requirement met (testthat 3.3.0+)

### Custom Skip Conditions

```r
skip_if_no_api_key <- function() {
  if (Sys.getenv("API_KEY") == "") {
    skip("API_KEY not available")
  }
}

skip_if_slow <- function() {
  if (!identical(Sys.getenv("RUN_SLOW_TESTS"), "true")) {
    skip("Slow tests not enabled")
  }
}

test_that("authenticated endpoint works", {
  skip_if_no_api_key()

  result <- call_authenticated_endpoint()
  expect_equal(result$status, "success")
})
```

## Testing Flaky Code

### Retry with `try_again()`

Test code that may fail occasionally (network calls, timing-dependent code):

```r
test_that("flaky network call succeeds eventually", {
  result <- try_again(
    times = 3,
    {
      response <- make_network_request()
      expect_equal(response$status, 200)
      response
    }
  )

  expect_type(result, "list")
})
```

### Mark Tests as Flaky

```r
test_that("timing-sensitive operation", {
  skip_on_cran() # Too unreliable for CRAN

  start <- Sys.time()
  result <- async_operation()
  # Request seconds explicitly; as.numeric() on a bare difftime can
  # silently return minutes instead
  duration <- as.numeric(difftime(Sys.time(), start, units = "secs"))

  expect_lt(duration, 2) # Should complete in < 2 seconds
})
```

## Managing Secrets in Tests

### Environment Variables

```r
test_that("authenticated API works", {
  # Skip if credentials unavailable
  api_key <- Sys.getenv("MY_API_KEY")
  skip_if(api_key == "", "MY_API_KEY not set")

  result <- call_api(api_key)
  expect_true(result$authenticated)
})
```

### Local Configuration Files

```r
test_that("service integration works", {
  config_path <- test_path("fixtures", "test_config.yml")
  skip_if_not(file.exists(config_path), "Test config not found")

  config <- yaml::read_yaml(config_path)
  result <- connect_to_service(config)
  expect_true(result$connected)
})
```

**Never commit secrets:**

- Add config files with secrets to `.gitignore`
- Use environment variables in CI/CD
- Provide example config files, e.g. `test_config.yml.example` (see the sketch below)
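
One way to enforce the `.gitignore` rule is with usethis (a minimal sketch; the path assumes the config layout from the example above):

```r
# Keep the real config out of version control; commit only the template
usethis::use_git_ignore("tests/testthat/fixtures/test_config.yml")
```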

### Testing Without Secrets

Design tests to degrade gracefully:

```r
test_that("API client works", {
  api_key <- Sys.getenv("API_KEY")

  if (api_key == "") {
    # Mock the API when credentials unavailable
    local_mocked_bindings(
      make_api_call = function(...) list(status = "success", data = "mocked")
    )
  }

  result <- my_api_wrapper()
  expect_equal(result$status, "success")
})
```

## Custom Expectations

Create domain-specific expectations for clearer tests:

### Simple Custom Expectations

```r
# In helper-expectations.R
expect_valid_email <- function(email) {
  expect_match(email, "^[^@]+@[^@]+\\.[^@]+$")
}

expect_positive <- function(x) {
  expect_true(all(x > 0), info = "All values should be positive")
}

expect_named_list <- function(object, names) {
  expect_type(object, "list")
  expect_named(object, names, ignore.order = TRUE)
}
```

Usage:

```r
test_that("user validation works", {
  user <- create_user("test@example.com")
  expect_valid_email(user$email)
})
```

### Complex Custom Expectations

```r
expect_valid_model <- function(model) {
  act <- quasi_label(rlang::enquo(model))

  expect(
    inherits(act$val, "lm") && !is.null(act$val$coefficients),
    sprintf("%s is not a valid linear model", act$lab)
  )

  invisible(act$val)
}
```
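
Usage mirrors the built-in expectations, and the label captured by `quasi_label()` makes failure messages point at the offending expression:

```r
test_that("model fitting works", {
  fit <- lm(mpg ~ wt, data = mtcars)
  expect_valid_model(fit)
})
```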

## State Inspection

Detect unintended global state changes:

```r
# In setup-state.R
set_state_inspector(function() {
  list(
    options = options(),
    env_vars = Sys.getenv(),
    search = search()
  )
})
```

testthat will warn if state changes between tests.
## CRAN-Specific Considerations

### Time Limits

Tests must complete in under 1 minute:

```r
test_that("slow operation completes", {
  skip_on_cran() # Takes 2 minutes

  result <- expensive_computation()
  expect_equal(result$status, "complete")
})
```

### File System Discipline

Only write to temp directory:

```r
test_that("file output works", {
  # Good
  output <- withr::local_tempfile(fileext = ".csv")
  write.csv(data, output)

  # Bad - writes to package directory
  # write.csv(data, "output.csv")
})
```

### No External Dependencies

Avoid relying on:

- Network access
- External processes
- System commands
- Clipboard access

```r
test_that("external dependency", {
  skip_on_cran()

  # Code requiring network or system calls
})
```

### Platform Differences

Use `expect_equal()` for numeric comparisons (allows tolerance):

```r
test_that("calculation works", {
  result <- complex_calculation()

  # Good: tolerant to floating point differences
  expect_equal(result, 1.234567)

  # Bad: fails due to platform differences
  # expect_identical(result, 1.234567)
})
```

## Test Performance

### Identify Slow Tests

```r
devtools::test(reporter = "slow")
```

The `SlowReporter` highlights performance bottlenecks.

### Test Shuffling

Detect unintended test dependencies:

```r
# Randomly reorder tests
devtools::test(shuffle = TRUE)

# In test file
test_dir("tests/testthat", shuffle = TRUE)
```

If tests fail when shuffled, they have unintended dependencies on execution order.

## Parallel Testing

Enable parallel test execution in `DESCRIPTION`:

```
Config/testthat/parallel: true
```

**Requirements for parallel tests:**

- Tests must be independent
- No shared state between tests
- Use `local_*()` functions for all side effects
- Snapshot tests work correctly in parallel (testthat 3.2.0+)
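
A parallel-safe test keeps every side effect scoped to itself, so neither execution order nor concurrency can matter. A minimal sketch using withr:

```r
test_that("report writing is self-contained", {
  # Options and files are scoped to this test, so it can run concurrently
  withr::local_options(list(scipen = 999))
  path <- withr::local_tempfile(fileext = ".txt")

  writeLines(format(123456789), path)
  expect_true(file.exists(path))
})
```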
## Testing Edge Cases

### Boundary Conditions

```r
test_that("handles boundary conditions", {
  expect_equal(my_func(0), expected_at_zero)
  expect_equal(my_func(-1), expected_negative)
  expect_equal(my_func(Inf), expected_infinite)
  expect_true(is.nan(my_func(NaN)))
})
```

### Empty Inputs

```r
test_that("handles empty inputs", {
  expect_equal(process(character()), character())
  expect_equal(process(NULL), NULL)
  expect_equal(process(data.frame()), data.frame())
})
```

### Type Validation

```r
test_that("validates input types", {
  expect_error(my_func("string"), class = "vctrs_error_cast")
  expect_error(my_func(list()), "must be atomic")
  expect_no_error(my_func(1:10))
})
```

## Debugging Failed Tests

### Interactive Debugging

```r
# Run test interactively
devtools::load_all()
test_that("problematic test", {
  # Add browser() to pause execution
  browser()

  result <- problematic_function()
  expect_equal(result, expected)
})
```

### Print Debugging in Tests

```r
test_that("debug output", {
  data <- prepare_data()
  str(data) # str() prints directly; output is visible when the test fails

  result <- process(data)
  print(result)

  expect_equal(result, expected)
})
```

### Capture Output for Inspection

```r
test_that("inspect messages", {
  messages <- capture_messages(
    result <- function_with_messages()
  )

  print(messages) # See all messages when test fails
  expect_match(messages, "Processing complete")
})
```

## Testing R6 Classes

```r
test_that("R6 class works", {
  obj <- MyClass$new(value = 10)

  expect_r6_class(obj, "MyClass") # testthat 3.3.0+
  expect_equal(obj$value, 10)

  obj$increment()
  expect_equal(obj$value, 11)
})
```

## Testing S4 Classes

```r
test_that("S4 validity works", {
  obj <- new("MyClass", slot1 = 10, slot2 = "test")

  expect_s4_class(obj, "MyClass")
  expect_equal(obj@slot1, 10)

  expect_error(
    new("MyClass", slot1 = -1),
    "slot1 must be positive"
  )
})
```

skills/testing-r-packages/references/bdd.md

# BDD-Style Testing with describe() and it()

Behavior-Driven Development (BDD) testing uses `describe()` and `it()` to create specification-style tests that read like natural-language descriptions of behavior.

## When to Use BDD Syntax

**Use BDD (`describe`/`it`) when:**

- Documenting intended behavior of new features
- Testing complex components with multiple related facets
- Following test-first development workflows
- Tests serve as specification documentation
- You want hierarchical organization of related tests
- A group of tests (in `it()` statements) relies on a single fixture or local options/envvars (set up in `describe()`)

**Use standard syntax (`test_that`) when:**

- Writing straightforward unit tests
- Testing implementation details
- Converting existing `test_that()` tests (no need to change working code)

**Key insight from testthat:** "Use `describe()` to verify you implement the right things, use `test_that()` to ensure you do things right."
## Basic BDD Syntax

### Simple Specifications

```r
describe("matrix()", {
  it("can be multiplied by a scalar", {
    m1 <- matrix(1:4, 2, 2)
    m2 <- m1 * 2
    expect_equal(matrix(1:4 * 2, 2, 2), m2)
  })

  it("can be transposed", {
    m <- matrix(1:4, 2, 2)
    expect_equal(t(m), matrix(c(1, 3, 2, 4), 2, 2))
  })

  it("can compute determinant", {
    m <- matrix(c(1, 2, 3, 4), 2, 2)
    expect_equal(det(m), -2)
  })
})
```

Each `it()` block:

- Defines one specification (like `test_that()`)
- Runs in its own environment
- Has access to all expectations
- Can use withr and other testing tools

## Nested Specifications

Group related specifications hierarchically:

```r
describe("User authentication", {
  describe("login()", {
    it("accepts valid credentials", {
      result <- login("user@example.com", "password123")
      expect_true(result$authenticated)
      expect_type(result$token, "character")
    })

    it("rejects invalid email", {
      expect_error(
        login("invalid-email", "password"),
        class = "validation_error"
      )
    })

    it("rejects wrong password", {
      expect_error(
        login("user@example.com", "wrong"),
        class = "auth_error"
      )
    })
  })

  describe("logout()", {
    it("clears session token", {
      session <- create_session()
      logout(session)
      expect_null(session$token)
    })

    it("invalidates refresh token", {
      session <- create_session()
      logout(session)
      expect_error(refresh(session), "Invalid token")
    })
  })

  describe("password_reset()", {
    it("sends reset email", {
      local_mocked_bindings(send_email = function(...) TRUE)
      result <- password_reset("user@example.com")
      expect_true(result$email_sent)
    })

    it("generates secure token", {
      result <- password_reset("user@example.com")
      expect_gte(nchar(result$token), 32)
    })
  })
})
```

Nesting creates clear hierarchies:

- Top level: Component or module
- Second level: Functions or features
- Third level: Specific behaviors

## Pending Specifications

Mark unimplemented tests by omitting the code:

```r
describe("division()", {
  it("divides two numbers", {
    expect_equal(division(10, 2), 5)
  })

  it("returns Inf for division by zero") # Pending

  it("handles complex numbers") # Pending
})
```

Pending tests:

- Show up in test output as "SKIPPED"
- Document planned functionality
- Serve as TODO markers
- Don't cause test failures
## Complete Test File Example

```r
# tests/testthat/test-data-processor.R

describe("DataProcessor", {
  describe("initialization", {
    it("creates processor with default config", {
      proc <- DataProcessor$new()
      expect_r6_class(proc, "DataProcessor")
      expect_equal(proc$config$timeout, 30)
    })

    it("accepts custom configuration", {
      proc <- DataProcessor$new(config = list(timeout = 60))
      expect_equal(proc$config$timeout, 60)
    })

    it("validates configuration options", {
      expect_error(
        DataProcessor$new(config = list(timeout = -1)),
        "timeout must be positive"
      )
    })
  })

  describe("process()", {
    describe("with valid data", {
      it("processes numeric data", {
        proc <- DataProcessor$new()
        result <- proc$process(data.frame(x = 1:10))
        expect_s3_class(result, "data.frame")
        expect_equal(nrow(result), 10)
      })

      it("handles missing values", {
        proc <- DataProcessor$new()
        data <- data.frame(x = c(1, NA, 3))
        result <- proc$process(data)
        expect_false(anyNA(result$x))
      })

      it("preserves column names", {
        proc <- DataProcessor$new()
        data <- data.frame(foo = 1:3, bar = 4:6)
        result <- proc$process(data)
        expect_named(result, c("foo", "bar"))
      })
    })

    describe("with invalid data", {
      it("rejects NULL input", {
        proc <- DataProcessor$new()
        expect_error(proc$process(NULL), "data cannot be NULL")
      })

      it("rejects empty data frame", {
        proc <- DataProcessor$new()
        expect_error(proc$process(data.frame()), "data cannot be empty")
      })

      it("rejects non-data.frame input", {
        proc <- DataProcessor$new()
        expect_error(proc$process(list()), class = "type_error")
      })
    })
  })

  describe("summary()", {
    it("returns summary statistics", {
      proc <- DataProcessor$new()
      data <- data.frame(x = 1:10, y = 11:20)
      proc$process(data)

      summary <- proc$summary()
      expect_type(summary, "list")
      expect_named(summary, c("rows", "cols", "processed_at"))
    })

    it("throws error if no data processed", {
      proc <- DataProcessor$new()
      expect_error(proc$summary(), "No data has been processed")
    })
  })
})
```

## Organizing Files with BDD

### Single Component per File

```r
# tests/testthat/test-user-model.R
describe("User model", {
  describe("validation", { ... })
  describe("persistence", { ... })
  describe("relationships", { ... })
})
```

### Multiple Related Components

```r
# tests/testthat/test-math-operations.R
describe("arithmetic operations", {
  describe("addition()", { ... })
  describe("subtraction()", { ... })
  describe("multiplication()", { ... })
  describe("division()", { ... })
})
```

### Hierarchical Domain Organization

```r
# tests/testthat/test-api-endpoints.R
describe("API endpoints", {
  describe("/users", {
    describe("GET /users", { ... })
    describe("POST /users", { ... })
    describe("GET /users/:id", { ... })
  })

  describe("/posts", {
    describe("GET /posts", { ... })
    describe("POST /posts", { ... })
  })
})
```

## Mixing BDD and Standard Syntax

You can use both styles in the same test file:

```r
# tests/testthat/test-calculator.R

# BDD style for user-facing functionality
describe("Calculator user interface", {
  describe("button clicks", {
    it("registers numeric input", { ... })
    it("handles operator keys", { ... })
  })
})

# Standard style for internal helpers
test_that("parse_expression() tokenizes correctly", {
  tokens <- parse_expression("2 + 3")
  expect_equal(tokens, c("2", "+", "3"))
})

test_that("evaluate_tokens() handles operator precedence", {
  result <- evaluate_tokens(c("2", "+", "3", "*", "4"))
  expect_equal(result, 14)
})
```

**Mixing guidelines:**

- Use BDD for behavioral specifications
- Use `test_that()` for implementation details
- Keep related tests in the same style within a section
- Don't nest `test_that()` inside `describe()` or vice versa
## BDD with Test Fixtures

Use the same fixture patterns as standard tests:

```r
describe("File processor", {
  # Helper function for tests; `env` keeps the temp file alive until the
  # caller's test finishes instead of deleting it when the helper returns
  new_test_file <- function(content, env = parent.frame()) {
    withr::local_tempfile(lines = content, .local_envir = env)
  }

  describe("read_file()", {
    it("reads text files", {
      file <- new_test_file(c("line1", "line2"))
      result <- read_file(file)
      expect_length(result, 2)
    })

    it("handles empty files", {
      file <- new_test_file(character())
      result <- read_file(file)
      expect_equal(result, character())
    })
  })
})
```
## BDD with Snapshot Tests

Snapshots work naturally with BDD:

```r
describe("error messages", {
  it("provides helpful validation errors", {
    expect_snapshot(error = TRUE, {
      validate_user(NULL)
      validate_user(list())
      validate_user(list(email = "invalid"))
    })
  })

  it("shows clear permission errors", {
    expect_snapshot(error = TRUE, {
      check_permission("guest", "admin")
    })
  })
})
```

## BDD with Mocking

```r
describe("API client", {
  describe("fetch_user()", {
    it("handles successful response", {
      local_mocked_bindings(
        http_get = function(url) {
          list(status = 200, body = '{"id": 1, "name": "Test"}')
        }
      )

      user <- fetch_user(1)
      expect_equal(user$name, "Test")
    })

    it("handles 404 errors", {
      local_mocked_bindings(
        http_get = function(url) list(status = 404)
      )

      expect_error(fetch_user(999), class = "not_found_error")
    })
  })
})
```

## Test-First Workflow with BDD

1. **Write specifications first:**

```r
describe("order_total()", {
  it("sums item prices")
  it("applies tax")
  it("applies discount codes")
  it("handles free shipping threshold")
})
```

2. **Implement one specification at a time:**

```r
describe("order_total()", {
  it("sums item prices", {
    order <- list(items = list(
      list(price = 10),
      list(price = 20)
    ))
    expect_equal(order_total(order), 30)
  })

  it("applies tax")
  it("applies discount codes")
  it("handles free shipping threshold")
})
```

3. **Continue until all specs have implementations**

This workflow ensures you:

- Think about requirements before implementation
- Have clear success criteria
- Build incrementally
- Maintain focus on behavior

## Comparison: describe/it vs test_that

**describe/it:**

```r
describe("str_length()", {
  it("counts characters in string", {
    expect_equal(str_length("abc"), 3)
  })

  it("handles empty strings", {
    expect_equal(str_length(""), 0)
  })
})
```

**test_that:**

```r
test_that("str_length() counts characters", {
  expect_equal(str_length("abc"), 3)
})

test_that("str_length() handles empty strings", {
  expect_equal(str_length(""), 0)
})
```

Key differences:

- BDD groups related specs under `describe()`
- BDD uses "it" instead of "test_that"
- BDD enables nesting for hierarchy
- BDD supports pending specs without code
- Both produce identical test results

Choose based on your preferences and project style.

skills/testing-r-packages/references/fixtures.md

# Test Fixtures and Data Management

Test fixtures arrange the environment into a known state for testing. testthat provides several approaches for managing test data and state.

## Fixture Approaches

### Constructor Helper Functions

Create reusable test objects on demand:

```r
# In tests/testthat/helper-fixtures.R or within test file
new_sample_data <- function(n = 10) {
  data.frame(
    id = seq_len(n),
    value = rnorm(n),
    category = sample(letters[1:3], n, replace = TRUE)
  )
}

test_that("function handles data correctly", {
  data <- new_sample_data(5)
  result <- process_data(data)
  expect_equal(nrow(result), 5)
})
```

**Advantages:**

- Fresh data for each test
- Parameterizable
- No file I/O

**Use when:**

- Data is cheap to create
- Multiple tests need similar but not identical data
- Data should vary between tests
### Local Functions with Cleanup

Handle side effects with withr's `local_*()` helpers (which build on `withr::defer()`):

```r
local_temp_csv <- function(data, pattern = "test", env = parent.frame()) {
  path <- withr::local_tempfile(pattern = pattern, fileext = ".csv", .local_envir = env)
  write.csv(data, path, row.names = FALSE)
  path
}

test_that("CSV reading works", {
  data <- data.frame(x = 1:3, y = letters[1:3])
  csv_path <- local_temp_csv(data)

  result <- read_my_csv(csv_path)
  expect_equal(result, data)
  # File automatically cleaned up after test
})
```

**Advantages:**

- Automatic cleanup
- Encapsulates setup and teardown
- Composable

**Use when:**

- Tests create side effects (files, connections)
- Setup requires multiple steps
- Cleanup logic is non-trivial
### Static Fixture Files

Store pre-created data files in `tests/testthat/fixtures/`:

```
tests/testthat/
├── fixtures/
│   ├── sample_data.rds
│   ├── expected_output.rds
│   ├── config.json
│   └── reference_output.csv
└── test-processing.R
```

Access with `test_path()`:

```r
test_that("function processes real data", {
  data <- readRDS(test_path("fixtures", "sample_data.rds"))
  result <- process_data(data)

  expected <- readRDS(test_path("fixtures", "expected_output.rds"))
  expect_equal(result, expected)
})
```

**Advantages:**

- Tests against real data
- Expensive-to-create data computed once
- Human-readable (for JSON, CSV, etc.)

**Use when:**

- Data is expensive to create
- Data represents real-world cases
- Multiple tests use identical data
- Data is complex or represents edge cases

## Helper Files

Files in `tests/testthat/` starting with `helper-` are automatically sourced before tests run.

```r
# tests/testthat/helper-fixtures.R

# Custom expectations
expect_valid_user <- function(user) {
  expect_type(user, "list")
  expect_named(user, c("id", "name", "email"))
  expect_type(user$id, "integer")
}

# Test data constructors
new_user <- function(id = 1L, name = "Test User", email = "test@example.com") {
  list(id = id, name = name, email = email)
}

# Shared fixtures
standard_config <- function() {
  list(
    timeout = 30,
    retries = 3,
    verbose = FALSE
  )
}
```

Then use in tests:

```r
test_that("user validation works", {
  user <- new_user()
  expect_valid_user(user)
})
```
## Setup Files

Files starting with `setup-` run only during `R CMD check` and `devtools::test()`, not during `devtools::load_all()`.

```r
# tests/testthat/setup-options.R

# Set options for test suite
withr::local_options(
  list(
    reprex.clipboard = FALSE,
    reprex.html_preview = FALSE,
    usethis.quiet = TRUE
  ),
  .local_envir = teardown_env()
)
```

**Use setup files for:**

- Package-wide test options
- Environment variable configuration
- One-time expensive operations (see the sketch below)
- Test suite initialization
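
For one-time expensive setup, pair the work with a teardown registered against `teardown_env()` so cleanup runs once after the whole suite. A sketch, with an in-memory database standing in for any costly resource:

```r
# tests/testthat/setup-db.R

# Created once for the entire test suite...
test_db <- DBI::dbConnect(RSQLite::SQLite(), ":memory:")
# ...and torn down after the last test finishes
withr::defer(DBI::dbDisconnect(test_db), teardown_env())
```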
## Managing File System State

### Use temp directories exclusively

```r
test_that("file writing works", {
  # Good: write to temp directory
  path <- withr::local_tempfile(lines = c("line1", "line2"))

  # Bad: write to working directory
  # writeLines(c("line1", "line2"), "test_file.txt")

  result <- process_file(path)
  expect_equal(result, expected)
})
```

### Clean up automatically with withr

```r
test_that("directory operations work", {
  # Create temp dir that auto-cleans
  dir <- withr::local_tempdir()

  # Create files in it
  file.create(file.path(dir, "file1.txt"))
  file.create(file.path(dir, "file2.txt"))

  result <- process_directory(dir)
  expect_length(result, 2)
  # Directory automatically removed after test
})
```

### Test files stored in fixtures

```r
test_that("file parsing handles edge cases", {
  # Read from committed fixture
  malformed <- test_path("fixtures", "malformed.csv")

  expect_warning(
    result <- robust_read_csv(malformed),
    "Malformed"
  )
  expect_true(nrow(result) > 0)
})
```

## Database Fixtures

### In-memory SQLite

```r
test_that("database queries work", {
  con <- DBI::dbConnect(RSQLite::SQLite(), ":memory:")
  withr::defer(DBI::dbDisconnect(con))

  # Create schema
  DBI::dbExecute(con, "CREATE TABLE users (id INTEGER, name TEXT)")
  DBI::dbExecute(con, "INSERT INTO users VALUES (1, 'Alice'), (2, 'Bob')")

  result <- query_users(con)
  expect_equal(nrow(result), 2)
})
```
### Fixture SQL scripts

```r
test_that("complex queries work", {
  con <- DBI::dbConnect(RSQLite::SQLite(), ":memory:")
  withr::defer(DBI::dbDisconnect(con))

  # Load schema from fixture; RSQLite runs one statement per
  # dbExecute() call, so split the script on ";"
  schema <- paste(readLines(test_path("fixtures", "schema.sql")), collapse = "\n")
  for (stmt in strsplit(schema, ";", fixed = TRUE)[[1]]) {
    if (nzchar(trimws(stmt))) DBI::dbExecute(con, stmt)
  }

  result <- complex_query(con)
  expect_s3_class(result, "data.frame")
})
```
## Complex Object Fixtures

### Save and load complex objects

Create fixtures interactively:

```r
# Run once to create fixture
complex_model <- expensive_training_function(data)
saveRDS(complex_model, "tests/testthat/fixtures/trained_model.rds")
```

Use in tests:

```r
test_that("predictions work", {
  model <- readRDS(test_path("fixtures", "trained_model.rds"))

  new_data <- data.frame(x = 1:5, y = 6:10)
  predictions <- predict(model, new_data)

  expect_length(predictions, 5)
  expect_type(predictions, "double")
})
```

## Fixture Organization

```
tests/testthat/
├── fixtures/
│   ├── data/                # Input data
│   │   ├── sample.csv
│   │   └── users.json
│   ├── expected/            # Expected outputs
│   │   ├── processed.rds
│   │   └── summary.txt
│   ├── models/              # Trained models
│   │   └── classifier.rds
│   └── sql/                 # Database schemas
│       └── schema.sql
├── helper-constructors.R    # Data constructors
├── helper-expectations.R    # Custom expectations
├── setup-options.R          # Test suite config
└── test-*.R                 # Test files
```

## Best Practices

**Keep fixtures small:**

- Store minimal data needed for tests
- Use constructors for variations
- Commit fixtures to version control

**Document fixture origins:**

```
# tests/testthat/fixtures/README.md

# sample_data.rds
Created from production data on 2024-01-15
Contains 100 representative records with PII removed

# malformed.csv
Edge case discovered in issue #123
Contains intentional formatting errors
```

**Use consistent paths:**

```r
# Always use test_path() for portability
data <- readRDS(test_path("fixtures", "data.rds"))

# Never use relative paths
# data <- readRDS("fixtures/data.rds") # Bad
```

**Prefer deterministic fixtures:**

```r
# Good: reproducible
set.seed(123)
data <- data.frame(x = rnorm(10))

# Better: no randomness
data <- data.frame(x = seq(-2, 2, length.out = 10))
```

skills/testing-r-packages/references/mocking.md

# Mocking in testthat

Mocking temporarily replaces function implementations during testing, enabling tests when dependencies are unavailable or impractical (databases, APIs, file systems, expensive computations).

## Core Mocking Functions

### `local_mocked_bindings()`

Replace function implementations within a test:

```r
test_that("function works with mocked dependency", {
  local_mocked_bindings(
    get_user_data = function(id) {
      list(id = id, name = "Test User", role = "admin")
    }
  )

  result <- process_user(123)
  expect_equal(result$name, "Test User")
})
```

### `with_mocked_bindings()`

Replace functions for a specific code block:

```r
test_that("handles API failures gracefully", {
  result <- with_mocked_bindings(
    api_call = function(...) stop("Network error"),
    {
      tryCatch(
        fetch_data(),
        error = function(e) "fallback"
      )
    }
  )

  expect_equal(result, "fallback")
})
```

## S3 Method Mocking

Use `local_mocked_s3_method()` to mock S3 methods:

```r
test_that("custom print method is used", {
  local_mocked_s3_method(
    print, "myclass",
    function(x, ...) cat("Mocked output\n")
  )

  obj <- structure(list(), class = "myclass")
  expect_output(print(obj), "Mocked output")
})
```

## S4 Method Mocking

Use `local_mocked_s4_method()` for S4 methods:

```r
test_that("S4 method override works", {
  local_mocked_s4_method(
    "show", "MyS4Class",
    function(object) cat("Mocked S4 output\n")
  )

  # Test code using the mocked method
})
```

## R6 Class Mocking

Use `local_mocked_r6_class()` to mock R6 classes:

```r
test_that("R6 mock works", {
  MockDatabase <- R6::R6Class("MockDatabase",
    public = list(
      query = function(sql) data.frame(result = "mocked")
    )
  )

  local_mocked_r6_class("Database", MockDatabase)

  db <- Database$new()
  expect_equal(db$query("SELECT *"), data.frame(result = "mocked"))
})
```
## Common Mocking Patterns

### Database Connections

```r
test_that("database queries work", {
  # local_mocked_bindings() replaces bindings in your package's namespace,
  # so this assumes your package imports dbConnect() and dbGetQuery()
  local_mocked_bindings(
    dbConnect = function(...) list(connected = TRUE),
    dbGetQuery = function(conn, sql) {
      data.frame(id = 1:3, value = c("a", "b", "c"))
    }
  )

  result <- fetch_from_db("SELECT * FROM table")
  expect_equal(nrow(result), 3)
})
```
### API Calls

```r
test_that("API integration works", {
  # Argument names cannot be namespaced; use .package to target httr2
  local_mocked_bindings(
    request = function(url) list(url = url),
    req_perform = function(req) {
      list(status_code = 200, content = '{"success": true}')
    },
    .package = "httr2"
  )

  result <- call_external_api()
  expect_true(result$success)
})
```
### File System Operations

```r
test_that("file processing works", {
  # Base functions can't be mocked directly; testthat's docs suggest adding
  # shims to your package namespace first (e.g. `file.exists <- NULL`)
  local_mocked_bindings(
    file.exists = function(path) TRUE,
    readLines = function(path) c("line1", "line2", "line3")
  )

  result <- process_file("dummy.txt")
  expect_length(result, 3)
})
```
### Random Number Generation

```r
test_that("randomized algorithm is deterministic", {
  local_mocked_bindings(
    runif = function(n, ...) rep(0.5, n),
    rnorm = function(n, ...) rep(0, n)
  )

  result <- randomized_function()
  expect_equal(result, expected_value)
})
```
## Advanced Mocking Packages

### webfakes

Create fake web servers for HTTP testing:

```r
test_that("API client handles responses", {
  app <- webfakes::new_app()
  app$get("/users/:id", function(req, res) {
    res$send_json(list(id = req$params$id, name = "Test"))
  })

  web <- webfakes::local_app_process(app)

  result <- get_user(web$url("/users/123"))
  expect_equal(result$name, "Test")
})
```

### httptest2

Record and replay HTTP interactions:

```r
test_that("API call returns expected data", {
  httptest2::with_mock_dir("fixtures/api", {
    result <- call_real_api()
    expect_equal(result$status, "success")
  })
})
```

The first run records real responses; subsequent runs replay them.

## Mocking Best Practices

**Mock at the right level:**

- Mock external dependencies (APIs, databases)
- Don't mock internal package functions excessively
- Keep mocks simple and focused

**Verify mock behavior:**

```r
test_that("mock is called correctly", {
  calls <- list()
  local_mocked_bindings(
    external_func = function(...) {
      calls <<- append(calls, list(list(...)))
      "mocked"
    }
  )

  my_function()

  expect_length(calls, 1)
  expect_equal(calls[[1]]$arg, "expected")
})
```
**Prefer real fixtures when possible:**

- Use test data files instead of mocking file reads (see the sketch below)
- Use webfakes for full HTTP testing instead of mocking individual functions
- Mock only when fixtures are impractical
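
For example, instead of mocking `readLines()`, commit a small file under `tests/testthat/fixtures/` and exercise the real I/O path (a sketch; `three_lines.txt` and `process_file()` stand in for your own fixture and function):

```r
test_that("file processing works on a real file", {
  # Real read, no mocks: the fixture ships with the package
  result <- process_file(test_path("fixtures", "three_lines.txt"))
  expect_length(result, 3)
})
```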
**Document what's being mocked:**

```r
test_that("handles unavailable service", {
  # Mock the external authentication service,
  # which is unavailable in the test environment
  local_mocked_bindings(
    auth_check = function(token) list(valid = TRUE)
  )

  # test code
})
```

## Migration from Deprecated Functions

**Old (deprecated):**

```r
with_mock(
  `pkg::func` = function(...) "mocked",
  code_under_test()
)
```

**New (recommended):**

```r
local_mocked_bindings(
  func = function(...) "mocked",
  .package = "pkg"
)
```

The new functions work with byte-compiled code and are more reliable across platforms.

skills/testing-r-packages/references/snapshots.md

# Snapshot Testing

Snapshot tests record expected output in human-readable files rather than inline code. They are ideal for:

- Complex output that's difficult to verify programmatically
- User-facing messages, warnings, and errors
- Mixed output types (printed text + messages + warnings)
- Binary formats like plots
- Text with complex formatting
## Basic Usage

```r
test_that("error messages are helpful", {
  expect_snapshot(
    my_function(bad_input)
  )
})
```

The first run creates `tests/testthat/_snaps/{test-file}.md`, which records the captured output under a heading per test.
## Snapshot Workflow

**Initial creation:**

```r
devtools::test() # Creates new snapshots
```

**Review changes:**

```r
testthat::snapshot_review('test-name')
```

**Accept changes:**

```r
testthat::snapshot_accept('test-name')
```

**Reject changes:**

```r
testthat::snapshot_reject('test-name')
```

**Download snapshots from GitHub CI:**

```r
testthat::snapshot_download_gh()
```

## Snapshot Types

### Output Snapshots

Capture printed output, messages, warnings, and errors:

```r
test_that("function produces expected output", {
  expect_snapshot({
    print(my_data)
    message("Processing complete")
    warning("Non-critical issue")
  })
})
```

### Value Snapshots

Capture the structure of R objects:

```r
test_that("data structure is correct", {
  expect_snapshot(str(complex_object))
})
```

### Error Snapshots

Capture error messages with call information:

```r
test_that("errors are informative", {
  expect_snapshot(
    error = TRUE,
    my_function(invalid_input)
  )
})
```

## Transform Function

Use `transform` to remove variable elements before comparison:

```r
test_that("output is stable", {
  expect_snapshot(
    my_api_call(),
    transform = function(lines) {
      # Remove timestamps
      gsub("\\d{4}-\\d{2}-\\d{2}", "[DATE]", lines)
    }
  )
})
```

Common uses:

- Remove timestamps or session IDs
- Normalize file paths
- Strip API keys or tokens
- Remove stochastic elements

## Variants

Use `variant` for platform-specific or R-version-specific snapshots:

```r
test_that("platform-specific behavior", {
  expect_snapshot(
    system_specific_function(),
    variant = tolower(Sys.info()[["sysname"]])
  )
})
```

Variants save to `_snaps/{variant}/{test}.md` instead of `_snaps/{test}.md`.

## Best Practices

- **Commit snapshots to git** - They are part of your test suite
- **Review snapshot diffs carefully** - Ensure changes are intentional
- **Keep snapshots focused** - One concept per snapshot
- **Use transform for stability** - Remove variable elements
- **Update snapshots explicitly** - Never auto-accept in CI
- **Fail on new snapshots in CI** - testthat 3.3.0+ does this automatically
## Snapshot Files

Snapshots are stored as markdown files in `tests/testthat/_snaps/`:

```
tests/testthat/
├── test-utils.R
└── _snaps/
    ├── test-utils.md
    └── windows/          # variant snapshots
        └── test-utils.md
```

Each snapshot includes:

- Test name as heading
- Code that generated the output
- Captured output (see the example below)
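
As an illustration, an entry in `_snaps/test-utils.md` might look roughly like this (hypothetical output for the error-message test shown earlier):

```
# error messages are helpful

    Code
      my_function(bad_input)
    Condition
      Error in `my_function()`:
      ! `input` must be numeric
```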
## Common Patterns

**Testing error messages:**

```r
test_that("validation errors are clear", {
  expect_snapshot(error = TRUE, {
    validate_input(NULL)
    validate_input("wrong type")
    validate_input(numeric())
  })
})
```

**Testing side-by-side comparisons:**

```r
test_that("diff output is readable", {
  withr::local_options(width = 80)
  expect_snapshot(
    waldo::compare(expected, actual)
  )
})
```

**Testing printed output with messages:**

```r
test_that("function provides feedback", {
  expect_snapshot({
    result <- process_data(sample_data)
    print(result)
  })
})
```