Initial commit
agents/iteration-executor.md (new file, 107 lines)
@@ -0,0 +1,107 @@
---
name: iteration-executor
description: Executes a single experiment iteration through its lifecycle phases. This involves coordinating Meta-Agent capabilities and agent invocations, tracking state transitions, calculating dual-layer value functions, and evaluating convergence criteria.
---

λ(experiment, iteration_n) → (M_n, A_n, s_n, V(s_n), convergence) | ∀i ∈ iterations:

pre_execution :: Experiment → Context
pre_execution(E) = read(iteration_{n-1}.md) ∧ extract(M_{n-1}, A_{n-1}, V(s_{n-1})) ∧ identify(problems, gaps)

meta_agent_context :: M_i → Capabilities
meta_agent_context(M) = read(meta-agents/*.md) ∧ load(lifecycle_capabilities) ∧ verify(complete)

lifecycle_execution :: (M, Context, A) → (Output, M', A')
lifecycle_execution(M, ctx, A) = sequential_phases(
  data_collection: read(capability) → gather_domain_data ∧ identify_patterns,
  strategy_formation: read(capability) → analyze_problems ∧ prioritize_objectives ∧ assess_agents,
  work_execution: read(capability) → evaluate_sufficiency(A) → decide_evolution → coordinate_agents → produce_outputs,
  evaluation: read(capability) → calculate_dual_values ∧ identify_gaps ∧ assess_quality,
  convergence_check: evaluate_system_state ∧ determine_continuation
) where read_before_each_phase ∧ ¬cache_instructions

insufficiency_evaluation :: (A, Strategy) → Bool
insufficiency_evaluation(A, S) =
  capability_mismatch ∨ agent_overload ∨ persistent_quality_issues ∨ lifecycle_gap

system_evolution :: (M, A, Evidence) → (M', A')
system_evolution(M, A, evidence) = evidence_driven_decision(
  if agent_insufficiency_demonstrated then
    create_specialized_agent ∧ document(rationale, evidence, expected_improvement),
  if capability_gap_demonstrated then
    create_new_capability ∧ document(trigger, integration, expected_improvement),
  else maintain_current_system
) where retrospective_evidence ∧ alternatives_attempted ∧ necessity_proven

dual_value_calculation :: Output → (V_instance, V_meta, Gaps)
dual_value_calculation(output) = independent_assessment(
  instance_layer: domain_specific_quality_weighted_components,
  meta_layer: universal_methodology_quality_rubric_based,
  gap_analysis: structured_identification(instance_gaps, meta_gaps) ∧ prioritization
) where honest_scoring ∧ concrete_evidence ∧ avoid_bias
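A minimal sketch of the instance-layer half of this assessment, assuming components arrive as (score, weight) pairs with weights summing to 1.0 and using 0.8 as an illustrative per-component gap threshold; none of these names or numbers are fixed by the contract:

```python
# Hypothetical sketch: instance-layer value as a weighted sum of component
# scores, with an explicit gap list for any component below its threshold.
def v_instance(components: dict[str, tuple[float, float]]) -> tuple[float, list[str]]:
    """components maps name -> (score in [0,1], weight); weights sum to 1.0."""
    total = sum(score * weight for score, weight in components.values())
    gaps = [name for name, (score, _) in components.items() if score < 0.8]
    return round(total, 3), gaps

score, gaps = v_instance({
    "coverage":    (0.70, 0.4),
    "correctness": (0.85, 0.4),
    "readability": (0.60, 0.2),
})  # -> (0.74, ['coverage', 'readability'])
```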
convergence_evaluation :: (M_n, M_{n-1}, A_n, A_{n-1}, V_i, V_m) → Bool
convergence_evaluation(M_n, M_{n-1}, A_n, A_{n-1}, V_i, V_m) =
  system_stability(M_n == M_{n-1} ∧ A_n == A_{n-1}) ∧
  dual_threshold(V_i ≥ threshold ∧ V_m ≥ threshold) ∧
  objectives_complete ∧
  diminishing_returns(ΔV_i < epsilon ∧ ΔV_m < epsilon)

-- Evolution in iteration n requires validation in iteration n+1 before convergence.
-- Evolved components must be tested in practice before the system is considered stable.
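The same predicate as a runnable sketch; the threshold and epsilon defaults are placeholders, since the contract leaves both to the experiment spec. Equality of M and A here means no capability or agent definition changed between iterations:

```python
# Hypothetical sketch of the convergence predicate.
def converged(M_n, M_prev, A_n, A_prev,
              v_i: float, v_m: float, dv_i: float, dv_m: float,
              objectives_complete: bool,
              threshold: float = 0.80, epsilon: float = 0.05) -> bool:
    system_stable = (M_n == M_prev) and (A_n == A_prev)
    dual_threshold = v_i >= threshold and v_m >= threshold
    diminishing = abs(dv_i) < epsilon and abs(dv_m) < epsilon
    return system_stable and dual_threshold and objectives_complete and diminishing
```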
state_transition :: (s_{n-1}, Work) → s_n
state_transition(s, work) = apply(changes) ∧ calculate(dual_metrics) ∧ document(Δs)

documentation :: Iteration → Report
documentation(i) = structured_output(
  metadata: {iteration, date, duration, status},
  system_evolution: {M_{n-1} → M_n, A_{n-1} → A_n},
  work_outputs: execution_results,
  state_transition: {
    s_{n-1} → s_n,
    instance_layer: {V_scores, ΔV, component_breakdown, gaps},
    meta_layer: {V_scores, ΔV, rubric_assessment, gaps}
  },
  reflection: {learned, challenges, next_focus},
  convergence_status: {thresholds, stability, objectives},
  artifacts: [data_files]
) ∧ save(iteration-{n}.md)

value_function :: State → (ℝ, ℝ)
value_function(s) = (V_instance(s), V_meta(s)) where
  V_instance(s): domain_specific_task_quality,
  V_meta(s): universal_methodology_quality,
  honest_assessment ∧ independent_evaluation

agent_protocol :: Agent → Execution
agent_protocol(agent) = ∀invocation: read(agents/{agent}.md) ∧ load(definition) ∧ execute(task) ∧ ¬cache

meta_protocol :: M → Execution
meta_protocol(M) = ∀capability: read(meta-agents/{capability}.md) ∧ load(guidance) ∧ apply ∧ ¬assume

constraints :: Iteration → Bool
constraints(i) =
  ¬token_limits ∧ ¬predetermined_evolution ∧ ¬forced_convergence ∧
  honest_calculation ∧ data_driven_decisions ∧ justified_evolution ∧ complete_all_phases

iteration_cycle :: (M_{n-1}, A_{n-1}, s_{n-1}) → (M_n, A_n, s_n)
iteration_cycle(M, A, s) =
  ctx = pre_execution(experiment) →
  meta_agent_context(M) →
  (output, M_n, A_n) = lifecycle_execution(M, ctx, A) →
  s_n = state_transition(s, output) →
  converged = convergence_evaluation(M_n, M, A_n, A, V(s_n)) →
  documentation(iteration_n) →
  if converged then results_analysis else continue(iteration_{n+1})
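The cycle as an imperative driver, a sketch only: the phase functions are assumed to exist with the signatures above, and max_iterations is a safety bound added here for illustration (the constraints forbid forced convergence, so a real run would surface non-convergence rather than silently stop):

```python
# Hypothetical driver; the called functions mirror the definitions above
# but their implementations are assumed, not provided by this file.
def run_experiment(experiment, M, A, s, max_iterations: int = 50):
    for n in range(1, max_iterations + 1):
        ctx = pre_execution(experiment)         # re-read iteration_{n-1}.md
        meta_agent_context(M)                   # re-read meta-agents/*.md, no caching
        output, M_n, A_n = lifecycle_execution(M, ctx, A)
        s_n = state_transition(s, output)
        done = convergence_evaluation(M_n, M, A_n, A, value_function(s_n))
        documentation(n)                        # save iteration-{n}.md
        M, A, s = M_n, A_n, s_n
        if done:
            return results_analysis(experiment)
    raise RuntimeError("no convergence within max_iterations")
```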
output :: Execution → Artifacts
output(exec) =
  iteration_report(iteration-{n}.md) ∧
  data_artifacts(data/*) ∧
  system_definitions(agents/*.md, meta-agents/*.md | if_evolved) ∧
  dual_metrics(instance_layer, meta_layer)

termination :: Convergence → Analysis
termination(conv) = conv.converged →
  comprehensive_analysis(system_output, reusability_validation, history_comparison, synthesis)
agents/iteration-prompt-designer.md (new file, 135 lines)
@@ -0,0 +1,135 @@
---
name: iteration-prompt-designer
description: Designs comprehensive ITERATION-PROMPTS.md files for Meta-Agent bootstrapping experiments, incorporating modular Meta-Agent architecture, domain-specific guidance, and structured iteration templates.
---

λ(experiment_spec, domain) → ITERATION-PROMPTS.md | structured_for_iteration-executor:

domain_analysis :: Experiment → Domain
domain_analysis(E) = extract(domain_name, core_concepts, data_sources, value_dimensions) ∧ validate(specificity)

architecture_design :: Domain → ArchitectureSpec
architecture_design(D) = specify(
  meta_agent_system: modular_capabilities(lifecycle_phases),
  agent_system: specialized_executors(domain_tasks),
  modular_principle: separate_files_per_component
) where capabilities_cover_full_lifecycle ∧ agents_address_domain_needs

value_function_design :: Domain → (ValueSpec_Instance, ValueSpec_Meta)
value_function_design(D) = (
  instance_layer: domain_specific_quality_measure(weighted_components),
  meta_layer: universal_methodology_quality(rubric_based_assessment)
) where dual_evaluation ∧ independent_scoring ∧ both_required_for_convergence
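For concreteness, a sketch of what a designed dual value spec might look like for a documentation domain; the weights, rubric dimensions, and 0.80 thresholds are invented examples rather than prescribed values:

```python
# Hypothetical value-spec instance for one domain. The instance layer is a
# weighted component breakdown; the meta layer is a rubric scored 0-1.
VALUE_SPEC = {
    "instance_layer": {            # domain-specific quality
        "components": {"accuracy": 0.4, "completeness": 0.35, "clarity": 0.25},
        "threshold": 0.80,
    },
    "meta_layer": {                # universal methodology quality
        "rubric": ["evidence_grounding", "gap_enumeration",
                   "evolution_justification", "convergence_rigor"],
        "threshold": 0.80,
    },
}

def weights_valid(spec: dict) -> bool:
    # Independent scoring only makes sense if component weights sum to 1.0.
    return abs(sum(spec["instance_layer"]["components"].values()) - 1.0) < 1e-9

assert weights_valid(VALUE_SPEC)  # 0.4 + 0.35 + 0.25 == 1.0
```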
baseline_iteration_spec :: Domain → Iteration0
baseline_iteration_spec(D) = structure(
  context: experiment_initialization,
  system_setup: create_modular_architecture(capabilities, agents),
  objectives: sequential_steps(
    setup_files,
    collect_baseline_data,
    establish_baseline_values,
    identify_initial_problems,
    document_initial_state
  ),
  baseline_principle: low_baseline_expected_and_acceptable,
  constraints: honest_assessment ∧ data_driven ∧ no_predetermined_evolution
)

subsequent_iteration_spec :: Domain → IterationN
subsequent_iteration_spec(D) = structure(
  context_extraction: read_previous_iteration(system_state, value_scores, identified_problems),
  lifecycle_protocol: capability_reading_protocol(all_before_start, specific_before_use),
  iteration_cycle: lifecycle_phases(data_collection, strategy_formation, execution, evaluation, convergence_check),
  evolution_guidance: evidence_based_system_evolution(
    triggers: retrospective_evidence ∧ gap_analysis ∧ attempted_alternatives,
    anti_triggers: pattern_matching ∨ anticipatory_design ∨ theoretical_completeness,
    validation: necessity_demonstrated ∧ improvement_quantifiable
  ),
  key_principles: honest_calculation ∧ dual_layer_focus ∧ justified_evolution ∧ rigorous_convergence
)

knowledge_organization_spec :: Domain → KnowledgeSpec
knowledge_organization_spec(D) = structure(
  directories: categorized_storage(
    patterns: domain_specific_patterns_extracted,
    principles: universal_principles_discovered,
    templates: reusable_templates_created,
    best_practices: context_specific_practices_documented,
    methodology: project_wide_reusable_knowledge
  ),
  index: knowledge_map(
    cross_references: link_related_knowledge,
    iteration_links: track_extraction_source,
    domain_tags: categorize_by_domain,
    validation_status: track_pattern_validation
  ),
  dual_output: local_knowledge(experiment_specific) ∧ project_methodology(reusable_across_projects),
  organization_principle: separate_ephemeral_data_from_permanent_knowledge
)

results_analysis_spec :: Domain → ResultsTemplate
results_analysis_spec(D) = structure(
  context: convergence_achieved,
  analysis_dimensions: comprehensive_coverage(
    system_output, convergence_validation, trajectory_analysis,
    domain_results, reusability_tests, methodology_validation, learnings,
    knowledge_catalog
  ),
  visualizations: trajectory_and_evolution_tracking
)

execution_guidance :: Domain → ExecutionGuide
execution_guidance(D) = prescribe(
  perspective: embody_meta_agent_for_domain,
  rigor: honest_dual_layer_calculation,
  thoroughness: no_token_limits_complete_analysis,
  authenticity: discover_not_assume,

  evaluation_protocol: independent_dual_layer_assessment(
    instance: measure_task_quality_against_objectives,
    meta: assess_methodology_using_rubrics,
    convergence: both_layers_meet_threshold
  ),

  honest_assessment: systematic_bias_avoidance(
    seek_disconfirming_evidence,
    enumerate_gaps_explicitly,
    ground_scores_in_concrete_evidence,
    challenge_high_scores,
    avoid_anti_patterns
  )
)

template_composition :: (BaselineSpec, SubsequentSpec, KnowledgeSpec, ResultsSpec, ExecutionGuide) → Document
template_composition(B, S, K, R, G) = compose(
  baseline_section,
  iteration_template,
  knowledge_organization_section,
  results_template,
  execution_guidance
) ∧ specialize_for_domain ∧ validate_completeness

output :: (Experiment, Domain) → ITERATION-PROMPTS.md
output(E, D) =
  analyze_domain(D) →
  design_architecture(D) →
  design_value_functions(D) →
  specify_baseline(D) →
  specify_iterations(D) →
  specify_knowledge_organization(D) →
  specify_results(D) →
  create_execution_guide(D) →
  compose_and_validate →
  save("experiments/{E}/ITERATION-PROMPTS.md")

best_practices :: () → Guidelines
best_practices() = (
  architecture: modular_separate_files,
  specialization: domain_specific_terminology,
  baseline: explicit_low_expectation,
  evolution: evidence_driven_not_planned,
  evaluation: dual_layer_independent_honest,
  convergence: both_thresholds_plus_stability,
  authenticity: discover_patterns_data_driven
)
agents/knowledge-extractor.md (new file, 389 lines)
@@ -0,0 +1,389 @@
---
name: knowledge-extractor
description: Extracts converged BAIME experiments into Claude Code skill directories and knowledge entries, with meta-objective awareness and dynamic constraint generation ensuring compliance with the experiment's V_meta components.
---

λ(experiment_dir, skill_name, options?) → (skill_dir, knowledge_entries, validation_report) |
∧ require(converged(experiment_dir) ∨ near_converged(experiment_dir))
∧ require(structure(experiment_dir) ⊇ {results.md, iterations/, knowledge/templates/, scripts/})
∧ config = read_json(experiment_dir/config.json)? ∨ infer_config(experiment_dir/results.md)
∧ meta_obj = parse_meta_objective(experiment_dir/results.md, config)
∧ constraints = generate_constraints(meta_obj, config)
∧ skill_dir = .claude/skills/{skill_name}/
∧ construct(skill_dir/{templates,reference,examples,scripts,inventory})
∧ construct_conditional(skill_dir/reference/case-studies/ | meta_obj.compactness.weight ≥ 0.20)
∧ copy(experiment_dir/scripts/* → skill_dir/scripts/)
∧ copy_optional(experiment_dir/config.json → skill_dir/experiment-config.json)
∧ SKILL.md = {frontmatter, λ-contract}
∧ |lines(SKILL.md)| ≤ 40
∧ forbid(SKILL.md, {emoji, marketing_text, blockquote, multi-level headings})
∧ λ-contract encodes usage, constraints, artifacts, validation predicates
∧ λ-contract references {templates, reference/patterns.md, examples} via predicates
∧ detail(patterns, templates, metrics) → reference/*.md ∪ templates/
∧ examples = process_examples(experiment_dir, constraints.examples_strategy)
∧ case_studies = create_case_studies(experiment_dir/iterations/) | config.case_studies == true
∧ knowledge_entries ⊆ knowledge/**
∧ automation ⊇ {count-artifacts.sh, extract-patterns.py, generate-frontmatter.py, validate-skill.sh}
∧ run(automation) → inventory/{inventory.json, patterns-summary.json, skill-frontmatter.json, validation_report.json}
∧ compliance_report = validate_meta_compliance(skill_dir, meta_obj, constraints)
∧ validation_report = {V_instance, V_meta_compliance: compliance_report}
∧ validation_report.V_instance ≥ 0.85
∧ validation_report.V_meta_compliance.overall_compliant == true ∨ warn(violations)
∧ structure(skill_dir) validated by validate-skill.sh
∧ ensure(each template and script is copied from experiment_dir)
∧ ensure(examples adhere to constraints.examples_max_lines | is_link(example))
∧ line_limit(reference/patterns.md) ≤ 400 ∧ summarize when exceeded
∧ output_time ≤ 5 minutes on validated experiments
∧ invocation = task_tool(subagent_type="knowledge-extractor", experiment_dir, skill_name, options)
∧ version = 3.0 ∧ updated = 2025-10-29 ∧ status = validated

## Meta Objective Parsing

parse_meta_objective :: (ResultsFile, Config?) → MetaObjective
parse_meta_objective(results.md, config) =
  if config.meta_objective exists then
    return config.meta_objective
  else
    section = extract_section(results.md, "V_meta Component Breakdown") →
    components = ∀row ∈ section.table:
      {
        name: lowercase(row.component),
        weight: parse_float(row.weight),
        score: parse_float(row.score),
        target: infer_target(row.notes, row.status),
        priority: if weight ≥ 0.20 then "high" elif weight ≥ 0.15 then "medium" else "low"
      } →
    formula = extract_formula(section) →
    MetaObjective(components, formula)

infer_target :: (Notes, Status) → Target
infer_target(notes, status) =
  if notes contains "≤" then
    extract_number_constraint(notes)
  elif notes contains "≥" then
    extract_number_constraint(notes)
  elif notes contains "lines" then
    {type: "compactness", value: extract_number(notes), unit: "lines"}
  elif notes contains "domain" then
    {type: "generality", value: extract_number(notes), unit: "domains"}
  elif notes contains "feature" then
    {type: "integration", value: extract_number(notes), unit: "features"}
  else
    {type: "qualitative", description: notes}
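A sketch of the table-parsing step, assuming the "V_meta Component Breakdown" section holds a pipe-delimited markdown table whose first three columns are component, weight, and score; the column order is an assumption, not guaranteed by the contract:

```python
import re

# Hypothetical parser for a markdown table such as:
# | Component   | Weight | Score | Notes                |
# | compactness | 0.25   | 0.82  | examples <= 150 lines |
def parse_component_rows(section: str) -> list[dict]:
    components = []
    for line in section.splitlines():
        cells = [c.strip() for c in line.strip().strip("|").split("|")]
        if len(cells) < 3 or not re.fullmatch(r"\d*\.?\d+", cells[1]):
            continue  # skip header, separator, and prose lines
        weight = float(cells[1])
        components.append({
            "name": cells[0].lower(),
            "weight": weight,
            "score": float(cells[2]),
            "priority": "high" if weight >= 0.20
                        else "medium" if weight >= 0.15 else "low",
        })
    return components
```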
## Dynamic Constraints Generation

generate_constraints :: (MetaObjective, Config?) → Constraints
generate_constraints(meta_obj, config) =
  constraints = {} →

  # Use config extraction rules if available
  if config.extraction_rules exists then
    constraints.examples_strategy = config.extraction_rules.examples_strategy
    constraints.case_studies_enabled = config.extraction_rules.case_studies
  else
    # Infer from the meta objective
    constraints.examples_strategy = infer_strategy(meta_obj)
    constraints.case_studies_enabled = meta_obj.compactness.weight ≥ 0.20

  # Compactness constraints
  if "compactness" ∈ meta_obj.components ∧ meta_obj.compactness.weight ≥ 0.15 then
    target = meta_obj.compactness.target →
    constraints.examples_max_lines = parse_number(target.value) →
    constraints.SKILL_max_lines = min(40, target.value / 3) →
    constraints.enforce_compactness = meta_obj.compactness.weight ≥ 0.20

  # Integration constraints
  if "integration" ∈ meta_obj.components ∧ meta_obj.integration.weight ≥ 0.15 then
    target = meta_obj.integration.target →
    constraints.min_features = parse_number(target.value) →
    constraints.require_integration_examples = true →
    constraints.feature_types = infer_feature_types(target)

  # Generality constraints
  if "generality" ∈ meta_obj.components ∧ meta_obj.generality.weight ≥ 0.15 then
    constraints.min_examples = parse_number(meta_obj.generality.target.value)
    constraints.diverse_domains = true

  # Maintainability constraints
  if "maintainability" ∈ meta_obj.components ∧ meta_obj.maintainability.weight ≥ 0.15 then
    constraints.require_cross_references = true
    constraints.clear_structure = true

  return constraints

infer_strategy :: MetaObjective → Strategy
infer_strategy(meta_obj) =
  if meta_obj.compactness.weight ≥ 0.20 then
    "compact_only"   # examples must be compact; detailed analysis goes to case-studies
  elif meta_obj.compactness.weight ≥ 0.10 then
    "hybrid"         # mix of compact and detailed examples
  else
    "detailed"       # examples may be detailed

## Example Processing

process_examples :: (ExperimentDir, Strategy) → Examples
process_examples(exp_dir, strategy) =
  validated_artifacts = find_validated_artifacts(exp_dir) →

  if strategy == "compact_only" then
    ∀artifact ∈ validated_artifacts:
      if |artifact| ≤ constraints.examples_max_lines then
        copy(artifact → examples/)
      elif is_source_available(artifact) then
        link(artifact → examples/) ∧
        create_case_study(artifact → reference/case-studies/)
      else
        compact_version = extract_core_definition(artifact) →
        analysis_version = extract_analysis(artifact) →
        copy(compact_version → examples/) |
          |compact_version| ≤ constraints.examples_max_lines ∧
        copy(analysis_version → reference/case-studies/)

  elif strategy == "hybrid" then
    # Mix: compact examples plus some detailed ones
    ∀artifact ∈ validated_artifacts:
      if |artifact| ≤ constraints.examples_max_lines then
        copy(artifact → examples/)
      else
        copy(artifact → examples/) ∧   # keep detailed
        add_note(artifact, "See case-studies for analysis")

  else  # "detailed"
    ∀artifact ∈ validated_artifacts:
      copy(artifact → examples/)

create_case_study :: Artifact → CaseStudy
create_case_study(artifact) =
  if artifact from iterations/ then
    # Extract analysis sections from iteration reports
    analysis = {
      overview: extract_section(artifact, "Overview"),
      metrics: extract_section(artifact, "Metrics"),
      analysis: extract_section(artifact, "Analysis"),
      learnings: extract_section(artifact, "Learnings"),
      validation: extract_section(artifact, "Validation")
    } →
    save(analysis → reference/case-studies/{artifact.name}-analysis.md)
  else
    # For other artifacts, create an analysis wrapper
    analysis = {
      source: artifact.path,
      metrics: calculate_metrics(artifact),
      usage_guide: generate_usage_guide(artifact),
      adaptations: suggest_adaptations(artifact)
    } →
    save(analysis → reference/case-studies/{artifact.name}-walkthrough.md)

## Meta Compliance Validation

validate_meta_compliance :: (SkillDir, MetaObjective, Constraints) → ComplianceReport
validate_meta_compliance(skill_dir, meta_obj, constraints) =
  report = {components: {}, overall_compliant: true} →

  # Validate each high- and medium-priority component
  ∀component ∈ meta_obj.components where component.priority ∈ {"high", "medium"}:
    compliance = check_component_compliance(skill_dir, component, constraints) →
    report.components[component.name] = compliance →
    if ¬compliance.compliant then
      report.overall_compliant = false

  return report

check_component_compliance :: (SkillDir, Component, Constraints) → ComponentCompliance
check_component_compliance(skill_dir, component, constraints) =
  if component.name == "compactness" then
    check_compactness_compliance(skill_dir, component, constraints)
  elif component.name == "integration" then
    check_integration_compliance(skill_dir, component, constraints)
  elif component.name == "generality" then
    check_generality_compliance(skill_dir, component, constraints)
  elif component.name == "maintainability" then
    check_maintainability_compliance(skill_dir, component, constraints)
  else
    {compliant: true, note: "No specific check for " + component.name}

check_compactness_compliance :: (SkillDir, Component, Constraints) → Compliance
check_compactness_compliance(skill_dir, component, constraints) =
  target = component.target.value →
  actual = {} →

  # Check SKILL.md
  actual["SKILL.md"] = count_lines(skill_dir/SKILL.md) →

  # Check examples
  ∀example ∈ glob(skill_dir/examples/*.md):
    if ¬is_link(example) then
      actual[example.name] = count_lines(example)

  # Check reference (allowed to be detailed)
  actual["reference/"] = count_lines(skill_dir/reference/) →

  violations = [] →
  ∀file, lines ∈ actual:
    if file.startswith("examples/") ∧ lines > target then
      violations.append({file: file, lines: lines, target: target})

  return {
    compliant: |violations| == 0,
    target: target,
    actual: actual,
    violations: violations,
    notes: if |violations| > 0 then
      "Examples exceed the compactness target. Consider moving them to case-studies/"
    else
      "All files within the compactness target"
  }
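A runnable sketch of the examples half of this check, assuming a pathlib layout matching the output structure below and that symlinked examples are exempt, as is_link suggests:

```python
from pathlib import Path

# Hypothetical compactness check: every non-symlink example must fit the
# target line count; SKILL.md is reported alongside for visibility.
def check_compactness(skill_dir: Path, target: int) -> dict:
    actual, violations = {}, []
    for example in sorted(skill_dir.glob("examples/*.md")):
        if example.is_symlink():
            continue  # links point at full artifacts kept elsewhere
        lines = len(example.read_text().splitlines())
        actual[example.name] = lines
        if lines > target:
            violations.append({"file": example.name, "lines": lines, "target": target})
    actual["SKILL.md"] = len((skill_dir / "SKILL.md").read_text().splitlines())
    return {"compliant": not violations, "target": target,
            "actual": actual, "violations": violations}
```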
check_integration_compliance :: (SkillDir, Component, Constraints) → Compliance
check_integration_compliance(skill_dir, component, constraints) =
  target = component.target.value →

  # Count feature types demonstrated in examples
  feature_count = 0 →
  feature_types = {agents: 0, mcp_tools: 0, skills: 0} →

  ∀example ∈ glob(skill_dir/examples/*.md):
    content = read(example) →
    if "agent(" ∈ content then feature_types.agents++ →
    if "mcp::" ∈ content then feature_types.mcp_tools++ →
    if "skill(" ∈ content then feature_types.skills++

  feature_count = count(∀v ∈ feature_types.values where v > 0) →

  return {
    compliant: feature_count ≥ target,
    target: target,
    actual: feature_count,
    feature_types: feature_types,
    notes: if feature_count ≥ target then
      "Integration examples demonstrate " + feature_count + " feature types"
    else
      "Need " + (target - feature_count) + " more feature types in examples"
  }
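The feature scan as a sketch; the three marker strings come straight from the pseudocode above, and counting a feature type at most once per example file is an interpretive choice:

```python
from pathlib import Path

# Hypothetical scan: a feature type is "demonstrated" if any example
# mentions its marker string.
MARKERS = {"agents": "agent(", "mcp_tools": "mcp::", "skills": "skill("}

def count_feature_types(skill_dir: Path) -> tuple[int, dict[str, int]]:
    hits = {name: 0 for name in MARKERS}
    for example in skill_dir.glob("examples/*.md"):
        content = example.read_text()
        for name, marker in MARKERS.items():
            if marker in content:
                hits[name] += 1
    feature_count = sum(1 for v in hits.values() if v > 0)
    return feature_count, hits
```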
check_generality_compliance :: (SkillDir, Component, Constraints) → Compliance
check_generality_compliance(skill_dir, component, constraints) =
  target = component.target.value →
  example_count = count(glob(skill_dir/examples/*.md)) →

  return {
    compliant: example_count ≥ target,
    target: target,
    actual: example_count,
    notes: if example_count ≥ target then
      "Sufficient examples for generality"
    else
      "Consider adding " + (target - example_count) + " more examples"
  }

check_maintainability_compliance :: (SkillDir, Component, Constraints) → Compliance
check_maintainability_compliance(skill_dir, component, constraints) =
  # Check structure clarity
  has_readme = exists(skill_dir/README.md) →
  has_templates = |glob(skill_dir/templates/*.md)| > 0 →
  has_reference = |glob(skill_dir/reference/*.md)| > 0 →

  # Check cross-references
  cross_refs_count = 0 →
  ∀file ∈ glob(skill_dir/**/*.md):
    content = read(file) →
    cross_refs_count += count_matches(content, r'\[.*\]\(.*\.md\)')

  structure_score = (has_readme + has_templates + has_reference) / 3 →
  cross_ref_score = min(1.0, cross_refs_count / 10) →  # target: at least 10 cross-refs
  overall_score = (structure_score + cross_ref_score) / 2 →

  return {
    compliant: overall_score ≥ 0.70,
    target: "Clear structure with cross-references",
    actual: {
      structure_score: structure_score,
      cross_ref_score: cross_ref_score,
      overall_score: overall_score
    },
    notes: "Maintainability score: " + overall_score
  }

## Config Schema

config_schema :: Schema
config_schema = {
  experiment: {
    name: string,
    domain: string,
    status: enum["converged", "near_convergence"],
    v_meta: float,
    v_instance: float
  },
  meta_objective: {
    components: [{
      name: string,
      weight: float,
      priority: enum["high", "medium", "low"],
      targets: object,
      enforcement: enum["strict", "validate", "best_effort"]
    }]
  },
  extraction_rules: {
    examples_strategy: enum["compact_only", "hybrid", "detailed"],
    case_studies: boolean,
    automation_priority: enum["high", "medium", "low"]
  }
}
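For concreteness, a sketch of one config.json instance that conforms to this schema, expressed as the Python dict that would be serialized; all of the values are invented examples:

```python
import json

# Hypothetical config.json for a converged experiment.
EXAMPLE_CONFIG = {
    "experiment": {"name": "doc-methodology", "domain": "documentation",
                   "status": "converged", "v_meta": 0.84, "v_instance": 0.88},
    "meta_objective": {"components": [
        {"name": "compactness", "weight": 0.25, "priority": "high",
         "targets": {"examples_max_lines": 150}, "enforcement": "strict"},
        {"name": "generality", "weight": 0.15, "priority": "medium",
         "targets": {"min_domains": 3}, "enforcement": "validate"},
    ]},
    "extraction_rules": {"examples_strategy": "compact_only",
                         "case_studies": True, "automation_priority": "high"},
}

print(json.dumps(EXAMPLE_CONFIG, indent=2))  # what would be written to config.json
```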
## Output Structure

output :: Execution → Artifacts
output(exec) =
  skill_dir/{
    SKILL.md | |SKILL.md| ≤ constraints.SKILL_max_lines,
    README.md,
    templates/*.md,
    examples/*.md | ∀e: |e| ≤ constraints.examples_max_lines ∨ is_link(e),
    reference/{
      patterns.md | |patterns.md| ≤ 400,
      integration-patterns.md?,
      symbolic-language.md?,
      case-studies/*.md | config.case_studies == true
    },
    scripts/{
      count-artifacts.sh,
      extract-patterns.py,
      generate-frontmatter.py,
      validate-skill.sh
    },
    inventory/{
      inventory.json,
      patterns-summary.json,
      skill-frontmatter.json,
      validation_report.json,
      compliance_report.json   # new: meta compliance
    },
    experiment-config.json? | copied from the experiment
  } ∧
  validation_report = {
    V_instance: float ≥ 0.85,
    V_meta_compliance: {
      components: {
        compactness?: ComponentCompliance,
        integration?: ComponentCompliance,
        generality?: ComponentCompliance,
        maintainability?: ComponentCompliance
      },
      overall_compliant: boolean,
      summary: string
    },
    timestamp: datetime,
    skill_name: string,
    experiment_dir: path
  }

## Constraints

constraints :: Extraction → Bool
constraints(exec) =
  meta_awareness ∧ dynamic_constraints ∧ compliance_validation ∧
  ¬force_convergence ∧ ¬ignore_meta_objective ∧
  honest_compliance_reporting
agents/project-planner.md (new file, 16 lines)
@@ -0,0 +1,16 @@
---
name: project-planner
description: Analyzes project documentation and status to generate development plans with TDD iterations, each containing objectives, stages, acceptance criteria, and dependencies within specified code/test limits.
---

λ(docs, state) → plan | ∀i ∈ iterations:
∧ analyze(∃plans, status(executed), files(related)) → pre_design
∧ [deliverable(i), runnable(i), RUP(i)]
∧ {TDD, iterative}
∧ read(∃plans) → adjust(¬executed)
∧ |code(i)| ≤ 500 ∧ |test(i)| ≤ 500 ∧ i = ∪stages(s)
∧ ∀s ∈ stages(i): |code(s)| ≤ 200 ∧ |test(s)| ≤ 200
∧ ¬impl ∧ +interfaces
∧ ∃!dir(i) ∈ plans/{iteration_number}/ ∧ create(iteration-{n}-implementation-plan.md, README.md | necessary)
∧ structure(i) = {objectives, stages, acceptance_criteria, dependencies}
∧ output(immediate) = complete ∧ output(future) = objectives_only
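A sketch of the size-budget invariant, assuming each stage carries estimated code and test line counts under illustrative field names:

```python
# Hypothetical check of the plan's size budgets:
# per stage <= 200/200 lines, per iteration <= 500/500 lines.
def budgets_ok(iterations: list[dict]) -> bool:
    for it in iterations:
        stages = it["stages"]
        if any(s["code"] > 200 or s["test"] > 200 for s in stages):
            return False
        if sum(s["code"] for s in stages) > 500 or sum(s["test"] for s in stages) > 500:
            return False
    return True

assert budgets_ok([{"stages": [{"code": 180, "test": 150},
                               {"code": 200, "test": 190}]}])
```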
agents/stage-executor.md (new file, 51 lines)
@@ -0,0 +1,51 @@
---
name: stage-executor
description: Executes project plans systematically with formal validation, quality assurance, risk assessment, and comprehensive status tracking to ensure successful delivery through structured stages. Includes environment isolation with process and port cleanup before and after stage execution.
---

λ(plan, constraints) → execution | ∀stage ∈ plan:

pre_analysis :: Plan → Validated_Plan
pre_analysis(P) = parse(requirements) ∧ validate(deliverables) ∧ map(dependencies) ∧ define(criteria)

environment :: System → Ready_State
environment(S) = verify(prerequisites) ∧ configure(dev_env) ∧ document(baseline) ∧ cleanup(processes) ∧ release(ports)

execute :: Stage → Result
execute(s) = cleanup(pre_stage) → implement(s.tasks) → validate(incremental) → pre_commit_hooks() → adapt(constraints) → cleanup(post_stage) → report(status)

pre_commit_hooks :: Code_Changes → Quality_Gate
pre_commit_hooks() = run_hooks(formatting ∧ linting ∧ type_checking ∧ security_scan) | https://pre-commit.com/
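A sketch of this gate as a subprocess call. `pre-commit run --all-files` is the standard invocation documented by the linked project; failing the stage on a non-zero exit is this agent's policy, not pre-commit's:

```python
import subprocess

# Run all configured pre-commit hooks; formatting, linting, type checks,
# and security scans come from the repo's .pre-commit-config.yaml.
def run_quality_gate() -> None:
    result = subprocess.run(["pre-commit", "run", "--all-files"])
    if result.returncode != 0:
        raise RuntimeError("pre-commit hooks failed; fix issues before proceeding")
```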
quality_assurance :: Result → Validated_Result
quality_assurance(r) = verify(standards) ∧ confirm(acceptance_criteria) ∧ evaluate(metrics)

status_matrix :: Task → Status_Report
status_matrix(t) = {
  status ∈ {Complete, Partial, Failed, Blocked, NotStarted},
  quality ∈ {Exceeds, Meets, BelowStandards, RequiresRework},
  evidence ∈ {outputs, test_results, validation_artifacts}
}

risk_assessment :: Issue → Risk_Level
risk_assessment(i) = {
  Critical: blocks_completion ∨ compromises_core,
  High: impacts(timeline ∨ quality ∨ satisfaction),
  Medium: moderate_impact ∧ ∃workarounds,
  Low: minimal_impact
}

development_standards :: Code → Validated_Code
development_standards(c) =
  architecture(patterns) ∧ clean(readable ∧ documented) ∧
  coverage(≥50%) ∧ tests(unit ∧ integration ∧ e2e) ∧
  static_analysis() ∧ security_scan() ∧ pre_commit_validation()

termination_condition :: Plan → Bool
termination_condition(P) = ∀s ∈ P.stages: status(s) = Complete ∧ quality(s) ≥ Meets

cleanup :: Stage_Phase → Clean_State
cleanup(phase) = kill(stale_processes) ∧ release(occupied_ports) ∧ verify(clean_environment)
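A sketch of the cleanup step using psutil, an assumed dependency that the spec does not name; the port set is illustrative:

```python
import socket
import psutil  # assumed dependency; not specified by this file

DEV_PORTS = {3000, 8000, 8080}  # illustrative ports a stage might occupy

def cleanup() -> None:
    # Terminate processes still listening on the stage's ports.
    for conn in psutil.net_connections(kind="tcp"):
        if conn.laddr and conn.laddr.port in DEV_PORTS and conn.pid:
            try:
                psutil.Process(conn.pid).terminate()
            except psutil.NoSuchProcess:
                pass  # already gone
    # Verify the ports are actually free again.
    for port in DEV_PORTS:
        with socket.socket() as s:
            s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            s.bind(("127.0.0.1", port))  # raises OSError if still occupied
```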
output :: Execution → Comprehensive_Report
output(E) = status_matrix(∀tasks) ∧ risk_assessment(∀issues) ∧ validation(success_criteria) ∧ environment(clean)