Initial commit
This commit is contained in:
85
skills/pocketflow/assets/examples/01_chat.py
Normal file
85
skills/pocketflow/assets/examples/01_chat.py
Normal file
@@ -0,0 +1,85 @@
|
||||
"""
|
||||
PocketFlow Cookbook Example: Interactive Chat Bot
|
||||
|
||||
Difficulty: ☆☆☆ Dummy Level
|
||||
Source: https://github.com/The-Pocket/PocketFlow/tree/main/cookbook/pocketflow-chat
|
||||
|
||||
Description:
|
||||
A basic chat bot with conversation history. Demonstrates:
|
||||
- Self-looping nodes for continuous interaction
|
||||
- Message history management
|
||||
- User input handling
|
||||
- Graceful exit conditions
|
||||
"""
|
||||
|
||||
from pocketflow import Node, Flow
|
||||
# from utils import call_llm # You need to implement this
|
||||
|
||||
|
||||
class ChatNode(Node):
    """Self-looping chat node that keeps the running conversation in shared."""

    def prep(self, shared):
        """Collect the next user message; return the full history, or None to stop."""
        # First invocation: create the history and greet the user.
        if "messages" not in shared:
            shared["messages"] = []
            print("Welcome to the chat! Type 'exit' to end the conversation.")

        user_input = input("\nYou: ")

        # Typing 'exit' (any case) ends the conversation.
        if user_input.lower() == 'exit':
            return None

        shared["messages"].append({"role": "user", "content": user_input})
        # Hand the complete history to exec() for the LLM call.
        return shared["messages"]

    def exec(self, messages):
        """Produce the assistant reply for the accumulated history."""
        if messages is None:
            return None
        # Call LLM with the entire conversation history
        # response = call_llm(messages)
        return "This is a placeholder response. Implement call_llm()."

    def post(self, shared, prep_res, exec_res):
        """Show the reply and loop, or end the flow when the user exited."""
        if prep_res is None or exec_res is None:
            print("\nGoodbye!")
            return None  # End the conversation

        print(f"\nAssistant: {exec_res}")
        shared["messages"].append({"role": "assistant", "content": exec_res})
        # "continue" routes back into this same node.
        return "continue"
|
||||
|
||||
|
||||
# Build the flow with self-loop
|
||||
def create_chat_flow():
    """Build a chat flow whose single node loops back onto itself."""
    node = ChatNode()
    # The "continue" action re-enters the same node for the next turn.
    node - "continue" >> node
    return Flow(start=node)
|
||||
|
||||
|
||||
# Example usage
|
||||
# Example usage: run the interactive chat loop.
if __name__ == "__main__":
    shared = {}
    create_chat_flow().run(shared)

    # The full conversation survives the flow in shared["messages"].
    print(f"\n\nTotal messages: {len(shared.get('messages', []))}")
|
||||
120
skills/pocketflow/assets/examples/02_workflow.py
Normal file
120
skills/pocketflow/assets/examples/02_workflow.py
Normal file
@@ -0,0 +1,120 @@
|
||||
"""
|
||||
PocketFlow Cookbook Example: Article Writing Workflow
|
||||
|
||||
Difficulty: ☆☆☆ Dummy Level
|
||||
Source: https://github.com/The-Pocket/PocketFlow/tree/main/cookbook/pocketflow-workflow
|
||||
|
||||
Description:
|
||||
A writing workflow that outlines, writes content, and applies styling.
|
||||
Demonstrates:
|
||||
- Sequential multi-step workflow
|
||||
- Progressive content generation
|
||||
- Task decomposition pattern
|
||||
"""
|
||||
|
||||
from pocketflow import Node, Flow
|
||||
# from utils import call_llm # You need to implement this
|
||||
|
||||
|
||||
class GenerateOutlineNode(Node):
    """First stage: turn the topic in shared into an article outline."""

    def prep(self, shared):
        # The topic is seeded by the caller before the flow runs.
        return shared["topic"]

    def exec(self, topic):
        """Ask for an outline (placeholder text until call_llm is implemented)."""
        prompt = f"Create a detailed outline for an article about: {topic}"
        # outline = call_llm(prompt)
        outline = f"Outline for {topic}:\n1. Introduction\n2. Main Points\n3. Conclusion"
        print(f"\n📋 Outline Generated ({len(outline)} chars)")
        return outline

    def post(self, shared, prep_res, exec_res):
        # Publish the outline for the next stage.
        shared["outline"] = exec_res
        return "default"
|
||||
|
||||
|
||||
class WriteDraftNode(Node):
    """Second stage: expand the outline into a full draft."""

    def prep(self, shared):
        # Consumes the outline published by GenerateOutlineNode.
        return shared["outline"]

    def exec(self, outline):
        """Write the draft (placeholder text until call_llm is implemented)."""
        prompt = f"Write content based on this outline:\n{outline}"
        # draft = call_llm(prompt)
        draft = f"Draft article based on outline:\n\n{outline}\n\n[Article content here...]"
        print(f"\n✍️ Draft Written ({len(draft)} chars)")
        return draft

    def post(self, shared, prep_res, exec_res):
        # Publish the draft for the refinement stage.
        shared["draft"] = exec_res
        return "default"
|
||||
|
||||
|
||||
class RefineArticleNode(Node):
    """Final stage: polish the draft into the finished article."""

    def prep(self, shared):
        # Consumes the draft published by WriteDraftNode.
        return shared["draft"]

    def exec(self, draft):
        """Refine the draft (placeholder text until call_llm is implemented)."""
        prompt = f"Review and improve this draft:\n{draft}"
        # final = call_llm(prompt)
        final = f"Refined version:\n\n{draft}\n\n[Enhanced with better flow and clarity]"
        print(f"\n✨ Article Refined ({len(final)} chars)")
        return final

    def post(self, shared, prep_res, exec_res):
        shared["final_article"] = exec_res
        print("\n✅ Article Complete!")
        return "default"
|
||||
|
||||
|
||||
# Build the workflow
|
||||
def create_article_flow():
    """Wire outline -> draft -> refine into one sequential flow."""
    outline_node = GenerateOutlineNode()
    draft_node = WriteDraftNode()
    refine_node = RefineArticleNode()

    # Default transitions form a straight pipeline.
    outline_node >> draft_node >> refine_node

    return Flow(start=outline_node)
|
||||
|
||||
|
||||
# Example usage
|
||||
def run_flow(topic="AI Safety"):
    """Run the article workflow for *topic* and return the shared store."""
    shared = {"topic": topic}

    print(f"\n=== Starting Article Workflow: {topic} ===\n")

    create_article_flow().run(shared)

    # Summarize what each stage produced.
    print("\n=== Workflow Statistics ===")
    print(f"Topic: {shared['topic']}")
    print(f"Outline: {len(shared['outline'])} characters")
    print(f"Draft: {len(shared['draft'])} characters")
    print(f"Final: {len(shared['final_article'])} characters")

    return shared
|
||||
|
||||
|
||||
# Example usage: topic comes from the command line, with a default fallback.
if __name__ == "__main__":
    import sys

    topic = " ".join(sys.argv[1:]) if len(sys.argv) > 1 else "AI Safety"
    result = run_flow(topic)

    print("\n=== Final Article ===")
    print(result["final_article"])
|
||||
165
skills/pocketflow/assets/examples/03_agent.py
Normal file
165
skills/pocketflow/assets/examples/03_agent.py
Normal file
@@ -0,0 +1,165 @@
|
||||
"""
|
||||
PocketFlow Cookbook Example: Research Agent
|
||||
|
||||
Difficulty: ☆☆☆ Dummy Level
|
||||
Source: https://github.com/The-Pocket/PocketFlow/tree/main/cookbook/pocketflow-agent
|
||||
|
||||
Description:
|
||||
A research agent that can search the web and answer questions.
|
||||
Demonstrates:
|
||||
- Agent pattern with dynamic action selection
|
||||
- Branching based on decisions
|
||||
- Loop-back for iterative research
|
||||
- Tool usage (web search)
|
||||
"""
|
||||
|
||||
from pocketflow import Node, Flow
|
||||
# from utils import call_llm, search_web # You need to implement these
|
||||
|
||||
|
||||
class DecideActionNode(Node):
    """Agent step: choose between searching the web and answering now."""

    def prep(self, shared):
        # Bundle the question with whatever context has been gathered so far.
        return {
            "question": shared["question"],
            "context": shared.get("context", "No information gathered yet")
        }

    def exec(self, inputs):
        """Pick the next action (placeholder heuristic until call_llm exists)."""
        question = inputs["question"]
        context = inputs["context"]

        # Prompt is prepared for the real LLM call below (currently unused).
        prompt = f"""
Given:
Question: {question}
Current Context: {context}

Should I:
1. Search web for more information
2. Answer with current knowledge

Output in format:
Action: search/answer
Reasoning: [why]
Search Query: [if action is search]
"""
        # response = call_llm(prompt)
        # Parse response to get action

        # Placeholder: search until some context exists, then answer.
        if not context or "No information" in context:
            action, search_query = "search", question
        else:
            action, search_query = "answer", None

        print(f"\n🤔 Agent decided: {action}")

        return {
            "action": action,
            "search_query": search_query
        }

    def post(self, shared, prep_res, exec_res):
        shared["decision"] = exec_res
        # The action name selects the outgoing edge ("search" or "answer").
        return exec_res["action"]
|
||||
|
||||
|
||||
class SearchWebNode(Node):
    """Run a web search and append the results to the shared context."""

    def prep(self, shared):
        # Query was chosen by DecideActionNode.
        return shared["decision"]["search_query"]

    def exec(self, query):
        """Search the web (placeholder results until search_web exists)."""
        print(f"\n🔍 Searching: {query}")
        # results = search_web(query)
        return f"Search results for '{query}':\n- Result 1\n- Result 2\n- Result 3"

    def post(self, shared, prep_res, exec_res):
        # Grow the context with the new results, then loop back to decide.
        shared["context"] = shared.get("context", "") + "\n\n" + exec_res
        print(f"\n📚 Context updated ({len(shared['context'])} chars)")
        return "continue"
|
||||
|
||||
|
||||
class AnswerNode(Node):
    """Produce the final answer from the gathered context."""

    def prep(self, shared):
        return {
            "question": shared["question"],
            "context": shared.get("context", "")
        }

    def exec(self, inputs):
        """Answer using the context (placeholder until call_llm exists)."""
        prompt = f"""
Context: {inputs['context']}

Question: {inputs['question']}

Provide a comprehensive answer:
"""
        # answer = call_llm(prompt)
        return f"Based on the research, here's the answer to '{inputs['question']}':\n\n[Answer based on context]"

    def post(self, shared, prep_res, exec_res):
        shared["final_answer"] = exec_res
        print(f"\n✅ Answer generated")
        # "done" terminates the agent loop.
        return "done"
|
||||
|
||||
|
||||
# Build the agent flow
|
||||
def create_agent_flow():
    """Assemble the decide/search/answer graph with a research loop."""
    decide_node = DecideActionNode()
    search_node = SearchWebNode()
    answer_node = AnswerNode()

    # The decision branches to either searching or answering...
    decide_node - "search" >> search_node
    decide_node - "answer" >> answer_node
    # ...and a search loops back for another decision.
    search_node - "continue" >> decide_node

    return Flow(start=decide_node)
|
||||
|
||||
|
||||
# Example usage
|
||||
def main():
    """Entry point: read the question, run the agent, print the answer."""
    import sys

    # Command-line arguments override the default question.
    if len(sys.argv) > 1:
        question = " ".join(sys.argv[1:])
    else:
        question = "Who won the Nobel Prize in Physics 2024?"

    shared = {"question": question}

    print(f"\n🤔 Processing question: {question}")
    print("="*50)

    create_agent_flow().run(shared)

    print("\n" + "="*50)
    print("\n🎯 Final Answer:")
    print(shared.get("final_answer", "No answer found"))
|
||||
|
||||
|
||||
# Script entry point: run the research agent demo.
if __name__ == "__main__":
    main()
|
||||
226
skills/pocketflow/assets/examples/04_rag.py
Normal file
226
skills/pocketflow/assets/examples/04_rag.py
Normal file
@@ -0,0 +1,226 @@
|
||||
"""
|
||||
PocketFlow Cookbook Example: RAG (Retrieval Augmented Generation)
|
||||
|
||||
Difficulty: ☆☆☆ Dummy Level
|
||||
Source: https://github.com/The-Pocket/PocketFlow/tree/main/cookbook/pocketflow-rag
|
||||
|
||||
Description:
|
||||
A simple RAG system with offline indexing and online querying.
|
||||
Demonstrates:
|
||||
- Two-stage RAG pipeline (offline + online)
|
||||
- Document embedding and indexing
|
||||
- Similarity search
|
||||
- Context-based answer generation
|
||||
"""
|
||||
|
||||
from pocketflow import Node, Flow
|
||||
# from utils import call_llm, get_embedding, build_index, search_index
|
||||
import sys
|
||||
|
||||
|
||||
# ============================================================
|
||||
# OFFLINE FLOW: Index Documents
|
||||
# ============================================================
|
||||
|
||||
class EmbedDocumentsNode(Node):
    """Offline stage: compute an embedding vector per document text."""

    def prep(self, shared):
        return shared["texts"]

    def exec(self, texts):
        """Embed every text (fixed placeholder vectors until get_embedding exists)."""
        print(f"\n📊 Embedding {len(texts)} documents...")
        # embeddings = [get_embedding(text) for text in texts]
        return [[0.1] * 128 for _ in texts]  # Placeholder

    def post(self, shared, prep_res, exec_res):
        shared["embeddings"] = exec_res
        print(f"✅ Embedded {len(exec_res)} documents")
        return "default"
|
||||
|
||||
|
||||
class BuildIndexNode(Node):
    """Offline stage: build a search index over the document embeddings."""

    def prep(self, shared):
        return shared["embeddings"]

    def exec(self, embeddings):
        """Create the vector index (placeholder until a real index is wired in)."""
        print(f"\n🔨 Building index...")
        # index = build_faiss_index(embeddings)
        return "placeholder_index"  # Placeholder

    def post(self, shared, prep_res, exec_res):
        shared["index"] = exec_res
        print("✅ Index built")
        return "default"
|
||||
|
||||
|
||||
# Build offline flow
|
||||
# Offline flow: embed all documents, then build the vector index.
# Module-level so run_rag_demo() (and importers) can reuse it directly.
embed_docs = EmbedDocumentsNode()
build_index = BuildIndexNode()
embed_docs >> build_index
offline_flow = Flow(start=embed_docs)
|
||||
|
||||
|
||||
# ============================================================
|
||||
# ONLINE FLOW: Query and Answer
|
||||
# ============================================================
|
||||
|
||||
class EmbedQueryNode(Node):
    """Online stage: embed the incoming user query."""

    def prep(self, shared):
        return shared["query"]

    def exec(self, query):
        """Compute the query embedding (placeholder until get_embedding exists)."""
        print(f"\n🔍 Processing query: {query}")
        # query_embedding = get_embedding(query)
        return [0.1] * 128  # Placeholder

    def post(self, shared, prep_res, exec_res):
        shared["query_embedding"] = exec_res
        return "default"
|
||||
|
||||
|
||||
class RetrieveDocumentNode(Node):
    """Online stage: find the document most similar to the query embedding."""

    def prep(self, shared):
        return {
            "query_embedding": shared["query_embedding"],
            "index": shared["index"],
            "texts": shared["texts"]
        }

    def exec(self, inputs):
        """Search the index (placeholder: always the first document)."""
        print(f"\n📚 Searching index...")
        # I, D = search_index(inputs["index"], inputs["query_embedding"], top_k=1)
        # best_doc = inputs["texts"][I[0][0]]

        # Placeholder: return first document
        best_doc = inputs["texts"][0]

        print(f"✅ Retrieved document ({len(best_doc)} chars)")
        return best_doc

    def post(self, shared, prep_res, exec_res):
        shared["retrieved_document"] = exec_res
        return "default"
|
||||
|
||||
|
||||
class GenerateAnswerNode(Node):
    """Online stage: answer the query grounded in the retrieved document."""

    def prep(self, shared):
        return {
            "query": shared["query"],
            "context": shared["retrieved_document"]
        }

    def exec(self, inputs):
        """Compose the grounded answer (placeholder until call_llm exists)."""
        print(f"\n✍️ Generating answer...")

        prompt = f"""
Context: {inputs['context']}

Question: {inputs['query']}

Answer the question using only the information from the context:
"""
        # answer = call_llm(prompt)
        return f"Based on the context, the answer is: [Answer would be generated here]"

    def post(self, shared, prep_res, exec_res):
        shared["generated_answer"] = exec_res
        print(f"✅ Answer generated")
        return "default"
|
||||
|
||||
|
||||
# Build online flow
|
||||
# Online flow: embed the query, retrieve the best document, generate an answer.
# Module-level so run_rag_demo() (and importers) can reuse it directly.
embed_query = EmbedQueryNode()
retrieve = RetrieveDocumentNode()
generate = GenerateAnswerNode()
embed_query >> retrieve >> generate
online_flow = Flow(start=embed_query)
|
||||
|
||||
|
||||
# ============================================================
|
||||
# Main Demo
|
||||
# ============================================================
|
||||
|
||||
def run_rag_demo():
    """End-to-end RAG demo: index the sample documents, then answer one query."""

    # Sample documents
    texts = [
        """Pocket Flow is a 100-line minimalist LLM framework.
Lightweight: Just 100 lines. Zero bloat, zero dependencies, zero vendor lock-in.
Expressive: Everything you love—(Multi-)Agents, Workflow, RAG, and more.
Agentic Coding: Let AI Agents (e.g., Cursor AI) build Agents—10x productivity boost!
To install, pip install pocketflow or just copy the source code (only 100 lines).""",

        """NeurAlign M7 is a revolutionary non-invasive neural alignment device.
Targeted magnetic resonance technology increases neuroplasticity in specific brain regions.
Clinical trials showed 72% improvement in PTSD treatment outcomes.
Developed by Cortex Medical in 2024 as an adjunct to standard cognitive therapy.
Portable design allows for in-home use with remote practitioner monitoring.""",

        """Q-Mesh is QuantumLeap Technologies' instantaneous data synchronization protocol.
Utilizes directed acyclic graph consensus for 500,000 transactions per second.
Consumes 95% less energy than traditional blockchain systems.
Adopted by three central banks for secure financial data transfer.
Released in February 2024 after five years of development in stealth mode.""",
    ]

    # The first "--<question>" argument overrides the default query.
    default_query = "How to install PocketFlow?"
    query = next(
        (arg[2:] for arg in sys.argv[1:] if arg.startswith("--")),
        default_query,
    )

    print("=" * 60)
    print("PocketFlow RAG Demo")
    print("=" * 60)

    # One shared store carries state across both flows.
    shared = {
        "texts": texts,
        "query": query
    }

    # Stage 1: offline indexing.
    print("\n📥 STAGE 1: Indexing Documents")
    print("-" * 60)
    offline_flow.run(shared)

    # Stage 2: online retrieval and answering.
    print("\n🔍 STAGE 2: Query and Answer")
    print("-" * 60)
    online_flow.run(shared)

    # Display results
    print("\n" + "=" * 60)
    print("✅ RAG Complete")
    print("=" * 60)
    print(f"\nQuery: {shared['query']}")
    print(f"\nRetrieved Context Preview:")
    print(shared["retrieved_document"][:150] + "...")
    print(f"\nGenerated Answer:")
    print(shared["generated_answer"])
|
||||
|
||||
|
||||
# Script entry point: run the two-stage RAG demonstration.
if __name__ == "__main__":
    run_rag_demo()
|
||||
175
skills/pocketflow/assets/examples/05_structured_output.py
Normal file
175
skills/pocketflow/assets/examples/05_structured_output.py
Normal file
@@ -0,0 +1,175 @@
|
||||
"""
|
||||
PocketFlow Cookbook Example: Structured Output (Resume Parser)
|
||||
|
||||
Difficulty: ☆☆☆ Dummy Level
|
||||
Source: https://github.com/The-Pocket/PocketFlow/tree/main/cookbook/pocketflow-structured-output
|
||||
|
||||
Description:
|
||||
Extract structured data from resumes using YAML prompting.
|
||||
Demonstrates:
|
||||
- Structured LLM output with YAML
|
||||
- Schema validation with assertions
|
||||
- Retry logic for parsing errors
|
||||
- Index-based skill matching
|
||||
"""
|
||||
|
||||
import yaml
|
||||
from pocketflow import Node, Flow
|
||||
# from utils import call_llm # You need to implement this
|
||||
|
||||
|
||||
class ResumeParserNode(Node):
    """Parse resume text into structured YAML format.

    Expects shared["resume_text"] (str) and optionally shared["target_skills"]
    (list of skill names); stores the parsed dict in shared["structured_data"].
    The exec() validation uses assertions so that, when run with max_retries,
    a malformed LLM response triggers a retry instead of silently passing.
    """

    def prep(self, shared):
        # target_skills defaults to an empty list so skill matching is optional.
        return {
            "resume_text": shared["resume_text"],
            "target_skills": shared.get("target_skills", [])
        }

    def exec(self, prep_res):
        """Extract structured data from the resume via a YAML-formatted prompt.

        Returns the parsed dict with keys: name, email, experience,
        skill_indexes. Raises AssertionError when the YAML is missing a
        required field (intended to be caught by the node's retry machinery).
        """
        resume_text = prep_res["resume_text"]
        target_skills = prep_res["target_skills"]

        # Create skill list with indexes for prompt, so the LLM answers with
        # integer indexes instead of free-text skill names.
        skill_list_for_prompt = "\n".join(
            [f"{i}: {skill}" for i, skill in enumerate(target_skills)]
        )

        prompt = f"""
Analyze the resume below. Output ONLY the requested information in YAML format.

**Resume:**
```
{resume_text}
```

**Target Skills (use these indexes):**
```
{skill_list_for_prompt}
```

**YAML Output Requirements:**
- Extract `name` (string)
- Extract `email` (string)
- Extract `experience` (list of objects with `title` and `company`)
- Extract `skill_indexes` (list of integers found from the Target Skills list)
- **Add a YAML comment (`#`) explaining the source BEFORE each field**

Generate the YAML output now:
"""

        # Get LLM response
        # response = call_llm(prompt)

        # Placeholder response mimicking the expected fenced-YAML reply.
        response = """
```yaml
# Extracted from header
name: John Smith

# Found in contact section
email: john.smith@email.com

# Work history section
experience:
  - title: Senior Developer
    company: Tech Corp
  - title: Software Engineer
    company: StartupXYZ

# Skills matching target list
skill_indexes: [0, 2, 5] # Team leadership, Project management, Python
```
"""

        # Parse YAML from response: take the text between the ```yaml fence
        # and the next ``` fence. (Raises IndexError if no fence is present,
        # which also feeds the retry loop.)
        yaml_str = response.split("```yaml")[1].split("```")[0].strip()
        structured_result = yaml.safe_load(yaml_str)

        # Validate structure — each failure message says which field is missing.
        assert structured_result is not None, "Parsed YAML is None"
        assert "name" in structured_result, "Missing 'name'"
        assert "email" in structured_result, "Missing 'email'"
        assert "experience" in structured_result, "Missing 'experience'"
        assert isinstance(structured_result.get("experience"), list), "'experience' is not a list"
        assert "skill_indexes" in structured_result, "Missing 'skill_indexes'"

        return structured_result

    def post(self, shared, prep_res, exec_res):
        """Store and display structured data."""
        shared["structured_data"] = exec_res

        print("\n=== STRUCTURED RESUME DATA ===\n")
        print(yaml.dump(exec_res, sort_keys=False, allow_unicode=True,
                        default_flow_style=None))
        print("\n✅ Extracted resume information.\n")

        return "default"
|
||||
|
||||
|
||||
# Example usage
|
||||
def run_parser():
    """Demo: parse a sample resume and report which target skills matched."""

    # Sample resume text
    sample_resume = """
JOHN SMITH
Email: john.smith@email.com | Phone: (555) 123-4567

EXPERIENCE
Senior Developer - Tech Corp (2020-Present)
- Led team of 5 developers
- Built scalable Python applications
- Managed multiple projects simultaneously

Software Engineer - StartupXYZ (2018-2020)
- Developed web applications
- Collaborated with cross-functional teams
- Presented technical solutions to stakeholders

SKILLS
- Team Leadership & Management
- Python, JavaScript, SQL
- Project Management
- Public Speaking
- CRM Software
- Data Analysis
"""

    # Skills to look for, addressed by index in the parser's output.
    target_skills = [
        "Team leadership & management",
        "CRM software",
        "Project management",
        "Public speaking",
        "Microsoft Office",
        "Python",
        "Data Analysis"
    ]

    shared = {
        "resume_text": sample_resume,
        "target_skills": target_skills
    }

    # Retries give the LLM extra chances when YAML validation fails.
    flow = Flow(start=ResumeParserNode(max_retries=3, wait=10))
    flow.run(shared)

    # Report matched skills, ignoring any out-of-range indexes.
    if "structured_data" in shared:
        matched = shared["structured_data"].get("skill_indexes", [])
        if matched:
            print("\n--- Matched Target Skills ---")
            for index in matched:
                if 0 <= index < len(target_skills):
                    print(f"✓ {target_skills[index]} (Index: {index})")
|
||||
|
||||
|
||||
# Script entry point: run the resume-parsing demo.
if __name__ == "__main__":
    run_parser()
|
||||
153
skills/pocketflow/assets/examples/06_multi_agent.py
Normal file
153
skills/pocketflow/assets/examples/06_multi_agent.py
Normal file
@@ -0,0 +1,153 @@
|
||||
"""
|
||||
PocketFlow Cookbook Example: Multi-Agent (Taboo Game)
|
||||
|
||||
Difficulty: ★☆☆ Beginner Level
|
||||
Source: https://github.com/The-Pocket/PocketFlow/tree/main/cookbook/pocketflow-multi-agent
|
||||
|
||||
Description:
|
||||
Two agents playing Taboo word game with async communication.
|
||||
Demonstrates:
|
||||
- Multi-agent systems
|
||||
- Async message queues for inter-agent communication
|
||||
- AsyncNode and AsyncFlow
|
||||
- Self-looping async nodes
|
||||
- Game logic with termination conditions
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
from pocketflow import AsyncNode, AsyncFlow
|
||||
# from utils import call_llm # You need to implement this
|
||||
|
||||
|
||||
class AsyncHinter(AsyncNode):
    """Agent that produces hints for the target word."""

    async def prep_async(self, shared):
        """Block until the guesser sends a guess (or the end-of-game marker)."""
        guess = await shared["hinter_queue"].get()

        # "GAME_OVER" is the sentinel telling this agent to stop.
        if guess == "GAME_OVER":
            return None

        return (
            shared["target_word"],
            shared["forbidden_words"],
            shared.get("past_guesses", [])
        )

    async def exec_async(self, inputs):
        """Build a short hint that avoids the forbidden words."""
        if inputs is None:
            return None

        target, forbidden, past_guesses = inputs

        prompt = f"Generate hint for '{target}'\nForbidden words: {forbidden}"
        if past_guesses:
            prompt += f"\nPrevious wrong guesses: {past_guesses}\nMake hint more specific."
        prompt += "\nUse at most 5 words."

        # hint = call_llm(prompt)
        hint = "Thinking of childhood summer days"  # Placeholder

        print(f"\nHinter: Here's your hint - {hint}")
        return hint

    async def post_async(self, shared, prep_res, exec_res):
        """Forward the hint to the guesser, or end this agent's loop."""
        if exec_res is None:
            return "end"

        await shared["guesser_queue"].put(exec_res)
        return "continue"
|
||||
|
||||
|
||||
class AsyncGuesser(AsyncNode):
    """Agent that guesses the target word from incoming hints."""

    async def prep_async(self, shared):
        """Block until the hinter delivers a hint."""
        hint = await shared["guesser_queue"].get()
        return hint, shared.get("past_guesses", [])

    async def exec_async(self, inputs):
        """Guess a single word from the hint (placeholder until call_llm exists)."""
        hint, past_guesses = inputs

        prompt = f"""
Given hint: {hint}
Past wrong guesses: {past_guesses}
Make a new guess. Reply with a single word:
"""
        # guess = call_llm(prompt)
        guess = "memories"  # Placeholder

        print(f"Guesser: I guess it's - {guess}")
        return guess

    async def post_async(self, shared, prep_res, exec_res):
        """Check the guess: stop on a hit, otherwise hand it back to the hinter."""
        if exec_res.lower() == shared["target_word"].lower():
            print("\n✅ Game Over - Correct guess!")
            # Tell the hinter to shut down as well.
            await shared["hinter_queue"].put("GAME_OVER")
            return "end"

        # Remember the wrong guess so future hints get more specific.
        shared.setdefault("past_guesses", []).append(exec_res)

        await shared["hinter_queue"].put(exec_res)
        return "continue"
|
||||
|
||||
|
||||
async def main():
    """Set up and run the two-agent Taboo game."""
    # Game state plus one queue per agent for message passing.
    shared = {
        "target_word": "nostalgia",
        "forbidden_words": ["memory", "past", "remember", "feeling", "longing"],
        "hinter_queue": asyncio.Queue(),
        "guesser_queue": asyncio.Queue()
    }

    print("\n" + "="*50)
    print("🎮 Taboo Game Starting!")
    print("="*50)
    print(f"Target word: {shared['target_word']}")
    print(f"Forbidden words: {shared['forbidden_words']}")
    print("="*50 + "\n")

    # Kick the hinter off with an empty first "guess".
    await shared["hinter_queue"].put("")

    hinter = AsyncHinter()
    guesser = AsyncGuesser()

    # Each agent loops on itself until its post_async returns "end".
    hinter - "continue" >> hinter
    guesser - "continue" >> guesser

    # Run both agent flows concurrently over the same shared store.
    await asyncio.gather(
        AsyncFlow(start=hinter).run_async(shared),
        AsyncFlow(start=guesser).run_async(shared)
    )

    print("\n" + "="*50)
    print("🏁 Game Complete!")
    print(f"Total guesses: {len(shared.get('past_guesses', []))}")
    print("="*50 + "\n")
|
||||
|
||||
|
||||
# Script entry point: drive the async game loop.
if __name__ == "__main__":
    asyncio.run(main())
|
||||
Reference in New Issue
Block a user