Initial commit

This commit is contained in:
Zhongwei Li
2025-11-29 18:28:34 +08:00
commit 390afca02b
220 changed files with 86013 additions and 0 deletions

View File

@@ -0,0 +1,534 @@
# LangChain4j AI Services - Practical Examples
This document provides practical, production-ready examples for LangChain4j AI Services patterns.
## 1. Basic Chat Interface
**Scenario**: Simple conversational interface without memory.
```java
import dev.langchain4j.service.AiServices;
import dev.langchain4j.service.UserMessage;
import dev.langchain4j.model.openai.OpenAiChatModel;
interface SimpleChat {
String chat(String userMessage);
}
public class BasicChatExample {
public static void main(String[] args) {
var chatModel = OpenAiChatModel.builder()
.apiKey(System.getenv("OPENAI_API_KEY"))
.modelName("gpt-4o-mini")
.temperature(0.7)
.build();
var chat = AiServices.builder(SimpleChat.class)
.chatModel(chatModel)
.build();
String response = chat.chat("What is Spring Boot?");
System.out.println(response);
}
}
```
## 2. Stateful Assistant with Memory
**Scenario**: Multi-turn conversation with 10-message history.
```java
import dev.langchain4j.service.AiServices;
import dev.langchain4j.memory.chat.MessageWindowChatMemory;
import dev.langchain4j.model.openai.OpenAiChatModel;
interface ConversationalAssistant {
String chat(String userMessage);
}
public class StatefulAssistantExample {
public static void main(String[] args) {
var chatModel = OpenAiChatModel.builder()
.apiKey(System.getenv("OPENAI_API_KEY"))
.modelName("gpt-4o-mini")
.build();
var assistant = AiServices.builder(ConversationalAssistant.class)
.chatModel(chatModel)
.chatMemory(MessageWindowChatMemory.withMaxMessages(10))
.build();
// Multi-turn conversation
System.out.println(assistant.chat("My name is Alice"));
System.out.println(assistant.chat("What is my name?")); // Remembers: "Your name is Alice"
System.out.println(assistant.chat("What year was Spring Boot released?")); // Answers: "2014"
System.out.println(assistant.chat("Tell me more about it")); // Context aware
}
}
```
## 3. Multi-User Memory with @MemoryId
**Scenario**: Separate conversation history per user.
```java
import dev.langchain4j.service.AiServices;
import dev.langchain4j.service.MemoryId;
import dev.langchain4j.service.UserMessage;
import dev.langchain4j.memory.chat.MessageWindowChatMemory;
import dev.langchain4j.model.openai.OpenAiChatModel;
interface MultiUserAssistant {
    // With more than one parameter, every parameter must be annotated:
    String chat(@MemoryId int userId, @UserMessage String userMessage);
}
public class MultiUserMemoryExample {
public static void main(String[] args) {
var chatModel = OpenAiChatModel.builder()
.apiKey(System.getenv("OPENAI_API_KEY"))
.modelName("gpt-4o-mini")
.build();
var assistant = AiServices.builder(MultiUserAssistant.class)
.chatModel(chatModel)
.chatMemoryProvider(memoryId -> MessageWindowChatMemory.withMaxMessages(20))
.build();
// User 1 conversation
System.out.println(assistant.chat(1, "I like Java"));
System.out.println(assistant.chat(1, "What language do I prefer?")); // Java
// User 2 conversation - separate memory
System.out.println(assistant.chat(2, "I prefer Python"));
System.out.println(assistant.chat(2, "What language do I prefer?")); // Python
// User 1 - still remembers Java
System.out.println(assistant.chat(1, "What about me?")); // Java
}
}
```
## 4. System Message & Template Variables
**Scenario**: Configurable system prompt with dynamic template variables.
```java
import dev.langchain4j.service.AiServices;
import dev.langchain4j.service.SystemMessage;
import dev.langchain4j.service.UserMessage;
import dev.langchain4j.service.V;
import dev.langchain4j.model.openai.OpenAiChatModel;
interface TemplatedAssistant {
    @SystemMessage("You are a {{role}} expert. Be concise and professional.")
    // Methods with multiple parameters must annotate each one; the plain user
    // message parameter is marked with @UserMessage:
    String chat(@V("role") String role, @UserMessage String userMessage);
    @SystemMessage("You are a helpful assistant. Translate to {{language}}")
    @UserMessage("Translate this: {{text}}")
    String translate(@V("text") String text, @V("language") String language);
}
public class TemplatedAssistantExample {
public static void main(String[] args) {
var chatModel = OpenAiChatModel.builder()
.apiKey(System.getenv("OPENAI_API_KEY"))
.modelName("gpt-4o-mini")
.temperature(0.3)
.build();
var assistant = AiServices.create(TemplatedAssistant.class, chatModel);
// Dynamic role
System.out.println(assistant.chat("Java", "Explain dependency injection"));
System.out.println(assistant.chat("DevOps", "Explain Docker containers"));
// Translation with template
System.out.println(assistant.translate("Hello, how are you?", "Spanish"));
System.out.println(assistant.translate("Good morning", "French"));
}
}
```
## 5. Structured Output Extraction
**Scenario**: Extract structured data (POJO, enum, list) from LLM responses.
```java
import dev.langchain4j.service.AiServices;
import dev.langchain4j.service.UserMessage;
import dev.langchain4j.model.output.structured.Description;
import dev.langchain4j.model.openai.OpenAiChatModel;
import java.util.List;
enum Sentiment {
POSITIVE, NEGATIVE, NEUTRAL
}
class ContactInfo {
@Description("Person's full name")
String fullName;
@Description("Email address")
String email;
@Description("Phone number with country code")
String phone;
}
interface DataExtractor {
    // A single un-annotated parameter is referenced in templates as {{it}}
    // (to use a custom name like {{text}}, annotate the parameter with @V("text")):
    @UserMessage("Analyze sentiment: {{it}}")
    Sentiment extractSentiment(String text);
    @UserMessage("Extract contact from: {{it}}")
    ContactInfo extractContact(String text);
    @UserMessage("List all technologies in: {{it}}")
    List<String> extractTechnologies(String text);
    @UserMessage("Count items in: {{it}}")
    int countItems(String text);
}
public class StructuredOutputExample {
public static void main(String[] args) {
var chatModel = OpenAiChatModel.builder()
.apiKey(System.getenv("OPENAI_API_KEY"))
.modelName("gpt-4o-mini")
.responseFormat("json_object")
.build();
var extractor = AiServices.create(DataExtractor.class, chatModel);
// Enum extraction
Sentiment sentiment = extractor.extractSentiment("This product is amazing!");
System.out.println("Sentiment: " + sentiment); // POSITIVE
// POJO extraction
ContactInfo contact = extractor.extractContact(
"John Smith, john@example.com, +1-555-1234");
System.out.println("Name: " + contact.fullName);
System.out.println("Email: " + contact.email);
// List extraction
List<String> techs = extractor.extractTechnologies(
"We use Java, Spring Boot, PostgreSQL, and Docker");
System.out.println("Technologies: " + techs); // [Java, Spring Boot, PostgreSQL, Docker]
// Primitive type
int count = extractor.countItems("I have 3 apples, 5 oranges, and 2 bananas");
System.out.println("Total items: " + count); // 10
}
}
```
## 6. Tool Calling / Function Calling
**Scenario**: LLM calls Java methods to solve problems.
```java
import dev.langchain4j.agent.tool.Tool;
import dev.langchain4j.agent.tool.P;
import dev.langchain4j.service.AiServices;
import dev.langchain4j.memory.chat.MessageWindowChatMemory;
import dev.langchain4j.model.openai.OpenAiChatModel;
import java.time.LocalDate;
class Calculator {
@Tool("Add two numbers")
int add(@P("first number") int a, @P("second number") int b) {
return a + b;
}
@Tool("Multiply two numbers")
int multiply(@P("first") int a, @P("second") int b) {
return a * b;
}
}
class WeatherService {
@Tool("Get weather for a city")
String getWeather(@P("city name") String city) {
// Simulate API call
return "Weather in " + city + ": 22°C, Sunny";
}
}
class DateService {
@Tool("Get current date")
String getCurrentDate() {
return LocalDate.now().toString();
}
}
interface ToolUsingAssistant {
String chat(String userMessage);
}
public class ToolCallingExample {
public static void main(String[] args) {
var chatModel = OpenAiChatModel.builder()
.apiKey(System.getenv("OPENAI_API_KEY"))
.modelName("gpt-4o-mini")
.temperature(0.0)
.build();
var assistant = AiServices.builder(ToolUsingAssistant.class)
.chatModel(chatModel)
.chatMemory(MessageWindowChatMemory.withMaxMessages(10))
.tools(new Calculator(), new WeatherService(), new DateService())
.build();
// LLM calls tools automatically
System.out.println(assistant.chat("What is 25 + 37?"));
// Uses Calculator.add() → "25 + 37 equals 62"
System.out.println(assistant.chat("What's the weather in Paris?"));
// Uses WeatherService.getWeather() → "Weather in Paris: 22°C, Sunny"
System.out.println(assistant.chat("Calculate (5 + 3) * 4"));
// Uses add() and multiply() → "Result is 32"
System.out.println(assistant.chat("What's today's date?"));
// Uses getCurrentDate() → Shows current date
}
}
```
## 7. Streaming Responses
**Scenario**: Real-time token-by-token streaming for UI responsiveness.
```java
import dev.langchain4j.service.AiServices;
import dev.langchain4j.service.TokenStream;
import dev.langchain4j.model.openai.OpenAiStreamingChatModel;
interface StreamingAssistant {
TokenStream streamChat(String userMessage);
}
public class StreamingExample {
public static void main(String[] args) {
var streamingModel = OpenAiStreamingChatModel.builder()
.apiKey(System.getenv("OPENAI_API_KEY"))
.modelName("gpt-4o-mini")
.temperature(0.7)
.build();
var assistant = AiServices.builder(StreamingAssistant.class)
.streamingChatModel(streamingModel)
.build();
// Stream response token by token
assistant.streamChat("Tell me a short story about a robot")
.onNext(token -> System.out.print(token)) // Print each token
.onCompleteResponse(response -> {
System.out.println("\n--- Complete ---");
System.out.println("Tokens used: " + response.tokenUsage().totalTokenCount());
})
.onError(error -> System.err.println("Error: " + error.getMessage()))
.start();
// Wait for completion
try {
Thread.sleep(5000);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
}
}
}
```
## 8. System Persona with Context
**Scenario**: Different assistants with distinct personalities and knowledge domains.
```java
import dev.langchain4j.service.AiServices;
import dev.langchain4j.service.SystemMessage;
import dev.langchain4j.model.openai.OpenAiChatModel;
interface JavaExpert {
@SystemMessage("""
You are a Java expert with 15+ years experience.
Focus on best practices, performance, and clean code.
Provide code examples when relevant.
""")
String answer(String question);
}
interface SecurityExpert {
@SystemMessage("""
You are a cybersecurity expert specializing in application security.
Always consider OWASP principles and threat modeling.
Provide practical security recommendations.
""")
String answer(String question);
}
interface DevOpsExpert {
@SystemMessage("""
You are a DevOps engineer with expertise in cloud deployment,
CI/CD pipelines, containerization, and infrastructure as code.
""")
String answer(String question);
}
public class PersonaExample {
public static void main(String[] args) {
var chatModel = OpenAiChatModel.builder()
.apiKey(System.getenv("OPENAI_API_KEY"))
.modelName("gpt-4o-mini")
.temperature(0.5)
.build();
var javaExpert = AiServices.create(JavaExpert.class, chatModel);
var securityExpert = AiServices.create(SecurityExpert.class, chatModel);
var devopsExpert = AiServices.create(DevOpsExpert.class, chatModel);
var question = "How should I handle database connections?";
System.out.println("=== Java Expert ===");
System.out.println(javaExpert.answer(question));
System.out.println("\n=== Security Expert ===");
System.out.println(securityExpert.answer(question));
System.out.println("\n=== DevOps Expert ===");
System.out.println(devopsExpert.answer(question));
}
}
```
## 9. Error Handling & Tool Execution Errors
**Scenario**: Graceful handling of tool failures and LLM errors.
```java
import dev.langchain4j.agent.tool.Tool;
import dev.langchain4j.agent.tool.ToolExecutionRequest;
import dev.langchain4j.service.AiServices;
import dev.langchain4j.model.openai.OpenAiChatModel;
class DataAccessService {
@Tool("Query database for user")
String queryUser(String userId) {
// Simulate potential error
if (!userId.matches("\\d+")) {
throw new IllegalArgumentException("Invalid user ID format");
}
return "User " + userId + ": John Doe";
}
@Tool("Update user email")
String updateEmail(String userId, String email) {
if (!email.contains("@")) {
throw new IllegalArgumentException("Invalid email format");
}
return "Updated email for user " + userId;
}
}
interface ResilientAssistant {
String execute(String command);
}
public class ErrorHandlingExample {
public static void main(String[] args) {
var chatModel = OpenAiChatModel.builder()
.apiKey(System.getenv("OPENAI_API_KEY"))
.modelName("gpt-4o-mini")
.build();
var assistant = AiServices.builder(ResilientAssistant.class)
.chatModel(chatModel)
.tools(new DataAccessService())
.toolExecutionErrorHandler((request, exception) -> {
System.err.println("Tool error: " + exception.getMessage());
return "Error: " + exception.getMessage();
})
.build();
// Will handle tool errors gracefully
System.out.println(assistant.execute("Get details for user abc"));
System.out.println(assistant.execute("Update user 123 with invalid-email"));
}
}
```
## 10. RAG Integration with AI Services
**Scenario**: AI Service with content retrieval for knowledge-based Q&A.
```java
import dev.langchain4j.service.AiServices;
import dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever;
import dev.langchain4j.store.embedding.inmemory.InMemoryEmbeddingStore;
import dev.langchain4j.data.segment.TextSegment;
import dev.langchain4j.data.document.Document;
import dev.langchain4j.store.embedding.EmbeddingStoreIngestor;
import dev.langchain4j.model.openai.OpenAiEmbeddingModel;
import dev.langchain4j.model.openai.OpenAiChatModel;
interface KnowledgeBaseAssistant {
String askAbout(String question);
}
public class RAGIntegrationExample {
public static void main(String[] args) {
// Setup embedding store
var embeddingStore = new InMemoryEmbeddingStore<TextSegment>();
// Setup models
var embeddingModel = OpenAiEmbeddingModel.builder()
.apiKey(System.getenv("OPENAI_API_KEY"))
.modelName("text-embedding-3-small")
.build();
var chatModel = OpenAiChatModel.builder()
.apiKey(System.getenv("OPENAI_API_KEY"))
.modelName("gpt-4o-mini")
.build();
// Ingest documents
var ingestor = EmbeddingStoreIngestor.builder()
.embeddingModel(embeddingModel)
.embeddingStore(embeddingStore)
.build();
ingestor.ingest(Document.from("Spring Boot is a framework for building Java applications."));
ingestor.ingest(Document.from("Spring Data JPA simplifies database access."));
// Create retriever
var contentRetriever = EmbeddingStoreContentRetriever.builder()
.embeddingStore(embeddingStore)
.embeddingModel(embeddingModel)
.maxResults(3)
.minScore(0.7)
.build();
// Create AI Service with RAG
var assistant = AiServices.builder(KnowledgeBaseAssistant.class)
.chatModel(chatModel)
.contentRetriever(contentRetriever)
.build();
String answer = assistant.askAbout("What is Spring Boot?");
System.out.println(answer);
}
}
```
## Best Practices Summary
1. **Always use @SystemMessage** to keep the assistant's behavior consistent across all interactions
2. **Set temperature=0** for deterministic tasks (extraction, calculations)
3. **Use MessageWindowChatMemory** for conversation history management
4. **Implement error handling** for tool failures
5. **Use structured output** when you need typed responses
6. **Stream long responses** for better UX
7. **Use @MemoryId** for multi-user scenarios
8. **Template variables** for dynamic system prompts
9. **Tool descriptions** should be clear and actionable
10. **Always validate** tool parameters before execution

View File

@@ -0,0 +1,433 @@
# LangChain4j AI Services - API References
Complete API reference for LangChain4j AI Services patterns.
## Core Interfaces and Classes
### AiServices Builder
**Purpose**: Creates implementations of custom Java interfaces backed by LLM capabilities.
```java
public class AiServices {
static <T> AiServicesBuilder<T> builder(Class<T> aiService)
// Create builder for an AI service interface
static <T> T create(Class<T> aiService, ChatModel chatModel)
// Quick creation with just chat model
// Builder methods (chainable on the builder returned by builder()):
.chatModel(ChatModel chatModel) // Required for sync
.streamingChatModel(StreamingChatModel) // Required for streaming
.chatMemory(ChatMemory) // Single shared memory
.chatMemoryProvider(ChatMemoryProvider) // Per-user memory
.tools(Object... tools) // Register tool objects
.toolProvider(ToolProvider) // Dynamic tool selection
.contentRetriever(ContentRetriever) // For RAG
.retrievalAugmentor(RetrievalAugmentor) // Advanced RAG
.moderationModel(ModerationModel) // Content moderation
.build() // Build the implementation
}
```
### Core Annotations
**@SystemMessage**: Define system prompt for the AI service.
```java
@SystemMessage("You are a helpful Java developer")
String chat(String userMessage);
// Template variables
@SystemMessage("You are a {{expertise}} expert")
String explain(@V("expertise") String domain, @UserMessage String question);
```
**@UserMessage**: Define user message template.
```java
@UserMessage("Translate to {{language}}: {{text}}")
String translate(@V("language") String lang, @V("text") String text);
// With method parameters matching template
@UserMessage("Summarize: {{it}}")
String summarize(String text); // {{it}} refers to parameter
```
**@MemoryId**: Create separate memory context per identifier.
```java
interface MultiUserChat {
    // Multi-parameter methods require an annotation on every parameter:
    String chat(@MemoryId String userId, @UserMessage String message);
    String chat(@MemoryId int sessionId, @UserMessage String message);
}
```
**@V**: Map method parameter to template variable.
```java
@UserMessage("Write {{type}} code for {{language}}")
String writeCode(@V("type") String codeType, @V("language") String lang);
```
### ChatMemory Implementations
**MessageWindowChatMemory**: Keeps last N messages.
```java
ChatMemory memory = MessageWindowChatMemory.withMaxMessages(10);
// Or with explicit builder
ChatMemory memory = MessageWindowChatMemory.builder()
.maxMessages(10)
.build();
```
**ChatMemoryProvider**: Factory for creating per-user memory.
```java
ChatMemoryProvider provider = memoryId ->
MessageWindowChatMemory.withMaxMessages(20);
```
### Tool Integration
**@Tool**: Mark methods that LLM can call.
```java
@Tool("Calculate sum of two numbers")
int add(@P("first number") int a, @P("second number") int b) {
return a + b;
}
```
**@P**: Parameter description for LLM.
```java
@Tool("Search documents")
List<Document> search(
@P("search query") String query,
@P("max results") int limit
) { ... }
```
**ToolProvider**: Dynamic tool selection based on context.
```java
interface DynamicToolAssistant {
String execute(String command);
}
// A ToolProvider receives a ToolProviderRequest (with the user message) and
// returns a ToolProviderResult mapping ToolSpecifications to ToolExecutors:
ToolProvider provider = request -> {
    boolean needsMath = request.userMessage().singleText().contains("calculate");
    return ToolProviderResult.builder()
        .add(needsMath ? calculatorSpec : dataServiceSpec,
             needsMath ? calculatorExecutor : dataServiceExecutor)
        .build();
};
```
### Structured Output
**@Description**: Annotate output fields for extraction.
```java
class Person {
@Description("Person's full name")
String name;
@Description("Age in years")
int age;
}
interface Extractor {
@UserMessage("Extract person from: {{it}}")
Person extract(String text);
}
```
### Error Handling
**ToolExecutionErrorHandler**: Handle tool execution failures.
```java
.toolExecutionErrorHandler((request, exception) -> {
logger.error("Tool failed: " + request.name(), exception);
return "Tool execution failed: " + exception.getMessage();
})
```
**ToolArgumentsErrorHandler**: Handle malformed tool arguments.
```java
.toolArgumentsErrorHandler((request, exception) -> {
logger.warn("Invalid arguments for " + request.name());
return "Please provide valid arguments";
})
```
## Streaming APIs
### TokenStream
**Purpose**: Handle streaming LLM responses token-by-token.
```java
interface StreamingAssistant {
TokenStream streamChat(String message);
}
TokenStream stream = assistant.streamChat("Tell me a story");
stream
.onNext(token -> {
// Process each token
System.out.print(token);
})
.onCompleteResponse(response -> {
// Full response available
System.out.println("\nTokens used: " + response.tokenUsage());
})
.onError(error -> {
System.err.println("Error: " + error);
})
.onToolExecuted(toolExecution -> {
System.out.println("Tool: " + toolExecution.request().name());
})
.onRetrieved(contents -> {
// RAG content retrieved
contents.forEach(c -> System.out.println(c.textSegment()));
})
.start();
```
### StreamingChatResponseHandler
**Purpose**: Callback-based streaming without TokenStream.
```java
streamingModel.chat(request, new StreamingChatResponseHandler() {
@Override
public void onPartialResponse(String partialResponse) {
System.out.print(partialResponse);
}
@Override
public void onCompleteResponse(ChatResponse response) {
System.out.println("\nComplete!");
}
@Override
public void onError(Throwable error) {
error.printStackTrace();
}
});
```
## Content Retrieval
### ContentRetriever Interface
**Purpose**: Fetch relevant content for RAG.
```java
interface ContentRetriever {
    List<Content> retrieve(Query query);
}
```
### EmbeddingStoreContentRetriever
```java
ContentRetriever retriever = EmbeddingStoreContentRetriever.builder()
.embeddingStore(embeddingStore)
.embeddingModel(embeddingModel)
.maxResults(5) // Default max results
.minScore(0.7) // Similarity threshold
.dynamicMaxResults(query -> 10) // Query-dependent
.dynamicMinScore(query -> 0.8) // Query-dependent
.filter(new IsEqualTo("userId", "123")) // Metadata filter
.dynamicFilter(query -> {...}) // Dynamic filter
.build();
```
### RetrievalAugmentor
**Purpose**: Advanced RAG pipeline with query transformation and re-ranking.
```java
RetrievalAugmentor augmentor = DefaultRetrievalAugmentor.builder()
.queryTransformer(new CompressingQueryTransformer(chatModel))
.contentRetriever(contentRetriever)
.contentAggregator(ReRankingContentAggregator.builder()
.scoringModel(scoringModel)
.minScore(0.8)
.build())
.build();
// Use with AI Service
var assistant = AiServices.builder(Assistant.class)
.chatModel(chatModel)
.retrievalAugmentor(augmentor)
.build();
```
## Request/Response Models
### ChatRequest
**Purpose**: Build complex chat requests with multiple messages.
```java
ChatRequest request = ChatRequest.builder()
.messages(
SystemMessage.from("You are helpful"),
UserMessage.from("What is AI?"),
AiMessage.from("AI is...")
)
.temperature(0.7)
.maxTokens(500)
.topP(0.95)
.build();
ChatResponse response = chatModel.chat(request);
```
### ChatResponse
**Purpose**: Access chat model responses and metadata.
```java
String content = response.aiMessage().text();
TokenUsage usage = response.tokenUsage();
System.out.println("Tokens: " + usage.totalTokenCount());
System.out.println("Prompt tokens: " + usage.inputTokenCount());
System.out.println("Completion tokens: " + usage.outputTokenCount());
System.out.println("Finish reason: " + response.finishReason());
```
## Query and Content
### Query
**Purpose**: Represent a user query in retrieval context.
```java
// Query object contains:
String text()        // The query text
Metadata metadata()  // Query metadata (e.g., chat memory ID, originating user message)
```
### Content
**Purpose**: Retrieved content with metadata.
```java
TextSegment textSegment()               // Retrieved text segment
Map<ContentMetadata, Object> metadata() // Content metadata (e.g., relevance SCORE, EMBEDDING_ID)
```
## Message Types
### SystemMessage
```java
SystemMessage message = SystemMessage.from("You are a code reviewer");
```
### UserMessage
```java
UserMessage message = UserMessage.from("Review this code");
// With images
UserMessage message = UserMessage.from(
TextContent.from("Analyze this"),
ImageContent.from("http://...", "image/png")
);
```
### AiMessage
```java
AiMessage message = AiMessage.from("Here's my analysis");
// With tool execution requests (the LLM asking to call a tool)
AiMessage message = AiMessage.from(
    ToolExecutionRequest.builder()
        .name("add")
        .arguments("{\"a\": 2, \"b\": 3}")
        .build()
);
```
## Configuration Patterns
### Chat Model Configuration
```java
ChatModel model = OpenAiChatModel.builder()
.apiKey(System.getenv("OPENAI_API_KEY"))
.modelName("gpt-4o-mini") // Model selection
.temperature(0.7) // Creativity (0-2)
.topP(0.95) // Diversity (0-1)
.topK(40) // Top K tokens
.maxTokens(2000) // Max generation
.frequencyPenalty(0.0) // Reduce repetition
.presencePenalty(0.0) // Reduce topic switching
.seed(42) // Reproducibility
.logRequests(true) // Debug logging
.logResponses(true) // Debug logging
.build();
```
### Embedding Model Configuration
```java
EmbeddingModel embedder = OpenAiEmbeddingModel.builder()
.apiKey(System.getenv("OPENAI_API_KEY"))
.modelName("text-embedding-3-small")
.dimensions(512) // Custom dimensions
.build();
```
## Best Practices for API Usage
1. **Type Safety**: Always define typed interfaces for type safety at compile time
2. **Separation of Concerns**: Use different interfaces for different domains
3. **Error Handling**: Always implement error handlers for tools
4. **Memory Management**: Choose appropriate memory implementation for use case
5. **Token Optimization**: Use temperature=0 for deterministic tasks
6. **Testing**: Mock ChatModel for unit tests
7. **Logging**: Enable request/response logging in development
8. **Rate Limiting**: Implement backoff strategies for API calls
9. **Caching**: Cache responses for frequently asked questions
10. **Monitoring**: Track token usage for cost management
## Common Patterns
### Factory Pattern for Multiple Assistants
```java
public class AssistantFactory {
static JavaExpert createJavaExpert() {
return AiServices.create(JavaExpert.class, chatModel);
}
static PythonExpert createPythonExpert() {
return AiServices.create(PythonExpert.class, chatModel);
}
}
```
### Decorator Pattern for Enhanced Functionality
```java
public class LoggingAssistant implements Assistant {
private final Assistant delegate;
public String chat(String message) {
logger.info("User: " + message);
String response = delegate.chat(message);
logger.info("Assistant: " + response);
return response;
}
}
```
### Builder Pattern for Complex Configurations
```java
var assistant = AiServices.builder(ComplexAssistant.class)
.chatModel(getChatModel())
.chatMemory(getMemory())
.tools(getTool1(), getTool2())
.contentRetriever(getRetriever())
.build();
```
## Resources
- [LangChain4j Documentation](https://docs.langchain4j.dev)
- [OpenAI API Reference](https://platform.openai.com/docs)
- [LangChain4j GitHub](https://github.com/langchain4j/langchain4j)
- [LangChain4j Examples](https://github.com/langchain4j/langchain4j-examples)