AI SDK
Builders
Master text, streaming, and structured generation with type-safe builders
The SDK provides three builder patterns for AI generation, each optimized for a specific use case: GenerateBuilder for plain text, GenerateObjectBuilder for typed, structured output, and StreamBuilder for real-time streaming.
GenerateBuilder
Simple text generation with full parameter control.
Basic Usage
result, err := sdk.NewGenerateBuilder(ctx, llmManager, logger, metrics).
WithPrompt("Explain quantum computing in simple terms").
WithModel("gpt-4").
WithTemperature(0.7).
WithMaxTokens(500).
Execute()
if err != nil {
return err
}
fmt.Println(result.Content)
fmt.Printf("Tokens used: %d\n", result.Usage.TotalTokens)All Options
result, err := sdk.NewGenerateBuilder(ctx, llmManager, logger, metrics).
// Core parameters
WithPrompt("Your prompt here").
WithSystemMessage("You are a helpful assistant").
WithModel("gpt-4").
// Generation parameters
WithTemperature(0.7). // Randomness (0-2)
WithMaxTokens(1000). // Max output length
WithTopP(0.9). // Nucleus sampling
WithTopK(50). // Top-K sampling
WithStop([]string{"\n\n"}). // Stop sequences
// Advanced features
WithTools(tools). // Enable function calling
WithGuardrails(guard). // Safety checks
WithCache(cache). // Response caching
WithCost(costTracker). // Cost tracking
// Callbacks
OnStart(func() {
fmt.Println("Generation started")
}).
OnComplete(func(result *sdk.Result) {
fmt.Printf("Generated %d tokens\n", result.Usage.TotalTokens)
}).
OnError(func(err error) {
fmt.Printf("Error: %v\n", err)
}).
Execute()
Conversation History
messages := []sdk.AgentMessage{
{Role: "user", Content: "What is Go?"},
{Role: "assistant", Content: "Go is a programming language..."},
{Role: "user", Content: "Show me an example"},
}
result, err := sdk.NewGenerateBuilder(ctx, llmManager, logger, metrics).
WithMessages(messages).
WithModel("gpt-4").
Execute()
Tool/Function Calling
tools := []llm.ToolDefinition{
{
Type: "function",
Function: &llm.FunctionDefinition{
Name: "get_weather",
Description: "Get weather for a location",
Parameters: map[string]interface{}{
"type": "object",
"properties": map[string]interface{}{
"location": map[string]string{
"type": "string",
"description": "City name",
},
},
"required": []string{"location"},
},
},
},
}
result, err := sdk.NewGenerateBuilder(ctx, llmManager, logger, metrics).
WithPrompt("What's the weather in San Francisco?").
WithTools(tools).
Execute()
// Handle tool calls
for _, tc := range result.ToolCalls {
fmt.Printf("Tool: %s, Args: %s\n", tc.Name, tc.Arguments)
// Execute the tool and feed result back
}
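The loop above only prints the calls. To actually complete the turn, run each tool yourself and send its output back as a follow-up message. A minimal sketch, assuming a runTool dispatcher of your own and a "tool" message role (both hypothetical; check your llm package for the exact role name it expects):
for _, tc := range result.ToolCalls {
	output := runTool(tc.Name, tc.Arguments) // hypothetical dispatcher returning a string

	followUp, err := sdk.NewGenerateBuilder(ctx, llmManager, logger, metrics).
		WithMessages([]sdk.AgentMessage{
			{Role: "user", Content: "What's the weather in San Francisco?"},
			{Role: "tool", Content: output}, // role name is an assumption
		}).
		WithTools(tools).
		Execute()
	if err != nil {
		return err
	}
	fmt.Println(followUp.Content)
}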
GenerateObjectBuilder
Extract structured data with automatic JSON schema generation and validation.
Basic Extraction
type Person struct {
Name string `json:"name" jsonschema:"required"`
Age int `json:"age" jsonschema:"minimum=0"`
Email string `json:"email" jsonschema:"format=email"`
Hobbies []string `json:"hobbies"`
}
person, err := sdk.NewGenerateObjectBuilder[Person](
ctx, llmManager, logger, metrics,
).
WithPrompt("Extract: John Doe, 30, john@example.com, loves hiking and photography").
WithModel("gpt-4").
Execute()
if err != nil {
return err
}
fmt.Printf("%+v\n", person)
// Output: &{Name:John Doe Age:30 Email:john@example.com Hobbies:[hiking photography]}Complex Structures
type Product struct {
Name string `json:"name" jsonschema:"required"`
Price float64 `json:"price" jsonschema:"minimum=0"`
Currency string `json:"currency" jsonschema:"enum=USD,EUR,GBP"`
Category string `json:"category"`
Tags []string `json:"tags"`
InStock bool `json:"in_stock"`
Variants []Variant `json:"variants"`
ReleaseDate string `json:"release_date" jsonschema:"format=date"`
}
type Variant struct {
SKU string `json:"sku"`
Color string `json:"color"`
Size string `json:"size"`
Price float64 `json:"price"`
}
product, err := sdk.NewGenerateObjectBuilder[Product](
ctx, llmManager, logger, metrics,
).
WithPrompt(`
Product: iPhone 15 Pro
Price: $999 USD
Category: Smartphone
Tags: 5G, A17 chip, Titanium
In Stock: Yes
Variants:
- SKU: IP15P-BK-256, Color: Black, Size: 256GB, Price: $999
- SKU: IP15P-WH-512, Color: White, Size: 512GB, Price: $1199
Release Date: 2023-09-22
`).
Execute()
With Validation
type validator struct{}
func (v *validator) Validate(obj interface{}) []string {
person, ok := obj.(*Person)
if !ok {
return []string{"expected *Person"}
}
var violations []string
if person.Age < 0 || person.Age > 150 {
violations = append(violations, "age must be between 0 and 150")
}
if !strings.Contains(person.Email, "@") {
violations = append(violations, "email must be valid")
}
return violations
}
person, err := sdk.NewGenerateObjectBuilder[Person](
ctx, llmManager, logger, metrics,
).
WithPrompt("Extract: Invalid Person, -5, notanemail").
WithValidator(&validator{}).
WithMaxRetries(3). // Retry on validation failure
Execute()
if err != nil {
fmt.Println("Validation failed after retries:", err)
}
Automatic Schema Generation
The builder automatically generates JSON schemas from your structs:
type Config struct {
Host string `json:"host" jsonschema:"required,format=hostname"`
Port int `json:"port" jsonschema:"minimum=1,maximum=65535"`
UseSSL bool `json:"use_ssl"`
Timeout int `json:"timeout" jsonschema:"minimum=0"`
MaxRetry int `json:"max_retry" jsonschema:"minimum=0,maximum=10"`
}
// Schema is automatically generated and sent to LLM:
// {
// "type": "object",
// "properties": {
// "host": {"type": "string", "format": "hostname"},
// "port": {"type": "integer", "minimum": 1, "maximum": 65535},
// "use_ssl": {"type": "boolean"},
// "timeout": {"type": "integer", "minimum": 0},
// "max_retry": {"type": "integer", "minimum": 0, "maximum": 10}
// },
// "required": ["host"]
// }
config, _ := sdk.NewGenerateObjectBuilder[Config](
ctx, llmManager, logger, metrics,
).
WithPrompt("Extract config from: api.example.com:8080 with SSL, 30s timeout, 3 retries").
Execute()
StreamBuilder
Real-time token-by-token streaming with reasoning and tool call tracking.
Basic Streaming
result, err := sdk.NewStreamBuilder(ctx, llmManager, logger, metrics).
WithPrompt("Write a haiku about Go programming").
OnToken(func(token string) {
fmt.Print(token) // Print as tokens arrive
}).
Stream()
if err != nil {
return err
}
fmt.Printf("\n\nTotal tokens: %d\n", result.Usage.TotalTokens)With Reasoning Steps
result, err := sdk.NewStreamBuilder(ctx, llmManager, logger, metrics).
WithPrompt("Solve: What is 15% of 80?").
WithReasoning(true).
OnReasoning(func(step string) {
fmt.Printf("[Thinking: %s]\n", step)
}).
OnToken(func(token string) {
fmt.Print(token)
}).
Stream()
// Output:
// [Thinking: First, I need to convert 15% to decimal]
// [Thinking: 15% = 0.15]
// [Thinking: Multiply 0.15 by 80]
// The answer is 12.
Complete Lifecycle
var buffer strings.Builder
result, err := sdk.NewStreamBuilder(ctx, llmManager, logger, metrics).
WithPrompt("Write a short story").
OnStart(func() {
fmt.Println("🚀 Starting generation...")
}).
OnToken(func(token string) {
buffer.WriteString(token)
fmt.Print(token)
}).
OnReasoning(func(step string) {
fmt.Printf("\n💭 [%s]\n", step)
}).
OnToolCall(func(toolCall *sdk.ToolCallResult) {
fmt.Printf("\n🔧 Tool: %s(%s)\n", toolCall.Name, toolCall.Arguments)
}).
OnComplete(func(result *sdk.Result) {
fmt.Printf("\n\n✅ Completed! Tokens: %d\n", result.Usage.TotalTokens)
}).
OnError(func(err error) {
fmt.Printf("\n❌ Error: %v\n", err)
}).
Stream()
Streaming with Tools
tools := []llm.ToolDefinition{
{
Type: "function",
Function: &llm.FunctionDefinition{
Name: "search_docs",
Description: "Search documentation",
Parameters: map[string]interface{}{
"type": "object",
"properties": map[string]interface{}{
"query": map[string]string{"type": "string"},
},
},
},
},
}
result, err := sdk.NewStreamBuilder(ctx, llmManager, logger, metrics).
WithPrompt("How do I use channels in Go?").
WithTools(tools).
OnToken(func(token string) {
fmt.Print(token)
}).
OnToolCall(func(tc *sdk.ToolCallResult) {
fmt.Printf("\n[Tool Call: %s]\n", tc.Name)
// Execute tool and continue streaming
}).
Stream()
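The OnToolCall callback fires while tokens are still streaming, so a common pattern is to collect the tool calls, run them after Stream() returns, and then issue a follow-up request that includes the tool output. A minimal sketch, again assuming a runTool dispatcher of your own and a "tool" message role (both hypothetical):
var pending []*sdk.ToolCallResult

_, err := sdk.NewStreamBuilder(ctx, llmManager, logger, metrics).
	WithPrompt("How do I use channels in Go?").
	WithTools(tools).
	OnToken(func(token string) { fmt.Print(token) }).
	OnToolCall(func(tc *sdk.ToolCallResult) {
		pending = append(pending, tc) // collect now, execute after the stream ends
	}).
	Stream()
if err != nil {
	return err
}

for _, tc := range pending {
	output := runTool(tc.Name, tc.Arguments) // hypothetical dispatcher
	followUp, err := sdk.NewGenerateBuilder(ctx, llmManager, logger, metrics).
		WithMessages([]sdk.AgentMessage{
			{Role: "user", Content: "How do I use channels in Go?"},
			{Role: "tool", Content: output}, // role name is an assumption
		}).
		Execute()
	if err != nil {
		return err
	}
	fmt.Println(followUp.Content)
}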
Performance Tips
Reuse Builders
// Create once, use many times
builder := sdk.NewGenerateBuilder(ctx, llmManager, logger, metrics).
WithModel("gpt-4").
WithTemperature(0.7)
// Use with different prompts
result1, _ := builder.WithPrompt("First question").Execute()
result2, _ := builder.WithPrompt("Second question").Execute()
Enable Caching
cache := sdk.NewSemanticCache(vectorStore, cacheStore, logger, metrics,
sdk.SemanticCacheConfig{
SimilarityThreshold: 0.95,
TTL: 1 * time.Hour,
},
)
result, _ := sdk.NewGenerateBuilder(ctx, llmManager, logger, metrics).
WithPrompt("What is Go?").
WithCache(cache). // Automatically caches responses
Execute()
Batch Requests
processor := sdk.NewBatchProcessor(llmManager, logger, metrics,
sdk.BatchConfig{
MaxBatchSize: 10,
MaxWaitTime: 100 * time.Millisecond,
WorkerCount: 5,
},
)
// Submit multiple requests - automatically batched
for _, prompt := range prompts {
processor.Submit(ctx, sdk.BatchRequest{
Prompt: prompt,
Model: "gpt-4",
})
}
Error Handling
result, err := sdk.NewGenerateBuilder(ctx, llmManager, logger, metrics).
WithPrompt("Hello").
Execute()
if err != nil {
switch {
case errors.Is(err, sdk.ErrRateLimited):
// Handle rate limit
time.Sleep(time.Second)
// Retry
case errors.Is(err, sdk.ErrInvalidRequest):
// Fix request parameters
case errors.Is(err, sdk.ErrProviderError):
// Try different provider
default:
return err
}
}
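For transient failures such as rate limits, wrapping the call in a small retry loop with exponential backoff is usually enough. A minimal sketch (the attempt count and delays are arbitrary):
var result *sdk.Result
var err error

for attempt := 0; attempt < 3; attempt++ {
	result, err = sdk.NewGenerateBuilder(ctx, llmManager, logger, metrics).
		WithPrompt("Hello").
		Execute()
	if err == nil {
		break
	}
	if !errors.Is(err, sdk.ErrRateLimited) {
		return err // only retry rate-limit errors
	}
	time.Sleep(time.Duration(1<<attempt) * time.Second) // 1s, 2s, 4s backoff
}
if err != nil {
	return fmt.Errorf("still rate limited after retries: %w", err)
}
fmt.Println(result.Content)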
Next Steps
- Multi-Modal - Process images, audio, video
- Streaming - Advanced streaming patterns
- Examples - View builder examples