tree: 9a0176633d7bd7a5f432bcae8b91f961ec306be5 [path history] [tgz]
  1. factory.go
  2. llm.go
  3. README.md
  4. utils.go
server/llm/README.md

LLM Interface Package

This package provides a generic interface for different Large Language Model (LLM) providers, with OpenAI's API structure as the primary reference. It supports multiple providers including OpenAI, xAI, Claude, Gemini, and local models.

Features

  • Unified Interface: Single interface for all LLM providers
  • Multiple Providers: Support for OpenAI, xAI, Claude, Gemini, and local models
  • Tool/Function Calling: Support for function calling and tool usage
  • Embeddings: Generate embeddings for text
  • Configurable: Flexible configuration options for each provider
  • Thread-Safe: Thread-safe factory and registry implementations

Quick Start

package main

import (
    "context"
    "fmt"
    "log"
    "time"

    "your-project/server/llm"
)

func main() {
    // Create a configuration for OpenAI
    config := llm.Config{
        Provider: llm.ProviderOpenAI,
        APIKey:   "your-openai-api-key",
        BaseURL:  "https://api.openai.com/v1",
        Timeout:  30 * time.Second,
    }
    
    // Create a provider (you'll need to register the implementation first)
    provider, err := llm.CreateProvider(config)
    if err != nil {
        log.Fatal(err)
    }
    defer provider.Close()
    
    // Create a chat completion request
    req := llm.ChatCompletionRequest{
        Model: "gpt-3.5-turbo",
        Messages: []llm.Message{
            {Role: llm.RoleUser, Content: "Hello, how are you?"},
        },
        MaxTokens:   &[]int{100}[0],
        Temperature: &[]float64{0.7}[0],
    }
    
    // Get the response
    resp, err := provider.ChatCompletion(context.Background(), req)
    if err != nil {
        log.Fatal(err)
    }
    
    fmt.Println("Response:", resp.Choices[0].Message.Content)
}

Core Types

Provider

Represents different LLM service providers:

const (
    ProviderOpenAI Provider = "openai"
    ProviderXAI    Provider = "xai"
    ProviderClaude Provider = "claude"
    ProviderGemini Provider = "gemini"
    ProviderLocal  Provider = "local"
)

Message

Represents a single message in a conversation:

type Message struct {
    Role       Role       `json:"role"`
    Content    string     `json:"content"`
    ToolCalls  []ToolCall `json:"tool_calls,omitempty"`
    ToolCallID string     `json:"tool_call_id,omitempty"`
    Name       string     `json:"name,omitempty"`
}

ChatCompletionRequest

Represents a request to complete a chat conversation:

type ChatCompletionRequest struct {
    Model            string                 `json:"model"`
    Messages         []Message              `json:"messages"`
    MaxTokens        *int                   `json:"max_tokens,omitempty"`
    Temperature      *float64               `json:"temperature,omitempty"`
    TopP             *float64               `json:"top_p,omitempty"`
    Stream           *bool                  `json:"stream,omitempty"`
    Tools            []Tool                 `json:"tools,omitempty"`
    ToolChoice       interface{}            `json:"tool_choice,omitempty"`
    ResponseFormat   *ResponseFormat        `json:"response_format,omitempty"`
    ExtraParams      map[string]interface{} `json:"-"` // Provider-specific parameters
}

Main Interface

LLMProvider

The main interface that all LLM providers must implement:

type LLMProvider interface {
    // ChatCompletion creates a chat completion
    ChatCompletion(ctx context.Context, req ChatCompletionRequest) (*ChatCompletionResponse, error)
    
    // CreateEmbeddings generates embeddings for the given input
    CreateEmbeddings(ctx context.Context, req EmbeddingRequest) (*EmbeddingResponse, error)
    
    // Close performs any necessary cleanup
    Close() error
}

Provider Factory

The package includes a factory system for creating and managing LLM providers:

// Register a provider factory
err := llm.RegisterProvider(llm.ProviderOpenAI, openaiFactory)

// Create a provider
provider, err := llm.CreateProvider(config)

// Check if a provider is supported
if llm.SupportsProvider(llm.ProviderOpenAI) {
    // Provider is available
}

// List all registered providers
providers := llm.ListProviders()

Configuration

Each provider can be configured with specific settings:

config := llm.Config{
    Provider:    llm.ProviderOpenAI,
    APIKey:      "your-api-key",
    BaseURL:     "https://api.openai.com/v1",
    Timeout:     30 * time.Second,
    MaxRetries:  3,
    ExtraConfig: map[string]interface{}{
        "organization": "your-org-id",
    },
}

Tool/Function Calling

Support for function calling and tool usage:

tools := []llm.Tool{
    {
        Type: "function",
        Function: llm.Function{
            Name:        "get_weather",
            Description: "Get the current weather for a location",
            Parameters: map[string]interface{}{
                "type": "object",
                "properties": map[string]interface{}{
                    "location": map[string]interface{}{
                        "type":        "string",
                        "description": "The city and state, e.g. San Francisco, CA",
                    },
                },
                "required": []string{"location"},
            },
        },
    },
}

req := llm.ChatCompletionRequest{
    Model:    "gpt-3.5-turbo",
    Messages: messages,
    Tools:    tools,
}

Embeddings

Generate embeddings for text:

req := llm.EmbeddingRequest{
    Input: "Hello, world!",
    Model: "text-embedding-ada-002",
}

resp, err := provider.CreateEmbeddings(context.Background(), req)
if err != nil {
    log.Fatal(err)
}

fmt.Printf("Embedding dimensions: %d\n", len(resp.Data[0].Embedding))

Implementing a New Provider

To implement a new LLM provider:

  1. Implement the LLMProvider interface:
type MyProvider struct {
    config llm.Config
    client *http.Client
}

func (p *MyProvider) ChatCompletion(ctx context.Context, req llm.ChatCompletionRequest) (*llm.ChatCompletionResponse, error) {
    // Implementation here
}

func (p *MyProvider) CreateEmbeddings(ctx context.Context, req llm.EmbeddingRequest) (*llm.EmbeddingResponse, error) {
    // Implementation here
}

func (p *MyProvider) Close() error {
    // Cleanup implementation
    return nil
}
  2. Create a factory:
type MyProviderFactory struct{}

func (f *MyProviderFactory) CreateProvider(config llm.Config) (llm.LLMProvider, error) {
    return &MyProvider{config: config}, nil
}

func (f *MyProviderFactory) SupportsProvider(provider llm.Provider) bool {
    return provider == llm.ProviderMyProvider
}
  3. Register the provider:
func init() {
    llm.RegisterProvider(llm.ProviderMyProvider, &MyProviderFactory{})
}

Error Handling

The package defines common error types:

var (
    ErrInvalidConfig       = fmt.Errorf("invalid configuration")
    ErrUnsupportedProvider = fmt.Errorf("unsupported provider")
    ErrAPIKeyRequired      = fmt.Errorf("API key is required")
    ErrModelNotFound       = fmt.Errorf("model not found")
    ErrRateLimitExceeded   = fmt.Errorf("rate limit exceeded")
    ErrContextCancelled    = fmt.Errorf("context cancelled")
    ErrTimeout             = fmt.Errorf("request timeout")
)

Utilities

The package includes utility functions:

// Validate configuration
err := llm.ValidateConfig(config)

// Check if provider is valid
if llm.IsValidProvider(llm.ProviderOpenAI) {
    // Provider is valid
}

// Get default configuration
config, err := llm.GetDefaultConfig(llm.ProviderOpenAI)

// Merge custom config with defaults
config = llm.MergeConfig(customConfig)

// Validate requests
err := llm.ValidateChatCompletionRequest(req)
err := llm.ValidateEmbeddingRequest(req)

// Estimate tokens
tokens := llm.EstimateTokens(req)

Thread Safety

The factory and registry implementations are thread-safe and can be used concurrently from multiple goroutines.

Default Configurations

The package provides default configurations for each provider:

var DefaultConfigs = map[llm.Provider]llm.Config{
    llm.ProviderOpenAI: {
        Provider:   llm.ProviderOpenAI,
        BaseURL:    "https://api.openai.com/v1",
        Timeout:    30 * time.Second,
        MaxRetries: 3,
    },
    // ... other providers
}

Next Steps

  1. Implement the actual provider implementations (OpenAI, xAI, Claude, etc.)
  2. Add tests for the interface and implementations
  3. Add more utility functions as needed
  4. Consider adding caching and retry mechanisms
  5. Add support for more provider-specific features