Add fake LLM provider
Change-Id: I7983796013f27e146506a42c8238b69a1838f1d9
diff --git a/server/llm/fake/factory.go b/server/llm/fake/factory.go
new file mode 100644
index 0000000..8031e62
--- /dev/null
+++ b/server/llm/fake/factory.go
@@ -0,0 +1,28 @@
+package fake
+
+import (
+ "github.com/iomodo/staff/llm"
+)
+
// FakeFactory builds fake LLM providers so tests can run without a real
// backend or API credentials.
type FakeFactory struct{}

// NewFakeFactory returns a ready-to-use fake factory.
func NewFakeFactory() *FakeFactory {
	return new(FakeFactory)
}
+
+// CreateProvider creates a new fake provider
+func (f *FakeFactory) CreateProvider(config llm.Config) (llm.LLMProvider, error) {
+ return NewFakeProvider(), nil
+}
+
+// SupportsProvider returns true if this factory supports the given provider type
+func (f *FakeFactory) SupportsProvider(provider llm.Provider) bool {
+ return provider == llm.ProviderFake
+}
+
// init registers the fake factory with the llm provider registry at import
// time, so a blank import of this package is enough to make
// llm.ProviderFake resolvable through the normal factory lookup.
func init() {
	llm.RegisterProvider(llm.ProviderFake, NewFakeFactory())
}
\ No newline at end of file
diff --git a/server/llm/fake/fake.go b/server/llm/fake/fake.go
new file mode 100644
index 0000000..3739c27
--- /dev/null
+++ b/server/llm/fake/fake.go
@@ -0,0 +1,194 @@
+package fake
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "github.com/iomodo/staff/llm"
+)
+
// FakeProvider implements a fake LLM provider for testing. It serves a
// fixed list of canned responses in round-robin order.
//
// NOTE(review): index is mutated without synchronization in
// ChatCompletion, so a single FakeProvider is not safe for concurrent
// use — confirm callers are sequential or add a mutex.
type FakeProvider struct {
	responses []string // canned completions, served round-robin
	index     int      // position of the next response; increments on every call
}
+
// NewFakeProvider creates a new fake provider preloaded with three canned
// markdown responses. ChatCompletion cycles through them, so repeated
// calls yield varied but fully deterministic output for tests.
func NewFakeProvider() *FakeProvider {
	responses := []string{
		// Response 1: a generic task-solution write-up.
		`## Task Solution

I've analyzed the task requirements and here's my proposed solution:

### Implementation Plan
1. Create the necessary data structures
2. Implement the core business logic
3. Add proper error handling
4. Write comprehensive tests
5. Update documentation

### Code Changes
- Add new functions to handle the requirements
- Update existing modules for compatibility
- Implement proper validation
- Add logging for debugging

### Testing Strategy
- Unit tests for all new functions
- Integration tests for the workflow
- Performance tests for scalability
- Edge case validation

### Files to Create/Modify
- src/main.go - Core implementation
- src/handlers.go - Request handlers
- src/models.go - Data models
- tests/ - Test files
- docs/ - Documentation updates

### Dependencies
No new external dependencies required.

### Deployment Notes
- Backward compatible changes
- No database migrations needed
- Can be deployed incrementally

This solution addresses all the requirements while maintaining code quality and system stability.`,

		// Response 2: an analysis-style answer.
		`## Comprehensive Task Analysis

After careful consideration, I recommend the following approach:

### Technical Architecture
- **Backend**: Implement using existing Go patterns
- **Database**: Utilize current PostgreSQL setup
- **API**: RESTful endpoints with proper versioning
- **Security**: OAuth2 authentication with JWT tokens

### Development Steps
1. **Phase 1**: Core functionality implementation
2. **Phase 2**: User interface development
3. **Phase 3**: Testing and optimization
4. **Phase 4**: Documentation and deployment

### Risk Assessment
- **Low Risk**: Well-defined requirements
- **Medium Risk**: Timeline constraints
- **Mitigation**: Incremental development approach

### Resource Requirements
- Development time: 2-3 weeks
- Testing phase: 1 week
- Documentation: 2-3 days

### Success Metrics
- Performance benchmarks met
- All test cases passing
- User acceptance criteria satisfied
- Code coverage > 90%

This solution provides a robust foundation for future enhancements while meeting immediate business needs.`,

		// Response 3: a proposal-style answer.
		`## Strategic Implementation Proposal

### Executive Summary
This task requires a comprehensive solution that balances technical excellence with business objectives.

### Solution Overview
- **Approach**: Agile development methodology
- **Technology Stack**: Current tech stack enhancement
- **Timeline**: 3-4 week delivery cycle
- **Team**: Cross-functional collaboration

### Technical Specifications
- Clean architecture principles
- Microservices design patterns
- Event-driven communication
- Comprehensive monitoring and logging

### Implementation Details
1. **Requirements Analysis**: Complete stakeholder alignment
2. **System Design**: Scalable and maintainable architecture
3. **Development**: Test-driven development approach
4. **Quality Assurance**: Automated testing pipeline
5. **Deployment**: Blue-green deployment strategy

### Business Impact
- Improved user experience
- Enhanced system reliability
- Reduced operational overhead
- Increased development velocity

### Next Steps
1. Stakeholder review and approval
2. Resource allocation confirmation
3. Development sprint planning
4. Implementation kickoff

This solution ensures long-term success while delivering immediate value to the organization.`,
	}

	return &FakeProvider{
		responses: responses,
		index:     0, // start at the first canned response
	}
}
+
+// ChatCompletion implements the LLM interface
+func (f *FakeProvider) ChatCompletion(ctx context.Context, req llm.ChatCompletionRequest) (*llm.ChatCompletionResponse, error) {
+ // Simulate API delay
+ time.Sleep(500 * time.Millisecond)
+
+ // Get the next response (cycle through responses)
+ response := f.responses[f.index%len(f.responses)]
+ f.index++
+
+ return &llm.ChatCompletionResponse{
+ ID: fmt.Sprintf("fake-response-%d", f.index),
+ Object: "chat.completion",
+ Created: time.Now().Unix(),
+ Model: req.Model,
+ Choices: []llm.ChatCompletionChoice{
+ {
+ Index: 0,
+ Message: llm.Message{
+ Role: llm.RoleAssistant,
+ Content: response,
+ },
+ FinishReason: "stop",
+ },
+ },
+ Usage: llm.Usage{
+ PromptTokens: 100,
+ CompletionTokens: 300,
+ TotalTokens: 400,
+ },
+ }, nil
+}
+
+// CreateEmbeddings implements the LLM interface (not used in current implementation)
+func (f *FakeProvider) CreateEmbeddings(ctx context.Context, req llm.EmbeddingRequest) (*llm.EmbeddingResponse, error) {
+ return &llm.EmbeddingResponse{
+ Object: "list",
+ Data: []llm.Embedding{
+ {
+ Object: "embedding",
+ Index: 0,
+ Embedding: make([]float64, 1536), // OpenAI embedding size
+ },
+ },
+ Model: req.Model,
+ Usage: llm.Usage{
+ PromptTokens: 50,
+ TotalTokens: 50,
+ },
+ }, nil
+}
+
+// Close implements the LLM interface
+func (f *FakeProvider) Close() error {
+ // Nothing to close for fake provider
+ return nil
+}
\ No newline at end of file
diff --git a/server/llm/llm.go b/server/llm/llm.go
index 2fd3bf3..aa63c3e 100644
--- a/server/llm/llm.go
+++ b/server/llm/llm.go
@@ -35,6 +35,7 @@
ProviderClaude Provider = "claude"
ProviderGemini Provider = "gemini"
ProviderLocal Provider = "local"
+ ProviderFake Provider = "fake"
)
// Role represents the role of a message participant
@@ -270,4 +271,10 @@
Timeout: 60 * time.Second,
MaxRetries: 1,
},
+ ProviderFake: {
+ Provider: ProviderFake,
+ BaseURL: "fake://test",
+ Timeout: 1 * time.Second,
+ MaxRetries: 0,
+ },
}
diff --git a/server/llm/providers/providers.go b/server/llm/providers/providers.go
index 87015aa..98d01a8 100644
--- a/server/llm/providers/providers.go
+++ b/server/llm/providers/providers.go
@@ -1,6 +1,7 @@
package providers
import (
+ _ "github.com/iomodo/staff/llm/fake" // Register Fake provider for testing
_ "github.com/iomodo/staff/llm/openai" // Register OpenAI provider
)
diff --git a/server/llm/utils.go b/server/llm/utils.go
index 59064b3..7381364 100644
--- a/server/llm/utils.go
+++ b/server/llm/utils.go
@@ -19,7 +19,8 @@
// ValidateConfig validates a configuration for an LLM provider
func ValidateConfig(config Config) error {
- if config.APIKey == "" {
+ // Fake provider doesn't need API key
+ if config.Provider != ProviderFake && config.APIKey == "" {
return ErrAPIKeyRequired
}
@@ -41,7 +42,7 @@
// IsValidProvider checks if a provider is supported
func IsValidProvider(provider Provider) bool {
switch provider {
- case ProviderOpenAI, ProviderXAI, ProviderClaude, ProviderGemini, ProviderLocal:
+ case ProviderOpenAI, ProviderXAI, ProviderClaude, ProviderGemini, ProviderLocal, ProviderFake:
return true
default:
return false
@@ -197,6 +198,8 @@
return "Gemini (Google)"
case ProviderLocal:
return "Local"
+ case ProviderFake:
+ return "Fake (Testing)"
default:
return string(provider)
}