blob: 3739c27cec070d4c4660b44239c715584399c9c7 [file] [log] [blame]
package fake
import (
"context"
"fmt"
"time"
"github.com/iomodo/staff/llm"
)
// FakeProvider implements a fake LLM provider for testing.
// It serves a fixed rotation of canned chat-completion responses.
type FakeProvider struct {
	responses []string // canned replies, served in order by ChatCompletion
	index     int      // position of the next reply; incremented on every call (no mutex — NOTE(review): not safe for concurrent callers, confirm single-goroutine use)
}
// NewFakeProvider constructs a FakeProvider seeded with three canned,
// markdown-formatted "task solution" responses. Replies are served in
// declaration order and wrap around once exhausted; index starts at its
// zero value, so the first call returns the first response.
func NewFakeProvider() *FakeProvider {
	return &FakeProvider{
		responses: []string{
			`## Task Solution
I've analyzed the task requirements and here's my proposed solution:
### Implementation Plan
1. Create the necessary data structures
2. Implement the core business logic
3. Add proper error handling
4. Write comprehensive tests
5. Update documentation
### Code Changes
- Add new functions to handle the requirements
- Update existing modules for compatibility
- Implement proper validation
- Add logging for debugging
### Testing Strategy
- Unit tests for all new functions
- Integration tests for the workflow
- Performance tests for scalability
- Edge case validation
### Files to Create/Modify
- src/main.go - Core implementation
- src/handlers.go - Request handlers
- src/models.go - Data models
- tests/ - Test files
- docs/ - Documentation updates
### Dependencies
No new external dependencies required.
### Deployment Notes
- Backward compatible changes
- No database migrations needed
- Can be deployed incrementally
This solution addresses all the requirements while maintaining code quality and system stability.`,
			`## Comprehensive Task Analysis
After careful consideration, I recommend the following approach:
### Technical Architecture
- **Backend**: Implement using existing Go patterns
- **Database**: Utilize current PostgreSQL setup
- **API**: RESTful endpoints with proper versioning
- **Security**: OAuth2 authentication with JWT tokens
### Development Steps
1. **Phase 1**: Core functionality implementation
2. **Phase 2**: User interface development
3. **Phase 3**: Testing and optimization
4. **Phase 4**: Documentation and deployment
### Risk Assessment
- **Low Risk**: Well-defined requirements
- **Medium Risk**: Timeline constraints
- **Mitigation**: Incremental development approach
### Resource Requirements
- Development time: 2-3 weeks
- Testing phase: 1 week
- Documentation: 2-3 days
### Success Metrics
- Performance benchmarks met
- All test cases passing
- User acceptance criteria satisfied
- Code coverage > 90%
This solution provides a robust foundation for future enhancements while meeting immediate business needs.`,
			`## Strategic Implementation Proposal
### Executive Summary
This task requires a comprehensive solution that balances technical excellence with business objectives.
### Solution Overview
- **Approach**: Agile development methodology
- **Technology Stack**: Current tech stack enhancement
- **Timeline**: 3-4 week delivery cycle
- **Team**: Cross-functional collaboration
### Technical Specifications
- Clean architecture principles
- Microservices design patterns
- Event-driven communication
- Comprehensive monitoring and logging
### Implementation Details
1. **Requirements Analysis**: Complete stakeholder alignment
2. **System Design**: Scalable and maintainable architecture
3. **Development**: Test-driven development approach
4. **Quality Assurance**: Automated testing pipeline
5. **Deployment**: Blue-green deployment strategy
### Business Impact
- Improved user experience
- Enhanced system reliability
- Reduced operational overhead
- Increased development velocity
### Next Steps
1. Stakeholder review and approval
2. Resource allocation confirmation
3. Development sprint planning
4. Implementation kickoff
This solution ensures long-term success while delivering immediate value to the organization.`,
		},
	}
}
// ChatCompletion implements the LLM interface. It simulates API latency,
// then returns the next canned response from the provider's rotation,
// cycling back to the start once all responses have been served.
//
// Unlike a plain time.Sleep, the delay honors context cancellation: if ctx
// is cancelled or its deadline expires during the simulated latency, the
// call returns ctx.Err() immediately instead of blocking and then
// reporting success.
//
// NOTE(review): f.index is read and written without synchronization, so
// concurrent calls race — confirm the fake is only driven from a single
// goroutine, or add a mutex to FakeProvider.
func (f *FakeProvider) ChatCompletion(ctx context.Context, req llm.ChatCompletionRequest) (*llm.ChatCompletionResponse, error) {
	// Simulate API delay while remaining responsive to cancellation.
	timer := time.NewTimer(500 * time.Millisecond)
	defer timer.Stop()
	select {
	case <-ctx.Done():
		return nil, ctx.Err()
	case <-timer.C:
	}

	// Get the next response (cycle through responses).
	response := f.responses[f.index%len(f.responses)]
	f.index++

	return &llm.ChatCompletionResponse{
		// f.index was just incremented, so IDs start at 1 and grow per call.
		ID:      fmt.Sprintf("fake-response-%d", f.index),
		Object:  "chat.completion",
		Created: time.Now().Unix(),
		Model:   req.Model,
		Choices: []llm.ChatCompletionChoice{
			{
				Index: 0,
				Message: llm.Message{
					Role:    llm.RoleAssistant,
					Content: response,
				},
				FinishReason: "stop",
			},
		},
		// Fixed token counts: good enough for tests that only need
		// plausible, non-zero usage numbers.
		Usage: llm.Usage{
			PromptTokens:     100,
			CompletionTokens: 300,
			TotalTokens:      400,
		},
	}, nil
}
// CreateEmbeddings implements the LLM interface (not used in the current
// implementation). It returns a single all-zero embedding vector — enough
// for tests that only care about response shape, not embedding values.
func (f *FakeProvider) CreateEmbeddings(ctx context.Context, req llm.EmbeddingRequest) (*llm.EmbeddingResponse, error) {
	const embeddingDim = 1536 // OpenAI embedding size

	zeroVector := llm.Embedding{
		Object:    "embedding",
		Index:     0,
		Embedding: make([]float64, embeddingDim),
	}

	resp := &llm.EmbeddingResponse{
		Object: "list",
		Data:   []llm.Embedding{zeroVector},
		Model:  req.Model,
		Usage: llm.Usage{
			PromptTokens: 50,
			TotalTokens:  50,
		},
	}
	return resp, nil
}
// Close implements the LLM interface. The fake provider holds no external
// resources (no connections, files, or goroutines), so it always succeeds.
func (f *FakeProvider) Close() error { return nil }