blob: 92560cf48de7ebf3572bcd574afadbadf2173d17 [file] [log] [blame]
package fake
import (
"context"
"fmt"
"strings"
"time"
"github.com/iomodo/staff/llm"
)
// FakeProvider implements a fake LLM provider for testing.
type FakeProvider struct {
	// responses holds the canned reply texts. ChatCompletion selects among
	// them: indices 0-2 are prose replies cycled in order, index 3 is the
	// subtask-analysis JSON, indices 4-5 are the subtask-decision JSON
	// answers (true / false respectively).
	responses []string
	// index counts served responses; ChatCompletion uses it to cycle the
	// prose replies and to alternate the subtask-decision answers.
	index int
}
// New creates a fake provider with predefined responses: indices 0-2 hold
// prose "solution" replies, index 3 a detailed subtask-analysis JSON plan,
// and indices 4-5 the subtask-decision JSON answers (true / false).
// (The comment previously named the function "NewFakeProvider"; godoc
// comments must start with the declared name.)
func New() *FakeProvider {
	responses := []string{
		`## Task Solution
I've analyzed the task requirements and here's my proposed solution:
### Implementation Plan
1. Create the necessary data structures
2. Implement the core business logic
3. Add proper error handling
4. Write comprehensive tests
5. Update documentation
### Code Changes
- Add new functions to handle the requirements
- Update existing modules for compatibility
- Implement proper validation
- Add logging for debugging
### Testing Strategy
- Unit tests for all new functions
- Integration tests for the workflow
- Performance tests for scalability
- Edge case validation
### Files to Create/Modify
- src/main.go - Core implementation
- src/handlers.go - Request handlers
- src/models.go - Data models
- tests/ - Test files
- docs/ - Documentation updates
### Dependencies
No new external dependencies required.
### Deployment Notes
- Backward compatible changes
- No database migrations needed
- Can be deployed incrementally
This solution addresses all the requirements while maintaining code quality and system stability.`,
		`## Comprehensive Task Analysis
After careful consideration, I recommend the following approach:
### Technical Architecture
- **Backend**: Implement using existing Go patterns
- **Database**: Utilize current PostgreSQL setup
- **API**: RESTful endpoints with proper versioning
- **Security**: OAuth2 authentication with JWT tokens
### Development Steps
1. **Phase 1**: Core functionality implementation
2. **Phase 2**: User interface development
3. **Phase 3**: Testing and optimization
4. **Phase 4**: Documentation and deployment
### Risk Assessment
- **Low Risk**: Well-defined requirements
- **Medium Risk**: Timeline constraints
- **Mitigation**: Incremental development approach
### Resource Requirements
- Development time: 2-3 weeks
- Testing phase: 1 week
- Documentation: 2-3 days
### Success Metrics
- Performance benchmarks met
- All test cases passing
- User acceptance criteria satisfied
- Code coverage > 90%
This solution provides a robust foundation for future enhancements while meeting immediate business needs.`,
		`## Strategic Implementation Proposal
### Executive Summary
This task requires a comprehensive solution that balances technical excellence with business objectives.
### Solution Overview
- **Approach**: Agile development methodology
- **Technology Stack**: Current tech stack enhancement
- **Timeline**: 3-4 week delivery cycle
- **Team**: Cross-functional collaboration
### Technical Specifications
- Clean architecture principles
- Microservices design patterns
- Event-driven communication
- Comprehensive monitoring and logging
### Implementation Details
1. **Requirements Analysis**: Complete stakeholder alignment
2. **System Design**: Scalable and maintainable architecture
3. **Development**: Test-driven development approach
4. **Quality Assurance**: Automated testing pipeline
5. **Deployment**: Blue-green deployment strategy
### Business Impact
- Improved user experience
- Enhanced system reliability
- Reduced operational overhead
- Increased development velocity
### Next Steps
1. Stakeholder review and approval
2. Resource allocation confirmation
3. Development sprint planning
4. Implementation kickoff
This solution ensures long-term success while delivering immediate value to the organization.`,
		`{
"analysis_summary": "This task requires a comprehensive multi-phase approach involving frontend, backend, and infrastructure components. The complexity suggests breaking it into specialized subtasks for different team members.",
"subtasks": [
{
"title": "Design System Architecture",
"description": "Create detailed technical architecture diagrams, define API contracts, and establish database schema design",
"priority": "high",
"assigned_to": "ceo",
"estimated_hours": 12,
"dependencies": []
},
{
"title": "Backend API Development",
"description": "Implement core backend services, API endpoints, authentication middleware, and data validation layers",
"priority": "high",
"assigned_to": "ceo",
"estimated_hours": 24,
"dependencies": ["0"]
},
{
"title": "Database Setup and Migration",
"description": "Set up database infrastructure, create migration scripts, establish indexes, and implement backup procedures",
"priority": "medium",
"assigned_to": "ceo",
"estimated_hours": 8,
"dependencies": ["0"]
},
{
"title": "Frontend Interface Implementation",
"description": "Build user interface components, implement state management, integrate with backend APIs, and ensure responsive design",
"priority": "high",
"assigned_to": "ceo",
"estimated_hours": 32,
"dependencies": ["1"]
},
{
"title": "Testing and Quality Assurance",
"description": "Develop comprehensive test suites including unit tests, integration tests, and end-to-end testing scenarios",
"priority": "medium",
"assigned_to": "ceo",
"estimated_hours": 16,
"dependencies": ["1", "3"]
},
{
"title": "Deployment and Documentation",
"description": "Set up CI/CD pipeline, deploy to staging environment, create user documentation, and prepare production deployment",
"priority": "low",
"assigned_to": "ceo",
"estimated_hours": 10,
"dependencies": ["4"]
}
],
"recommended_approach": "Start with architecture design to establish clear foundations, then proceed with parallel backend and database development. Once backend APIs are stable, begin frontend implementation while maintaining continuous testing throughout the process.",
"estimated_total_hours": 102,
"risk_assessment": "Main risks include scope creep, API integration complexity, and potential database performance issues. Mitigation strategies include regular stakeholder reviews, comprehensive API documentation, and early performance testing."
}`,
		`{
"needs_subtasks": true,
"reasoning": "This task appears to be complex and multi-faceted, requiring different specialized skills including backend development, frontend work, database management, and testing. Breaking it down into subtasks would allow for better parallel execution and specialized agent assignment.",
"complexity_score": 8,
"required_skills": ["backend_development", "frontend_development", "database_design", "api_development", "testing", "deployment"]
}`,
		`{
"needs_subtasks": false,
"reasoning": "This task is straightforward and can be completed by a single agent with existing capabilities. The scope is well-defined and doesn't require multiple specialized skills or extensive coordination.",
"complexity_score": 3,
"required_skills": ["basic_development"]
}`,
	}
	return &FakeProvider{
		responses: responses,
		index:     0,
	}
}
// ChatCompletion implements the LLM interface. It inspects the request
// messages to pick a canned reply: subtask-analysis prompts get the
// detailed JSON plan (index 3), subtask-decision prompts alternate between
// the "needs_subtasks" true/false JSON answers (indices 4/5), and all
// other requests cycle through the three prose responses (indices 0-2).
// Returns ctx.Err() if the context is cancelled during the simulated delay.
func (f *FakeProvider) ChatCompletion(ctx context.Context, req llm.ChatCompletionRequest) (*llm.ChatCompletionResponse, error) {
	// Simulate API delay while honoring caller cancellation
	// (the previous version ignored ctx entirely and slept unconditionally).
	select {
	case <-ctx.Done():
		return nil, ctx.Err()
	case <-time.After(500 * time.Millisecond):
	}
	// Classify the request by the first message matching a known phrase;
	// if a message matches both phrase sets, the analysis phrases win.
	isSubtaskAnalysisRequest := false
	isSubtaskDecisionRequest := false
	for _, msg := range req.Messages {
		if strings.Contains(msg.Content, "break it down into subtasks") || strings.Contains(msg.Content, "subtask analysis") {
			isSubtaskAnalysisRequest = true
			break
		} else if strings.Contains(msg.Content, "needs to be broken down") || strings.Contains(msg.Content, "evaluate whether") {
			isSubtaskDecisionRequest = true
			break
		}
	}
	var response string
	switch {
	case isSubtaskAnalysisRequest && len(f.responses) > 3:
		// Use the detailed subtask analysis JSON response at index 3.
		// Note: index is deliberately not advanced here, matching prior behavior.
		response = f.responses[3]
	case isSubtaskDecisionRequest && len(f.responses) > 5:
		// BUGFIX: guard was `len(f.responses) > 4` while responses[5] is
		// accessed below — an out-of-range panic waiting for a 5-element slice.
		if f.index%2 == 0 {
			response = f.responses[4] // "needs_subtasks": true
		} else {
			response = f.responses[5] // "needs_subtasks": false
		}
		f.index++
	default:
		// Cycle through the first three (prose) responses only.
		response = f.responses[f.index%3]
		f.index++
	}
	return &llm.ChatCompletionResponse{
		ID:      fmt.Sprintf("fake-response-%d", f.index),
		Object:  "chat.completion",
		Created: time.Now().Unix(),
		Model:   req.Model,
		Choices: []llm.ChatCompletionChoice{
			{
				Index: 0,
				Message: llm.Message{
					Role:    llm.RoleAssistant,
					Content: response,
				},
				FinishReason: "stop",
			},
		},
		// Fixed token counts — good enough for tests.
		Usage: llm.Usage{
			PromptTokens:     100,
			CompletionTokens: 300,
			TotalTokens:      400,
		},
	}, nil
}
// CreateEmbeddings implements the LLM interface (not used in current
// implementation). It returns a single zero-valued embedding vector of
// OpenAI's standard dimensionality plus fixed token usage numbers.
func (f *FakeProvider) CreateEmbeddings(ctx context.Context, req llm.EmbeddingRequest) (*llm.EmbeddingResponse, error) {
	// One all-zeros vector; 1536 matches the OpenAI embedding size.
	fakeEmbedding := llm.Embedding{
		Object:    "embedding",
		Index:     0,
		Embedding: make([]float64, 1536),
	}
	resp := &llm.EmbeddingResponse{
		Object: "list",
		Data:   []llm.Embedding{fakeEmbedding},
		Model:  req.Model,
		Usage: llm.Usage{
			PromptTokens: 50,
			TotalTokens:  50,
		},
	}
	return resp, nil
}
// Close implements the LLM interface. The fake provider holds no external
// resources, so closing is a no-op and always succeeds.
func (f *FakeProvider) Close() error {
	return nil
}