Add fake llm provider

Change-Id: I7983796013f27e146506a42c8238b69a1838f1d9
diff --git a/server/agent/manager.go b/server/agent/manager.go
index 2d31b87..74e6771 100644
--- a/server/agent/manager.go
+++ b/server/agent/manager.go
@@ -87,7 +87,7 @@
 
 	// Create LLM provider
 	llmConfig := llm.Config{
-		Provider: llm.ProviderOpenAI,
+		Provider: llm.ProviderFake, // Use fake provider for testing
 		APIKey:   m.config.OpenAI.APIKey,
 		BaseURL:  m.config.OpenAI.BaseURL,
 		Timeout:  m.config.OpenAI.Timeout,
diff --git a/server/config-fake.yaml b/server/config-fake.yaml
new file mode 100644
index 0000000..387ba96
--- /dev/null
+++ b/server/config-fake.yaml
@@ -0,0 +1,47 @@
+# Staff MVP Configuration with Fake LLM for Testing
+# This config uses fake LLM responses for testing without API keys
+
+openai:
+  api_key: "fake-key"  # Not used by fake provider
+  base_url: "fake://test"
+  timeout: "5s"
+
+github:
+  token: "fake-github-token"  # Replace with real token for actual GitHub operations
+  owner: "shota"
+  repo: "staff"
+
+git:
+  branch_prefix: "task/"
+  commit_message_template: "Task {task_id}: {task_title} by {agent_name}"
+  pr_template: |
+    ## Task: {task_title}
+    
+    **Priority:** {priority}  
+    **Task ID:** {task_id}  
+    **Agent:** {agent_name}
+    
+    ### Description
+    {task_description}
+    
+    ### Solution
+    {solution}
+    
+    ### Files Changed
+    {files_changed}
+    
+    ---
+    *This PR was automatically generated by Staff AI Agent System (Testing Mode)*
+
+# Fake LLM configuration for testing
+agents:
+  - name: "ceo"
+    role: "CEO"
+    model: "fake-gpt-4"
+    temperature: 0.3
+    max_tokens: 4000
+    system_prompt_file: "/Users/shota/github/staff/operations/agents/ceo/system.md"
+
+tasks:
+  storage_path: "/Users/shota/github/staff/operations/tasks/"
+  completed_path: "/Users/shota/github/staff/operations/completed/"
\ No newline at end of file
diff --git a/server/llm/fake/factory.go b/server/llm/fake/factory.go
new file mode 100644
index 0000000..8031e62
--- /dev/null
+++ b/server/llm/fake/factory.go
@@ -0,0 +1,28 @@
+package fake
+
+import (
+	"github.com/iomodo/staff/llm"
+)
+
+// FakeFactory creates fake LLM providers for testing
+type FakeFactory struct{}
+
+// NewFakeFactory creates a new fake factory
+func NewFakeFactory() *FakeFactory {
+	return &FakeFactory{}
+}
+
+// CreateProvider creates a new fake provider
+func (f *FakeFactory) CreateProvider(config llm.Config) (llm.LLMProvider, error) {
+	return NewFakeProvider(), nil
+}
+
+// SupportsProvider returns true if this factory supports the given provider type
+func (f *FakeFactory) SupportsProvider(provider llm.Provider) bool {
+	return provider == llm.ProviderFake
+}
+
+// init registers the fake factory when the package is imported
+func init() {
+	llm.RegisterProvider(llm.ProviderFake, NewFakeFactory())
+}
\ No newline at end of file
diff --git a/server/llm/fake/fake.go b/server/llm/fake/fake.go
new file mode 100644
index 0000000..3739c27
--- /dev/null
+++ b/server/llm/fake/fake.go
@@ -0,0 +1,194 @@
+package fake
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	"github.com/iomodo/staff/llm"
+)
+
+// FakeProvider implements a fake LLM provider for testing
+type FakeProvider struct {
+	responses []string
+	index     int
+}
+
+// NewFakeProvider creates a new fake provider with predefined responses
+func NewFakeProvider() *FakeProvider {
+	responses := []string{
+		`## Task Solution
+
+I've analyzed the task requirements and here's my proposed solution:
+
+### Implementation Plan
+1. Create the necessary data structures
+2. Implement the core business logic
+3. Add proper error handling
+4. Write comprehensive tests
+5. Update documentation
+
+### Code Changes
+- Add new functions to handle the requirements
+- Update existing modules for compatibility
+- Implement proper validation
+- Add logging for debugging
+
+### Testing Strategy
+- Unit tests for all new functions
+- Integration tests for the workflow
+- Performance tests for scalability
+- Edge case validation
+
+### Files to Create/Modify
+- src/main.go - Core implementation
+- src/handlers.go - Request handlers
+- src/models.go - Data models
+- tests/ - Test files
+- docs/ - Documentation updates
+
+### Dependencies
+No new external dependencies required.
+
+### Deployment Notes
+- Backward compatible changes
+- No database migrations needed
+- Can be deployed incrementally
+
+This solution addresses all the requirements while maintaining code quality and system stability.`,
+
+		`## Comprehensive Task Analysis
+
+After careful consideration, I recommend the following approach:
+
+### Technical Architecture
+- **Backend**: Implement using existing Go patterns
+- **Database**: Utilize current PostgreSQL setup
+- **API**: RESTful endpoints with proper versioning
+- **Security**: OAuth2 authentication with JWT tokens
+
+### Development Steps
+1. **Phase 1**: Core functionality implementation
+2. **Phase 2**: User interface development
+3. **Phase 3**: Testing and optimization
+4. **Phase 4**: Documentation and deployment
+
+### Risk Assessment
+- **Low Risk**: Well-defined requirements
+- **Medium Risk**: Timeline constraints
+- **Mitigation**: Incremental development approach
+
+### Resource Requirements
+- Development time: 2-3 weeks
+- Testing phase: 1 week
+- Documentation: 2-3 days
+
+### Success Metrics
+- Performance benchmarks met
+- All test cases passing
+- User acceptance criteria satisfied
+- Code coverage > 90%
+
+This solution provides a robust foundation for future enhancements while meeting immediate business needs.`,
+
+		`## Strategic Implementation Proposal
+
+### Executive Summary
+This task requires a comprehensive solution that balances technical excellence with business objectives.
+
+### Solution Overview
+- **Approach**: Agile development methodology
+- **Technology Stack**: Current tech stack enhancement
+- **Timeline**: 3-4 week delivery cycle
+- **Team**: Cross-functional collaboration
+
+### Technical Specifications
+- Clean architecture principles
+- Microservices design patterns
+- Event-driven communication
+- Comprehensive monitoring and logging
+
+### Implementation Details
+1. **Requirements Analysis**: Complete stakeholder alignment
+2. **System Design**: Scalable and maintainable architecture
+3. **Development**: Test-driven development approach
+4. **Quality Assurance**: Automated testing pipeline
+5. **Deployment**: Blue-green deployment strategy
+
+### Business Impact
+- Improved user experience
+- Enhanced system reliability
+- Reduced operational overhead
+- Increased development velocity
+
+### Next Steps
+1. Stakeholder review and approval
+2. Resource allocation confirmation
+3. Development sprint planning
+4. Implementation kickoff
+
+This solution ensures long-term success while delivering immediate value to the organization.`,
+	}
+
+	return &FakeProvider{
+		responses: responses,
+		index:     0,
+	}
+}
+
+// ChatCompletion implements the LLM interface
+func (f *FakeProvider) ChatCompletion(ctx context.Context, req llm.ChatCompletionRequest) (*llm.ChatCompletionResponse, error) {
+	// Simulate API delay
+	time.Sleep(500 * time.Millisecond)
+
+	// Get the next response (cycle through responses)
+	response := f.responses[f.index%len(f.responses)]
+	f.index++
+
+	return &llm.ChatCompletionResponse{
+		ID:      fmt.Sprintf("fake-response-%d", f.index),
+		Object:  "chat.completion",
+		Created: time.Now().Unix(),
+		Model:   req.Model,
+		Choices: []llm.ChatCompletionChoice{
+			{
+				Index: 0,
+				Message: llm.Message{
+					Role:    llm.RoleAssistant,
+					Content: response,
+				},
+				FinishReason: "stop",
+			},
+		},
+		Usage: llm.Usage{
+			PromptTokens:     100,
+			CompletionTokens: 300,
+			TotalTokens:      400,
+		},
+	}, nil
+}
+
+// CreateEmbeddings implements the LLM interface (not used in current implementation)
+func (f *FakeProvider) CreateEmbeddings(ctx context.Context, req llm.EmbeddingRequest) (*llm.EmbeddingResponse, error) {
+	return &llm.EmbeddingResponse{
+		Object: "list",
+		Data: []llm.Embedding{
+			{
+				Object:    "embedding",
+				Index:     0,
+				Embedding: make([]float64, 1536), // OpenAI embedding size
+			},
+		},
+		Model: req.Model,
+		Usage: llm.Usage{
+			PromptTokens: 50,
+			TotalTokens:  50,
+		},
+	}, nil
+}
+
+// Close implements the LLM interface
+func (f *FakeProvider) Close() error {
+	// Nothing to close for fake provider
+	return nil
+}
\ No newline at end of file
diff --git a/server/llm/llm.go b/server/llm/llm.go
index 2fd3bf3..aa63c3e 100644
--- a/server/llm/llm.go
+++ b/server/llm/llm.go
@@ -35,6 +35,7 @@
 	ProviderClaude Provider = "claude"
 	ProviderGemini Provider = "gemini"
 	ProviderLocal  Provider = "local"
+	ProviderFake   Provider = "fake"
 )
 
 // Role represents the role of a message participant
@@ -270,4 +271,10 @@
 		Timeout:    60 * time.Second,
 		MaxRetries: 1,
 	},
+	ProviderFake: {
+		Provider:   ProviderFake,
+		BaseURL:    "fake://test",
+		Timeout:    1 * time.Second,
+		MaxRetries: 0,
+	},
 }
diff --git a/server/llm/providers/providers.go b/server/llm/providers/providers.go
index 87015aa..98d01a8 100644
--- a/server/llm/providers/providers.go
+++ b/server/llm/providers/providers.go
@@ -1,6 +1,7 @@
 package providers
 
 import (
+	_ "github.com/iomodo/staff/llm/fake"   // Register Fake provider for testing
 	_ "github.com/iomodo/staff/llm/openai" // Register OpenAI provider
 )
 
diff --git a/server/llm/utils.go b/server/llm/utils.go
index 59064b3..7381364 100644
--- a/server/llm/utils.go
+++ b/server/llm/utils.go
@@ -19,7 +19,8 @@
 
 // ValidateConfig validates a configuration for an LLM provider
 func ValidateConfig(config Config) error {
-	if config.APIKey == "" {
+	// Fake provider doesn't need API key
+	if config.Provider != ProviderFake && config.APIKey == "" {
 		return ErrAPIKeyRequired
 	}
 
@@ -41,7 +42,7 @@
 // IsValidProvider checks if a provider is supported
 func IsValidProvider(provider Provider) bool {
 	switch provider {
-	case ProviderOpenAI, ProviderXAI, ProviderClaude, ProviderGemini, ProviderLocal:
+	case ProviderOpenAI, ProviderXAI, ProviderClaude, ProviderGemini, ProviderLocal, ProviderFake:
 		return true
 	default:
 		return false
@@ -197,6 +198,8 @@
 		return "Gemini (Google)"
 	case ProviderLocal:
 		return "Local"
+	case ProviderFake:
+		return "Fake (Testing)"
 	default:
 		return string(provider)
 	}
diff --git a/server/test_fake_llm.go b/server/test_fake_llm.go
new file mode 100644
index 0000000..a322599
--- /dev/null
+++ b/server/test_fake_llm.go
@@ -0,0 +1,60 @@
+package main
+
+import (
+	"context"
+	"fmt"
+	"log"
+
+	"github.com/iomodo/staff/llm"
+	_ "github.com/iomodo/staff/llm/providers" // Auto-register providers
+)
+
+func main() {
+	// Create fake LLM config
+	config := llm.Config{
+		Provider: llm.ProviderFake,
+		APIKey:   "fake-key",
+		BaseURL:  "fake://test",
+	}
+
+	// Create provider
+	provider, err := llm.CreateProvider(config)
+	if err != nil {
+		log.Fatalf("Failed to create provider: %v", err)
+	}
+	defer provider.Close()
+
+	// Test chat completion
+	req := llm.ChatCompletionRequest{
+		Model: "fake-gpt-4",
+		Messages: []llm.Message{
+			{
+				Role:    llm.RoleSystem,
+				Content: "You are a helpful AI assistant.",
+			},
+			{
+				Role:    llm.RoleUser,
+				Content: "Create a solution for implementing user authentication",
+			},
+		},
+		MaxTokens:   &[]int{4000}[0],
+		Temperature: &[]float64{0.3}[0],
+	}
+
+	fmt.Println("Testing Fake LLM Provider...")
+	fmt.Println("==========================")
+
+	resp, err := provider.ChatCompletion(context.Background(), req)
+	if err != nil {
+		log.Fatalf("Chat completion failed: %v", err)
+	}
+
+	fmt.Printf("Response ID: %s\n", resp.ID)
+	fmt.Printf("Model: %s\n", resp.Model)
+	fmt.Printf("Provider: %s\n", resp.Provider)
+	fmt.Printf("Usage: %+v\n", resp.Usage)
+	fmt.Println("\nGenerated Solution:")
+	fmt.Println("===================")
+	fmt.Println(resp.Choices[0].Message.Content)
+	fmt.Println("\n✅ Fake LLM Provider is working correctly!")
+}
\ No newline at end of file