package oai

import (
	"cmp"
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"log/slog"
	"math/rand/v2"
	"net/http"
	"strings"
	"time"

	"github.com/sashabaranov/go-openai"
	"sketch.dev/llm"
)

const (
	DefaultMaxTokens = 8192

	OpenAIURL    = "https://api.openai.com/v1"
	FireworksURL = "https://api.fireworks.ai/inference/v1"
	LlamaCPPURL  = "http://localhost:8080/v1"
	TogetherURL  = "https://api.together.xyz/v1"
	GeminiURL    = "https://generativelanguage.googleapis.com/v1beta/openai/"
	MistralURL   = "https://api.mistral.ai/v1"
	MoonshotURL  = "https://api.moonshot.ai/v1"

	// Environment variable names for API keys
	OpenAIAPIKeyEnv    = "OPENAI_API_KEY"
	FireworksAPIKeyEnv = "FIREWORKS_API_KEY"
	TogetherAPIKeyEnv  = "TOGETHER_API_KEY"
	GeminiAPIKeyEnv    = "GEMINI_API_KEY"
	MistralAPIKeyEnv   = "MISTRAL_API_KEY"
	MoonshotAPIKeyEnv  = "MOONSHOT_API_KEY"
)

type Model struct {
	UserName         string // provided by the user to identify this model (e.g. "gpt4.1")
	ModelName        string // provided to the service provider to specify which model to use (e.g. "gpt-4.1-2025-04-14")
	URL              string
	APIKeyEnv        string // environment variable name for the API key
	IsReasoningModel bool   // whether this model is a reasoning model (e.g. O3, O4-mini)
}
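
// A provider that is not predeclared below can be described with the same
// shape. A minimal sketch (illustrative values only, not a registered model):
//
//	var localServer = Model{
//		UserName:  "my-local",
//		ModelName: "served-model-name",
//		URL:       "http://localhost:8000/v1",
//	}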

var (
	DefaultModel = GPT41

	GPT41 = Model{
		UserName:  "gpt4.1",
		ModelName: "gpt-4.1-2025-04-14",
		URL:       OpenAIURL,
		APIKeyEnv: OpenAIAPIKeyEnv,
	}

	GPT4o = Model{
		UserName:  "gpt4o",
		ModelName: "gpt-4o-2024-08-06",
		URL:       OpenAIURL,
		APIKeyEnv: OpenAIAPIKeyEnv,
	}

	GPT4oMini = Model{
		UserName:  "gpt4o-mini",
		ModelName: "gpt-4o-mini-2024-07-18",
		URL:       OpenAIURL,
		APIKeyEnv: OpenAIAPIKeyEnv,
	}

	GPT41Mini = Model{
		UserName:  "gpt4.1-mini",
		ModelName: "gpt-4.1-mini-2025-04-14",
		URL:       OpenAIURL,
		APIKeyEnv: OpenAIAPIKeyEnv,
	}

	GPT41Nano = Model{
		UserName:  "gpt4.1-nano",
		ModelName: "gpt-4.1-nano-2025-04-14",
		URL:       OpenAIURL,
		APIKeyEnv: OpenAIAPIKeyEnv,
	}

	O3 = Model{
		UserName:         "o3",
		ModelName:        "o3-2025-04-16",
		URL:              OpenAIURL,
		APIKeyEnv:        OpenAIAPIKeyEnv,
		IsReasoningModel: true,
	}

	O4Mini = Model{
		UserName:         "o4-mini",
		ModelName:        "o4-mini-2025-04-16",
		URL:              OpenAIURL,
		APIKeyEnv:        OpenAIAPIKeyEnv,
		IsReasoningModel: true,
	}

	Gemini25Flash = Model{
		UserName:  "gemini-flash-2.5",
		ModelName: "gemini-2.5-flash-preview-04-17",
		URL:       GeminiURL,
		APIKeyEnv: GeminiAPIKeyEnv,
	}

	Gemini25Pro = Model{
		UserName:  "gemini-pro-2.5",
		ModelName: "gemini-2.5-pro-preview-03-25",
		URL:       GeminiURL,
		// GRRRR. Really??
		// Input is: $1.25, prompts <= 200k tokens, $2.50, prompts > 200k tokens
		// Output is: $10.00, prompts <= 200k tokens, $15.00, prompts > 200k
		// Caching is: $0.31, prompts <= 200k tokens, $0.625, prompts > 200k, $4.50 / 1,000,000 tokens per hour
		// Whatever that means. Are we caching? I have no idea.
		// How do you always manage to be the annoying one, Google?
		// I'm not complicating things just for you.
		APIKeyEnv: GeminiAPIKeyEnv,
	}

	TogetherDeepseekV3 = Model{
		UserName:  "together-deepseek-v3",
		ModelName: "deepseek-ai/DeepSeek-V3",
		URL:       TogetherURL,
		APIKeyEnv: TogetherAPIKeyEnv,
	}

	TogetherDeepseekR1 = Model{
		UserName:  "together-deepseek-r1",
		ModelName: "deepseek-ai/DeepSeek-R1",
		URL:       TogetherURL,
		APIKeyEnv: TogetherAPIKeyEnv,
	}

	TogetherLlama4Maverick = Model{
		UserName:  "together-llama4-maverick",
		ModelName: "meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8",
		URL:       TogetherURL,
		APIKeyEnv: TogetherAPIKeyEnv,
	}

	FireworksLlama4Maverick = Model{
		UserName:  "fireworks-llama4-maverick",
		ModelName: "accounts/fireworks/models/llama4-maverick-instruct-basic",
		URL:       FireworksURL,
		APIKeyEnv: FireworksAPIKeyEnv,
	}

	TogetherLlama3_3_70B = Model{
		UserName:  "together-llama3-70b",
		ModelName: "meta-llama/Llama-3.3-70B-Instruct-Turbo",
		URL:       TogetherURL,
		APIKeyEnv: TogetherAPIKeyEnv,
	}

	TogetherMistralSmall = Model{
		UserName:  "together-mistral-small",
		ModelName: "mistralai/Mistral-Small-24B-Instruct-2501",
		URL:       TogetherURL,
		APIKeyEnv: TogetherAPIKeyEnv,
	}

	TogetherQwen3 = Model{
		UserName:  "together-qwen3",
		ModelName: "Qwen/Qwen3-235B-A22B-fp8-tput",
		URL:       TogetherURL,
		APIKeyEnv: TogetherAPIKeyEnv,
	}

	TogetherGemma2 = Model{
		UserName:  "together-gemma2",
		ModelName: "google/gemma-2-27b-it",
		URL:       TogetherURL,
		APIKeyEnv: TogetherAPIKeyEnv,
	}

	LlamaCPP = Model{
		UserName:  "llama.cpp",
		ModelName: "llama.cpp local model",
		URL:       LlamaCPPURL,
	}

	FireworksDeepseekV3 = Model{
		UserName:  "fireworks-deepseek-v3",
		ModelName: "accounts/fireworks/models/deepseek-v3-0324",
		URL:       FireworksURL,
		APIKeyEnv: FireworksAPIKeyEnv,
	}

	MoonshotKimiK2 = Model{
		UserName:  "moonshot-kimi-k2",
		ModelName: "moonshot-v1-auto",
		URL:       MoonshotURL,
		APIKeyEnv: MoonshotAPIKeyEnv,
	}

	MistralMedium = Model{
		UserName:  "mistral-medium-3",
		ModelName: "mistral-medium-latest",
		URL:       MistralURL,
		APIKeyEnv: MistralAPIKeyEnv,
	}

	DevstralSmall = Model{
		UserName:  "devstral-small",
		ModelName: "devstral-small-latest",
		URL:       MistralURL,
		APIKeyEnv: MistralAPIKeyEnv,
	}
)

// Service provides chat completions.
// Fields should not be altered concurrently with calling any method on Service.
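//
// A minimal usage sketch (assuming OPENAI_API_KEY is set in the environment;
// the llm.Request shape follows the fields that Do reads below):
//
//	svc := &Service{
//		Model:  GPT41Mini,
//		APIKey: os.Getenv(OpenAIAPIKeyEnv),
//	}
//	resp, err := svc.Do(ctx, &llm.Request{
//		Messages: []llm.Message{{
//			Role:    llm.MessageRoleUser,
//			Content: []llm.Content{{Type: llm.ContentTypeText, Text: "Say hello."}},
//		}},
//	})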
type Service struct {
	HTTPC     *http.Client // defaults to http.DefaultClient if nil
	APIKey    string       // optional, if not set will try to load from env var
	Model     Model        // defaults to DefaultModel if zero value
	MaxTokens int          // defaults to DefaultMaxTokens if zero
	Org       string       // optional - organization ID
}

var _ llm.Service = (*Service)(nil)

// ModelsRegistry is a registry of all known models with their user-friendly names.
var ModelsRegistry = []Model{
	GPT41,
	GPT41Mini,
	GPT41Nano,
	GPT4o,
	GPT4oMini,
	O3,
	O4Mini,
	Gemini25Flash,
	Gemini25Pro,
	TogetherDeepseekV3,
	TogetherDeepseekR1,
	TogetherLlama4Maverick,
	TogetherLlama3_3_70B,
	TogetherMistralSmall,
	TogetherQwen3,
	TogetherGemma2,
	LlamaCPP,
	FireworksDeepseekV3,
	MoonshotKimiK2,
	FireworksLlama4Maverick,
	MistralMedium,
	DevstralSmall,
}

// ListModels returns a list of all available models with their user-friendly names.
func ListModels() []string {
	var names []string
	for _, model := range ModelsRegistry {
		if model.UserName != "" {
			names = append(names, model.UserName)
		}
	}
	return names
}

// ModelByUserName returns a model by its user-friendly name.
// Returns nil if no model with the given name is found.
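//
// For example (a sketch):
//
//	if m := ModelByUserName("gpt4.1"); m != nil {
//		fmt.Println(m.ModelName) // prints "gpt-4.1-2025-04-14"
//	}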
func ModelByUserName(name string) *Model {
	for _, model := range ModelsRegistry {
		if model.UserName == name {
			return &model
		}
	}
	return nil
}

var (
	fromLLMRole = map[llm.MessageRole]string{
		llm.MessageRoleAssistant: "assistant",
		llm.MessageRoleUser:      "user",
	}
	fromLLMToolChoiceType = map[llm.ToolChoiceType]string{
		llm.ToolChoiceTypeAuto: "auto",
		llm.ToolChoiceTypeAny:  "required", // OpenAI uses "required" where Anthropic-style APIs use "any"
		llm.ToolChoiceTypeNone: "none",
		llm.ToolChoiceTypeTool: "function", // OpenAI uses "function" instead of "tool"
	}
	toLLMRole = map[string]llm.MessageRole{
		"assistant": llm.MessageRoleAssistant,
		"user":      llm.MessageRoleUser,
	}
	toLLMStopReason = map[string]llm.StopReason{
		"stop":           llm.StopReasonStopSequence,
		"length":         llm.StopReasonMaxTokens,
		"tool_calls":     llm.StopReasonToolUse,
		"function_call":  llm.StopReasonToolUse,      // Map both to ToolUse
		"content_filter": llm.StopReasonStopSequence, // No direct equivalent
	}
)

// fromLLMContent converts llm.Content to the format expected by OpenAI.
func fromLLMContent(c llm.Content) (string, []openai.ToolCall) {
	switch c.Type {
	case llm.ContentTypeText:
		return c.Text, nil
	case llm.ContentTypeToolUse:
		// For OpenAI, tool use is sent as empty content with tool_calls in the message
		return "", []openai.ToolCall{
			{
				Type: openai.ToolTypeFunction,
				ID:   c.ID, // Use the content ID if provided
				Function: openai.FunctionCall{
					Name:      c.ToolName,
					Arguments: string(c.ToolInput),
				},
			},
		}
	case llm.ContentTypeToolResult:
		// Tool results in OpenAI are sent as a separate message with tool_call_id.
		// OpenAI doesn't support multiple content items or images in tool results,
		// so combine all text content into a single string.
		var resultText string
		if len(c.ToolResult) > 0 {
			// Collect all text from content objects
			texts := make([]string, 0, len(c.ToolResult))
			for _, result := range c.ToolResult {
				if result.Text != "" {
					texts = append(texts, result.Text)
				}
			}
			resultText = strings.Join(texts, "\n")
		}
		return resultText, nil
	default:
		// For thinking or other types, convert to text
		return c.Text, nil
	}
}

// fromLLMMessage converts llm.Message to OpenAI ChatCompletionMessage format.
func fromLLMMessage(msg llm.Message) []openai.ChatCompletionMessage {
	// For OpenAI, we need to handle tool results differently than regular messages:
	// each tool result becomes its own message with role="tool".

	var messages []openai.ChatCompletionMessage

	// Check if this is a regular message or contains tool results
	var regularContent []llm.Content
	var toolResults []llm.Content

	for _, c := range msg.Content {
		if c.Type == llm.ContentTypeToolResult {
			toolResults = append(toolResults, c)
		} else {
			regularContent = append(regularContent, c)
		}
	}

	// Process tool results first, each as its own separate message
	for _, tr := range toolResults {
		// Convert the ToolResult array to a string for OpenAI
		var toolResultContent string
		if len(tr.ToolResult) > 0 {
			// For now, just use the first text content in the array
			toolResultContent = tr.ToolResult[0].Text
		}

		m := openai.ChatCompletionMessage{
			Role:       "tool",
			Content:    cmp.Or(toolResultContent, " "), // Use a single space if empty to avoid omitempty issues
			ToolCallID: tr.ToolUseID,
		}
		messages = append(messages, m)
	}
	// Process regular content second
	if len(regularContent) > 0 {
		m := openai.ChatCompletionMessage{
			Role: fromLLMRole[msg.Role],
		}

		// For assistant messages that contain tool calls
		var toolCalls []openai.ToolCall
		var textContent string

		for _, c := range regularContent {
			content, tools := fromLLMContent(c)
			if len(tools) > 0 {
				toolCalls = append(toolCalls, tools...)
			} else if content != "" {
				if textContent != "" {
					textContent += "\n"
				}
				textContent += content
			}
		}

		m.Content = textContent
		m.ToolCalls = toolCalls

		messages = append(messages, m)
	}

	return messages
}

// fromLLMToolChoice converts llm.ToolChoice to the format expected by OpenAI.
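//
// For example (a sketch; "get_weather" is an illustrative tool name), forcing
// a specific tool:
//
//	fromLLMToolChoice(&llm.ToolChoice{Type: llm.ToolChoiceTypeTool, Name: "get_weather"})
//
// yields an openai.ToolChoice with Type "function" and Function.Name
// "get_weather", while a plain auto choice yields the string "auto".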
func fromLLMToolChoice(tc *llm.ToolChoice) any {
	if tc == nil {
		return nil
	}

	if tc.Type == llm.ToolChoiceTypeTool && tc.Name != "" {
		return openai.ToolChoice{
			Type: openai.ToolTypeFunction,
			Function: openai.ToolFunction{
				Name: tc.Name,
			},
		}
	}

	// For non-specific tool choice, just use the string
	return fromLLMToolChoiceType[tc.Type]
}

// fromLLMTool converts llm.Tool to the format expected by OpenAI.
func fromLLMTool(t *llm.Tool) openai.Tool {
	return openai.Tool{
		Type: openai.ToolTypeFunction,
		Function: &openai.FunctionDefinition{
			Name:        t.Name,
			Description: t.Description,
			Parameters:  t.InputSchema,
		},
	}
}

// fromLLMSystem converts llm.SystemContent to an OpenAI system message.
func fromLLMSystem(systemContent []llm.SystemContent) []openai.ChatCompletionMessage {
	if len(systemContent) == 0 {
		return nil
	}

	// Combine all system content into a single system message
	var systemText string
	for i, content := range systemContent {
		if i > 0 && systemText != "" && content.Text != "" {
			systemText += "\n"
		}
		systemText += content.Text
	}

	if systemText == "" {
		return nil
	}

	return []openai.ChatCompletionMessage{
		{
			Role:    "system",
			Content: systemText,
		},
	}
}

// toRawLLMContent converts a raw content string from OpenAI to llm.Content.
func toRawLLMContent(content string) llm.Content {
	return llm.Content{
		Type: llm.ContentTypeText,
		Text: content,
	}
}

// toToolCallLLMContent converts a tool call from OpenAI to llm.Content.
func toToolCallLLMContent(toolCall openai.ToolCall) llm.Content {
	// Generate a content ID if needed
	id := toolCall.ID
	if id == "" {
		// Create a deterministic ID based on the function name if no ID is provided
		id = "tc_" + toolCall.Function.Name
	}

	return llm.Content{
		ID:        id,
		Type:      llm.ContentTypeToolUse,
		ToolName:  toolCall.Function.Name,
		ToolInput: json.RawMessage(toolCall.Function.Arguments),
	}
}

// toToolResultLLMContent converts a tool result message from OpenAI to llm.Content.
func toToolResultLLMContent(msg openai.ChatCompletionMessage) llm.Content {
	return llm.Content{
		Type:      llm.ContentTypeToolResult,
		ToolUseID: msg.ToolCallID,
		ToolResult: []llm.Content{{
			Type: llm.ContentTypeText,
			Text: msg.Content,
		}},
		ToolError: false, // OpenAI doesn't specify errors explicitly
	}
}

// toLLMContents converts message content from OpenAI to []llm.Content.
func toLLMContents(msg openai.ChatCompletionMessage) []llm.Content {
	var contents []llm.Content

	// If this is a tool response, handle it separately
	if msg.Role == "tool" && msg.ToolCallID != "" {
		return []llm.Content{toToolResultLLMContent(msg)}
	}

	// If there's text content, add it
	if msg.Content != "" {
		contents = append(contents, toRawLLMContent(msg.Content))
	}

	// If there are tool calls, add them
	for _, tc := range msg.ToolCalls {
		contents = append(contents, toToolCallLLMContent(tc))
	}

	// If empty, add an empty text content
	if len(contents) == 0 {
		contents = append(contents, llm.Content{
			Type: llm.ContentTypeText,
			Text: "",
		})
	}

	return contents
}

// toLLMUsage converts usage information from OpenAI to llm.Usage.
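//
// For example (a sketch), an openai.Usage with PromptTokens: 100,
// PromptTokensDetails.CachedTokens: 20, and CompletionTokens: 50 maps to
// InputTokens: 100, CacheReadInputTokens: 20, and OutputTokens: 50, with
// CostUSD derived from the response headers when available.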
func (s *Service) toLLMUsage(au openai.Usage, headers http.Header) llm.Usage {
	// fmt.Printf("raw usage: %+v / %v / %v\n", au, au.PromptTokensDetails, au.CompletionTokensDetails)
	in := uint64(au.PromptTokens)
	var inc uint64
	if au.PromptTokensDetails != nil {
		inc = uint64(au.PromptTokensDetails.CachedTokens)
	}
	out := uint64(au.CompletionTokens)
	u := llm.Usage{
		InputTokens:              in,
		CacheReadInputTokens:     inc,
		CacheCreationInputTokens: in,
		OutputTokens:             out,
	}
	u.CostUSD = llm.CostUSDFromResponse(headers)
	return u
}

// toLLMResponse converts the OpenAI response to llm.Response.
func (s *Service) toLLMResponse(r *openai.ChatCompletionResponse) *llm.Response {
	// fmt.Printf("Raw response\n")
	// enc := json.NewEncoder(os.Stdout)
	// enc.SetIndent("", "  ")
	// enc.Encode(r)
	// fmt.Printf("\n")

	if len(r.Choices) == 0 {
		return &llm.Response{
			ID:    r.ID,
			Model: r.Model,
			Role:  llm.MessageRoleAssistant,
			Usage: s.toLLMUsage(r.Usage, r.Header()),
		}
	}

	// Process the primary choice
	choice := r.Choices[0]

	return &llm.Response{
		ID:         r.ID,
		Model:      r.Model,
		Role:       toRoleFromString(choice.Message.Role),
		Content:    toLLMContents(choice.Message),
		StopReason: toStopReason(string(choice.FinishReason)),
		Usage:      s.toLLMUsage(r.Usage, r.Header()),
	}
}

// toRoleFromString converts a role string to llm.MessageRole.
func toRoleFromString(role string) llm.MessageRole {
	if role == "tool" || role == "system" || role == "function" {
		return llm.MessageRoleAssistant // Map special roles to assistant for consistency
	}
	if mr, ok := toLLMRole[role]; ok {
		return mr
	}
	return llm.MessageRoleUser // Default to user if unknown
}

// toStopReason converts a finish reason string to llm.StopReason.
func toStopReason(reason string) llm.StopReason {
	if sr, ok := toLLMStopReason[reason]; ok {
		return sr
	}
	return llm.StopReasonStopSequence // Default
}

// TokenContextWindow returns the maximum token context window size for this service.
func (s *Service) TokenContextWindow() int {
	model := cmp.Or(s.Model, DefaultModel)

	// OpenAI models generally have 128k context windows.
	// Some newer models have larger windows, but 128k is a safe default.
	switch model.ModelName {
	case "gpt-4.1-2025-04-14", "gpt-4.1-mini-2025-04-14", "gpt-4.1-nano-2025-04-14":
		return 200000 // 200k for newer GPT-4.1 models
	case "gpt-4o-2024-08-06", "gpt-4o-mini-2024-07-18":
		return 128000 // 128k for GPT-4o models
	case "o3-2025-04-16", "o3-mini-2025-04-16":
		return 200000 // 200k for O3 models
	default:
		// Default for unknown models
		return 128000
	}
}

// Do sends a request to OpenAI using the go-openai package.
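// Transient failures (5xx, 429, and other API errors) are retried with
// increasing backoff plus jitter: the initial request plus up to 10 retries.
// Non-429 4xx client errors fail immediately.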
func (s *Service) Do(ctx context.Context, ir *llm.Request) (*llm.Response, error) {
	// Configure the OpenAI client
	httpc := cmp.Or(s.HTTPC, http.DefaultClient)
	model := cmp.Or(s.Model, DefaultModel)

	// TODO: do this once during Service setup? maybe with a constructor instead?
	config := openai.DefaultConfig(s.APIKey)
	if model.URL != "" {
		config.BaseURL = model.URL
	}
	if s.Org != "" {
		config.OrgID = s.Org
	}
	config.HTTPClient = httpc

	client := openai.NewClientWithConfig(config)

	// Start with system messages if provided
	var allMessages []openai.ChatCompletionMessage
	if len(ir.System) > 0 {
		sysMessages := fromLLMSystem(ir.System)
		allMessages = append(allMessages, sysMessages...)
	}

	// Add regular and tool messages
	for _, msg := range ir.Messages {
		msgs := fromLLMMessage(msg)
		allMessages = append(allMessages, msgs...)
	}

	// Convert tools
	var tools []openai.Tool
	for _, t := range ir.Tools {
		tools = append(tools, fromLLMTool(t))
	}

	// Create the OpenAI request
	req := openai.ChatCompletionRequest{
		Model:      model.ModelName,
		Messages:   allMessages,
		Tools:      tools,
		ToolChoice: fromLLMToolChoice(ir.ToolChoice), // TODO: make fromLLMToolChoice return an error when a perfect translation is not possible
	}
	if model.IsReasoningModel {
		req.MaxCompletionTokens = cmp.Or(s.MaxTokens, DefaultMaxTokens)
	} else {
		req.MaxTokens = cmp.Or(s.MaxTokens, DefaultMaxTokens)
	}
	// fmt.Printf("Sending request to OpenAI\n")
	// enc := json.NewEncoder(os.Stdout)
	// enc.SetIndent("", "  ")
	// enc.Encode(req)
	// fmt.Printf("\n")

	// Retry mechanism
	backoff := []time.Duration{1 * time.Second, 2 * time.Second, 5 * time.Second, 10 * time.Second, 15 * time.Second}

	// retry loop
	var errs error // accumulated errors across all attempts
	for attempts := 0; ; attempts++ {
		if attempts > 10 {
			return nil, fmt.Errorf("openai request failed after %d attempts: %w", attempts, errs)
		}
		if attempts > 0 {
			sleep := backoff[min(attempts, len(backoff)-1)] + time.Duration(rand.Int64N(int64(time.Second)))
			slog.WarnContext(ctx, "openai request sleep before retry", "sleep", sleep, "attempts", attempts)
			time.Sleep(sleep)
		}

		resp, err := client.CreateChatCompletion(ctx, req)

		// Handle successful response
		if err == nil {
			return s.toLLMResponse(&resp), nil
		}

		// Handle errors
		var apiErr *openai.APIError
		if ok := errors.As(err, &apiErr); !ok {
			// Not an OpenAI API error, return immediately with accumulated errors
			return nil, errors.Join(errs, err)
		}

		switch {
		case apiErr.HTTPStatusCode >= 500:
			// Server error, try again with backoff
			slog.WarnContext(ctx, "openai_request_failed", "error", apiErr.Error(), "status_code", apiErr.HTTPStatusCode)
			errs = errors.Join(errs, fmt.Errorf("status %d: %s", apiErr.HTTPStatusCode, apiErr.Error()))
			continue

		case apiErr.HTTPStatusCode == 429:
			// Rate limited, accumulate error and retry
			slog.WarnContext(ctx, "openai_request_rate_limited", "error", apiErr.Error())
			errs = errors.Join(errs, fmt.Errorf("status %d (rate limited): %s", apiErr.HTTPStatusCode, apiErr.Error()))
			continue

		case apiErr.HTTPStatusCode >= 400 && apiErr.HTTPStatusCode < 500:
			// Client error, probably unrecoverable
			slog.WarnContext(ctx, "openai_request_failed", "error", apiErr.Error(), "status_code", apiErr.HTTPStatusCode)
			return nil, errors.Join(errs, fmt.Errorf("status %d: %s", apiErr.HTTPStatusCode, apiErr.Error()))

		default:
			// Other error, accumulate and retry
			slog.WarnContext(ctx, "openai_request_failed", "error", apiErr.Error(), "status_code", apiErr.HTTPStatusCode)
			errs = errors.Join(errs, fmt.Errorf("status %d: %s", apiErr.HTTPStatusCode, apiErr.Error()))
			continue
		}
	}
}