Initial commit
diff --git a/.github/workflows/go_test.yml b/.github/workflows/go_test.yml
new file mode 100644
index 0000000..4e94690
--- /dev/null
+++ b/.github/workflows/go_test.yml
@@ -0,0 +1,39 @@
+name: go_tests
+on:
+ workflow_call:
+ push:
+ branches-ignore:
+ - "queue-main-**"
+ pull_request:
+jobs:
+ test:
+ runs-on: "linux-x64-ubuntu-latest-64-core"
+ steps:
+      - uses: actions/checkout@v4
+
+ - uses: actions/setup-go@v5
+ with:
+ go-version-file: "go.mod"
+ cache: true
+
+ - name: Cache Go 1.24.2
+ uses: actions/cache@v4
+ with:
+ path: |
+ ~/.cache/go-build
+ key: ${{ runner.os }}-go1.24.2-${{ hashFiles('**/go.sum') }}
+ restore-keys: |
+ ${{ runner.os }}-go1.24.2-
+
+ - name: Install tools
+ run: |
+ go install golang.org/x/tools/gopls@latest
+
+ - name: Go generate
+ run: |
+ go generate ./...
+
+ - name: Run tests
+ run: |
+ go test -v ./...
+ go test -v -race ./...
diff --git a/.github/workflows/queue-main.yml b/.github/workflows/queue-main.yml
new file mode 100644
index 0000000..ae0f0b1
--- /dev/null
+++ b/.github/workflows/queue-main.yml
@@ -0,0 +1,37 @@
+# Simplified Commit Queue
+#
+# (Force) push to "queue-main-$USER" a potential change, and
+# this job will push it main if it passes pre-commit and tests,
+# which will run in parallel. The push may fail if the commit
+# cannot be pushed cleanly because it needs to be rebased, which
+# will happen especially if another developer pushes a change at
+# roughly the same time.
+
+name: Main Branch Commit Queue
+on:
+ push:
+ branches:
+ - "queue-main-**"
+
+permissions: read-all
+
+jobs:
+ test:
+ uses: ./.github/workflows/go_test.yml
+ permissions: read-all
+
+ push-to-main:
+ runs-on: ubuntu-latest
+ needs: [test]
+ permissions:
+ contents: write
+ steps:
+ - uses: actions/checkout@v4
+ with:
+ fetch-depth: 0
+
+ - name: Push to main
+ run: |
+ git config --global user.name "GitHub Actions Bot"
+ git config --global user.email "actions@github.com"
+ git push origin HEAD:main
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..309f705
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,4 @@
+node_modules
+
+# Generated Tailwind CSS file
+loop/webui/src/tailwind.css
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..4b0a449
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,13 @@
+Copyright 2025 Bold Software, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..4674ef1
--- /dev/null
+++ b/README.md
@@ -0,0 +1,10 @@
+# Sketch
+
+Under construction.
+
+Usage:
+
+```sh
+go install sketch.dev/cmd/sketch@latest
+sketch
+```
diff --git a/ant/ant.go b/ant/ant.go
new file mode 100644
index 0000000..a3eb79a
--- /dev/null
+++ b/ant/ant.go
@@ -0,0 +1,976 @@
+package ant
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "log/slog"
+ "maps"
+ "math/rand/v2"
+ "net/http"
+ "slices"
+ "strings"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/richardlehane/crock32"
+ "sketch.dev/skribe"
+)
+
+const (
+ DefaultModel = Claude37Sonnet
+ // See https://docs.anthropic.com/en/docs/about-claude/models/all-models for
+ // current maximums. There's currently a flag to enable 128k output (output-128k-2025-02-19)
+ DefaultMaxTokens = 8192
+ DefaultURL = "https://api.anthropic.com/v1/messages"
+)
+
+const (
+ Claude35Sonnet = "claude-3-5-sonnet-20241022"
+ Claude35Haiku = "claude-3-5-haiku-20241022"
+ Claude37Sonnet = "claude-3-7-sonnet-20250219"
+)
+
+const (
+ MessageRoleUser = "user"
+ MessageRoleAssistant = "assistant"
+
+ ContentTypeText = "text"
+ ContentTypeThinking = "thinking"
+ ContentTypeRedactedThinking = "redacted_thinking"
+ ContentTypeToolUse = "tool_use"
+ ContentTypeToolResult = "tool_result"
+
+ StopReasonStopSequence = "stop_sequence"
+ StopReasonMaxTokens = "max_tokens"
+ StopReasonEndTurn = "end_turn"
+ StopReasonToolUse = "tool_use"
+)
+
+type Listener interface {
+ // TODO: Content is leaking an anthropic API; should we avoid it?
+ // TODO: Where should we include start/end time and usage?
+ OnToolResult(ctx context.Context, convo *Convo, toolName string, toolInput json.RawMessage, content Content, result *string, err error)
+ OnResponse(ctx context.Context, convo *Convo, msg *MessageResponse)
+ OnRequest(ctx context.Context, convo *Convo, msg *Message)
+}
+
+type NoopListener struct{}
+
+func (n *NoopListener) OnToolResult(ctx context.Context, convo *Convo, toolName string, toolInput json.RawMessage, content Content, result *string, err error) {
+}
+func (n *NoopListener) OnResponse(ctx context.Context, convo *Convo, msg *MessageResponse) {}
+func (n *NoopListener) OnRequest(ctx context.Context, convo *Convo, msg *Message) {}
+
+type Content struct {
+ // TODO: image support?
+ // https://docs.anthropic.com/en/api/messages
+ ID string `json:"id,omitempty"`
+ Type string `json:"type,omitempty"`
+ Text string `json:"text,omitempty"`
+
+ // for thinking
+ Thinking string `json:"thinking,omitempty"`
+ Data string `json:"data,omitempty"` // for redacted_thinking
+ Signature string `json:"signature,omitempty"` // for thinking
+
+ // for tool_use
+ ToolName string `json:"name,omitempty"`
+ ToolInput json.RawMessage `json:"input,omitempty"`
+
+ // for tool_result
+ ToolUseID string `json:"tool_use_id,omitempty"`
+ ToolError bool `json:"is_error,omitempty"`
+ ToolResult string `json:"content,omitempty"`
+
+ // timing information for tool_result; not sent to Claude
+ StartTime *time.Time `json:"-"`
+ EndTime *time.Time `json:"-"`
+
+ CacheControl json.RawMessage `json:"cache_control,omitempty"`
+}
+
+func StringContent(s string) Content {
+ return Content{Type: ContentTypeText, Text: s}
+}
+
+// Message represents a message in the conversation.
+type Message struct {
+ Role string `json:"role"`
+ Content []Content `json:"content"`
+ ToolUse *ToolUse `json:"tool_use,omitempty"` // use to control whether/which tool to use
+}
+
+// ToolUse represents a tool use in the message content.
+type ToolUse struct {
+ ID string `json:"id"`
+ Name string `json:"name"`
+}
+
+// Tool represents a tool available to Claude.
+type Tool struct {
+ Name string `json:"name"`
+ // Type is used by the text editor tool; see
+ // https://docs.anthropic.com/en/docs/build-with-claude/tool-use/text-editor-tool
+ Type string `json:"type,omitempty"`
+ Description string `json:"description,omitempty"`
+ InputSchema json.RawMessage `json:"input_schema,omitempty"`
+
+ // The Run function is automatically called when the tool is used.
+ // Run functions may be called concurrently with each other and themselves.
+ // The input to Run function is the input to the tool, as provided by Claude, in compliance with the input schema.
+ // The outputs from Run will be sent back to Claude.
+ // If you do not want to respond to the tool call request from Claude, return ErrDoNotRespond.
+ // ctx contains extra (rarely used) tool call information; retrieve it with ToolCallInfoFromContext.
+ Run func(ctx context.Context, input json.RawMessage) (string, error) `json:"-"`
+}
+
+var ErrDoNotRespond = errors.New("do not respond")
+
+// Usage represents the billing and rate-limit usage.
+type Usage struct {
+ InputTokens uint64 `json:"input_tokens"`
+ CacheCreationInputTokens uint64 `json:"cache_creation_input_tokens"`
+ CacheReadInputTokens uint64 `json:"cache_read_input_tokens"`
+ OutputTokens uint64 `json:"output_tokens"`
+ CostUSD float64 `json:"cost_usd"`
+}
+
+func (u *Usage) Add(other Usage) {
+ u.InputTokens += other.InputTokens
+ u.CacheCreationInputTokens += other.CacheCreationInputTokens
+ u.CacheReadInputTokens += other.CacheReadInputTokens
+ u.OutputTokens += other.OutputTokens
+ u.CostUSD += other.CostUSD
+}
+
+func (u *Usage) String() string {
+ return fmt.Sprintf("in: %d, out: %d", u.InputTokens, u.OutputTokens)
+}
+
+func (u *Usage) IsZero() bool {
+ return *u == Usage{}
+}
+
+func (u *Usage) Attr() slog.Attr {
+ return slog.Group("usage",
+ slog.Uint64("input_tokens", u.InputTokens),
+ slog.Uint64("output_tokens", u.OutputTokens),
+ slog.Uint64("cache_creation_input_tokens", u.CacheCreationInputTokens),
+ slog.Uint64("cache_read_input_tokens", u.CacheReadInputTokens),
+ )
+}
+
+type ErrorResponse struct {
+ Type string `json:"type"`
+ Message string `json:"message"`
+}
+
+// MessageResponse represents the response from the message API.
+type MessageResponse struct {
+ ID string `json:"id"`
+ Type string `json:"type"`
+ Role string `json:"role"`
+ Model string `json:"model"`
+ Content []Content `json:"content"`
+ StopReason string `json:"stop_reason"`
+ StopSequence *string `json:"stop_sequence,omitempty"`
+ Usage Usage `json:"usage"`
+ StartTime *time.Time `json:"start_time,omitempty"`
+ EndTime *time.Time `json:"end_time,omitempty"`
+}
+
+func (m *MessageResponse) ToMessage() Message {
+ return Message{
+ Role: m.Role,
+ Content: m.Content,
+ }
+}
+
+func (m *MessageResponse) StopSequenceString() string {
+ if m.StopSequence == nil {
+ return ""
+ }
+ return *m.StopSequence
+}
+
+const (
+ ToolChoiceTypeAuto = "auto" // default
+ ToolChoiceTypeAny = "any" // any tool, but must use one
+ ToolChoiceTypeNone = "none" // no tools allowed
+ ToolChoiceTypeTool = "tool" // must use the tool specified in the Name field
+)
+
+type ToolChoice struct {
+ Type string `json:"type"`
+ Name string `json:"name,omitempty"`
+}
+
+// https://docs.anthropic.com/en/api/messages#body-system
+type SystemContent struct {
+ Text string `json:"text,omitempty"`
+ Type string `json:"type,omitempty"`
+ CacheControl json.RawMessage `json:"cache_control,omitempty"`
+}
+
+// MessageRequest represents the request payload for creating a message.
+type MessageRequest struct {
+ Model string `json:"model"`
+ Messages []Message `json:"messages"`
+ ToolChoice *ToolChoice `json:"tool_choice,omitempty"`
+ MaxTokens int `json:"max_tokens"`
+ Tools []*Tool `json:"tools,omitempty"`
+ Stream bool `json:"stream,omitempty"`
+ System []SystemContent `json:"system,omitempty"`
+ Temperature float64 `json:"temperature,omitempty"`
+ TopK int `json:"top_k,omitempty"`
+ TopP float64 `json:"top_p,omitempty"`
+ StopSequences []string `json:"stop_sequences,omitempty"`
+
+ TokenEfficientToolUse bool `json:"-"` // DO NOT USE, broken on Anthropic's side as of 2025-02-28
+}
+
+const dumpText = false // debugging toggle to see raw communications with Claude
+
+// createMessage sends a request to the Anthropic message API to create a message.
+func createMessage(ctx context.Context, httpc *http.Client, url, apiKey string, request *MessageRequest) (*MessageResponse, error) {
+ var payload []byte
+ var err error
+ if dumpText || testing.Testing() {
+ payload, err = json.MarshalIndent(request, "", " ")
+ } else {
+ payload, err = json.Marshal(request)
+ payload = append(payload, '\n')
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ if false {
+ fmt.Printf("claude request payload:\n%s\n", payload)
+ }
+
+ backoff := []time.Duration{15 * time.Second, 30 * time.Second, time.Minute}
+ largerMaxTokens := false
+ var partialUsage Usage
+
+ // retry loop
+ for attempts := 0; ; attempts++ {
+ if dumpText {
+ fmt.Printf("RAW REQUEST:\n%s\n\n", payload)
+ }
+ req, err := http.NewRequestWithContext(ctx, "POST", url, bytes.NewReader(payload))
+ if err != nil {
+ return nil, err
+ }
+
+ req.Header.Set("Content-Type", "application/json")
+ req.Header.Set("X-API-Key", apiKey)
+ req.Header.Set("Anthropic-Version", "2023-06-01")
+
+ features := []string{}
+
+ if request.TokenEfficientToolUse {
+ features = append(features, "token-efficient-tool-use-2025-02-19")
+ }
+ if largerMaxTokens {
+ features = append(features, "output-128k-2025-02-19")
+ request.MaxTokens = 128 * 1024
+ }
+ if len(features) > 0 {
+ req.Header.Set("anthropic-beta", strings.Join(features, ","))
+ }
+
+ resp, err := httpc.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ buf, _ := io.ReadAll(resp.Body)
+ resp.Body.Close()
+
+ switch {
+ case resp.StatusCode == http.StatusOK:
+ if dumpText {
+ fmt.Printf("RAW RESPONSE:\n%s\n\n", buf)
+ }
+ var response MessageResponse
+ err = json.NewDecoder(bytes.NewReader(buf)).Decode(&response)
+ if err != nil {
+ return nil, err
+ }
+		if response.StopReason == StopReasonMaxTokens && !largerMaxTokens {
+			fmt.Println("Retrying Anthropic API call with larger max tokens size.")
+			// Retry with more output tokens; re-marshal so the larger max_tokens actually reaches the API body.
+			largerMaxTokens, request.MaxTokens = true, 128*1024
+			payload, _ = json.Marshal(request) // request marshaled successfully above; error cannot recur
+			partialUsage = response.Usage
+			continue
+		}
+
+ // Calculate and set the cost_usd field
+ if largerMaxTokens {
+ response.Usage.Add(partialUsage)
+ }
+ response.Usage.CostUSD = response.TotalDollars()
+
+ return &response, nil
+ case resp.StatusCode >= 500 && resp.StatusCode < 600:
+ // overloaded or unhappy, in one form or another
+ sleep := backoff[min(attempts, len(backoff)-1)] + time.Duration(rand.Int64N(int64(time.Second)))
+ slog.WarnContext(ctx, "anthropic_request_failed", "response", string(buf), "status_code", resp.StatusCode, "sleep", sleep)
+ time.Sleep(sleep)
+ case resp.StatusCode == 429:
+ // rate limited. wait 1 minute as a starting point, because that's the rate limiting window.
+ // and then add some additional time for backoff.
+ sleep := time.Minute + backoff[min(attempts, len(backoff)-1)] + time.Duration(rand.Int64N(int64(time.Second)))
+			slog.WarnContext(ctx, "anthropic_request_rate_limited", "response", string(buf), "sleep", sleep); time.Sleep(sleep) // without sleeping, this case busy-looped
+ // case resp.StatusCode == 400:
+ // TODO: parse ErrorResponse, make (*ErrorResponse) implement error
+ default:
+ return nil, fmt.Errorf("API request failed with status %s\n%s", resp.Status, buf)
+ }
+ }
+}
+
+// A Convo is a managed conversation with Claude.
+// It automatically manages the state of the conversation,
+// including appending messages send/received,
+// calling tools and sending their results,
+// tracking usage, etc.
+//
+// Exported fields must not be altered concurrently with calling any method on Convo.
+// Typical usage is to configure a Convo once before using it.
+type Convo struct {
+ // ID is a unique ID for the conversation
+ ID string
+ // Ctx is the context for the entire conversation.
+ Ctx context.Context
+ // HTTPC is the HTTP client for the conversation.
+ HTTPC *http.Client
+ // URL is the remote messages URL to dial.
+ URL string
+ // APIKey is the API key for the conversation.
+ APIKey string
+ // Model is the model for the conversation.
+ Model string
+ // MaxTokens is the max tokens for each response in the conversation.
+ MaxTokens int
+ // Tools are the tools available during the conversation.
+ Tools []*Tool
+ // SystemPrompt is the system prompt for the conversation.
+ SystemPrompt string
+ // PromptCaching indicates whether to use Anthropic's prompt caching.
+ // See https://docs.anthropic.com/en/docs/build-with-claude/prompt-caching#continuing-a-multi-turn-conversation
+ // for the documentation. At request send time, we set the cache_control field on the
+ // last message. We also cache the system prompt.
+ // Default: true.
+ PromptCaching bool
+ // ToolUseOnly indicates whether Claude may only use tools during this conversation.
+ // TODO: add more fine-grained control over tool use?
+ ToolUseOnly bool
+ // Parent is the parent conversation, if any.
+ // It is non-nil for "subagent" calls.
+ // It is set automatically when calling SubConvo,
+ // and usually should not be set manually.
+ Parent *Convo
+ // Budget is the budget for this conversation (and all sub-conversations).
+ // The Conversation DOES NOT automatically enforce the budget.
+ // It is up to the caller to call OverBudget() as appropriate.
+ Budget Budget
+
+ // messages tracks the messages so far in the conversation.
+ messages []Message
+
+ // Listener receives messages being sent.
+ Listener Listener
+
+ muToolUseCancel *sync.Mutex
+ toolUseCancel map[string]context.CancelCauseFunc
+
+ // Protects usage. This is used for subconversations (that share part of CumulativeUsage) as well.
+ mu *sync.Mutex
+ // usage tracks usage for this conversation and all sub-conversations.
+ usage *CumulativeUsage
+}
+
+// newConvoID generates a short random id (4 random bytes, base-32 encoded).
+// The uniqueness/collision requirements here are very low.
+// They are not global identifiers,
+// just enough to distinguish different convos in a single session.
+func newConvoID() string {
+ u1 := rand.Uint32()
+ s := crock32.Encode(uint64(u1))
+ if len(s) < 7 {
+ s += strings.Repeat("0", 7-len(s))
+ }
+ return s[:3] + "-" + s[3:]
+}
+
+// NewConvo creates a new conversation with Claude with sensible defaults.
+// ctx is the context for the entire conversation.
+func NewConvo(ctx context.Context, apiKey string) *Convo {
+ id := newConvoID()
+ return &Convo{
+ Ctx: skribe.ContextWithAttr(ctx, slog.String("convo_id", id)),
+ HTTPC: http.DefaultClient,
+ URL: DefaultURL,
+ APIKey: apiKey,
+ Model: DefaultModel,
+ MaxTokens: DefaultMaxTokens,
+ PromptCaching: true,
+ usage: newUsage(),
+ Listener: &NoopListener{},
+ ID: id,
+ muToolUseCancel: &sync.Mutex{},
+ toolUseCancel: map[string]context.CancelCauseFunc{},
+ mu: &sync.Mutex{},
+ }
+}
+
+// SubConvo creates a sub-conversation with the same configuration as the parent conversation.
+// (This propagates context for cancellation, HTTP client, API key, etc.)
+// The sub-conversation shares no messages with the parent conversation.
+// It does not inherit tools from the parent conversation.
+func (c *Convo) SubConvo() *Convo {
+ id := newConvoID()
+ return &Convo{
+		Ctx:   skribe.ContextWithAttr(c.Ctx, slog.String("convo_id", id), slog.String("parent_convo_id", c.ID)),
+		HTTPC: c.HTTPC,
+		URL:   c.URL, APIKey: c.APIKey, // URL was previously dropped, leaving sub-convos with an empty endpoint
+ Model: c.Model,
+ MaxTokens: c.MaxTokens,
+ PromptCaching: c.PromptCaching,
+ Parent: c,
+ // For convenience, sub-convo usage shares tool uses map with parent,
+ // all other fields separate, propagated in AddResponse
+ usage: newUsageWithSharedToolUses(c.usage),
+ mu: c.mu,
+ Listener: c.Listener,
+		ID: id, muToolUseCancel: &sync.Mutex{}, toolUseCancel: map[string]context.CancelCauseFunc{}, // previously nil: tool use on a sub-convo panicked on the nil mutex
+ // Do not copy Budget. Each budget is independent,
+ // and OverBudget checks whether any ancestor is over budget.
+ }
+}
+
+// Depth reports how many "sub-conversations" deep this conversation is.
+// That is, it walks up parents until it finds a root.
+func (c *Convo) Depth() int {
+ x := c
+ var depth int
+ for x.Parent != nil {
+ x = x.Parent
+ depth++
+ }
+ return depth
+}
+
+// SendUserTextMessage sends a text message to Claude in this conversation.
+// otherContents contains additional contents to send with the message, usually tool results.
+func (c *Convo) SendUserTextMessage(s string, otherContents ...Content) (*MessageResponse, error) {
+ contents := slices.Clone(otherContents)
+ if s != "" {
+ contents = append(contents, Content{Type: ContentTypeText, Text: s})
+ }
+ msg := Message{
+ Role: MessageRoleUser,
+ Content: contents,
+ }
+ return c.SendMessage(msg)
+}
+
+func (c *Convo) messageRequest(msg Message) *MessageRequest {
+ system := []SystemContent{}
+ if c.SystemPrompt != "" {
+ var d SystemContent
+ d = SystemContent{Type: "text", Text: c.SystemPrompt}
+ if c.PromptCaching {
+ d.CacheControl = json.RawMessage(`{"type":"ephemeral"}`)
+ }
+ system = []SystemContent{d}
+ }
+
+ // Claude is happy to return an empty response in response to our Done() call,
+ // and, if so, you'll see something like:
+ // API request failed with status 400 Bad Request
+ // {"type":"error","error": {"type":"invalid_request_error",
+ // "message":"messages.5: all messages must have non-empty content except for the optional final assistant message"}}
+ // So, we filter out those empty messages.
+ var nonEmptyMessages []Message
+ for _, m := range c.messages {
+ if len(m.Content) > 0 {
+ nonEmptyMessages = append(nonEmptyMessages, m)
+ }
+ }
+
+ mr := &MessageRequest{
+ Model: c.Model,
+ Messages: append(nonEmptyMessages, msg), // not yet committed to keeping msg
+ System: system,
+ Tools: c.Tools,
+ MaxTokens: c.MaxTokens,
+ }
+ if c.ToolUseOnly {
+ mr.ToolChoice = &ToolChoice{Type: ToolChoiceTypeAny}
+ }
+ return mr
+}
+
+func (c *Convo) findTool(name string) (*Tool, error) {
+ for _, tool := range c.Tools {
+ if tool.Name == name {
+ return tool, nil
+ }
+ }
+ return nil, fmt.Errorf("tool %q not found", name)
+}
+
+// insertMissingToolResults adds error results for tool uses that were requested
+// but not included in the message, which can happen in error paths like "out of budget."
+// We only insert these if there were no tool responses at all, since an incorrect
+// number of tool results would be a programmer error. Mutates inputs.
+func (c *Convo) insertMissingToolResults(mr *MessageRequest, msg *Message) {
+ if len(mr.Messages) < 2 {
+ return
+ }
+ prev := mr.Messages[len(mr.Messages)-2]
+ var toolUsePrev int
+ for _, c := range prev.Content {
+ if c.Type == ContentTypeToolUse {
+ toolUsePrev++
+ }
+ }
+ if toolUsePrev == 0 {
+ return
+ }
+ var toolUseCurrent int
+ for _, c := range msg.Content {
+ if c.Type == ContentTypeToolResult {
+ toolUseCurrent++
+ }
+ }
+ if toolUseCurrent != 0 {
+ return
+ }
+	var prefix []Content
+	for _, part := range prev.Content {
+		if part.Type != ContentTypeToolUse {
+			continue
+		}
+		prefix = append(prefix, Content{
+			Type:       ContentTypeToolResult,
+			ToolUseID:  part.ID,
+			ToolError:  true,
+			ToolResult: "not executed; retry possible",
+		})
+	}
+	// Splice once, after the loop: doing this per-iteration duplicated earlier results when there were multiple tool_use blocks.
+	msg.Content = append(prefix, msg.Content...)
+	mr.Messages[len(mr.Messages)-1].Content = msg.Content
+	slog.DebugContext(c.Ctx, "inserted missing tool results")
+}
+
+// SendMessage sends a message to Claude.
+// The conversation records (internally) all messages successfully sent and received.
+func (c *Convo) SendMessage(msg Message) (*MessageResponse, error) {
+ mr := c.messageRequest(msg)
+ var lastMessage *Message
+ if c.PromptCaching {
+ lastMessage = &mr.Messages[len(mr.Messages)-1]
+ if len(lastMessage.Content) > 0 {
+ lastMessage.Content[len(lastMessage.Content)-1].CacheControl = json.RawMessage(`{"type":"ephemeral"}`)
+ }
+ }
+ defer func() {
+ if lastMessage == nil {
+ return
+ }
+ if len(lastMessage.Content) > 0 {
+ lastMessage.Content[len(lastMessage.Content)-1].CacheControl = []byte{}
+ }
+ }()
+ c.insertMissingToolResults(mr, &msg)
+ c.Listener.OnRequest(c.Ctx, c, &msg)
+
+ startTime := time.Now()
+ resp, err := createMessage(c.Ctx, c.HTTPC, c.URL, c.APIKey, mr)
+ if resp != nil {
+ resp.StartTime = &startTime
+ endTime := time.Now()
+ resp.EndTime = &endTime
+ }
+
+ if err != nil {
+ return nil, err
+ }
+ c.messages = append(c.messages, msg, resp.ToMessage())
+ // Propagate usage to all ancestors (including us).
+ for x := c; x != nil; x = x.Parent {
+ x.usage.AddResponse(resp)
+ }
+ c.Listener.OnResponse(c.Ctx, c, resp)
+ return resp, err
+}
+
+type toolCallInfoKeyType string
+
+var toolCallInfoKey toolCallInfoKeyType
+
+type ToolCallInfo struct {
+ ToolUseID string
+ Convo *Convo
+}
+
+func ToolCallInfoFromContext(ctx context.Context) ToolCallInfo {
+ v := ctx.Value(toolCallInfoKey)
+ i, _ := v.(ToolCallInfo)
+ return i
+}
+
+func (c *Convo) ToolResultCancelContents(resp *MessageResponse) ([]Content, error) {
+ if resp.StopReason != StopReasonToolUse {
+ return nil, nil
+ }
+ var toolResults []Content
+
+ for _, part := range resp.Content {
+ if part.Type != ContentTypeToolUse {
+ continue
+ }
+ c.incrementToolUse(part.ToolName)
+
+ content := Content{
+ Type: ContentTypeToolResult,
+ ToolUseID: part.ID,
+ }
+
+ content.ToolError = true
+		content.ToolResult = "user canceled this tool_use"
+ toolResults = append(toolResults, content)
+ }
+ return toolResults, nil
+}
+
+func (c *Convo) CancelToolUse(toolUseID string, err error) error {
+ c.muToolUseCancel.Lock()
+ defer c.muToolUseCancel.Unlock()
+ cancel, ok := c.toolUseCancel[toolUseID]
+ if !ok {
+ return fmt.Errorf("cannot cancel %s: no cancel function registered for this tool_use_id. All I have is %+v", toolUseID, c.toolUseCancel)
+ }
+ delete(c.toolUseCancel, toolUseID)
+ cancel(err)
+ return nil
+}
+
+func (c *Convo) newToolUseContext(ctx context.Context, toolUseID string) (context.Context, context.CancelFunc) {
+ c.muToolUseCancel.Lock()
+ defer c.muToolUseCancel.Unlock()
+ ctx, cancel := context.WithCancelCause(ctx)
+ c.toolUseCancel[toolUseID] = cancel
+ return ctx, func() { c.CancelToolUse(toolUseID, nil) }
+}
+
+// ToolResultContents runs all tool uses requested by the response and returns their results.
+// Cancelling ctx will cancel any running tool calls.
+func (c *Convo) ToolResultContents(ctx context.Context, resp *MessageResponse) ([]Content, error) {
+ if resp.StopReason != StopReasonToolUse {
+ return nil, nil
+ }
+ // Extract all tool calls from the response, call the tools, and gather the results.
+ var wg sync.WaitGroup
+ toolResultC := make(chan Content, len(resp.Content))
+ for _, part := range resp.Content {
+ if part.Type != ContentTypeToolUse {
+ continue
+ }
+ c.incrementToolUse(part.ToolName)
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+
+ // Record start time
+ startTime := time.Now()
+
+ content := Content{
+ Type: ContentTypeToolResult,
+ ToolUseID: part.ID,
+ StartTime: &startTime,
+ }
+ sendErr := func(err error) {
+ // Record end time
+ endTime := time.Now()
+ content.EndTime = &endTime
+
+ content.ToolError = true
+ content.ToolResult = err.Error()
+ c.Listener.OnToolResult(ctx, c, part.ToolName, part.ToolInput, content, nil, err)
+ toolResultC <- content
+ }
+ sendRes := func(res string) {
+ // Record end time
+ endTime := time.Now()
+ content.EndTime = &endTime
+
+ content.ToolResult = res
+ c.Listener.OnToolResult(ctx, c, part.ToolName, part.ToolInput, content, &res, nil)
+ toolResultC <- content
+ }
+
+ tool, err := c.findTool(part.ToolName)
+ if err != nil {
+ sendErr(err)
+ return
+ }
+ // Create a new context for just this tool_use call, and register its
+ // cancel function so that it can be canceled individually.
+ toolUseCtx, cancel := c.newToolUseContext(ctx, part.ID)
+ defer cancel()
+ // TODO: move this into newToolUseContext?
+ toolUseCtx = context.WithValue(toolUseCtx, toolCallInfoKey, ToolCallInfo{ToolUseID: part.ID, Convo: c})
+ toolResult, err := tool.Run(toolUseCtx, part.ToolInput)
+ if errors.Is(err, ErrDoNotRespond) {
+ return
+ }
+ if toolUseCtx.Err() != nil {
+ sendErr(context.Cause(toolUseCtx))
+ return
+ }
+
+ if err != nil {
+ sendErr(err)
+ return
+ }
+ sendRes(toolResult)
+ }()
+ }
+ wg.Wait()
+ close(toolResultC)
+ var toolResults []Content
+ for toolResult := range toolResultC {
+ toolResults = append(toolResults, toolResult)
+ }
+ if ctx.Err() != nil {
+ return nil, ctx.Err()
+ }
+ return toolResults, nil
+}
+
+func (c *Convo) incrementToolUse(name string) {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ c.usage.ToolUses[name]++
+}
+
+// ContentsAttr returns contents as a slog.Attr.
+// It is meant for logging.
+func ContentsAttr(contents []Content) slog.Attr {
+ var contentAttrs []any // slog.Attr
+ for _, content := range contents {
+ var attrs []any // slog.Attr
+ switch content.Type {
+ case ContentTypeText:
+ attrs = append(attrs, slog.String("text", content.Text))
+ case ContentTypeToolUse:
+ attrs = append(attrs, slog.String("tool_name", content.ToolName))
+ attrs = append(attrs, slog.String("tool_input", string(content.ToolInput)))
+ case ContentTypeToolResult:
+ attrs = append(attrs, slog.String("tool_result", content.ToolResult))
+ attrs = append(attrs, slog.Bool("tool_error", content.ToolError))
+ case ContentTypeThinking:
+ attrs = append(attrs, slog.String("thinking", content.Text))
+ default:
+ attrs = append(attrs, slog.String("unknown_content_type", content.Type))
+ attrs = append(attrs, slog.Any("text", content)) // just log it all raw, better to have too much than not enough
+ }
+ contentAttrs = append(contentAttrs, slog.Group(content.ID, attrs...))
+ }
+ return slog.Group("contents", contentAttrs...)
+}
+
+// MustSchema validates that schema is a valid JSON schema and returns it as a json.RawMessage.
+// It panics if the schema is invalid.
+func MustSchema(schema string) json.RawMessage {
+ // TODO: validate schema, for now just make sure it's valid JSON
+ schema = strings.TrimSpace(schema)
+ bytes := []byte(schema)
+ if !json.Valid(bytes) {
+ panic("invalid JSON schema: " + schema)
+ }
+ return json.RawMessage(bytes)
+}
+
+// cents per million tokens
+// (not dollars because i'm twitchy about using floats for money)
+type centsPer1MTokens struct {
+ Input uint64
+ Output uint64
+ CacheRead uint64
+ CacheCreation uint64
+}
+
+// https://www.anthropic.com/pricing#anthropic-api
+var modelCost = map[string]centsPer1MTokens{
+ Claude37Sonnet: {
+ Input: 300, // $3
+ Output: 1500, // $15
+ CacheRead: 30, // $0.30
+ CacheCreation: 375, // $3.75
+ },
+ Claude35Haiku: {
+ Input: 80, // $0.80
+ Output: 400, // $4.00
+ CacheRead: 8, // $0.08
+ CacheCreation: 100, // $1.00
+ },
+ Claude35Sonnet: {
+ Input: 300, // $3
+ Output: 1500, // $15
+ CacheRead: 30, // $0.30
+ CacheCreation: 375, // $3.75
+ },
+}
+
+// TotalDollars returns the total cost to obtain this response, in dollars.
+func (mr *MessageResponse) TotalDollars() float64 {
+ cpm, ok := modelCost[mr.Model]
+ if !ok {
+ panic(fmt.Sprintf("no pricing info for model: %s", mr.Model))
+ }
+ use := mr.Usage
+ megaCents := use.InputTokens*cpm.Input +
+ use.OutputTokens*cpm.Output +
+ use.CacheReadInputTokens*cpm.CacheRead +
+ use.CacheCreationInputTokens*cpm.CacheCreation
+ cents := float64(megaCents) / 1_000_000.0
+ return cents / 100.0
+}
+
+func newUsage() *CumulativeUsage {
+ return &CumulativeUsage{ToolUses: make(map[string]int), StartTime: time.Now()}
+}
+
+func newUsageWithSharedToolUses(parent *CumulativeUsage) *CumulativeUsage {
+ return &CumulativeUsage{ToolUses: parent.ToolUses, StartTime: time.Now()}
+}
+
+// CumulativeUsage represents cumulative usage across a Convo, including all sub-conversations.
+type CumulativeUsage struct {
+ StartTime time.Time `json:"start_time"`
+ Responses uint64 `json:"messages"` // count of responses
+ InputTokens uint64 `json:"input_tokens"`
+ OutputTokens uint64 `json:"output_tokens"`
+ CacheReadInputTokens uint64 `json:"cache_read_input_tokens"`
+ CacheCreationInputTokens uint64 `json:"cache_creation_input_tokens"`
+ TotalCostUSD float64 `json:"total_cost_usd"`
+ ToolUses map[string]int `json:"tool_uses"` // tool name -> number of uses
+}
+
+func (u *CumulativeUsage) Clone() CumulativeUsage {
+ v := *u
+ v.ToolUses = maps.Clone(u.ToolUses)
+ return v
+}
+
+func (c *Convo) CumulativeUsage() CumulativeUsage {
+ if c == nil {
+ return CumulativeUsage{}
+ }
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ return c.usage.Clone()
+}
+
+func (u *CumulativeUsage) WallTime() time.Duration {
+ return time.Since(u.StartTime)
+}
+
+func (u *CumulativeUsage) DollarsPerHour() float64 {
+ hours := u.WallTime().Hours()
+ if hours == 0 {
+ return 0
+ }
+ return u.TotalCostUSD / hours
+}
+
+func (u *CumulativeUsage) AddResponse(resp *MessageResponse) {
+ usage := resp.Usage
+ u.Responses++
+ u.InputTokens += usage.InputTokens
+ u.OutputTokens += usage.OutputTokens
+ u.CacheReadInputTokens += usage.CacheReadInputTokens
+ u.CacheCreationInputTokens += usage.CacheCreationInputTokens
+ u.TotalCostUSD += resp.TotalDollars()
+}
+
+// Attr returns the cumulative usage as a slog.Attr with key "usage".
+func (u CumulativeUsage) Attr() slog.Attr {
+ elapsed := time.Since(u.StartTime)
+ return slog.Group("usage",
+ slog.Duration("wall_time", elapsed),
+ slog.Uint64("responses", u.Responses),
+ slog.Uint64("input_tokens", u.InputTokens),
+ slog.Uint64("output_tokens", u.OutputTokens),
+ slog.Uint64("cache_read_input_tokens", u.CacheReadInputTokens),
+ slog.Uint64("cache_creation_input_tokens", u.CacheCreationInputTokens),
+ slog.Float64("total_cost_usd", u.TotalCostUSD),
+ slog.Float64("dollars_per_hour", u.TotalCostUSD/elapsed.Hours()),
+ slog.Any("tool_uses", maps.Clone(u.ToolUses)),
+ )
+}
+
+// A Budget represents the maximum amount of resources that may be spent on a conversation.
+// Note that the default (zero) budget is unlimited.
+type Budget struct {
+ MaxResponses uint64 // if > 0, max number of iterations (=responses)
+ MaxDollars float64 // if > 0, max dollars that may be spent
+ MaxWallTime time.Duration // if > 0, max wall time that may be spent
+}
+
+// OverBudget returns an error if the convo (or any of its parents) has exceeded its budget.
+// TODO: document parent vs sub budgets, multiple errors, etc, once we know the desired behavior.
+func (c *Convo) OverBudget() error {
+ for x := c; x != nil; x = x.Parent {
+ if err := x.overBudget(); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// ResetBudget sets the budget to the passed in budget and
+// adjusts it by what's been used so far.
+func (c *Convo) ResetBudget(budget Budget) {
+ c.Budget = budget
+ if c.Budget.MaxDollars > 0 {
+ c.Budget.MaxDollars += c.CumulativeUsage().TotalCostUSD
+ }
+ if c.Budget.MaxResponses > 0 {
+ c.Budget.MaxResponses += c.CumulativeUsage().Responses
+ }
+ if c.Budget.MaxWallTime > 0 {
+ c.Budget.MaxWallTime += c.usage.WallTime()
+ }
+}
+
+func (c *Convo) overBudget() error {
+ usage := c.CumulativeUsage()
+ // TODO: stop before we exceed the budget instead of after?
+ // Top priority is money, then time, then response count.
+ var err error
+ cont := "Continuing to chat will reset the budget."
+ if c.Budget.MaxDollars > 0 && usage.TotalCostUSD >= c.Budget.MaxDollars {
+ err = errors.Join(err, fmt.Errorf("$%.2f spent, budget is $%.2f. %s", usage.TotalCostUSD, c.Budget.MaxDollars, cont))
+ }
+ if c.Budget.MaxWallTime > 0 && usage.WallTime() >= c.Budget.MaxWallTime {
+ err = errors.Join(err, fmt.Errorf("%v elapsed, budget is %v. %s", usage.WallTime().Truncate(time.Second), c.Budget.MaxWallTime.Truncate(time.Second), cont))
+ }
+ if c.Budget.MaxResponses > 0 && usage.Responses >= c.Budget.MaxResponses {
+ err = errors.Join(err, fmt.Errorf("%d responses received, budget is %d. %s", usage.Responses, c.Budget.MaxResponses, cont))
+ }
+ return err
+}
diff --git a/ant/ant_test.go b/ant/ant_test.go
new file mode 100644
index 0000000..6556e9e
--- /dev/null
+++ b/ant/ant_test.go
@@ -0,0 +1,220 @@
+package ant
+
+import (
+ "context"
+ "math"
+ "net/http"
+ "os"
+ "strings"
+ "testing"
+
+ "sketch.dev/httprr"
+)
+
+// TestBasicConvo replays a recorded two-turn conversation (via httprr)
+// and checks that the model retains context across turns: a name given
+// in the first message must appear in the answer to the second.
+func TestBasicConvo(t *testing.T) {
+	ctx := context.Background()
+	rr, err := httprr.Open("testdata/basic_convo.httprr", http.DefaultTransport)
+	if err != nil {
+		t.Fatal(err)
+	}
+	// Strip the API key before requests are matched/recorded so the
+	// trace file never contains credentials.
+	rr.ScrubReq(func(req *http.Request) error {
+		req.Header.Del("x-api-key")
+		return nil
+	})
+
+	convo := NewConvo(ctx, os.Getenv("ANTHROPIC_API_KEY"))
+	convo.HTTPC = rr.Client()
+
+	const name = "Cornelius"
+	res, err := convo.SendUserTextMessage("Hi, my name is " + name)
+	if err != nil {
+		t.Fatal(err)
+	}
+	for _, part := range res.Content {
+		t.Logf("%s", part.Text)
+	}
+	res, err = convo.SendUserTextMessage("What is my name?")
+	if err != nil {
+		t.Fatal(err)
+	}
+	// Concatenate all content parts; the name may appear in any of them.
+	got := ""
+	for _, part := range res.Content {
+		got += part.Text
+	}
+	if !strings.Contains(got, name) {
+		t.Errorf("model does not know the given name %s: %q", name, got)
+	}
+}
+
+// TestCalculateCostFromTokens tests the calculateCostFromTokens function
+// by building a MessageResponse with various token-count combinations and
+// comparing TotalDollars against hand-computed expected costs (with a small
+// float tolerance).
+func TestCalculateCostFromTokens(t *testing.T) {
+	tests := []struct {
+		name                     string
+		model                    string
+		inputTokens              uint64
+		outputTokens             uint64
+		cacheReadInputTokens     uint64
+		cacheCreationInputTokens uint64
+		want                     float64
+	}{
+		{
+			name:                     "Zero tokens",
+			model:                    Claude37Sonnet,
+			inputTokens:              0,
+			outputTokens:             0,
+			cacheReadInputTokens:     0,
+			cacheCreationInputTokens: 0,
+			want:                     0,
+		},
+		{
+			name:                     "1000 input tokens, 500 output tokens",
+			model:                    Claude37Sonnet,
+			inputTokens:              1000,
+			outputTokens:             500,
+			cacheReadInputTokens:     0,
+			cacheCreationInputTokens: 0,
+			want:                     0.0105,
+		},
+		{
+			name:                     "10000 input tokens, 5000 output tokens",
+			model:                    Claude37Sonnet,
+			inputTokens:              10000,
+			outputTokens:             5000,
+			cacheReadInputTokens:     0,
+			cacheCreationInputTokens: 0,
+			want:                     0.105,
+		},
+		{
+			name:                     "With cache read tokens",
+			model:                    Claude37Sonnet,
+			inputTokens:              1000,
+			outputTokens:             500,
+			cacheReadInputTokens:     2000,
+			cacheCreationInputTokens: 0,
+			want:                     0.0111,
+		},
+		{
+			name:                     "With cache creation tokens",
+			model:                    Claude37Sonnet,
+			inputTokens:              1000,
+			outputTokens:             500,
+			cacheReadInputTokens:     0,
+			cacheCreationInputTokens: 1500,
+			want:                     0.016125,
+		},
+		{
+			name:                     "With all token types",
+			model:                    Claude37Sonnet,
+			inputTokens:              1000,
+			outputTokens:             500,
+			cacheReadInputTokens:     2000,
+			cacheCreationInputTokens: 1500,
+			want:                     0.016725,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			usage := Usage{
+				InputTokens:              tt.inputTokens,
+				OutputTokens:             tt.outputTokens,
+				CacheReadInputTokens:     tt.cacheReadInputTokens,
+				CacheCreationInputTokens: tt.cacheCreationInputTokens,
+			}
+			mr := MessageResponse{
+				Model: tt.model,
+				Usage: usage,
+			}
+			totalCost := mr.TotalDollars()
+			// Compare with a tolerance to avoid float rounding flakiness.
+			if math.Abs(totalCost-tt.want) > 0.0001 {
+				t.Errorf("totalCost = %v, want %v", totalCost, tt.want)
+			}
+		})
+	}
+}
+
+// TestCancelToolUse tests the CancelToolUse function of the Convo struct.
+// It verifies that cancelling a registered tool use invokes the stored
+// cancel function with the supplied error and removes the entry from the
+// map, and that cancelling an unknown tool-use ID returns an error.
+func TestCancelToolUse(t *testing.T) {
+	tests := []struct {
+		name         string
+		setupToolUse bool
+		toolUseID    string
+		cancelErr    error
+		expectError  bool
+		expectCancel bool
+	}{
+		{
+			name:         "Cancel existing tool use",
+			setupToolUse: true,
+			toolUseID:    "tool123",
+			cancelErr:    nil,
+			expectError:  false,
+			expectCancel: true,
+		},
+		{
+			name:         "Cancel existing tool use with error",
+			setupToolUse: true,
+			toolUseID:    "tool456",
+			cancelErr:    context.Canceled,
+			expectError:  false,
+			expectCancel: true,
+		},
+		{
+			name:         "Cancel non-existent tool use",
+			setupToolUse: false,
+			toolUseID:    "tool789",
+			cancelErr:    nil,
+			expectError:  true,
+			expectCancel: false,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			convo := NewConvo(context.Background(), "")
+
+			var cancelCalled bool
+			var cancelledWithErr error
+
+			if tt.setupToolUse {
+				// Setup a mock cancel function to track calls
+				mockCancel := func(err error) {
+					cancelCalled = true
+					cancelledWithErr = err
+				}
+
+				convo.muToolUseCancel.Lock()
+				convo.toolUseCancel[tt.toolUseID] = mockCancel
+				convo.muToolUseCancel.Unlock()
+			}
+
+			err := convo.CancelToolUse(tt.toolUseID, tt.cancelErr)
+
+			// Check if we got the expected error state
+			if (err != nil) != tt.expectError {
+				t.Errorf("CancelToolUse() error = %v, expectError %v", err, tt.expectError)
+			}
+
+			// Check if the cancel function was called as expected
+			if cancelCalled != tt.expectCancel {
+				t.Errorf("Cancel function called = %v, expectCancel %v", cancelCalled, tt.expectCancel)
+			}
+
+			// If we expected the cancel to be called, verify it was called with the right error
+			if tt.expectCancel && cancelledWithErr != tt.cancelErr {
+				t.Errorf("Cancel function called with error = %v, expected %v", cancelledWithErr, tt.cancelErr)
+			}
+
+			// Verify the toolUseID was removed from the map if it was initially added
+			if tt.setupToolUse {
+				convo.muToolUseCancel.Lock()
+				_, exists := convo.toolUseCancel[tt.toolUseID]
+				convo.muToolUseCancel.Unlock()
+
+				if exists {
+					t.Errorf("toolUseID %s still exists in the map after cancellation", tt.toolUseID)
+				}
+			}
+		})
+	}
+}
diff --git a/ant/testdata/basic_convo.httprr b/ant/testdata/basic_convo.httprr
new file mode 100644
index 0000000..663de8d
--- /dev/null
+++ b/ant/testdata/basic_convo.httprr
@@ -0,0 +1,116 @@
+httprr trace v1
+457 1329
+POST https://api.anthropic.com/v1/messages HTTP/1.1
+Host: api.anthropic.com
+User-Agent: Go-http-client/1.1
+Content-Length: 261
+Anthropic-Version: 2023-06-01
+Content-Type: application/json
+
+{
+ "model": "claude-3-7-sonnet-20250219",
+ "messages": [
+ {
+ "role": "user",
+ "content": [
+ {
+ "type": "text",
+ "text": "Hi, my name is Cornelius",
+ "cache_control": {
+ "type": "ephemeral"
+ }
+ }
+ ]
+ }
+ ],
+ "max_tokens": 8192
+}HTTP/2.0 200 OK
+Anthropic-Organization-Id: 3c473a21-7208-450a-a9f8-80aebda45c1b
+Anthropic-Ratelimit-Input-Tokens-Limit: 200000
+Anthropic-Ratelimit-Input-Tokens-Remaining: 200000
+Anthropic-Ratelimit-Input-Tokens-Reset: 2025-03-11T17:45:06Z
+Anthropic-Ratelimit-Output-Tokens-Limit: 80000
+Anthropic-Ratelimit-Output-Tokens-Remaining: 79000
+Anthropic-Ratelimit-Output-Tokens-Reset: 2025-03-11T17:45:07Z
+Anthropic-Ratelimit-Requests-Limit: 4000
+Anthropic-Ratelimit-Requests-Remaining: 3999
+Anthropic-Ratelimit-Requests-Reset: 2025-03-11T17:45:05Z
+Anthropic-Ratelimit-Tokens-Limit: 280000
+Anthropic-Ratelimit-Tokens-Remaining: 279000
+Anthropic-Ratelimit-Tokens-Reset: 2025-03-11T17:45:06Z
+Cf-Cache-Status: DYNAMIC
+Cf-Ray: 91ecdd10fdc3f97f-SJC
+Content-Type: application/json
+Date: Tue, 11 Mar 2025 17:45:07 GMT
+Request-Id: req_01LBtxMdNzxDcDVPGJSh7giv
+Server: cloudflare
+Via: 1.1 google
+X-Robots-Tag: none
+
+{"id":"msg_01S1uUyUsTaKPBKuDUGGX8J2","type":"message","role":"assistant","model":"claude-3-7-sonnet-20250219","content":[{"type":"text","text":"Hello, Cornelius! It's nice to meet you. How are you doing today? Is there something I can help you with?"}],"stop_reason":"end_turn","stop_sequence":null,"usage":{"input_tokens":15,"cache_creation_input_tokens":0,"cache_read_input_tokens":0,"output_tokens":31}}779 1286
+POST https://api.anthropic.com/v1/messages HTTP/1.1
+Host: api.anthropic.com
+User-Agent: Go-http-client/1.1
+Content-Length: 583
+Anthropic-Version: 2023-06-01
+Content-Type: application/json
+
+{
+ "model": "claude-3-7-sonnet-20250219",
+ "messages": [
+ {
+ "role": "user",
+ "content": [
+ {
+ "type": "text",
+ "text": "Hi, my name is Cornelius"
+ }
+ ]
+ },
+ {
+ "role": "assistant",
+ "content": [
+ {
+ "type": "text",
+ "text": "Hello, Cornelius! It's nice to meet you. How are you doing today? Is there something I can help you with?"
+ }
+ ]
+ },
+ {
+ "role": "user",
+ "content": [
+ {
+ "type": "text",
+ "text": "What is my name?",
+ "cache_control": {
+ "type": "ephemeral"
+ }
+ }
+ ]
+ }
+ ],
+ "max_tokens": 8192
+}HTTP/2.0 200 OK
+Anthropic-Organization-Id: 3c473a21-7208-450a-a9f8-80aebda45c1b
+Anthropic-Ratelimit-Input-Tokens-Limit: 200000
+Anthropic-Ratelimit-Input-Tokens-Remaining: 200000
+Anthropic-Ratelimit-Input-Tokens-Reset: 2025-03-11T17:45:07Z
+Anthropic-Ratelimit-Output-Tokens-Limit: 80000
+Anthropic-Ratelimit-Output-Tokens-Remaining: 80000
+Anthropic-Ratelimit-Output-Tokens-Reset: 2025-03-11T17:45:07Z
+Anthropic-Ratelimit-Requests-Limit: 4000
+Anthropic-Ratelimit-Requests-Remaining: 3999
+Anthropic-Ratelimit-Requests-Reset: 2025-03-11T17:45:07Z
+Anthropic-Ratelimit-Tokens-Limit: 280000
+Anthropic-Ratelimit-Tokens-Remaining: 280000
+Anthropic-Ratelimit-Tokens-Reset: 2025-03-11T17:45:07Z
+Cf-Cache-Status: DYNAMIC
+Cf-Ray: 91ecdd1ae9a6f97f-SJC
+Content-Type: application/json
+Date: Tue, 11 Mar 2025 17:45:07 GMT
+Request-Id: req_01MBf3RWXNfQgwhVRwwkBYSn
+Server: cloudflare
+Via: 1.1 google
+X-Robots-Tag: none
+
+{"id":"msg_01FGz6DeWeDpspJG8cuxyVE9","type":"message","role":"assistant","model":"claude-3-7-sonnet-20250219","content":[{"type":"text","text":"Your name is Cornelius, as you mentioned in your introduction."}],"stop_reason":"end_turn","stop_sequence":null,"usage":{"input_tokens":54,"cache_creation_input_tokens":0,"cache_read_input_tokens":0,"output_tokens":17}}
\ No newline at end of file
diff --git a/claudetool/bash.go b/claudetool/bash.go
new file mode 100644
index 0000000..d76d7f1
--- /dev/null
+++ b/claudetool/bash.go
@@ -0,0 +1,163 @@
+package claudetool
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "fmt"
+ "math"
+ "os/exec"
+ "strings"
+ "syscall"
+ "time"
+
+ "sketch.dev/ant"
+ "sketch.dev/claudetool/bashkit"
+)
+
+// The Bash tool executes shell commands with bash -c and optional timeout
+// Bash is the tool spec for shell execution: it runs commands with
+// `bash -c` and an optional timeout, returning combined stdout and stderr.
+var Bash = &ant.Tool{
+	Name:        bashName,
+	Description: strings.TrimSpace(bashDescription),
+	InputSchema: ant.MustSchema(bashInputSchema),
+	Run:         BashRun,
+}
+
+const (
+	// bashName is the name under which the model invokes this tool.
+	bashName = "bash"
+	// bashDescription is shown to the model; it advertises the
+	// executables pre-installed in the execution environment.
+	bashDescription = `
+Executes a shell command using bash -c with an optional timeout, returning combined stdout and stderr.
+
+Executables pre-installed in this environment include:
+- standard unix tools
+- go
+- git
+- rg
+- jq
+- gopls
+- sqlite
+- fzf
+- gh
+- python3
+`
+	// bashInputSchema is the JSON schema for the tool's input.
+	// If you modify this, update the termui template for prettier rendering.
+	bashInputSchema = `
+{
+  "type": "object",
+  "required": ["command"],
+  "properties": {
+    "command": {
+      "type": "string",
+      "description": "Shell script to execute"
+    },
+    "timeout": {
+      "type": "string",
+      "description": "Timeout as a Go duration string, defaults to '1m'"
+    }
+  }
+}
+`
+)
+
+// bashInput is the JSON input payload for the bash tool,
+// matching bashInputSchema.
+type bashInput struct {
+	Command string `json:"command"` // shell script to execute with bash -c
+	Timeout string `json:"timeout,omitempty"` // Go duration string; see timeout()
+}
+
+// timeout returns the requested timeout, falling back to the 1 minute
+// default when the field is empty, unparseable, or non-positive.
+func (i *bashInput) timeout() time.Duration {
+	dur, err := time.ParseDuration(i.Timeout)
+	if err != nil || dur <= 0 {
+		// A zero or negative duration would expire the command's context
+		// immediately (killing it before it runs); treat such values like
+		// an unset field rather than honoring them.
+		return 1 * time.Minute
+	}
+	return dur
+}
+
+// BashRun is the Run entry point for the Bash tool. It decodes the JSON
+// input, applies a best-effort policy check via bashkit.Check, and then
+// executes the command, returning its combined output.
+func BashRun(ctx context.Context, m json.RawMessage) (string, error) {
+	var req bashInput
+	if err := json.Unmarshal(m, &req); err != nil {
+		return "", fmt.Errorf("failed to unmarshal bash command input: %w", err)
+	}
+	// do a quick permissions check (NOT a security barrier)
+	err := bashkit.Check(req.Command)
+	if err != nil {
+		return "", err
+	}
+	out, execErr := executeBash(ctx, req)
+	if execErr == nil {
+		return out, nil
+	}
+	return "", execErr
+}
+
+const maxBashOutputLength = 131072
+
+// executeBash runs req.Command with `bash -c` under a timeout, returning
+// combined stdout+stderr. On timeout the entire process group is killed,
+// not just the bash process, so that children cannot linger.
+func executeBash(ctx context.Context, req bashInput) (string, error) {
+	execCtx, cancel := context.WithTimeout(ctx, req.timeout())
+	defer cancel()
+
+	// Can't do the simple thing and call CombinedOutput because of the need to kill the process group.
+	cmd := exec.CommandContext(execCtx, "bash", "-c", req.Command)
+	cmd.Dir = WorkingDir(ctx)
+	// Setpgid puts bash (and its children) in a fresh process group so we
+	// can signal the whole group with a negative pid below.
+	cmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true}
+
+	var output bytes.Buffer
+	cmd.Stdin = nil
+	cmd.Stdout = &output
+	cmd.Stderr = &output
+	if err := cmd.Start(); err != nil {
+		return "", fmt.Errorf("command failed: %w", err)
+	}
+	proc := cmd.Process
+	// The goroutine waits for either timeout (kill the group) or normal
+	// completion (signaled via done, closed after Wait returns).
+	done := make(chan struct{})
+	go func() {
+		select {
+		case <-execCtx.Done():
+			if execCtx.Err() == context.DeadlineExceeded && proc != nil {
+				// Kill the entire process group.
+				syscall.Kill(-proc.Pid, syscall.SIGKILL)
+			}
+		case <-done:
+		}
+	}()
+
+	err := cmd.Wait()
+	close(done)
+
+	if execCtx.Err() == context.DeadlineExceeded {
+		return "", fmt.Errorf("command timed out after %s", req.timeout())
+	}
+	// Over-long output is replaced by a summary plus the first 1 KiB.
+	longOutput := output.Len() > maxBashOutputLength
+	var outstr string
+	if longOutput {
+		outstr = fmt.Sprintf("output too long: got %v, max is %v\ninitial bytes of output:\n%s",
+			humanizeBytes(output.Len()), humanizeBytes(maxBashOutputLength),
+			output.Bytes()[:1024],
+		)
+	} else {
+		outstr = output.String()
+	}
+
+	if err != nil {
+		return "", fmt.Errorf("command failed: %w\n%s", err, outstr)
+	}
+
+	if longOutput {
+		return "", fmt.Errorf("%s", outstr)
+	}
+
+	return output.String(), nil
+}
+
+// humanizeBytes renders a byte count as a short human-readable string
+// (B, kB, MB), rounding to the nearest whole unit. Values of 1 GiB or
+// more are collapsed to a fixed string.
+func humanizeBytes(bytes int) string {
+	switch {
+	case bytes < 4*1024:
+		// Small values are shown exactly, in bytes.
+		return fmt.Sprintf("%dB", bytes)
+	case bytes < 1024*1024:
+		kb := int(math.Round(float64(bytes) / 1024.0))
+		return fmt.Sprintf("%dkB", kb)
+	case bytes < 1024*1024*1024:
+		mb := int(math.Round(float64(bytes) / (1024.0 * 1024.0)))
+		return fmt.Sprintf("%dMB", mb)
+	}
+	return "more than 1GB"
+}
diff --git a/claudetool/bash_test.go b/claudetool/bash_test.go
new file mode 100644
index 0000000..8fe4b5c
--- /dev/null
+++ b/claudetool/bash_test.go
@@ -0,0 +1,186 @@
+package claudetool
+
+import (
+ "context"
+ "encoding/json"
+ "strings"
+ "testing"
+ "time"
+)
+
+// TestBashRun exercises the BashRun tool entry point end-to-end:
+// basic commands, the timeout parameter, timeouts firing, failing
+// commands, and malformed JSON input.
+func TestBashRun(t *testing.T) {
+	// Test basic functionality
+	t.Run("Basic Command", func(t *testing.T) {
+		input := json.RawMessage(`{"command":"echo 'Hello, world!'"}`)
+
+		result, err := BashRun(context.Background(), input)
+		if err != nil {
+			t.Fatalf("Unexpected error: %v", err)
+		}
+
+		expected := "Hello, world!\n"
+		if result != expected {
+			t.Errorf("Expected %q, got %q", expected, result)
+		}
+	})
+
+	// Test with arguments
+	t.Run("Command With Arguments", func(t *testing.T) {
+		input := json.RawMessage(`{"command":"echo -n foo && echo -n bar"}`)
+
+		result, err := BashRun(context.Background(), input)
+		if err != nil {
+			t.Fatalf("Unexpected error: %v", err)
+		}
+
+		expected := "foobar"
+		if result != expected {
+			t.Errorf("Expected %q, got %q", expected, result)
+		}
+	})
+
+	// Test with timeout parameter
+	t.Run("With Timeout", func(t *testing.T) {
+		inputObj := struct {
+			Command string `json:"command"`
+			Timeout string `json:"timeout"`
+		}{
+			Command: "sleep 0.1 && echo 'Completed'",
+			Timeout: "5s",
+		}
+		inputJSON, err := json.Marshal(inputObj)
+		if err != nil {
+			t.Fatalf("Failed to marshal input: %v", err)
+		}
+
+		result, err := BashRun(context.Background(), inputJSON)
+		if err != nil {
+			t.Fatalf("Unexpected error: %v", err)
+		}
+
+		expected := "Completed\n"
+		if result != expected {
+			t.Errorf("Expected %q, got %q", expected, result)
+		}
+	})
+
+	// Test command timeout: the command sleeps longer than its timeout,
+	// so BashRun must return a "timed out" error.
+	t.Run("Command Timeout", func(t *testing.T) {
+		inputObj := struct {
+			Command string `json:"command"`
+			Timeout string `json:"timeout"`
+		}{
+			Command: "sleep 0.5 && echo 'Should not see this'",
+			Timeout: "100ms",
+		}
+		inputJSON, err := json.Marshal(inputObj)
+		if err != nil {
+			t.Fatalf("Failed to marshal input: %v", err)
+		}
+
+		_, err = BashRun(context.Background(), inputJSON)
+		if err == nil {
+			t.Errorf("Expected timeout error, got none")
+		} else if !strings.Contains(err.Error(), "timed out") {
+			t.Errorf("Expected timeout error, got: %v", err)
+		}
+	})
+
+	// Test command that fails
+	t.Run("Failed Command", func(t *testing.T) {
+		input := json.RawMessage(`{"command":"exit 1"}`)
+
+		_, err := BashRun(context.Background(), input)
+		if err == nil {
+			t.Errorf("Expected error for failed command, got none")
+		}
+	})
+
+	// Test invalid input
+	t.Run("Invalid JSON Input", func(t *testing.T) {
+		input := json.RawMessage(`{"command":123}`) // Invalid JSON (command must be string)
+
+		_, err := BashRun(context.Background(), input)
+		if err == nil {
+			t.Errorf("Expected error for invalid input, got none")
+		}
+	})
+}
+
+// TestExecuteBash exercises executeBash directly: success, stderr
+// interleaving into combined output, failure with stderr captured in
+// the error, and timeout enforcement (including elapsed-time bounds).
+func TestExecuteBash(t *testing.T) {
+	ctx := context.Background()
+
+	// Test successful command
+	t.Run("Successful Command", func(t *testing.T) {
+		req := bashInput{
+			Command: "echo 'Success'",
+			Timeout: "5s",
+		}
+
+		output, err := executeBash(ctx, req)
+		if err != nil {
+			t.Fatalf("Unexpected error: %v", err)
+		}
+
+		want := "Success\n"
+		if output != want {
+			t.Errorf("Expected %q, got %q", want, output)
+		}
+	})
+
+	// Test command with output to stderr; stdout and stderr share one
+	// buffer, so both appear in order in the returned string.
+	t.Run("Command with stderr", func(t *testing.T) {
+		req := bashInput{
+			Command: "echo 'Error message' >&2 && echo 'Success'",
+			Timeout: "5s",
+		}
+
+		output, err := executeBash(ctx, req)
+		if err != nil {
+			t.Fatalf("Unexpected error: %v", err)
+		}
+
+		want := "Error message\nSuccess\n"
+		if output != want {
+			t.Errorf("Expected %q, got %q", want, output)
+		}
+	})
+
+	// Test command that fails with stderr
+	t.Run("Failed Command with stderr", func(t *testing.T) {
+		req := bashInput{
+			Command: "echo 'Error message' >&2 && exit 1",
+			Timeout: "5s",
+		}
+
+		_, err := executeBash(ctx, req)
+		if err == nil {
+			t.Errorf("Expected error for failed command, got none")
+		} else if !strings.Contains(err.Error(), "Error message") {
+			t.Errorf("Expected stderr in error message, got: %v", err)
+		}
+	})
+
+	// Test timeout
+	t.Run("Command Timeout", func(t *testing.T) {
+		req := bashInput{
+			Command: "sleep 1 && echo 'Should not see this'",
+			Timeout: "100ms",
+		}
+
+		start := time.Now()
+		_, err := executeBash(ctx, req)
+		elapsed := time.Since(start)
+
+		// Command should time out after ~100ms, not wait for full 1 second
+		if elapsed >= 1*time.Second {
+			t.Errorf("Command did not respect timeout, took %v", elapsed)
+		}
+
+		if err == nil {
+			t.Errorf("Expected timeout error, got none")
+		} else if !strings.Contains(err.Error(), "timed out") {
+			t.Errorf("Expected timeout error, got: %v", err)
+		}
+	})
+}
diff --git a/claudetool/bashkit/bashkit.go b/claudetool/bashkit/bashkit.go
new file mode 100644
index 0000000..a56eef0
--- /dev/null
+++ b/claudetool/bashkit/bashkit.go
@@ -0,0 +1,97 @@
+package bashkit
+
+import (
+ "fmt"
+ "strings"
+
+ "mvdan.cc/sh/v3/syntax"
+)
+
+// checks is the list of per-call-expression policy checks applied by
+// Check; each returns a non-nil error to veto the script.
+var checks = []func(*syntax.CallExpr) error{
+	noGitConfigUsernameEmailChanges,
+}
+
+// Check inspects bashScript and returns an error if it ought not be executed.
+// Check DOES NOT PROVIDE SECURITY against malicious actors.
+// It is intended to catch straightforward mistakes in which a model
+// does things despite having been instructed not to do them.
+func Check(bashScript string) error {
+	r := strings.NewReader(bashScript)
+	parser := syntax.NewParser()
+	file, err := parser.Parse(r, "")
+	if err != nil {
+		// Execution will fail, but we'll get a better error message from bash.
+		// Note that if this were security load bearing, this would be a terrible idea:
+		// You could smuggle stuff past Check by exploiting differences in what is considered syntactically valid.
+		// But it is not.
+		return nil
+	}
+
+	// Walk every node; the first check failure is captured in err and
+	// short-circuits the rest of the walk.
+	syntax.Walk(file, func(node syntax.Node) bool {
+		if err != nil {
+			return false
+		}
+		callExpr, ok := node.(*syntax.CallExpr)
+		if !ok {
+			return true
+		}
+		for _, check := range checks {
+			err = check(callExpr)
+			if err != nil {
+				return false
+			}
+		}
+		return true
+	})
+
+	return err
+}
+
+// noGitConfigUsernameEmailChanges checks for git config username/email changes.
+// It uses simple heuristics, and has both false positives and false negatives.
+// Returns a permission-denied error when a change is detected, nil otherwise.
+func noGitConfigUsernameEmailChanges(cmd *syntax.CallExpr) error {
+	if hasGitConfigUsernameEmailChanges(cmd) {
+		return fmt.Errorf("permission denied: changing git config username/email is not allowed, use env vars instead")
+	}
+	return nil
+}
+
+// hasGitConfigUsernameEmailChanges reports whether cmd looks like
+// `git config ... user.name/user.email <value>`. Heuristic: it requires
+// the literal "git" in argv[0], a literal "config" somewhere after it,
+// a literal "user.name"/"user.email" after that, and at least one more
+// argument (the value) — so plain reads like `git config user.name`
+// are not flagged.
+func hasGitConfigUsernameEmailChanges(cmd *syntax.CallExpr) bool {
+	// Need at least: git config <key> <value> minus one — 3 args.
+	if len(cmd.Args) < 3 {
+		return false
+	}
+	if cmd.Args[0].Lit() != "git" {
+		return false
+	}
+
+	configIndex := -1
+	for i, arg := range cmd.Args {
+		if arg.Lit() == "config" {
+			configIndex = i
+			break
+		}
+	}
+
+	// "config" must exist and must not be the final argument.
+	if configIndex < 0 || configIndex == len(cmd.Args)-1 {
+		return false
+	}
+
+	// check for user.name or user.email
+	// NOTE(review): the scan starts at configIndex itself rather than
+	// configIndex+1; harmless here since "config" never equals the keys,
+	// but worth tightening.
+	keyIndex := -1
+	for i, arg := range cmd.Args {
+		if i < configIndex {
+			continue
+		}
+		if arg.Lit() == "user.name" || arg.Lit() == "user.email" {
+			keyIndex = i
+			break
+		}
+	}
+
+	if keyIndex < 0 || keyIndex == len(cmd.Args)-1 {
+		return false
+	}
+
+	// user.name/user.email is followed by a value
+	return true
+}
diff --git a/claudetool/bashkit/bashkit_test.go b/claudetool/bashkit/bashkit_test.go
new file mode 100644
index 0000000..8bcdd8f
--- /dev/null
+++ b/claudetool/bashkit/bashkit_test.go
@@ -0,0 +1,109 @@
+package bashkit
+
+import (
+ "strings"
+ "testing"
+)
+
+// TestCheck runs table-driven cases through bashkit.Check, covering
+// allowed scripts, syntax errors (deliberately not flagged), and the
+// various forms of git config user.name/user.email writes that must
+// be rejected.
+func TestCheck(t *testing.T) {
+	tests := []struct {
+		name     string
+		script   string
+		wantErr  bool
+		errMatch string // string to match in error message, if wantErr is true
+	}{
+		{
+			name:     "valid script",
+			script:   "echo hello world",
+			wantErr:  false,
+			errMatch: "",
+		},
+		{
+			name:     "invalid syntax",
+			script:   "echo 'unterminated string",
+			wantErr:  false, // As per implementation, syntax errors are not flagged
+			errMatch: "",
+		},
+		{
+			name:     "git config user.name",
+			script:   "git config user.name 'John Doe'",
+			wantErr:  true,
+			errMatch: "changing git config username/email is not allowed",
+		},
+		{
+			name:     "git config user.email",
+			script:   "git config user.email 'john@example.com'",
+			wantErr:  true,
+			errMatch: "changing git config username/email is not allowed",
+		},
+		{
+			name:     "git config with flag user.name",
+			script:   "git config --global user.name 'John Doe'",
+			wantErr:  true,
+			errMatch: "changing git config username/email is not allowed",
+		},
+		{
+			name:     "git config with other setting",
+			script:   "git config core.editor vim",
+			wantErr:  false,
+			errMatch: "",
+		},
+		{
+			name:     "git without config",
+			script:   "git commit -m 'Add feature'",
+			wantErr:  false,
+			errMatch: "",
+		},
+		{
+			name:     "multiline script with proper escaped newlines",
+			script:   "echo 'Setting up git...' && git config user.name 'John Doe' && echo 'Done!'",
+			wantErr:  true,
+			errMatch: "changing git config username/email is not allowed",
+		},
+		{
+			name: "multiline script with backticks",
+			script: `echo 'Setting up git...'
+git config user.name 'John Doe'
+echo 'Done!'`,
+			wantErr:  true,
+			errMatch: "changing git config username/email is not allowed",
+		},
+		{
+			name:     "git config with variable",
+			script:   "NAME='John Doe'\ngit config user.name $NAME",
+			wantErr:  true,
+			errMatch: "changing git config username/email is not allowed",
+		},
+		{
+			name:     "only git command",
+			script:   "git",
+			wantErr:  false,
+			errMatch: "",
+		},
+		{
+			name:     "read git config",
+			script:   "git config user.name",
+			wantErr:  false,
+			errMatch: "",
+		},
+		{
+			name:     "commented git config",
+			script:   "# git config user.name 'John Doe'",
+			wantErr:  false,
+			errMatch: "",
+		},
+	}
+
+	for _, tc := range tests {
+		t.Run(tc.name, func(t *testing.T) {
+			err := Check(tc.script)
+			if (err != nil) != tc.wantErr {
+				t.Errorf("Check() error = %v, wantErr %v", err, tc.wantErr)
+				return
+			}
+			if tc.wantErr && err != nil && !strings.Contains(err.Error(), tc.errMatch) {
+				t.Errorf("Check() error message = %v, want containing %v", err, tc.errMatch)
+			}
+		})
+	}
+}
diff --git a/claudetool/codereview.go b/claudetool/codereview.go
new file mode 100644
index 0000000..f305b4b
--- /dev/null
+++ b/claudetool/codereview.go
@@ -0,0 +1,347 @@
+package claudetool
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "log/slog"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "strings"
+)
+
+// A CodeReviewer manages quality checks.
+// It tracks the repo's state at construction time (initialCommit and
+// initialStatus) so later reviews can distinguish pre-existing dirt
+// from changes made during the session.
+type CodeReviewer struct {
+	repoRoot        string
+	initialCommit   string
+	initialStatus   []fileStatus // git status of files at initial commit, absolute paths
+	reviewed        []string     // history of all commits which have been reviewed
+	initialWorktree string       // git worktree at initial commit, absolute path
+}
+
+// NewCodeReviewer constructs a CodeReviewer rooted at repoRoot (which
+// must be the git repo root, verified against findRepoRoot) pinned to
+// initialCommit, and snapshots the repo's dirty/untracked files so they
+// can be excluded from later cleanliness checks.
+func NewCodeReviewer(ctx context.Context, repoRoot, initialCommit string) (*CodeReviewer, error) {
+	r := &CodeReviewer{
+		repoRoot:      repoRoot,
+		initialCommit: initialCommit,
+	}
+	if r.repoRoot == "" {
+		return nil, fmt.Errorf("NewCodeReviewer: repoRoot must be non-empty")
+	}
+	if r.initialCommit == "" {
+		return nil, fmt.Errorf("NewCodeReviewer: initialCommit must be non-empty")
+	}
+	// Confirm that root is in fact the git repo root.
+	root, err := findRepoRoot(r.repoRoot)
+	if err != nil {
+		return nil, err
+	}
+	if root != r.repoRoot {
+		return nil, fmt.Errorf("NewCodeReviewer: repoRoot=%q but git repo root is %q", r.repoRoot, root)
+	}
+
+	// Get an initial list of dirty and untracked files.
+	// We'll filter them out later when deciding whether the worktree is clean.
+	status, err := r.repoStatus(ctx)
+	if err != nil {
+		return nil, err
+	}
+	r.initialStatus = status
+	return r, nil
+}
+
+// Autoformat formats all files changed in HEAD.
+// It returns a list of all files that were formatted.
+// It is best-effort only: every failure is logged and skipped, never fatal.
+func (r *CodeReviewer) Autoformat(ctx context.Context) []string {
+	// Refuse to format if HEAD == r.InitialCommit
+	head, err := r.CurrentCommit(ctx)
+	if err != nil {
+		slog.WarnContext(ctx, "CodeReviewer.Autoformat unable to get current commit", "err", err)
+		return nil
+	}
+	// NOTE(review): the parent lookup runs before the head==initialCommit
+	// check, so a HEAD with no parent (root commit) bails here with a
+	// warning rather than via the dedicated check below; confirm intended.
+	parent, err := r.ResolveCommit(ctx, "HEAD^1")
+	if err != nil {
+		slog.WarnContext(ctx, "CodeReviewer.Autoformat unable to get parent commit", "err", err)
+		return nil
+	}
+	if head == r.initialCommit {
+		slog.WarnContext(ctx, "CodeReviewer.Autoformat refusing to format because HEAD == InitialCommit")
+		return nil
+	}
+	// Retrieve a list of all files changed
+	// NOTE(review): changed files are computed against initialCommit but
+	// the formatter choice below compares against HEAD^1; confirm that
+	// mismatch is intended when multiple commits have been added.
+	// TODO: instead of one git diff --name-only and then N --name-status, do one --name-status.
+	changedFiles, err := r.changedFiles(ctx, r.initialCommit, head)
+	if err != nil {
+		slog.WarnContext(ctx, "CodeReviewer.Autoformat unable to get changed files", "err", err)
+		return nil
+	}
+
+	// General strategy: For all changed files,
+	// run the strictest formatter that passes on the original version.
+	// TODO: add non-Go formatters?
+	// TODO: at a minimum, for common file types, ensure trailing newlines and maybe trim trailing whitespace per line?
+	var fmtFiles []string
+	for _, file := range changedFiles {
+		if !strings.HasSuffix(file, ".go") {
+			continue
+		}
+		fileStatus, err := r.gitFileStatus(ctx, file)
+		if err != nil {
+			slog.WarnContext(ctx, "CodeReviewer.Autoformat unable to get file status", "file", file, "err", err)
+			continue
+		}
+		if fileStatus == "D" { // deleted, nothing to format
+			continue
+		}
+		code, err := r.getFileContentAtCommit(ctx, file, head)
+		if err != nil {
+			slog.WarnContext(ctx, "CodeReviewer.Autoformat unable to get file content at head", "file", file, "err", err)
+			continue
+		}
+		if isAutogeneratedGoFile(code) { // leave autogenerated files alone
+			continue
+		}
+		onDisk, err := os.ReadFile(file)
+		if err != nil {
+			slog.WarnContext(ctx, "CodeReviewer.Autoformat unable to read file", "file", file, "err", err)
+			continue
+		}
+		// Skip files with uncommitted edits so we never clobber work in progress.
+		if !bytes.Equal(code, onDisk) { // file has been modified since HEAD
+			slog.WarnContext(ctx, "CodeReviewer.Autoformat file modified since HEAD", "file", file, "err", err)
+			continue
+		}
+		var formatterToUse string
+		if fileStatus == "A" {
+			formatterToUse = "gofumpt" // newly added, so we can format how we please: use gofumpt
+		} else {
+			prev, err := r.getFileContentAtCommit(ctx, file, parent)
+			if err != nil {
+				slog.WarnContext(ctx, "CodeReviewer.Autoformat unable to get file content at parent", "file", file, "err", err)
+				continue
+			}
+			formatterToUse = r.pickFormatter(ctx, prev) // pick the strictest formatter that passes on the original version
+		}
+
+		// Apply the chosen formatter to the current file
+		newCode := r.runFormatter(ctx, formatterToUse, code)
+		if newCode == nil { // no changes made
+			continue
+		}
+		// write to disk
+		if err := os.WriteFile(file, newCode, 0o600); err != nil {
+			slog.WarnContext(ctx, "CodeReviewer.Autoformat unable to write formatted file", "file", file, "err", err)
+			continue
+		}
+		fmtFiles = append(fmtFiles, file)
+	}
+	return fmtFiles
+}
+
+// RequireNormalGitState checks that the git repo state is pretty normal:
+// no rebase, merge, cherry-pick, revert, or bisect in progress.
+// Best-effort: these state files live in the repository's .git directory;
+// linked worktrees and submodules (where .git is a file, not a directory)
+// are not handled and will simply pass the checks.
+func (r *CodeReviewer) RequireNormalGitState(_ context.Context) error {
+	// Git keeps in-progress operation state under $GIT_DIR (.git/),
+	// not the worktree root, so join against the .git directory.
+	gitDir := filepath.Join(r.repoRoot, ".git")
+	rebaseDirs := []string{"rebase-merge", "rebase-apply"}
+	for _, dir := range rebaseDirs {
+		_, err := os.Stat(filepath.Join(gitDir, dir))
+		if err == nil {
+			return fmt.Errorf("git repo is not clean: rebase in progress")
+		}
+	}
+	filesReason := map[string]string{
+		"MERGE_HEAD":       "merge is in progress",
+		"CHERRY_PICK_HEAD": "cherry-pick is in progress",
+		"REVERT_HEAD":      "revert is in progress",
+		"BISECT_LOG":       "bisect is in progress",
+	}
+	for file, reason := range filesReason {
+		_, err := os.Stat(filepath.Join(gitDir, file))
+		if err == nil {
+			return fmt.Errorf("git repo is not clean: %s", reason)
+		}
+	}
+	return nil
+}
+
+// RequireNoUncommittedChanges returns an error listing any uncommitted
+// changes (staged or not) that were not already present when the
+// CodeReviewer was constructed.
+func (r *CodeReviewer) RequireNoUncommittedChanges(ctx context.Context) error {
+	// Check that there are no uncommitted changes, whether staged or not.
+	// (Changes in r.initialStatus are OK, no other changes are.)
+	statuses, err := r.repoStatus(ctx)
+	if err != nil {
+		return fmt.Errorf("unable to get repo status: %w", err)
+	}
+	uncommitted := new(strings.Builder)
+	for _, status := range statuses {
+		if !r.initialStatusesContainFile(status.Path) {
+			fmt.Fprintf(uncommitted, "%s %s\n", status.Path, status.RawStatus)
+		}
+	}
+	if uncommitted.Len() > 0 {
+		return fmt.Errorf("uncommitted changes in repo, please commit or revert:\n%s", uncommitted.String())
+	}
+	return nil
+}
+
+// initialStatusesContainFile reports whether file (an absolute path)
+// was already dirty or untracked when the CodeReviewer was constructed.
+func (r *CodeReviewer) initialStatusesContainFile(file string) bool {
+	for _, s := range r.initialStatus {
+		if s.Path == file {
+			return true
+		}
+	}
+	return false
+}
+
+// fileStatus is one entry from `git status --porcelain`.
+type fileStatus struct {
+	Path      string // absolute path within the repo
+	RawStatus string // always 2 characters (porcelain XY status codes)
+}
+
+// repoStatus returns the repo's `git status --porcelain` entries,
+// with paths converted to absolute paths.
+func (r *CodeReviewer) repoStatus(ctx context.Context) ([]fileStatus, error) {
+	// Run git status --porcelain, split into lines
+	cmd := exec.CommandContext(ctx, "git", "status", "--porcelain")
+	cmd.Dir = r.repoRoot
+	out, err := cmd.CombinedOutput()
+	if err != nil {
+		return nil, fmt.Errorf("failed to run git status: %w\n%s", err, out)
+	}
+	var statuses []fileStatus
+	for line := range strings.Lines(string(out)) {
+		// strings.Lines yields each line INCLUDING its trailing newline;
+		// strip it so Path never carries a "\n" and compares equal to
+		// paths produced elsewhere (e.g. the trimmed paths in changedFiles).
+		line = strings.TrimSuffix(line, "\n")
+		if len(line) == 0 {
+			continue
+		}
+		if len(line) < 3 {
+			return nil, fmt.Errorf("invalid status line: %s", line)
+		}
+		// Porcelain format: two status characters, one space, then the path.
+		// NOTE(review): renames ("R  old -> new") keep the whole
+		// "old -> new" text as the path; confirm whether that needs
+		// special handling.
+		path := line[3:]
+		status := line[:2]
+		absPath := r.absPath(path)
+		statuses = append(statuses, fileStatus{Path: absPath, RawStatus: status})
+	}
+	return statuses, nil
+}
+
+// CurrentCommit retrieves the current git commit hash (HEAD).
+func (r *CodeReviewer) CurrentCommit(ctx context.Context) (string, error) {
+	return r.ResolveCommit(ctx, "HEAD")
+}
+
+// ResolveCommit resolves ref (e.g. "HEAD", "HEAD^1", a branch name)
+// to a full commit hash via `git rev-parse`.
+func (r *CodeReviewer) ResolveCommit(ctx context.Context, ref string) (string, error) {
+	cmd := exec.CommandContext(ctx, "git", "rev-parse", ref)
+	cmd.Dir = r.repoRoot
+	out, err := cmd.CombinedOutput()
+	if err != nil {
+		return "", fmt.Errorf("failed to get current commit hash: %w\n%s", err, out)
+	}
+	return strings.TrimSpace(string(out)), nil
+}
+
+// absPath converts a repo-relative path to a cleaned absolute path
+// under the repo root.
+func (r *CodeReviewer) absPath(relPath string) string {
+	return filepath.Clean(filepath.Join(r.repoRoot, relPath))
+}
+
+// gitFileStatus returns the status of a file (A for added, M for modified, D for deleted, etc.)
+// between the initial commit and HEAD, as the first character of
+// `git diff --name-status` output (so e.g. a rename reports "R").
+func (r *CodeReviewer) gitFileStatus(ctx context.Context, file string) (string, error) {
+	cmd := exec.CommandContext(ctx, "git", "diff", "--name-status", r.initialCommit, "HEAD", "--", file)
+	cmd.Dir = r.repoRoot
+	out, err := cmd.CombinedOutput()
+	if err != nil {
+		return "", fmt.Errorf("failed to get file status: %w\n%s", err, out)
+	}
+	status := strings.TrimSpace(string(out))
+	if status == "" {
+		return "", fmt.Errorf("no status found for file: %s", file)
+	}
+	return string(status[0]), nil
+}
+
+// getFileContentAtCommit retrieves the content of file (an absolute path)
+// as of the given commit, via `git show commit:relpath`.
+func (r *CodeReviewer) getFileContentAtCommit(ctx context.Context, file, commit string) ([]byte, error) {
+	relFile, err := filepath.Rel(r.repoRoot, file)
+	if err != nil {
+		slog.WarnContext(ctx, "CodeReviewer.getFileContentAtCommit: failed to get relative path", "repo_root", r.repoRoot, "file", file, "err", err)
+		// Fall back to the path as given. (Previously this assigned
+		// file = relFile and then used the invalid Rel result anyway,
+		// guaranteeing a bad `git show` argument on this path.)
+		relFile = file
+	}
+	cmd := exec.CommandContext(ctx, "git", "show", fmt.Sprintf("%s:%s", commit, relFile))
+	cmd.Dir = r.repoRoot
+	out, err := cmd.CombinedOutput()
+	if err != nil {
+		return nil, fmt.Errorf("failed to get file content at commit %s: %w\n%s", commit, err, out)
+	}
+	return out, nil
+}
+
+// runFormatter runs the specified formatter on a file and returns the results.
+// A nil result indicates that the file is unchanged, or that an error occurred.
+// The formatter is invoked with content on stdin and must emit the
+// formatted code on stdout (gofmt/gofumpt/goimports convention).
+func (r *CodeReviewer) runFormatter(ctx context.Context, formatter string, content []byte) []byte {
+	if formatter == "" {
+		return nil // no formatter
+	}
+	// Run the formatter and capture the output
+	cmd := exec.CommandContext(ctx, formatter)
+	cmd.Dir = r.repoRoot
+	cmd.Stdin = bytes.NewReader(content)
+	out, err := cmd.CombinedOutput()
+	if err != nil {
+		// probably a parse error, err on the side of safety
+		return nil
+	}
+	if bytes.Equal(content, out) {
+		return nil // no changes
+	}
+	return out
+}
+
+// formatterWouldChange reports whether a formatter would make changes to the content.
+// If the contents are invalid, it returns false.
+// It works by piping the content to the formatter with the -l flag.
+func (r *CodeReviewer) formatterWouldChange(ctx context.Context, formatter string, content []byte) bool {
+	cmd := exec.CommandContext(ctx, formatter, "-l")
+	cmd.Dir = r.repoRoot
+	cmd.Stdin = bytes.NewReader(content)
+	out, err := cmd.CombinedOutput()
+	if err != nil {
+		// probably a parse error, err on the side of safety
+		return false
+	}
+
+	// If the output is empty, the file passes the formatter
+	// If the output contains "<standard input>", the file would be changed
+	return len(bytes.TrimSpace(out)) > 0
+}
+
+// pickFormatter picks a formatter to use for code.
+// If something goes wrong, it recommends no formatter (empty string).
+// It returns the strictest formatter (gofumpt > goimports > gofmt)
+// that would leave the given code unchanged, so that formatting never
+// becomes stricter than what the code already satisfied.
+func (r *CodeReviewer) pickFormatter(ctx context.Context, code []byte) string {
+	// Test each formatter from strictest to least strict.
+	// Keep the first one that doesn't make changes.
+	formatters := []string{"gofumpt", "goimports", "gofmt"}
+	for _, formatter := range formatters {
+		if r.formatterWouldChange(ctx, formatter, code) {
+			continue
+		}
+		return formatter
+	}
+	return "" // no safe formatter found
+}
+
+// changedFiles retrieves a list of all files changed between two commits,
+// as absolute paths, excluding files that were already dirty/untracked
+// when the CodeReviewer was constructed.
+func (r *CodeReviewer) changedFiles(ctx context.Context, fromCommit, toCommit string) ([]string, error) {
+	cmd := exec.CommandContext(ctx, "git", "diff", "--name-only", fromCommit, toCommit)
+	cmd.Dir = r.repoRoot
+	out, err := cmd.CombinedOutput()
+	if err != nil {
+		return nil, fmt.Errorf("failed to get changed files: %w\n%s", err, out)
+	}
+	var files []string
+	for line := range strings.Lines(string(out)) {
+		// Lines include trailing newlines; trim before use.
+		line = strings.TrimSpace(line)
+		if len(line) == 0 {
+			continue
+		}
+		path := r.absPath(line)
+		if r.initialStatusesContainFile(path) {
+			continue
+		}
+		files = append(files, path)
+	}
+	return files, nil
+}
diff --git a/claudetool/differential.go b/claudetool/differential.go
new file mode 100644
index 0000000..14dce04
--- /dev/null
+++ b/claudetool/differential.go
@@ -0,0 +1,1125 @@
+package claudetool
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "fmt"
+ "io"
+ "log/slog"
+ "maps"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "slices"
+ "strings"
+ "time"
+
+ "golang.org/x/tools/go/packages"
+ "sketch.dev/ant"
+)
+
+// This file does differential quality analysis of a commit relative to a base commit.
+
+// Tool returns a tool spec for a CodeReview tool backed by r.
+func (r *CodeReviewer) Tool() *ant.Tool {
+ spec := &ant.Tool{
+ Name: "codereview",
+ Description: `Run an automated code review.`,
+ // If you modify this, update the termui template for prettier rendering.
+ InputSchema: ant.MustSchema(`{"type": "object"}`),
+ Run: r.Run,
+ }
+ return spec
+}
+
// Run executes one automated code review pass. It requires a clean,
// committed git state, then compares the current commit against the
// initial commit using differential test, vet, and gopls checks.
// It returns "OK" when no regressions are found, otherwise the combined
// reports joined by blank lines.
func (r *CodeReviewer) Run(ctx context.Context, m json.RawMessage) (string, error) {
	// The differential analysis assumes a clean, fully committed tree.
	if err := r.RequireNormalGitState(ctx); err != nil {
		slog.DebugContext(ctx, "CodeReviewer.Run: failed to check for normal git state", "err", err)
		return "", err
	}
	if err := r.RequireNoUncommittedChanges(ctx); err != nil {
		slog.DebugContext(ctx, "CodeReviewer.Run: failed to check for uncommitted changes", "err", err)
		return "", err
	}

	// Check that the current commit is not the initial commit
	currentCommit, err := r.CurrentCommit(ctx)
	if err != nil {
		slog.DebugContext(ctx, "CodeReviewer.Run: failed to get current commit", "err", err)
		return "", err
	}
	if r.IsInitialCommit(currentCommit) {
		slog.DebugContext(ctx, "CodeReviewer.Run: current commit is initial commit, nothing to review")
		return "", fmt.Errorf("no new commits have been added, nothing to review")
	}

	// No matter what failures happen from here out, we will declare this to have been reviewed.
	// This should help avoid the model getting blocked by a broken code review tool.
	r.reviewed = append(r.reviewed, currentCommit)

	changedFiles, err := r.changedFiles(ctx, r.initialCommit, currentCommit)
	if err != nil {
		slog.DebugContext(ctx, "CodeReviewer.Run: failed to get changed files", "err", err)
		return "", err
	}

	// Prepare to analyze before/after for the impacted files.
	// We use the current commit to determine what packages exist and are impacted.
	// The packages in the initial commit may be different.
	// Good enough for now.
	// TODO: do better
	directPkgs, allPkgs, err := r.packagesForFiles(ctx, changedFiles)
	if err != nil {
		// TODO: log and skip to stuff that doesn't require packages
		slog.DebugContext(ctx, "CodeReviewer.Run: failed to get packages for files", "err", err)
		return "", err
	}
	allPkgList := slices.Collect(maps.Keys(allPkgs))
	directPkgList := slices.Collect(maps.Keys(directPkgs))

	// Each check below returns a report string; empty means the check passed.
	var msgs []string

	// Tests run over all (transitively) affected packages.
	testMsg, err := r.checkTests(ctx, allPkgList)
	if err != nil {
		slog.DebugContext(ctx, "CodeReviewer.Run: failed to check tests", "err", err)
		return "", err
	}
	if testMsg != "" {
		msgs = append(msgs, testMsg)
	}

	// Vet runs only over packages that directly contain changed files.
	vetMsg, err := r.checkVet(ctx, directPkgList)
	if err != nil {
		slog.DebugContext(ctx, "CodeReviewer.Run: failed to check vet", "err", err)
		return "", err
	}
	if vetMsg != "" {
		msgs = append(msgs, vetMsg)
	}

	// Gopls runs over the changed files themselves.
	goplsMsg, err := r.checkGopls(ctx, changedFiles)
	if err != nil {
		slog.DebugContext(ctx, "CodeReviewer.Run: failed to check gopls", "err", err)
		return "", err
	}
	if goplsMsg != "" {
		msgs = append(msgs, goplsMsg)
	}

	if len(msgs) == 0 {
		slog.DebugContext(ctx, "CodeReviewer.Run: no issues found")
		return "OK", nil
	}
	slog.DebugContext(ctx, "CodeReviewer.Run: found issues", "issues", msgs)
	return strings.Join(msgs, "\n\n"), nil
}
+
+func (r *CodeReviewer) initializeInitialCommitWorktree(ctx context.Context) error {
+ if r.initialWorktree != "" {
+ return nil
+ }
+ tmpDir, err := os.MkdirTemp("", "sketch-codereview-worktree")
+ if err != nil {
+ return err
+ }
+ worktreeCmd := exec.CommandContext(ctx, "git", "worktree", "add", "--detach", tmpDir, r.initialCommit)
+ worktreeCmd.Dir = r.repoRoot
+ out, err := worktreeCmd.CombinedOutput()
+ if err != nil {
+ return fmt.Errorf("unable to create worktree for initial commit: %w\n%s", err, out)
+ }
+ r.initialWorktree = tmpDir
+ return nil
+}
+
+func (r *CodeReviewer) checkTests(ctx context.Context, pkgList []string) (string, error) {
+ goTestArgs := []string{"test", "-json", "-v"}
+ goTestArgs = append(goTestArgs, pkgList...)
+
+ afterTestCmd := exec.CommandContext(ctx, "go", goTestArgs...)
+ afterTestCmd.Dir = r.repoRoot
+ afterTestOut, afterTestErr := afterTestCmd.Output()
+ if afterTestErr == nil {
+ return "", nil // all tests pass, we're good!
+ }
+
+ err := r.initializeInitialCommitWorktree(ctx)
+ if err != nil {
+ return "", err
+ }
+
+ beforeTestCmd := exec.CommandContext(ctx, "go", goTestArgs...)
+ beforeTestCmd.Dir = r.initialWorktree
+ beforeTestOut, _ := beforeTestCmd.Output() // ignore error, interesting info is in the output
+
+ // Parse the jsonl test results
+ beforeResults, beforeParseErr := parseTestResults(beforeTestOut)
+ if beforeParseErr != nil {
+ return "", fmt.Errorf("unable to parse test results for initial commit: %w\n%s", beforeParseErr, beforeTestOut)
+ }
+ afterResults, afterParseErr := parseTestResults(afterTestOut)
+ if afterParseErr != nil {
+ return "", fmt.Errorf("unable to parse test results for current commit: %w\n%s", afterParseErr, afterTestOut)
+ }
+
+ testRegressions, err := r.compareTestResults(beforeResults, afterResults)
+ if err != nil {
+ return "", fmt.Errorf("failed to compare test results: %w", err)
+ }
+ // TODO: better output formatting?
+ res := r.formatTestRegressions(testRegressions)
+ return res, nil
+}
+
// VetIssue represents a single issue found by go vet
type VetIssue struct {
	// Position is the source position string, e.g. "/path/to/file.go:10:15".
	Position string `json:"posn"`
	// Message is the human-readable diagnostic text.
	Message string `json:"message"`
	// Ignoring suggested_fixes for now as we don't need them for comparison
}

// VetResult represents the JSON output of go vet -json for a single package
type VetResult map[string][]VetIssue // category -> issues

// VetResults represents the full JSON output of go vet -json
type VetResults map[string]VetResult // package path -> result
+
// checkVet runs go vet on the provided packages in both the current and initial state,
// compares the results, and reports any new vet issues introduced in the current state.
// It returns "" when there is nothing to report; a non-nil error is reserved for
// failures of the comparison machinery itself.
func (r *CodeReviewer) checkVet(ctx context.Context, pkgList []string) (string, error) {
	if len(pkgList) == 0 {
		return "", nil // no packages to check
	}

	// Run vet on the current state with JSON output
	goVetArgs := []string{"vet", "-json"}
	goVetArgs = append(goVetArgs, pkgList...)

	afterVetCmd := exec.CommandContext(ctx, "go", goVetArgs...)
	afterVetCmd.Dir = r.repoRoot
	afterVetOut, afterVetErr := afterVetCmd.CombinedOutput()
	if afterVetErr != nil {
		// Vet itself failed to run; log and skip this check rather than
		// block the whole review on a broken tool.
		slog.WarnContext(ctx, "CodeReviewer.checkVet: (after) go vet failed", "err", afterVetErr, "output", string(afterVetOut))
		return "", nil // nothing more we can do here
	}

	// Parse the JSON output to see whether vet reported any issues.
	afterVetResults, err := parseVetJSON(afterVetOut)
	if err != nil {
		return "", fmt.Errorf("failed to parse vet output for current state: %w", err)
	}

	// If no issues were found, we're done
	if len(afterVetResults) == 0 || !vetResultsHaveIssues(afterVetResults) {
		return "", nil
	}

	// Vet detected issues in the current state, check if they existed in the initial state
	err = r.initializeInitialCommitWorktree(ctx)
	if err != nil {
		return "", err
	}

	beforeVetCmd := exec.CommandContext(ctx, "go", goVetArgs...)
	beforeVetCmd.Dir = r.initialWorktree
	beforeVetOut, _ := beforeVetCmd.CombinedOutput() // ignore error, we'll parse the output anyway

	// Parse the JSON output for the initial state
	beforeVetResults, err := parseVetJSON(beforeVetOut)
	if err != nil {
		return "", fmt.Errorf("failed to parse vet output for initial state: %w", err)
	}

	// Find new issues that weren't present in the initial state
	vetRegressions := findVetRegressions(beforeVetResults, afterVetResults)
	if !vetResultsHaveIssues(vetRegressions) {
		return "", nil // no new issues
	}

	// Format the results
	return formatVetRegressions(vetRegressions), nil
}
+
+// parseVetJSON parses the JSON output from go vet -json
+func parseVetJSON(output []byte) (VetResults, error) {
+ // The output contains multiple JSON objects, one per package
+ // We need to parse them separately
+ results := make(VetResults)
+
+ // Process the output by collecting JSON chunks between # comment lines
+ lines := strings.Split(string(output), "\n")
+ currentChunk := strings.Builder{}
+
+ // Helper function to process accumulated JSON chunks
+ processChunk := func() {
+ chunk := strings.TrimSpace(currentChunk.String())
+ if chunk == "" || !strings.HasPrefix(chunk, "{") {
+ return // Skip empty chunks or non-JSON chunks
+ }
+
+ // Try to parse the chunk as JSON
+ var result VetResults
+ if err := json.Unmarshal([]byte(chunk), &result); err != nil {
+ return // Skip invalid JSON
+ }
+
+ // Merge with our results
+ for pkg, issues := range result {
+ results[pkg] = issues
+ }
+
+ // Reset the chunk builder
+ currentChunk.Reset()
+ }
+
+ // Process lines
+ for _, line := range lines {
+ // If we hit a comment line, process the previous chunk and start a new one
+ if strings.HasPrefix(strings.TrimSpace(line), "#") {
+ processChunk()
+ continue
+ }
+
+ // Add the line to the current chunk
+ currentChunk.WriteString(line)
+ currentChunk.WriteString("\n")
+ }
+
+ // Process the final chunk
+ processChunk()
+
+ return results, nil
+}
+
+// vetResultsHaveIssues checks if there are any actual issues in the vet results
+func vetResultsHaveIssues(results VetResults) bool {
+ for _, pkgResult := range results {
+ for _, issues := range pkgResult {
+ if len(issues) > 0 {
+ return true
+ }
+ }
+ }
+ return false
+}
+
+// findVetRegressions identifies vet issues that are new in the after state
+func findVetRegressions(before, after VetResults) VetResults {
+ regressions := make(VetResults)
+
+ // Go through all packages in the after state
+ for pkgPath, afterPkgResults := range after {
+ beforePkgResults, pkgExistedBefore := before[pkgPath]
+
+ // Initialize package in regressions if it has issues
+ if !pkgExistedBefore {
+ // If the package didn't exist before, all issues are new
+ regressions[pkgPath] = afterPkgResults
+ continue
+ }
+
+ // Compare issues by category
+ for category, afterIssues := range afterPkgResults {
+ beforeIssues, categoryExistedBefore := beforePkgResults[category]
+
+ if !categoryExistedBefore {
+ // If this category didn't exist before, all issues are new
+ if regressions[pkgPath] == nil {
+ regressions[pkgPath] = make(VetResult)
+ }
+ regressions[pkgPath][category] = afterIssues
+ continue
+ }
+
+ // Compare individual issues
+ var newIssues []VetIssue
+ for _, afterIssue := range afterIssues {
+ if !issueExistsIn(afterIssue, beforeIssues) {
+ newIssues = append(newIssues, afterIssue)
+ }
+ }
+
+ // Add new issues to regressions
+ if len(newIssues) > 0 {
+ if regressions[pkgPath] == nil {
+ regressions[pkgPath] = make(VetResult)
+ }
+ regressions[pkgPath][category] = newIssues
+ }
+ }
+ }
+
+ return regressions
+}
+
+// issueExistsIn checks if an issue already exists in a list of issues
+// using a looser comparison that's resilient to position changes
+func issueExistsIn(issue VetIssue, issues []VetIssue) bool {
+ issueFile := extractFilePath(issue.Position)
+
+ for _, existing := range issues {
+ // Main comparison is by message content, which is likely stable
+ if issue.Message == existing.Message {
+ // If messages match exactly, consider it the same issue even if position changed
+ return true
+ }
+
+ // As a secondary check, if the issue is in the same file and has similar message,
+ // it's likely the same issue that might have been slightly reworded or relocated
+ existingFile := extractFilePath(existing.Position)
+ if issueFile == existingFile && messagesSimilar(issue.Message, existing.Message) {
+ return true
+ }
+ }
+ return false
+}
+
// extractFilePath gets just the file path from a position string like
// "/path/to/file.go:10:15". A position without a colon is returned as-is.
func extractFilePath(position string) string {
	// strings.Cut yields the full string when no separator is present, so
	// the no-colon case falls out naturally. (The previous strings.Split
	// form had an unreachable fallback: Split never returns an empty slice.)
	path, _, _ := strings.Cut(position, ":")
	return path
}
+
// messagesSimilar checks if two messages are similar enough to be considered the same issue
// This is a simple implementation that could be enhanced with more sophisticated text comparison
func messagesSimilar(msg1, msg2 string) bool {
	// Treat the messages as similar when either one contains the other.
	if strings.Contains(msg1, msg2) {
		return true
	}
	return strings.Contains(msg2, msg1)
}
+
+// formatVetRegressions generates a human-readable summary of vet regressions
+func formatVetRegressions(regressions VetResults) string {
+ if !vetResultsHaveIssues(regressions) {
+ return ""
+ }
+
+ var sb strings.Builder
+ sb.WriteString("Go vet issues detected:\n\n")
+
+ // Get sorted list of packages for deterministic output
+ pkgPaths := make([]string, 0, len(regressions))
+ for pkgPath := range regressions {
+ pkgPaths = append(pkgPaths, pkgPath)
+ }
+ slices.Sort(pkgPaths)
+
+ issueCount := 1
+ for _, pkgPath := range pkgPaths {
+ pkgResult := regressions[pkgPath]
+
+ // Get sorted list of categories
+ categories := make([]string, 0, len(pkgResult))
+ for category := range pkgResult {
+ categories = append(categories, category)
+ }
+ slices.Sort(categories)
+
+ for _, category := range categories {
+ issues := pkgResult[category]
+
+ // Skip empty issue lists (shouldn't happen, but just in case)
+ if len(issues) == 0 {
+ continue
+ }
+
+ // Sort issues by position for deterministic output
+ slices.SortFunc(issues, func(a, b VetIssue) int {
+ return strings.Compare(a.Position, b.Position)
+ })
+
+ // Format each issue
+ for _, issue := range issues {
+ sb.WriteString(fmt.Sprintf("%d. [%s] %s: %s\n",
+ issueCount,
+ category,
+ issue.Position,
+ issue.Message))
+ issueCount++
+ }
+ }
+ }
+
+ sb.WriteString("\nPlease fix these issues before proceeding.")
+ return sb.String()
+}
+
// GoplsIssue represents a single issue reported by gopls check
type GoplsIssue struct {
	Position string // File position in format "file:line:col-range"
	Message  string // Description of the issue
}
+
+// checkGopls runs gopls check on the provided files in both the current and initial state,
+// compares the results, and reports any new issues introduced in the current state.
+func (r *CodeReviewer) checkGopls(ctx context.Context, changedFiles []string) (string, error) {
+ if len(changedFiles) == 0 {
+ return "", nil // no files to check
+ }
+
+ // Filter out non-Go files as gopls only works on Go files
+ // and verify they still exist (not deleted)
+ var goFiles []string
+ for _, file := range changedFiles {
+ if !strings.HasSuffix(file, ".go") {
+ continue // not a Go file
+ }
+
+ // Check if the file still exists (not deleted)
+ if _, err := os.Stat(file); os.IsNotExist(err) {
+ continue // file doesn't exist anymore (deleted)
+ }
+
+ goFiles = append(goFiles, file)
+ }
+
+ if len(goFiles) == 0 {
+ return "", nil // no Go files to check
+ }
+
+ // Run gopls check on the current state
+ goplsArgs := append([]string{"check"}, goFiles...)
+
+ afterGoplsCmd := exec.CommandContext(ctx, "gopls", goplsArgs...)
+ afterGoplsCmd.Dir = r.repoRoot
+ afterGoplsOut, err := afterGoplsCmd.CombinedOutput() // gopls returns non-zero if it finds issues
+ if err != nil {
+ // Check if the output looks like real gopls issues or if it's just error output
+ if !looksLikeGoplsIssues(afterGoplsOut) {
+ slog.WarnContext(ctx, "CodeReviewer.checkGopls: gopls check failed to run properly", "err", err, "output", string(afterGoplsOut))
+ return "", nil // Skip rather than failing the entire code review
+ }
+ // Otherwise, proceed with parsing - it's likely just the non-zero exit code due to found issues
+ }
+
+ // Parse the output
+ afterIssues := parseGoplsOutput(afterGoplsOut)
+
+ // If no issues were found, we're done
+ if len(afterIssues) == 0 {
+ return "", nil
+ }
+
+ // Gopls detected issues in the current state, check if they existed in the initial state
+ initErr := r.initializeInitialCommitWorktree(ctx)
+ if initErr != nil {
+ return "", err
+ }
+
+ // For each file that exists in the initial commit, run gopls check
+ var initialFilesToCheck []string
+ for _, file := range goFiles {
+ // Get relative path for git operations
+ relFile, err := filepath.Rel(r.repoRoot, file)
+ if err != nil {
+ slog.WarnContext(ctx, "CodeReviewer.checkGopls: failed to get relative path", "repo_root", r.repoRoot, "file", file, "err", err)
+ continue
+ }
+
+ // Check if the file exists in the initial commit
+ checkCmd := exec.CommandContext(ctx, "git", "cat-file", "-e", fmt.Sprintf("%s:%s", r.initialCommit, relFile))
+ checkCmd.Dir = r.repoRoot
+ if err := checkCmd.Run(); err == nil {
+ // File exists in initial commit
+ initialFilePath := filepath.Join(r.initialWorktree, relFile)
+ initialFilesToCheck = append(initialFilesToCheck, initialFilePath)
+ }
+ }
+
+ // Run gopls check on the files that existed in the initial commit
+ beforeIssues := []GoplsIssue{}
+ if len(initialFilesToCheck) > 0 {
+ beforeGoplsArgs := append([]string{"check"}, initialFilesToCheck...)
+ beforeGoplsCmd := exec.CommandContext(ctx, "gopls", beforeGoplsArgs...)
+ beforeGoplsCmd.Dir = r.initialWorktree
+ var beforeGoplsOut []byte
+ var beforeCmdErr error
+ beforeGoplsOut, beforeCmdErr = beforeGoplsCmd.CombinedOutput()
+ if beforeCmdErr != nil && !looksLikeGoplsIssues(beforeGoplsOut) {
+ // If gopls fails to run properly on the initial commit, log a warning and continue
+ // with empty before issues - this will be conservative and report more issues
+ slog.WarnContext(ctx, "CodeReviewer.checkGopls: gopls check failed on initial commit",
+ "err", err, "output", string(beforeGoplsOut))
+ // Continue with empty beforeIssues
+ } else {
+ beforeIssues = parseGoplsOutput(beforeGoplsOut)
+ }
+ }
+
+ // Find new issues that weren't present in the initial state
+ goplsRegressions := findGoplsRegressions(beforeIssues, afterIssues)
+ if len(goplsRegressions) == 0 {
+ return "", nil // no new issues
+ }
+
+ // Format the results
+ return formatGoplsRegressions(goplsRegressions), nil
+}
+
+// parseGoplsOutput parses the text output from gopls check
+// Each line has the format: '/path/to/file.go:448:22-26: unused parameter: path'
+func parseGoplsOutput(output []byte) []GoplsIssue {
+ var issues []GoplsIssue
+ lines := strings.Split(string(output), "\n")
+
+ for _, line := range lines {
+ line = strings.TrimSpace(line)
+ if line == "" {
+ continue
+ }
+
+ // Skip lines that look like error messages rather than gopls issues
+ if strings.HasPrefix(line, "Error:") ||
+ strings.HasPrefix(line, "Failed:") ||
+ strings.HasPrefix(line, "Warning:") ||
+ strings.HasPrefix(line, "gopls:") {
+ continue
+ }
+
+ // Find the first colon that separates the file path from the line number
+ firstColonIdx := strings.Index(line, ":")
+ if firstColonIdx < 0 {
+ continue // Invalid format
+ }
+
+ // Verify the part before the first colon looks like a file path
+ potentialPath := line[:firstColonIdx]
+ if !strings.HasSuffix(potentialPath, ".go") {
+ continue // Not a Go file path
+ }
+
+ // Find the position of the first message separator ': '
+ // This separates the position info from the message
+ messageStart := strings.Index(line, ": ")
+ if messageStart < 0 || messageStart <= firstColonIdx {
+ continue // Invalid format
+ }
+
+ // Extract position and message
+ position := line[:messageStart]
+ message := line[messageStart+2:] // Skip the ': ' separator
+
+ // Verify position has the expected format (at least 2 colons for line:col)
+ colonCount := strings.Count(position, ":")
+ if colonCount < 2 {
+ continue // Not enough position information
+ }
+
+ issues = append(issues, GoplsIssue{
+ Position: position,
+ Message: message,
+ })
+ }
+
+ return issues
+}
+
// looksLikeGoplsIssues checks if the output appears to be actual gopls issues
// rather than error messages about gopls itself failing
func looksLikeGoplsIssues(output []byte) bool {
	// No output at all cannot be a list of issues.
	if len(output) == 0 {
		return false
	}

	// Accept the output if any line is shaped like a gopls diagnostic,
	// i.e. '/path/to/file.go:123:45-67: message': a ".go" path before the
	// first colon, at least two colons total, and a ": " separator.
	for _, raw := range strings.Split(string(output), "\n") {
		line := strings.TrimSpace(raw)
		if line == "" {
			continue
		}
		if strings.Count(line, ":") < 2 || !strings.Contains(line, ": ") {
			continue
		}
		pathPart, _, _ := strings.Cut(line, ":")
		if strings.HasSuffix(pathPart, ".go") {
			return true
		}
	}
	return false
}
+
// normalizeGoplsPosition extracts just the file path from a position string
// like "file.go:12:3-7" — everything before the first colon. A position with
// no colon is returned unchanged.
func normalizeGoplsPosition(position string) string {
	// strings.Cut returns the whole string when no colon is present, so the
	// fallback is implicit. (The previous strings.Split form had an
	// unreachable `len(parts) < 1` branch: Split never returns an empty slice.)
	file, _, _ := strings.Cut(position, ":")
	return file
}
+
+// findGoplsRegressions identifies gopls issues that are new in the after state
+func findGoplsRegressions(before, after []GoplsIssue) []GoplsIssue {
+ var regressions []GoplsIssue
+
+ // Build map of before issues for easier lookup
+ beforeIssueMap := make(map[string]map[string]bool) // file -> message -> exists
+ for _, issue := range before {
+ file := normalizeGoplsPosition(issue.Position)
+ if _, exists := beforeIssueMap[file]; !exists {
+ beforeIssueMap[file] = make(map[string]bool)
+ }
+ // Store both the exact message and the general issue type for fuzzy matching
+ beforeIssueMap[file][issue.Message] = true
+
+ // Extract the general issue type (everything before the first ':' in the message)
+ generalIssue := issue.Message
+ if colonIdx := strings.Index(issue.Message, ":"); colonIdx > 0 {
+ generalIssue = issue.Message[:colonIdx]
+ }
+ beforeIssueMap[file][generalIssue] = true
+ }
+
+ // Check each after issue to see if it's new
+ for _, afterIssue := range after {
+ file := normalizeGoplsPosition(afterIssue.Position)
+ isNew := true
+
+ if fileIssues, fileExists := beforeIssueMap[file]; fileExists {
+ // Check for exact message match
+ if fileIssues[afterIssue.Message] {
+ isNew = false
+ } else {
+ // Check for general issue type match
+ generalIssue := afterIssue.Message
+ if colonIdx := strings.Index(afterIssue.Message, ":"); colonIdx > 0 {
+ generalIssue = afterIssue.Message[:colonIdx]
+ }
+ if fileIssues[generalIssue] {
+ isNew = false
+ }
+ }
+ }
+
+ if isNew {
+ regressions = append(regressions, afterIssue)
+ }
+ }
+
+ // Sort regressions for deterministic output
+ slices.SortFunc(regressions, func(a, b GoplsIssue) int {
+ return strings.Compare(a.Position, b.Position)
+ })
+
+ return regressions
+}
+
+// formatGoplsRegressions generates a human-readable summary of gopls check regressions
+func formatGoplsRegressions(regressions []GoplsIssue) string {
+ if len(regressions) == 0 {
+ return ""
+ }
+
+ var sb strings.Builder
+ sb.WriteString("Gopls check issues detected:\n\n")
+
+ // Format each issue
+ for i, issue := range regressions {
+ sb.WriteString(fmt.Sprintf("%d. %s: %s\n", i+1, issue.Position, issue.Message))
+ }
+
+ sb.WriteString("\nIMPORTANT: Only fix new gopls check issues in parts of the code that you have already edited. ")
+ sb.WriteString("Do not change existing code that was not part of your current edits.")
+ return sb.String()
+}
+
// HasReviewed reports whether the given commit has already been recorded
// as reviewed by this CodeReviewer (Run records a commit regardless of
// whether its checks subsequently succeed).
func (r *CodeReviewer) HasReviewed(commit string) bool {
	return slices.Contains(r.reviewed, commit)
}
+
// IsInitialCommit reports whether commit is the base commit that reviews
// are diffed against.
func (r *CodeReviewer) IsInitialCommit(commit string) bool {
	return commit == r.initialCommit
}
+
+// packagesForFiles returns maps of packages related to the given files:
+// 1. directPkgs: packages that directly contain the changed files
+// 2. allPkgs: all packages that might be affected, including downstream packages that depend on the direct packages
+// It may include false positives.
+// Files must be absolute paths!
+func (r *CodeReviewer) packagesForFiles(ctx context.Context, files []string) (directPkgs, allPkgs map[string]*packages.Package, err error) {
+ for _, f := range files {
+ if !filepath.IsAbs(f) {
+ return nil, nil, fmt.Errorf("path %q is not absolute", f)
+ }
+ }
+ cfg := &packages.Config{
+ Mode: packages.LoadImports | packages.NeedEmbedFiles,
+ Context: ctx,
+ // Logf: func(msg string, args ...any) {
+ // slog.DebugContext(ctx, "loading go packages", "msg", fmt.Sprintf(msg, args...))
+ // },
+ // TODO: in theory, go.mod might not be in the repo root, and there might be multiple go.mod files.
+ // We can cross that bridge when we get there.
+ Dir: r.repoRoot,
+ Tests: true,
+ }
+ universe, err := packages.Load(cfg, "./...")
+ if err != nil {
+ return nil, nil, err
+ }
+ // Identify packages that directly contain the changed files
+ directPkgs = make(map[string]*packages.Package) // import path -> package
+ for _, pkg := range universe {
+ // fmt.Println("pkg:", pkg.PkgPath)
+ pkgFiles := allFiles(pkg)
+ // fmt.Println("pkgFiles:", pkgFiles)
+ for _, file := range files {
+ if pkgFiles[file] {
+ // prefer test packages, as they contain strictly more files (right?)
+ prev := directPkgs[pkg.PkgPath]
+ if prev == nil || prev.ForTest == "" {
+ directPkgs[pkg.PkgPath] = pkg
+ }
+ }
+ }
+ }
+
+ // Create a copy of directPkgs to expand with dependencies
+ allPkgs = make(map[string]*packages.Package)
+ for k, v := range directPkgs {
+ allPkgs[k] = v
+ }
+
+ // Add packages that depend on the direct packages
+ addDependentPackages(universe, allPkgs)
+ return directPkgs, allPkgs, nil
+}
+
+// allFiles returns all files that might be referenced by the package.
+// It may contain false positives.
+func allFiles(p *packages.Package) map[string]bool {
+ files := make(map[string]bool)
+ add := [][]string{p.GoFiles, p.CompiledGoFiles, p.OtherFiles, p.EmbedFiles, p.IgnoredFiles}
+ for _, extra := range add {
+ for _, file := range extra {
+ files[file] = true
+ }
+ }
+ return files
+}
+
+// addDependentPackages adds to pkgs all packages from universe
+// that directly or indirectly depend on any package already in pkgs.
+func addDependentPackages(universe []*packages.Package, pkgs map[string]*packages.Package) {
+ for {
+ changed := false
+ for _, p := range universe {
+ if _, ok := pkgs[p.PkgPath]; ok {
+ // already in pkgs
+ continue
+ }
+ for importPath := range p.Imports {
+ if _, ok := pkgs[importPath]; ok {
+ // imports a package dependent on pkgs, add it
+ pkgs[p.PkgPath] = p
+ changed = true
+ break
+ }
+ }
+ }
+ if !changed {
+ break
+ }
+ }
+}
+
// testJSON is a union of BuildEvent and TestEvent
type testJSON struct {
	// TestEvent only:
	// The Time field holds the time the event happened. It is conventionally omitted
	// for cached test results.
	Time time.Time `json:"Time"`
	// BuildEvent only:
	// The ImportPath field gives the package ID of the package being built.
	// This matches the Package.ImportPath field of go list -json and the
	// TestEvent.FailedBuild field of go test -json. Note that it does not
	// match TestEvent.Package.
	ImportPath string `json:"ImportPath"` // BuildEvent only
	// TestEvent only:
	// The Package field, if present, specifies the package being tested. When the
	// go command runs parallel tests in -json mode, events from different tests are
	// interlaced; the Package field allows readers to separate them.
	Package string `json:"Package"`
	// Action is used in both BuildEvent and TestEvent.
	// It is the key to distinguishing between them.
	// BuildEvent:
	//   build-output or build-fail
	// TestEvent:
	//   start, run, pause, cont, pass, bench, fail, output, skip
	Action string `json:"Action"`
	// TestEvent only:
	// The Test field, if present, specifies the test, example, or benchmark function
	// that caused the event. Events for the overall package test do not set Test.
	Test string `json:"Test"`
	// TestEvent only:
	// The Elapsed field is set for "pass" and "fail" events. It gives the time elapsed in seconds
	// for the specific test or the overall package test that passed or failed.
	// (Tag added for consistency with the other fields; the encoded name is unchanged.)
	Elapsed float64 `json:"Elapsed"`
	// TestEvent:
	// The Output field is set for Action == "output" and is a portion of the
	// test's output (standard output and standard error merged together). The
	// output is unmodified except that invalid UTF-8 output from a test is coerced
	// into valid UTF-8 by use of replacement characters. With that one exception,
	// the concatenation of the Output fields of all output events is the exact output
	// of the test execution.
	// BuildEvent:
	// The Output field is set for Action == "build-output" and is a portion of
	// the build's output. The concatenation of the Output fields of all output
	// events is the exact output of the build. A single event may contain one
	// or more lines of output and there may be more than one output event for
	// a given ImportPath. This matches the definition of the TestEvent.Output
	// field produced by go test -json.
	Output string `json:"Output"`
	// TestEvent only:
	// The FailedBuild field is set for Action == "fail" if the test failure was caused
	// by a build failure. It contains the package ID of the package that failed to
	// build. This matches the ImportPath field of the "go list" output, as well as the
	// BuildEvent.ImportPath field as emitted by "go build -json".
	FailedBuild string `json:"FailedBuild"`
}
+
+// parseTestResults converts test output in JSONL format into a slice of testJSON objects
+func parseTestResults(testOutput []byte) ([]testJSON, error) {
+ var results []testJSON
+ dec := json.NewDecoder(bytes.NewReader(testOutput))
+ for {
+ var event testJSON
+ if err := dec.Decode(&event); err != nil {
+ if err == io.EOF {
+ break
+ }
+ return nil, err
+ }
+ results = append(results, event)
+ }
+ return results, nil
+}
+
// testStatus represents the status of a test in a given commit
type testStatus int

const (
	// testStatusUnknown means the test did not appear in that commit's results.
	testStatusUnknown testStatus = iota
	testStatusPass
	testStatusFail
	// testStatusBuildFail means the test's package failed to build.
	testStatusBuildFail
	testStatusSkip
)
+
// testInfo represents information about a specific test
type testInfo struct {
	Package string
	Test    string // empty for package tests
}

// String returns a human-readable string representation of the test:
// "package.Test" for individual tests, or just the package path for
// package-level results.
func (t testInfo) String() string {
	if t.Test != "" {
		return fmt.Sprintf("%s.%s", t.Package, t.Test)
	}
	return t.Package
}
+
// testRegression represents a test that regressed between commits
type testRegression struct {
	Info         testInfo
	BeforeStatus testStatus // status at the initial (base) commit
	AfterStatus  testStatus // status at the commit under review
	Output       string     // failure output in the after state
}
+
+// collectTestStatuses processes a slice of test events and returns the final
+// status of each test (package-level results appear with an empty Test).
+//
+// Build failures arrive in two shapes, both handled here: "fail" actions
+// carrying FailedBuild pre-mark a package, and "build-fail" build events
+// retroactively mark tests already seen in the package named by ImportPath.
+//
+// Fix over the original: the testOutputs map was populated here but never
+// read (failure output is gathered separately in compareTestResults), so
+// that dead work is removed.
+func collectTestStatuses(results []testJSON) map[testInfo]testStatus {
+	statuses := make(map[testInfo]testStatus)
+	failedBuilds := make(map[string]bool) // track packages with build failures
+
+	// First pass: identify build failures.
+	for _, result := range results {
+		if result.Action == "fail" && result.FailedBuild != "" {
+			failedBuilds[result.FailedBuild] = true
+		}
+	}
+
+	// Second pass: collect test statuses.
+	for _, result := range results {
+		// Output events carry no status; the caller collects output
+		// separately when reporting regressions.
+		if result.Action == "output" {
+			continue
+		}
+
+		// Handle BuildEvent output: mark all tests seen so far in the
+		// broken package as build failures.
+		if result.Action == "build-fail" {
+			for ti := range statuses {
+				if ti.Package == result.ImportPath {
+					statuses[ti] = testStatusBuildFail
+				}
+			}
+			continue
+		}
+
+		info := testInfo{Package: result.Package, Test: result.Test}
+
+		// Tests in a package whose build failed get build-fail status
+		// regardless of the reported action.
+		if failedBuilds[result.Package] {
+			statuses[info] = testStatusBuildFail
+			continue
+		}
+
+		// Handle test events.
+		switch result.Action {
+		case "pass":
+			statuses[info] = testStatusPass
+		case "fail":
+			statuses[info] = testStatusFail
+		case "skip":
+			statuses[info] = testStatusSkip
+		}
+	}
+
+	return statuses
+}
+
+// compareTestResults identifies tests that have regressed between commits.
+// A regression is any test whose badness increased (see isRegression); the
+// result is sorted by package then test name for deterministic output.
+// The error return is reserved for future use and is currently always nil.
+func (r *CodeReviewer) compareTestResults(beforeResults, afterResults []testJSON) ([]testRegression, error) {
+	before := collectTestStatuses(beforeResults)
+	after := collectTestStatuses(afterResults)
+
+	// Accumulate the raw output emitted in the after state so failing
+	// tests can be reported with their logs attached.
+	outputByTest := make(map[testInfo]string)
+	for _, ev := range afterResults {
+		if ev.Action != "output" {
+			continue
+		}
+		key := testInfo{Package: ev.Package, Test: ev.Test}
+		outputByTest[key] += ev.Output
+	}
+
+	var regressions []testRegression
+	for info, afterStatus := range after {
+		// Passing or skipped tests in the after state cannot be regressions.
+		if afterStatus == testStatusPass || afterStatus == testStatusSkip {
+			continue
+		}
+
+		// A test absent from the before run defaults to testStatusUnknown,
+		// which is the map's zero value.
+		beforeStatus := before[info]
+
+		// An unknown after-status should not occur; surface it for debugging.
+		if afterStatus == testStatusUnknown {
+			slog.WarnContext(context.Background(), "Unexpected unknown test status encountered",
+				"package", info.Package, "test", info.Test)
+		}
+
+		if !isRegression(beforeStatus, afterStatus) {
+			continue
+		}
+		regressions = append(regressions, testRegression{
+			Info:         info,
+			BeforeStatus: beforeStatus,
+			AfterStatus:  afterStatus,
+			Output:       outputByTest[info],
+		})
+	}
+
+	// Sort for consistent output: by package, then by test name.
+	slices.SortFunc(regressions, func(a, b testRegression) int {
+		if c := strings.Compare(a.Info.Package, b.Info.Package); c != 0 {
+			return c
+		}
+		return strings.Compare(a.Info.Test, b.Info.Test)
+	})
+
+	return regressions, nil
+}
+
+// badnessLevels maps test status to a badness level.
+// Higher values indicate worse status (more severe issues); isRegression
+// treats any strict increase in badness as a regression.
+var badnessLevels = map[testStatus]int{
+	testStatusBuildFail: 4, // Worst
+	testStatusFail:      3,
+	testStatusSkip:      2,
+	testStatusPass:      1,
+	testStatusUnknown:   0, // Least bad - avoids false positives
+}
+
+// isRegression reports whether a test got worse between two runs, i.e. the
+// after status maps to a strictly higher badness level than the before one.
+func isRegression(before, after testStatus) bool {
+	return badnessLevels[before] < badnessLevels[after]
+}
+
+// formatTestRegressions generates a human-readable summary of test
+// regressions: each entry is numbered, its status transition described, and
+// up to the first 10 lines of failure output attached. Returns "" when the
+// slice is empty.
+func (r *CodeReviewer) formatTestRegressions(regressions []testRegression) string {
+	if len(regressions) == 0 {
+		return ""
+	}
+
+	var out strings.Builder
+	fmt.Fprintf(&out, "Test regressions detected between initial commit (%s) and HEAD:\n\n", r.initialCommit)
+
+	for i, reg := range regressions {
+		// Describe the before -> after transition.
+		fmt.Fprintf(&out, "%d. %s: ", i+1, reg.Info.String())
+		switch {
+		case reg.BeforeStatus == testStatusUnknown && reg.AfterStatus == testStatusFail:
+			out.WriteString("New test is failing")
+		case reg.BeforeStatus == testStatusUnknown && reg.AfterStatus == testStatusBuildFail:
+			out.WriteString("New test has build errors")
+		case reg.BeforeStatus == testStatusPass && reg.AfterStatus == testStatusFail:
+			out.WriteString("Was passing, now failing")
+		case reg.BeforeStatus == testStatusPass && reg.AfterStatus == testStatusBuildFail:
+			out.WriteString("Was passing, now has build errors")
+		case reg.BeforeStatus == testStatusSkip && reg.AfterStatus == testStatusFail:
+			out.WriteString("Was skipped, now failing")
+		case reg.BeforeStatus == testStatusSkip && reg.AfterStatus == testStatusBuildFail:
+			out.WriteString("Was skipped, now has build errors")
+		default:
+			out.WriteString("Regression detected")
+		}
+		out.WriteString("\n")
+
+		// Attach failure output, indented, capped at the first 10 lines.
+		if reg.Output != "" {
+			outputLines := strings.Split(strings.TrimSpace(reg.Output), "\n")
+			shown := min(len(outputLines), 10)
+			out.WriteString(" Output:\n")
+			for _, line := range outputLines[:shown] {
+				fmt.Fprintf(&out, " | %s\n", line)
+			}
+			if hidden := len(outputLines) - shown; hidden > 0 {
+				fmt.Fprintf(&out, " | ... (%d more lines)\n", hidden)
+			}
+		}
+		out.WriteString("\n")
+	}
+
+	out.WriteString("Please fix these test failures before proceeding.")
+	return out.String()
+}
diff --git a/claudetool/edit.go b/claudetool/edit.go
new file mode 100644
index 0000000..df83139
--- /dev/null
+++ b/claudetool/edit.go
@@ -0,0 +1,451 @@
+package claudetool
+
+/*
+
+Note: sketch wrote this based on translating https://raw.githubusercontent.com/anthropics/anthropic-quickstarts/refs/heads/main/computer-use-demo/computer_use_demo/tools/edit.py
+
+## Implementation Notes
+This tool is based on Anthropic's Python implementation of the `text_editor_20250124` tool. It maintains a history of file edits to support the undo functionality, and verifies text uniqueness for the str_replace operation to ensure safe edits.
+
+*/
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "strings"
+
+ "sketch.dev/ant"
+)
+
+// Constants for the AnthropicEditTool
+const (
+	editName = "str_replace_editor"
+)
+
+// Constants used by the tool
+const (
+	snippetLines   = 4     // lines of context shown around an edit in result snippets
+	maxResponseLen = 16000 // maximum bytes of tool output before truncation
+	truncatedMessage = "<response clipped><NOTE>To save on context only part of this file has been shown to you. You should retry this tool after you have searched inside the file with `grep -n` in order to find the line numbers of what you are looking for.</NOTE>"
+)
+
+// Command represents the type of operation to perform
+type editCommand string
+
+const (
+	viewCommand       editCommand = "view"        // show a file or directory listing
+	createCommand     editCommand = "create"      // create a new file (must not exist)
+	strReplaceCommand editCommand = "str_replace" // replace a unique string in a file
+	insertCommand     editCommand = "insert"      // insert text after a given line
+	undoEditCommand   editCommand = "undo_edit"   // revert the most recent edit to a file
+)
+
+// editInput represents the expected input format for the edit tool.
+// Pointer fields distinguish "absent" from "empty" so that per-command
+// required-parameter checks can be precise.
+type editInput struct {
+	Command    string  `json:"command"`
+	Path       string  `json:"path"`
+	FileText   *string `json:"file_text,omitempty"`
+	ViewRange  []int   `json:"view_range,omitempty"`
+	OldStr     *string `json:"old_str,omitempty"`
+	NewStr     *string `json:"new_str,omitempty"`
+	InsertLine *int    `json:"insert_line,omitempty"`
+}
+
+// fileHistory maintains a history of edits for each file to support undo
+// functionality. NOTE(review): package-level mutable state with no locking —
+// presumably the tool runs serially; confirm before invoking concurrently.
+var fileHistory = make(map[string][]string)
+
+// AnthropicEditTool is a tool for viewing, creating, and editing files
+var AnthropicEditTool = &ant.Tool{
+	// Note that Type is model-dependent, and would be different for Claude 3.5, for example.
+	Type: "text_editor_20250124",
+	Name: editName,
+	Run:  EditRun,
+}
+
+// EditRun is the implementation of the edit tool. It decodes the request,
+// validates the command and path, then dispatches to the per-command
+// handler, enforcing each command's required parameters.
+func EditRun(ctx context.Context, input json.RawMessage) (string, error) {
+	var req editInput
+	if err := json.Unmarshal(input, &req); err != nil {
+		return "", fmt.Errorf("failed to parse edit input: %v", err)
+	}
+
+	cmd := editCommand(req.Command)
+	if !isValidCommand(cmd) {
+		return "", fmt.Errorf("unrecognized command %s. The allowed commands are: view, create, str_replace, insert, undo_edit", cmd)
+	}
+
+	path := req.Path
+	if err := validatePath(cmd, path); err != nil {
+		return "", err
+	}
+
+	switch cmd {
+	case viewCommand:
+		return handleView(ctx, path, req.ViewRange)
+	case createCommand:
+		if req.FileText == nil {
+			return "", fmt.Errorf("parameter file_text is required for command: create")
+		}
+		return handleCreate(path, *req.FileText)
+	case strReplaceCommand:
+		if req.OldStr == nil {
+			return "", fmt.Errorf("parameter old_str is required for command: str_replace")
+		}
+		// new_str is optional for str_replace: absent means "delete old_str".
+		var newStr string
+		if req.NewStr != nil {
+			newStr = *req.NewStr
+		}
+		return handleStrReplace(path, *req.OldStr, newStr)
+	case insertCommand:
+		if req.InsertLine == nil {
+			return "", fmt.Errorf("parameter insert_line is required for command: insert")
+		}
+		if req.NewStr == nil {
+			return "", fmt.Errorf("parameter new_str is required for command: insert")
+		}
+		return handleInsert(path, *req.InsertLine, *req.NewStr)
+	case undoEditCommand:
+		return handleUndoEdit(path)
+	default:
+		return "", fmt.Errorf("command %s is not implemented", cmd)
+	}
+}
+
+// isValidCommand reports whether cmd is one of the supported edit commands.
+func isValidCommand(cmd editCommand) bool {
+	known := []editCommand{viewCommand, createCommand, strReplaceCommand, insertCommand, undoEditCommand}
+	for _, k := range known {
+		if cmd == k {
+			return true
+		}
+	}
+	return false
+}
+
+// validatePath checks that the path/command combination is valid: the path
+// must be absolute; it must exist for every command except create; create
+// must not target an existing file; and only view may target a directory.
+func validatePath(cmd editCommand, path string) error {
+	if !filepath.IsAbs(path) {
+		suggestedPath := "/" + path
+		return fmt.Errorf("the path %s is not an absolute path, it should start with '/'. Maybe you meant %s?", path, suggestedPath)
+	}
+
+	info, err := os.Stat(path)
+	switch {
+	case err == nil:
+		// Path exists: directories are only viewable, and create must
+		// not overwrite an existing file.
+		if info.IsDir() && cmd != viewCommand {
+			return fmt.Errorf("the path %s is a directory and only the 'view' command can be used on directories", path)
+		}
+		if cmd == createCommand {
+			return fmt.Errorf("file already exists at: %s. Cannot overwrite files using command 'create'", path)
+		}
+		return nil
+	case os.IsNotExist(err):
+		// Missing paths are only acceptable for create.
+		if cmd != createCommand {
+			return fmt.Errorf("the path %s does not exist. Please provide a valid path", path)
+		}
+		return nil
+	default:
+		return fmt.Errorf("error accessing path %s: %v", path, err)
+	}
+}
+
+// handleView implements the view command
+func handleView(ctx context.Context, path string, viewRange []int) (string, error) {
+ info, err := os.Stat(path)
+ if err != nil {
+ return "", fmt.Errorf("error accessing path %s: %v", path, err)
+ }
+
+ // Handle directory view
+ if info.IsDir() {
+ if viewRange != nil {
+ return "", fmt.Errorf("the view_range parameter is not allowed when path points to a directory")
+ }
+
+ // List files in the directory (up to 2 levels deep)
+ return listDirectory(ctx, path)
+ }
+
+ // Handle file view
+ fileContent, err := readFile(path)
+ if err != nil {
+ return "", err
+ }
+
+ initLine := 1
+ if viewRange != nil {
+ if len(viewRange) != 2 {
+ return "", fmt.Errorf("invalid view_range. It should be a list of two integers")
+ }
+
+ fileLines := strings.Split(fileContent, "\n")
+ nLinesFile := len(fileLines)
+ initLine, finalLine := viewRange[0], viewRange[1]
+
+ if initLine < 1 || initLine > nLinesFile {
+ return "", fmt.Errorf("invalid view_range: %v. Its first element %d should be within the range of lines of the file: [1, %d]",
+ viewRange, initLine, nLinesFile)
+ }
+
+ if finalLine != -1 && finalLine < initLine {
+ return "", fmt.Errorf("invalid view_range: %v. Its second element %d should be larger or equal than its first %d",
+ viewRange, finalLine, initLine)
+ }
+
+ if finalLine > nLinesFile {
+ return "", fmt.Errorf("invalid view_range: %v. Its second element %d should be smaller than the number of lines in the file: %d",
+ viewRange, finalLine, nLinesFile)
+ }
+
+ if finalLine == -1 {
+ fileContent = strings.Join(fileLines[initLine-1:], "\n")
+ } else {
+ fileContent = strings.Join(fileLines[initLine-1:finalLine], "\n")
+ }
+ }
+
+ return makeOutput(fileContent, path, initLine), nil
+}
+
+// handleCreate implements the create command: it writes a brand-new file
+// (creating parent directories as needed) and records the content in the
+// undo history.
+func handleCreate(path string, fileText string) (string, error) {
+	// Ensure the parent directory exists.
+	parent := filepath.Dir(path)
+	if err := os.MkdirAll(parent, 0o755); err != nil {
+		return "", fmt.Errorf("failed to create directory %s: %v", parent, err)
+	}
+
+	if err := writeFile(path, fileText); err != nil {
+		return "", err
+	}
+
+	// Record the created content so undo_edit can restore it.
+	fileHistory[path] = append(fileHistory[path], fileText)
+
+	return fmt.Sprintf("File created successfully at: %s", path), nil
+}
+
+// handleStrReplace implements the str_replace command: oldStr must occur
+// exactly once in the file; it is replaced by newStr, the prior content is
+// saved for undo, and a snippet around the edit is returned.
+func handleStrReplace(path, oldStr, newStr string) (string, error) {
+	fileContent, err := readFile(path)
+	if err != nil {
+		return "", err
+	}
+
+	// Tab expansion (currently a no-op; see maybeExpandTabs).
+	fileContent = maybeExpandTabs(path, fileContent)
+	oldStr = maybeExpandTabs(path, oldStr)
+	newStr = maybeExpandTabs(path, newStr)
+
+	// oldStr must be unique in the file for the replacement to be safe.
+	occurrences := strings.Count(fileContent, oldStr)
+	if occurrences == 0 {
+		return "", fmt.Errorf("no replacement was performed, old_str %q did not appear verbatim in %s", oldStr, path)
+	} else if occurrences > 1 {
+		// Report the line of each occurrence. BUG FIX: the original
+		// checked strings.Contains per line, which found nothing when
+		// old_str spanned multiple lines; scan by index instead.
+		var lines []int
+		for from := 0; ; {
+			idx := strings.Index(fileContent[from:], oldStr)
+			if idx < 0 {
+				break
+			}
+			abs := from + idx
+			lines = append(lines, strings.Count(fileContent[:abs], "\n")+1)
+			from = abs + len(oldStr)
+		}
+		return "", fmt.Errorf("no replacement was performed. Multiple occurrences of old_str %q in lines %v. Please ensure it is unique", oldStr, lines)
+	}
+
+	// Save the current content so undo_edit can restore it.
+	fileHistory[path] = append(fileHistory[path], fileContent)
+
+	newFileContent := strings.Replace(fileContent, oldStr, newStr, 1)
+	if err := writeFile(path, newFileContent); err != nil {
+		return "", err
+	}
+
+	// Build a snippet around the edit site. Index is >= 0 because the
+	// occurrence count was verified above, so no defensive slicing needed.
+	idx := strings.Index(fileContent, oldStr)
+	replacementLine := strings.Count(fileContent[:idx], "\n")
+	startLine := max(0, replacementLine-snippetLines)
+	endLine := replacementLine + snippetLines + strings.Count(newStr, "\n")
+	fileLines := strings.Split(newFileContent, "\n")
+	endLine = min(endLine+1, len(fileLines))
+	snippet := strings.Join(fileLines[startLine:endLine], "\n")
+
+	// Prepare the success message with the snippet attached.
+	successMsg := fmt.Sprintf("The file %s has been edited. ", path)
+	successMsg += makeOutput(snippet, fmt.Sprintf("a snippet of %s", path), startLine+1)
+	successMsg += "Review the changes and make sure they are as expected. Edit the file again if necessary."
+
+	return successMsg, nil
+}
+
+// handleInsert implements the insert command: newStr is inserted after line
+// insertLine (0 inserts at the top of the file). The prior content is saved
+// for undo, and a snippet of the edited region is returned.
+func handleInsert(path string, insertLine int, newStr string) (string, error) {
+	fileContent, err := readFile(path)
+	if err != nil {
+		return "", err
+	}
+
+	// Tab expansion (currently a no-op; see maybeExpandTabs).
+	fileContent = maybeExpandTabs(path, fileContent)
+	newStr = maybeExpandTabs(path, newStr)
+
+	fileTextLines := strings.Split(fileContent, "\n")
+	nLinesFile := len(fileTextLines)
+
+	// Validate insert line.
+	if insertLine < 0 || insertLine > nLinesFile {
+		return "", fmt.Errorf("invalid insert_line parameter: %d. It should be within the range of lines of the file: [0, %d]",
+			insertLine, nLinesFile)
+	}
+
+	// Save the current content so undo_edit can restore it.
+	fileHistory[path] = append(fileHistory[path], fileContent)
+
+	newStrLines := strings.Split(newStr, "\n")
+
+	// Splice newStr's lines into the file contents.
+	newFileTextLines := make([]string, 0, nLinesFile+len(newStrLines))
+	newFileTextLines = append(newFileTextLines, fileTextLines[:insertLine]...)
+	newFileTextLines = append(newFileTextLines, newStrLines...)
+	newFileTextLines = append(newFileTextLines, fileTextLines[insertLine:]...)
+
+	// Build a snippet of the edited section. FIX: the local slice was
+	// previously also named snippetLines, shadowing the package constant;
+	// renamed to avoid the confusion.
+	snippetStart := max(0, insertLine-snippetLines)
+	snippetEnd := min(insertLine+snippetLines, nLinesFile)
+	snippetText := make([]string, 0, snippetEnd-snippetStart+len(newStrLines))
+	snippetText = append(snippetText, fileTextLines[snippetStart:insertLine]...)
+	snippetText = append(snippetText, newStrLines...)
+	snippetText = append(snippetText, fileTextLines[insertLine:snippetEnd]...)
+	snippet := strings.Join(snippetText, "\n")
+
+	newFileText := strings.Join(newFileTextLines, "\n")
+	if err := writeFile(path, newFileText); err != nil {
+		return "", err
+	}
+
+	// Prepare the success message. snippetStart is 0-based; makeOutput
+	// wants a 1-based first line. FIX: was max(1, insertLine-4+1), which
+	// hard-coded the snippetLines constant (same value, clearer intent).
+	successMsg := fmt.Sprintf("The file %s has been edited. ", path)
+	successMsg += makeOutput(snippet, "a snippet of the edited file", snippetStart+1)
+	successMsg += "Review the changes and make sure they are as expected (correct indentation, no duplicate lines, etc). Edit the file again if necessary."
+
+	return successMsg, nil
+}
+
+// handleUndoEdit implements the undo_edit command: it pops the most recent
+// saved snapshot for path and writes it back to disk.
+func handleUndoEdit(path string) (string, error) {
+	history := fileHistory[path]
+	if len(history) == 0 {
+		return "", fmt.Errorf("no edit history found for %s", path)
+	}
+
+	// Pop the most recent snapshot off the history stack.
+	oldText := history[len(history)-1]
+	fileHistory[path] = history[:len(history)-1]
+
+	if err := writeFile(path, oldText); err != nil {
+		return "", err
+	}
+
+	return fmt.Sprintf("Last edit to %s undone successfully. %s", path, makeOutput(oldText, path, 1)), nil
+}
+
+// listDirectory lists files and directories up to 2 levels deep, excluding
+// hidden entries.
+func listDirectory(ctx context.Context, path string) (string, error) {
+	// FIX: single-quote the path for bash so spaces or shell
+	// metacharacters in the directory name cannot break (or inject into)
+	// the command line. Embedded single quotes become '\''.
+	quoted := "'" + strings.ReplaceAll(path, "'", `'\''`) + "'"
+	cmd := fmt.Sprintf("find %s -maxdepth 2 -not -path '*/\\.*'", quoted)
+	output, err := executeCommand(ctx, cmd)
+	if err != nil {
+		return "", fmt.Errorf("failed to list directory: %v", err)
+	}
+
+	return fmt.Sprintf("Here's the files and directories up to 2 levels deep in %s, excluding hidden items:\n%s\n", path, output), nil
+}
+
+// executeCommand runs cmd via `bash -c` in the context's working directory
+// and returns its combined stdout+stderr, truncated if very large.
+func executeCommand(ctx context.Context, cmd string) (string, error) {
+	// This is a simplified version without timeouts for now.
+	proc := exec.CommandContext(ctx, "bash", "-c", cmd)
+	proc.Dir = WorkingDir(ctx)
+	out, err := proc.CombinedOutput()
+	if err != nil {
+		return "", fmt.Errorf("command execution failed: %v: %s", err, string(out))
+	}
+	return maybetruncate(string(out)), nil
+}
+
+// readFile returns the entire contents of path as a string.
+func readFile(path string) (string, error) {
+	data, err := os.ReadFile(path)
+	if err != nil {
+		return "", fmt.Errorf("failed to read file %s: %v", path, err)
+	}
+	return string(data), nil
+}
+
+// writeFile writes content to path, creating or truncating it (mode 0644).
+func writeFile(path, content string) error {
+	err := os.WriteFile(path, []byte(content), 0o644)
+	if err != nil {
+		return fmt.Errorf("failed to write to file %s: %v", path, err)
+	}
+	return nil
+}
+
+// makeOutput generates `cat -n` style numbered output for fileContent,
+// starting the numbering at initLine, labeled with fileDescriptor.
+func makeOutput(fileContent, fileDescriptor string, initLine int) string {
+	fileContent = maybetruncate(fileContent)
+	fileContent = maybeExpandTabs(fileDescriptor, fileContent)
+
+	var numbered strings.Builder
+	for i, line := range strings.Split(fileContent, "\n") {
+		if i > 0 {
+			numbered.WriteByte('\n')
+		}
+		fmt.Fprintf(&numbered, "%6d\t%s", i+initLine, line)
+	}
+
+	return fmt.Sprintf("Here's the result of running `cat -n` on %s:\n%s\n", fileDescriptor, numbered.String())
+}
+
+// maybetruncate caps content at maxResponseLen bytes, appending a notice
+// when truncation occurs; shorter content is returned unchanged.
+func maybetruncate(content string) string {
+	if len(content) > maxResponseLen {
+		return content[:maxResponseLen] + truncatedMessage
+	}
+	return content
+}
+
+// maybeExpandTabs is currently a no-op. The python implementation replaces
+// tabs with spaces, but this strikes me as unwise for our tool, so tabs are
+// preserved as-is. The path parameter is kept for a future per-file policy.
+func maybeExpandTabs(path, s string) string {
+	// Deliberately NOT: strings.ReplaceAll(s, "\t", "    ")
+	return s
+}
diff --git a/claudetool/edit_regression_test.go b/claudetool/edit_regression_test.go
new file mode 100644
index 0000000..cb859fe
--- /dev/null
+++ b/claudetool/edit_regression_test.go
@@ -0,0 +1,152 @@
+package claudetool
+
+import (
+ "context"
+ "encoding/json"
+ "strings"
+ "testing"
+)
+
+// TestEmptyContentHandling tests handling of empty content in str_replace
+// and related operations. It reproduces the conditions that once led to an
+// "index out of range [0]" panic.
+func TestEmptyContentHandling(t *testing.T) {
+	emptyFile := setupTestFile(t, "")
+
+	// Drive the full EditRun path, which is how the panic originally surfaced.
+	raw, err := json.Marshal(map[string]any{
+		"command": "str_replace",
+		"path":    emptyFile,
+		"old_str": "nonexistent text",
+		"new_str": "new content",
+	})
+	if err != nil {
+		t.Fatalf("Failed to marshal input: %v", err)
+	}
+
+	// Expect a clean error, not a panic.
+	_, err = EditRun(context.Background(), raw)
+	if err == nil {
+		t.Fatalf("Expected error for empty file with str_replace but got none")
+	}
+	if !strings.Contains(err.Error(), "did not appear verbatim") {
+		t.Errorf("Expected error message to indicate missing string, got: %s", err.Error())
+	}
+}
+
+// TestNilParameterHandling validates proper error handling when required
+// parameters are omitted, and that view works without an optional view_range.
+func TestNilParameterHandling(t *testing.T) {
+	testFile := setupTestFile(t, "test content")
+
+	// run marshals the input and invokes EditRun, returning its error.
+	run := func(input map[string]any) error {
+		t.Helper()
+		raw, err := json.Marshal(input)
+		if err != nil {
+			t.Fatalf("Failed to marshal input: %v", err)
+		}
+		_, err = EditRun(context.Background(), raw)
+		return err
+	}
+
+	// Case 1: str_replace without old_str must fail with a clear message.
+	err := run(map[string]any{
+		"command": "str_replace",
+		"path":    testFile,
+		"new_str": "replacement",
+	})
+	if err == nil {
+		t.Fatalf("Expected error for missing old_str but got none")
+	}
+	if !strings.Contains(err.Error(), "parameter old_str is required") {
+		t.Errorf("Expected error message to indicate missing old_str, got: %s", err.Error())
+	}
+
+	// Case 2: insert without new_str must fail with a clear message.
+	err = run(map[string]any{
+		"command":     "insert",
+		"path":        testFile,
+		"insert_line": 1,
+	})
+	if err == nil {
+		t.Fatalf("Expected error for missing new_str but got none")
+	}
+	if !strings.Contains(err.Error(), "parameter new_str is required") {
+		t.Errorf("Expected error message to indicate missing new_str, got: %s", err.Error())
+	}
+
+	// Case 3: view without view_range is valid and must not error.
+	if err := run(map[string]any{
+		"command": "view",
+		"path":    testFile,
+	}); err != nil {
+		t.Fatalf("Unexpected error for nil view_range: %v", err)
+	}
+}
+
+// TestEmptySplitResult pins down the strings.Split behavior the str_replace
+// implementation relies on: splitting on a separator that does not occur
+// yields a one-element slice holding the original content, so indexing
+// parts[0] is safe. This reproduces the "index out of range [0]" conditions.
+func TestEmptySplitResult(t *testing.T) {
+	cases := []struct {
+		content string
+		oldStr  string
+	}{
+		{"", "any string"},
+		{"content", "not in string"},
+		{"\n\n", "also not here"},
+	}
+
+	for _, tc := range cases {
+		parts := strings.Split(tc.content, tc.oldStr)
+		if len(parts) != 1 {
+			t.Errorf("Expected strings.Split to return a slice with 1 element when separator isn't found, got %d elements", len(parts))
+		}
+		if len(parts) > 0 && parts[0] != tc.content {
+			t.Errorf("Expected parts[0] to be original content %q, got %q", tc.content, parts[0])
+		}
+	}
+
+	// Recreate the originally-unsafe scenario against a real empty file.
+	emptyFile := setupTestFile(t, "")
+	content, _ := readFile(emptyFile)
+	parts := strings.Split(content, "nonexistent")
+
+	// The defensive fallback that fixed the panic:
+	if len(parts) == 0 {
+		parts = []string{""}
+	}
+
+	// Without the fix this indexing would have panicked.
+	_ = strings.Count(parts[0], "\n")
+}
diff --git a/claudetool/edit_test.go b/claudetool/edit_test.go
new file mode 100644
index 0000000..fe3d66c
--- /dev/null
+++ b/claudetool/edit_test.go
@@ -0,0 +1,399 @@
+package claudetool
+
+import (
+ "context"
+ "encoding/json"
+ "os"
+ "path/filepath"
+ "strings"
+ "testing"
+)
+
+// setupTestFile creates a temporary file with the given content and returns
+// its path. The containing directory is removed automatically when the test
+// (or subtest) finishes.
+func setupTestFile(t *testing.T, content string) string {
+	t.Helper()
+
+	tempDir, err := os.MkdirTemp("", "anthropic_edit_test_*")
+	if err != nil {
+		t.Fatalf("Failed to create temp directory: %v", err)
+	}
+	// Registering cleanup first also covers the write-failure path below.
+	t.Cleanup(func() {
+		os.RemoveAll(tempDir)
+	})
+
+	testFile := filepath.Join(tempDir, "test_file.txt")
+	if err := os.WriteFile(testFile, []byte(content), 0o644); err != nil {
+		t.Fatalf("Failed to write test file: %v", err)
+	}
+
+	return testFile
+}
+
+// callEditTool marshals input, invokes EditRun, and fails the test on any
+// error, returning the tool's textual output.
+func callEditTool(t *testing.T, input map[string]any) string {
+	t.Helper()
+
+	raw, err := json.Marshal(input)
+	if err != nil {
+		t.Fatalf("Failed to marshal input: %v", err)
+	}
+
+	out, err := EditRun(context.Background(), raw)
+	if err != nil {
+		t.Fatalf("Tool execution failed: %v", err)
+	}
+	return out
+}
+
+// TestEditToolView tests the view command, both for the whole file and for
+// a restricted line range.
+func TestEditToolView(t *testing.T) {
+	testFile := setupTestFile(t, "Line 1\nLine 2\nLine 3\nLine 4\nLine 5")
+
+	// A whole-file view should include every line.
+	got := callEditTool(t, map[string]any{
+		"command": "view",
+		"path":    testFile,
+	})
+	if !strings.Contains(got, "Line 1") {
+		t.Errorf("View result should contain the file content, got: %s", got)
+	}
+
+	// A ranged view should include only the requested lines.
+	got = callEditTool(t, map[string]any{
+		"command":    "view",
+		"path":       testFile,
+		"view_range": []int{2, 4},
+	})
+	if strings.Contains(got, "Line 1") || !strings.Contains(got, "Line 2") {
+		t.Errorf("View with range should show only specified lines, got: %s", got)
+	}
+}
+
+// TestEditToolStrReplace tests that str_replace rewrites the file on disk
+// and reports a snippet containing the new text.
+func TestEditToolStrReplace(t *testing.T) {
+	testFile := setupTestFile(t, "Line 1\nLine 2\nLine 3\nLine 4\nLine 5")
+
+	result := callEditTool(t, map[string]any{
+		"command": "str_replace",
+		"path":    testFile,
+		"old_str": "Line 3",
+		"new_str": "Modified Line 3",
+	})
+
+	// The file on disk must reflect the replacement.
+	onDisk, err := os.ReadFile(testFile)
+	if err != nil {
+		t.Fatalf("Failed to read test file: %v", err)
+	}
+	if !strings.Contains(string(onDisk), "Modified Line 3") {
+		t.Errorf("File content should be modified, got: %s", string(onDisk))
+	}
+
+	// The tool response must include a snippet showing the new content.
+	if !strings.Contains(result, "Modified Line 3") {
+		t.Errorf("Result should contain the modified content, got: %s", result)
+	}
+}
+
+// TestEditToolInsert tests that insert splices new content after the given
+// line and reports a snippet containing it.
+func TestEditToolInsert(t *testing.T) {
+	testFile := setupTestFile(t, "Line 1\nLine 2\nLine 3\nLine 4\nLine 5")
+
+	result := callEditTool(t, map[string]any{
+		"command":     "insert",
+		"path":        testFile,
+		"insert_line": 2,
+		"new_str":     "Inserted Line",
+	})
+
+	// The file on disk must contain the inserted line in the right spot.
+	onDisk, err := os.ReadFile(testFile)
+	if err != nil {
+		t.Fatalf("Failed to read test file: %v", err)
+	}
+	want := "Line 1\nLine 2\nInserted Line\nLine 3\nLine 4\nLine 5"
+	if string(onDisk) != want {
+		t.Errorf("File content incorrect after insert. Expected:\n%s\nGot:\n%s", want, string(onDisk))
+	}
+
+	// The tool response must include a snippet showing the insertion.
+	if !strings.Contains(result, "Inserted Line") {
+		t.Errorf("Result should contain the inserted content, got: %s", result)
+	}
+}
+
+// TestEditToolCreate tests the create command functionality
+func TestEditToolCreate(t *testing.T) {
+ tempDir, err := os.MkdirTemp("", "anthropic_edit_test_create_*")
+ if err != nil {
+ t.Fatalf("Failed to create temp directory: %v", err)
+ }
+
+ t.Cleanup(func() {
+ os.RemoveAll(tempDir)
+ })
+
+ newFilePath := filepath.Join(tempDir, "new_file.txt")
+ content := "This is a new file\nWith multiple lines"
+
+ // Test the create command
+ result := callEditTool(t, map[string]any{
+ "command": "create",
+ "path": newFilePath,
+ "file_text": content,
+ })
+
+ // Verify the file was created with the right content
+ createdContent, err := os.ReadFile(newFilePath)
+ if err != nil {
+ t.Fatalf("Failed to read created file: %v", err)
+ }
+
+ if string(createdContent) != content {
+ t.Errorf("Created file content incorrect. Expected:\n%s\nGot:\n%s", content, string(createdContent))
+ }
+
+ // Verify the result message
+ if !strings.Contains(result, "File created successfully") {
+ t.Errorf("Result should confirm file creation, got: %s", result)
+ }
+}
+
+// TestEditToolUndoEdit tests the undo_edit command functionality
+func TestEditToolUndoEdit(t *testing.T) {
+ originalContent := "Line 1\nLine 2\nLine 3\nLine 4\nLine 5"
+ testFile := setupTestFile(t, originalContent)
+
+ // First modify the file
+ callEditTool(t, map[string]any{
+ "command": "str_replace",
+ "path": testFile,
+ "old_str": "Line 3",
+ "new_str": "Modified Line 3",
+ })
+
+ // Then undo the edit
+ result := callEditTool(t, map[string]any{
+ "command": "undo_edit",
+ "path": testFile,
+ })
+
+ // Verify the file was restored to original content
+ restoredContent, err := os.ReadFile(testFile)
+ if err != nil {
+ t.Fatalf("Failed to read test file: %v", err)
+ }
+
+ if string(restoredContent) != originalContent {
+ t.Errorf("File content should be restored to original, got: %s", string(restoredContent))
+ }
+
+ // Verify the result message
+ if !strings.Contains(result, "undone successfully") {
+ t.Errorf("Result should confirm undo operation, got: %s", result)
+ }
+}
+
+// TestEditToolErrors exercises the main error paths: unknown command,
+// missing file, missing required parameter, ambiguous replacement, and an
+// out-of-range view range.
+func TestEditToolErrors(t *testing.T) {
+	testFile := setupTestFile(t, "Line 1\nLine 2\nLine 3\nLine 4\nLine 5")
+
+	cases := []struct {
+		name    string
+		input   map[string]any
+		wantErr string
+	}{
+		{
+			name: "Invalid command",
+			input: map[string]any{
+				"command": "invalid_command",
+				"path":    testFile,
+			},
+			wantErr: "unrecognized command",
+		},
+		{
+			name: "Non-existent file",
+			input: map[string]any{
+				"command": "view",
+				"path":    "/non/existent/file.txt",
+			},
+			wantErr: "does not exist",
+		},
+		{
+			name: "Missing required parameter",
+			input: map[string]any{
+				"command": "str_replace",
+				"path":    testFile,
+				// old_str deliberately omitted
+			},
+			wantErr: "parameter old_str is required",
+		},
+		{
+			name: "Multiple occurrences in str_replace",
+			input: map[string]any{
+				"command": "str_replace",
+				"path":    testFile,
+				"old_str": "Line", // Appears multiple times
+				"new_str": "Modified Line",
+			},
+			wantErr: "Multiple occurrences",
+		},
+		{
+			name: "Invalid view range",
+			input: map[string]any{
+				"command":    "view",
+				"path":       testFile,
+				"view_range": []int{10, 20}, // Out of range
+			},
+			wantErr: "invalid view_range",
+		},
+	}
+
+	for _, tc := range cases {
+		t.Run(tc.name, func(t *testing.T) {
+			raw, err := json.Marshal(tc.input)
+			if err != nil {
+				t.Fatalf("Failed to marshal input: %v", err)
+			}
+
+			_, err = EditRun(context.Background(), raw)
+			if err == nil {
+				t.Fatalf("Expected error but got none")
+			}
+			if !strings.Contains(err.Error(), tc.wantErr) {
+				t.Errorf("Error message does not contain expected text. Expected to contain: %q, Got: %q", tc.wantErr, err.Error())
+			}
+		})
+	}
+}
+
+// TestHandleStrReplaceEdgeCases tests the handleStrReplace function specifically for edge cases
+// that could cause panics like "index out of range [0] with length 0"
+func TestHandleStrReplaceEdgeCases(t *testing.T) {
+	// The issue was with strings.Split returning an empty slice when the separator wasn't found
+	// This test directly tests the internal implementation with conditions that might cause this
+
+	// Create a test file with empty content
+	emptyFile := setupTestFile(t, "")
+
+	// Test with empty file content and arbitrary oldStr:
+	// the function must fail cleanly, not panic.
+	_, err := handleStrReplace(emptyFile, "some string that doesn't exist", "new content")
+	if err == nil {
+		t.Fatal("Expected error for empty file but got none")
+	}
+	if !strings.Contains(err.Error(), "did not appear verbatim") {
+		t.Errorf("Expected error message to indicate missing string, got: %s", err.Error())
+	}
+
+	// Create a file with content that doesn't match oldStr
+	nonMatchingFile := setupTestFile(t, "This is some content\nthat doesn't contain the target string")
+
+	// Test with content that doesn't contain oldStr
+	_, err = handleStrReplace(nonMatchingFile, "target string not present", "replacement")
+	if err == nil {
+		t.Fatal("Expected error for non-matching content but got none")
+	}
+	if !strings.Contains(err.Error(), "did not appear verbatim") {
+		t.Errorf("Expected error message to indicate missing string, got: %s", err.Error())
+	}
+
+	// Test handling of the edge case that could potentially cause the "index out of range" panic
+	// This directly verifies that the handleStrReplace function properly handles the case where
+	// strings.Split returns an empty or unexpected result
+
+	// Verify that the protection against empty parts slice works.
+	// NOTE(review): per the strings.Split documentation, a non-empty separator
+	// always yields a slice of length >= 1 (Split("", sep) returns [""]); a
+	// zero-length result is only possible with an empty separator AND empty
+	// input. With the non-empty oldStr below this branch is therefore
+	// unreachable; it is kept to mirror the defensive guard in production code.
+	fileContent := ""
+	oldStr := "some string"
+	parts := strings.Split(fileContent, oldStr)
+	if len(parts) == 0 {
+		// This should match the protection in the code
+		parts = []string{""}
+	}
+
+	// This should not panic with the fix in place
+	_ = strings.Count(parts[0], "\n") // This line would have panicked without the fix
+}
+
+// TestViewRangeWithStrReplace tests that the view_range parameter works correctly
+// with the str_replace command (tests the full workflow):
+// view a line range, replace within it, verify the whole file, then view
+// a single-line range of the modified file.
+func TestViewRangeWithStrReplace(t *testing.T) {
+	// Create test file with multiple lines
+	content := "Line 1: First line\nLine 2: Second line\nLine 3: Third line\nLine 4: Fourth line\nLine 5: Fifth line"
+	testFile := setupTestFile(t, content)
+
+	// First view a subset of the file using view_range
+	viewResult := callEditTool(t, map[string]any{
+		"command":    "view",
+		"path":       testFile,
+		"view_range": []int{2, 4}, // Only lines 2-4
+	})
+
+	// Verify that we only see the specified lines: the range is inclusive,
+	// so lines 1 and 5 must be absent and lines 2 and 4 present.
+	if strings.Contains(viewResult, "Line 1:") || strings.Contains(viewResult, "Line 5:") {
+		t.Errorf("View with range should only show lines 2-4, got: %s", viewResult)
+	}
+	if !strings.Contains(viewResult, "Line 2:") || !strings.Contains(viewResult, "Line 4:") {
+		t.Errorf("View with range should show lines 2-4, got: %s", viewResult)
+	}
+
+	// Now perform a str_replace on one of the lines we viewed
+	replaceResult := callEditTool(t, map[string]any{
+		"command": "str_replace",
+		"path":    testFile,
+		"old_str": "Line 3: Third line",
+		"new_str": "Line 3: MODIFIED Third line",
+	})
+
+	// Check that the replacement was successful (the tool echoes the new text)
+	if !strings.Contains(replaceResult, "Line 3: MODIFIED Third line") {
+		t.Errorf("Replace result should contain the modified line, got: %s", replaceResult)
+	}
+
+	// Verify the file content was updated correctly
+	modifiedContent, err := os.ReadFile(testFile)
+	if err != nil {
+		t.Fatalf("Failed to read test file after modification: %v", err)
+	}
+
+	expectedContent := "Line 1: First line\nLine 2: Second line\nLine 3: MODIFIED Third line\nLine 4: Fourth line\nLine 5: Fifth line"
+	if string(modifiedContent) != expectedContent {
+		t.Errorf("File content after replacement is incorrect.\nExpected:\n%s\nGot:\n%s",
+			expectedContent, string(modifiedContent))
+	}
+
+	// View the modified file with a different view_range
+	finalViewResult := callEditTool(t, map[string]any{
+		"command":    "view",
+		"path":       testFile,
+		"view_range": []int{3, 3}, // Only the modified line
+	})
+
+	// Verify we can see only the modified line
+	if !strings.Contains(finalViewResult, "Line 3: MODIFIED Third line") {
+		t.Errorf("Final view should show the modified line, got: %s", finalViewResult)
+	}
+	if strings.Contains(finalViewResult, "Line 2:") || strings.Contains(finalViewResult, "Line 4:") {
+		t.Errorf("Final view should only show line 3, got: %s", finalViewResult)
+	}
+}
diff --git a/claudetool/editbuf/LICENSE b/claudetool/editbuf/LICENSE
new file mode 100644
index 0000000..ea5ea89
--- /dev/null
+++ b/claudetool/editbuf/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
\ No newline at end of file
diff --git a/claudetool/editbuf/editbuf.go b/claudetool/editbuf/editbuf.go
new file mode 100644
index 0000000..6b04310
--- /dev/null
+++ b/claudetool/editbuf/editbuf.go
@@ -0,0 +1,92 @@
+// Modified from rsc.io/edit
+
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package edit implements buffered position-based editing of byte slices.
+package editbuf
+
+import (
+ "fmt"
+ "sort"
+)
+
+// A Buffer is a queue of edits to apply to a given byte slice.
+type Buffer struct {
+	old []byte // original data; treated as read-only while the Buffer is in use
+	q   edits  // queued edits, sorted and applied by Bytes
+}
+
+// An edit records a single text modification: change the bytes in [start,end) to new.
+// An insertion has start == end; a deletion has new == "".
+type edit struct {
+	start int
+	end   int
+	new   string
+}
+
+// An edits is a list of edits that is sortable by start offset, breaking ties by end offset.
+type edits []edit
+
+// Len, Swap, and Less implement sort.Interface for edits.
+func (x edits) Len() int      { return len(x) }
+func (x edits) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+func (x edits) Less(i, j int) bool {
+	if x[i].start != x[j].start {
+		return x[i].start < x[j].start
+	}
+	return x[i].end < x[j].end
+}
+
+// NewBuffer returns a new buffer to accumulate changes to an initial data slice.
+// The returned buffer maintains a reference to the data, so the caller must ensure
+// the data is not modified until after the Buffer is done being used.
+func NewBuffer(old []byte) *Buffer {
+	return &Buffer{old: old}
+}
+
+// Insert inserts the new string at old[pos:pos].
+// It panics if pos is outside [0, len(old)].
+func (b *Buffer) Insert(pos int, new string) {
+	if pos < 0 || pos > len(b.old) {
+		panic("invalid edit position")
+	}
+	b.q = append(b.q, edit{pos, pos, new})
+}
+
+// Delete deletes the text old[start:end].
+// It panics if [start,end) is not a valid range within old.
+func (b *Buffer) Delete(start, end int) {
+	if end < start || start < 0 || end > len(b.old) {
+		panic("invalid edit position")
+	}
+	b.q = append(b.q, edit{start, end, ""})
+}
+
+// Replace replaces old[start:end] with new.
+// It panics if [start,end) is not a valid range within old.
+func (b *Buffer) Replace(start, end int, new string) {
+	if end < start || start < 0 || end > len(b.old) {
+		panic("invalid edit position")
+	}
+	b.q = append(b.q, edit{start, end, new})
+}
+
+// Bytes returns a new byte slice containing the original data
+// with the queued edits applied.
+// It returns an error (and no data) if any two queued edits overlap.
+func (b *Buffer) Bytes() ([]byte, error) {
+	// Sort edits by starting position and then by ending position.
+	// Breaking ties by ending position allows insertions at point x
+	// to be applied before a replacement of the text at [x, y).
+	sort.Stable(b.q)
+
+	var new []byte
+	offset := 0 // end of the region of b.old already consumed
+	for i, e := range b.q {
+		if e.start < offset {
+			// This edit starts before the previous one ended: overlap.
+			// (i > 0 holds here: edits are range-checked on entry, so
+			// e.start >= 0, which equals the initial offset.)
+			e0 := b.q[i-1]
+			return nil, fmt.Errorf("overlapping edits: [%d,%d)->%q, [%d,%d)->%q", e0.start, e0.end, e0.new, e.start, e.end, e.new)
+		}
+		new = append(new, b.old[offset:e.start]...)
+		offset = e.end
+		new = append(new, e.new...)
+	}
+	new = append(new, b.old[offset:]...)
+	return new, nil
+}
diff --git a/claudetool/keyword.go b/claudetool/keyword.go
new file mode 100644
index 0000000..2438275
--- /dev/null
+++ b/claudetool/keyword.go
@@ -0,0 +1,175 @@
+package claudetool
+
+import (
+ "context"
+ _ "embed"
+ "encoding/json"
+ "fmt"
+ "log/slog"
+ "os/exec"
+ "strings"
+
+ "sketch.dev/ant"
+)
+
+// The Keyword tool provides keyword search.
+// Keyword is the keyword_search tool definition; keywordRun does the work.
+// TODO: use an embedding model + re-ranker or otherwise do something nicer than this kludge.
+// TODO: if we can get this fast enough, do it on the fly while the user is typing their prompt.
+var Keyword = &ant.Tool{
+	Name:        keywordName,
+	Description: keywordDescription,
+	InputSchema: ant.MustSchema(keywordInputSchema),
+	Run:         keywordRun,
+}
+
+const (
+ keywordName = "keyword_search"
+ keywordDescription = `
+keyword_search locates files with a search-and-filter approach.
+Use when navigating unfamiliar codebases with only conceptual understanding or vague user questions.
+
+Effective use:
+- Provide a detailed query for accurate relevance ranking
+- Include extensive but uncommon keywords to ensure comprehensive results
+- Order keywords by importance (most important first) - less important keywords may be dropped if there are too many results
+
+IMPORTANT: Do NOT use this tool if you have precise information like log lines, error messages, filenames, symbols, or package names. Use direct approaches (grep, cat, go doc, etc.) instead.
+`
+
+ // If you modify this, update the termui template for prettier rendering.
+ keywordInputSchema = `
+{
+ "type": "object",
+ "required": [
+ "query",
+ "keywords"
+ ],
+ "properties": {
+ "query": {
+ "type": "string",
+ "description": "A detailed statement of what you're trying to find or learn."
+ },
+ "keywords": {
+ "type": "array",
+ "items": {
+ "type": "string"
+ },
+ "description": "List of keywords in descending order of importance."
+ }
+ }
+}
+`
+)
+
+// keywordInput mirrors the JSON input schema of the keyword_search tool.
+type keywordInput struct {
+	Query    string   `json:"query"`    // detailed statement of what to find or learn
+	Keywords []string `json:"keywords"` // keywords in descending order of importance
+}
+
+// keywordSystemPrompt is the system prompt for the relevance-filtering subconversation.
+//
+//go:embed keyword_system_prompt.txt
+var keywordSystemPrompt string
+
+// findRepoRoot attempts to find the git repository root containing wd.
+// It returns an error if wd is not inside a git work tree (or git is not installed).
+func findRepoRoot(wd string) (string, error) {
+	cmd := exec.Command("git", "rev-parse", "--show-toplevel")
+	cmd.Dir = wd
+	out, err := cmd.Output()
+	// todo: cwd here and throughout
+	if err != nil {
+		return "", fmt.Errorf("failed to find git repository root: %w", err)
+	}
+	// git prints the path followed by a trailing newline; trim it.
+	return strings.TrimSpace(string(out)), nil
+}
+
+// keywordRun implements the keyword_search tool.
+// It drops overly common keywords, gathers ripgrep results that fit in the
+// model's context budget, and asks a subconversation to rank the results
+// for relevance to the query.
+func keywordRun(ctx context.Context, m json.RawMessage) (string, error) {
+	var input keywordInput
+	if err := json.Unmarshal(m, &input); err != nil {
+		return "", err
+	}
+	wd := WorkingDir(ctx)
+	// Prefer the repository root, so the search covers the whole project.
+	root, err := findRepoRoot(wd)
+	if err == nil {
+		wd = root
+	}
+	slog.InfoContext(ctx, "keyword search input", "query", input.Query, "keywords", input.Keywords, "wd", wd)
+
+	// first remove stopwords: terms that individually match so much of the
+	// repo that they add noise rather than signal
+	var keep []string
+	for _, term := range input.Keywords {
+		out, err := ripgrep(ctx, wd, []string{term})
+		if err != nil {
+			return "", err
+		}
+		if len(out) > 64*1024 {
+			slog.InfoContext(ctx, "keyword search result too large", "term", term, "bytes", len(out))
+			continue
+		}
+		keep = append(keep, term)
+	}
+	if len(keep) == 0 {
+		// Calling ripgrep with no -e patterns would fail with a usage error;
+		// give the model an actionable message instead.
+		return "", fmt.Errorf("every keyword either matched too much of the repository or was dropped; retry with fewer, more specific keywords")
+	}
+
+	// peel off keywords (least important last, per the input ordering)
+	// until we get a result that fits in the query window
+	var out string
+	for {
+		var err error
+		out, err = ripgrep(ctx, wd, keep)
+		if err != nil {
+			return "", err
+		}
+		if len(out) < 128*1024 {
+			break
+		}
+		if len(keep) == 1 {
+			// Nothing left to peel; avoid slicing keep to empty, which would
+			// issue a pattern-less (and thus failing) ripgrep invocation.
+			return "", fmt.Errorf("keyword search results too large (%d bytes) even with a single keyword; retry with more specific keywords", len(out))
+		}
+		keep = keep[:len(keep)-1]
+	}
+
+	// Ask a subconversation to filter the raw matches for relevance.
+	info := ant.ToolCallInfoFromContext(ctx)
+	convo := info.Convo.SubConvo()
+	convo.SystemPrompt = strings.TrimSpace(keywordSystemPrompt)
+
+	initialMessage := ant.Message{
+		Role: ant.MessageRoleUser,
+		Content: []ant.Content{
+			ant.StringContent("<pwd>\n" + wd + "\n</pwd>"),
+			ant.StringContent("<ripgrep_results>\n" + out + "\n</ripgrep_results>"),
+			ant.StringContent("<query>\n" + input.Query + "\n</query>"),
+		},
+	}
+
+	resp, err := convo.SendMessage(initialMessage)
+	if err != nil {
+		return "", fmt.Errorf("failed to send relevance filtering message: %w", err)
+	}
+	if len(resp.Content) != 1 {
+		return "", fmt.Errorf("unexpected number of messages in relevance filtering response: %d", len(resp.Content))
+	}
+
+	filtered := resp.Content[0].Text
+
+	slog.InfoContext(ctx, "keyword search results processed",
+		"bytes", len(out),
+		"lines", strings.Count(out, "\n"),
+		"files", strings.Count(out, "\n\n"),
+		"query", input.Query,
+		"filtered", filtered,
+	)
+
+	return filtered, nil
+}
+
+// ripgrep runs rg in wd, searching case-insensitively for any of terms,
+// with 10 lines of context around each match.
+// It returns the literal string "no matches found" (not an error) when
+// ripgrep finds nothing.
+func ripgrep(ctx context.Context, wd string, terms []string) (string, error) {
+	args := []string{"-C", "10", "-i", "--line-number", "--with-filename"}
+	for _, term := range terms {
+		args = append(args, "-e", term)
+	}
+	cmd := exec.CommandContext(ctx, "rg", args...)
+	cmd.Dir = wd
+	// CombinedOutput so failures carry rg's own diagnostic text.
+	out, err := cmd.CombinedOutput()
+	if err != nil {
+		// ripgrep returns exit code 1 when no matches are found, which is not an error for us
+		if exitErr, ok := err.(*exec.ExitError); ok && exitErr.ExitCode() == 1 {
+			return "no matches found", nil
+		}
+		return "", fmt.Errorf("search failed: %v\n%s", err, out)
+	}
+	outStr := string(out)
+	return outStr, nil
+}
diff --git a/claudetool/keyword_system_prompt.txt b/claudetool/keyword_system_prompt.txt
new file mode 100644
index 0000000..ac37acd
--- /dev/null
+++ b/claudetool/keyword_system_prompt.txt
@@ -0,0 +1,28 @@
+You are a code search relevance evaluator. Your task is to analyze ripgrep results and determine which files are most relevant to the user's query.
+
+INPUT FORMAT:
+- You will receive the working directory in <pwd> tags
+- Ripgrep output containing file matches for keywords, each with 10 lines of context, in <ripgrep_results> tags
+- The original search query in <query> tags
+
+ANALYSIS INSTRUCTIONS:
+1. Examine each file match and its surrounding context
+2. Evaluate relevance to the query based on:
+ - Direct relevance to concepts in the query
+ - Implementation of functionality described in the query
+ - Evidence of patterns or systems related to the query
+3. Exercise strict judgment - only return files that are genuinely relevant
+
+OUTPUT FORMAT:
+Respond with a plain text list of the most relevant files in decreasing order of relevance:
+
+/path/to/most/relevant/file: Concise relevance explanation
+/path/to/second/file: Concise relevance explanation
+...
+
+IMPORTANT:
+- Only include files with meaningful relevance to the query
+- Keep it short, don't blather
+- Do NOT list all files that had keyword matches
+- Focus on quality over quantity
+- If no files are truly relevant, return "No relevant files found"
+- Use absolute file paths
diff --git a/claudetool/patch.go b/claudetool/patch.go
new file mode 100644
index 0000000..9254319
--- /dev/null
+++ b/claudetool/patch.go
@@ -0,0 +1,307 @@
+package claudetool
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "go/parser"
+ "go/token"
+ "log/slog"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "sketch.dev/ant"
+ "sketch.dev/claudetool/editbuf"
+ "sketch.dev/claudetool/patchkit"
+)
+
+// Patch is a tool for precise text modifications in files.
+var Patch = &ant.Tool{
+ Name: PatchName,
+ Description: strings.TrimSpace(PatchDescription),
+ InputSchema: ant.MustSchema(PatchInputSchema),
+ Run: PatchRun,
+}
+
+const (
+ PatchName = "patch"
+ PatchDescription = `
+File modification tool for precise text edits.
+
+Operations:
+- replace: Substitute text with new content
+- append_eof: Append new text at the end of the file
+- prepend_bof: Insert new text at the beginning of the file
+- overwrite: Replace the entire file with new content (automatically creates the file)
+
+Usage notes:
+- All inputs are interpreted literally (no automatic newline or whitespace handling)
+- For replace operations, oldText must appear EXACTLY ONCE in the file
+`
+
+ // If you modify this, update the termui template for prettier rendering.
+ PatchInputSchema = `
+{
+ "type": "object",
+ "required": ["path", "patches"],
+ "properties": {
+ "path": {
+ "type": "string",
+ "description": "Absolute path to the file to patch"
+ },
+ "patches": {
+ "type": "array",
+ "description": "List of patch requests to apply",
+ "items": {
+ "type": "object",
+ "required": ["operation", "newText"],
+ "properties": {
+ "operation": {
+ "type": "string",
+ "enum": ["replace", "append_eof", "prepend_bof", "overwrite"],
+ "description": "Type of operation to perform"
+ },
+ "oldText": {
+ "type": "string",
+ "description": "Text to locate for the operation (must be unique in file, required for replace)"
+ },
+ "newText": {
+ "type": "string",
+ "description": "The new text to use (empty for deletions)"
+ }
+ }
+ }
+ }
+ }
+}
+`
+)
+
+// TODO: maybe rename PatchRequest to PatchOperation or PatchSpec or PatchPart or just Patch?
+
+// patchInput mirrors the JSON input schema of the patch tool:
+// an absolute file path and the list of patches to apply to it.
+type patchInput struct {
+	Path    string         `json:"path"`
+	Patches []patchRequest `json:"patches"`
+}
+
+// patchRequest describes one edit operation within a patch tool call.
+type patchRequest struct {
+	Operation string `json:"operation"`         // one of: replace, append_eof, prepend_bof, overwrite
+	OldText   string `json:"oldText,omitempty"` // required for replace; must occur exactly once in the file
+	NewText   string `json:"newText,omitempty"` // replacement/insertion text (empty for deletions)
+}
+
+// PatchRun is the entry point for the patch tool.
+// It validates the input, queues all patches against an edit buffer
+// (all-or-nothing), and writes the result back to the file.
+// For Go files that parsed before editing, it verifies they still parse after.
+func PatchRun(ctx context.Context, m json.RawMessage) (string, error) {
+	var input patchInput
+	if err := json.Unmarshal(m, &input); err != nil {
+		return "", fmt.Errorf("failed to unmarshal user_patch input: %w", err)
+	}
+
+	// Validate the input
+	if !filepath.IsAbs(input.Path) {
+		return "", fmt.Errorf("path %q is not absolute", input.Path)
+	}
+	if len(input.Patches) == 0 {
+		return "", fmt.Errorf("no patches provided")
+	}
+	// TODO: check whether the file is autogenerated, and if so, require a "force" flag to modify it.
+
+	orig, err := os.ReadFile(input.Path)
+	// If the file doesn't exist, we can still apply patches
+	// that don't require finding existing text.
+	switch {
+	case errors.Is(err, os.ErrNotExist):
+		for _, patch := range input.Patches {
+			switch patch.Operation {
+			case "prepend_bof", "append_eof", "overwrite":
+			default:
+				return "", fmt.Errorf("file %q does not exist", input.Path)
+			}
+		}
+	case err != nil:
+		return "", fmt.Errorf("failed to read file %q: %w", input.Path, err)
+	}
+
+	likelyGoFile := strings.HasSuffix(input.Path, ".go")
+
+	autogenerated := likelyGoFile && isAutogeneratedGoFile(orig)
+	// parsed: the original file parses as Go; if so, we re-check the patched
+	// result below. (Previously this tested parseGo(orig) != nil, which
+	// inverted the condition and skipped verification for valid files.)
+	parsed := likelyGoFile && parseGo(orig) == nil
+
+	origStr := string(orig)
+	// Process the patches "simultaneously", minimizing them along the way.
+	// Claude generates patches that interact with each other.
+	buf := editbuf.NewBuffer(orig)
+
+	// TODO: is it better to apply the patches that apply cleanly and report on the failures?
+	// or instead have it be all-or-nothing?
+	// For now, it is all-or-nothing.
+	// TODO: when the model gets into a "cannot apply patch" cycle of doom, how do we get it unstuck?
+	// Also: how do we detect that it's in a cycle?
+	var patchErr error
+	for i, patch := range input.Patches {
+		switch patch.Operation {
+		case "prepend_bof":
+			buf.Insert(0, patch.NewText)
+		case "append_eof":
+			buf.Insert(len(orig), patch.NewText)
+		case "overwrite":
+			buf.Replace(0, len(orig), patch.NewText)
+		case "replace":
+			if patch.OldText == "" {
+				return "", fmt.Errorf("patch %d: oldText cannot be empty for %s operation", i, patch.Operation)
+			}
+
+			// Attempt to apply the patch.
+			spec, count := patchkit.Unique(origStr, patch.OldText, patch.NewText)
+			switch count {
+			case 0:
+				// no matches, maybe recoverable, continued below
+			case 1:
+				// exact match, apply
+				slog.DebugContext(ctx, "patch_applied", "method", "unique")
+				spec.ApplyToEditBuf(buf)
+				continue
+			case 2:
+				// Multiple matches: inherently ambiguous, and the whitespace
+				// heuristics below cannot disambiguate, so record the error
+				// and move on. (Falling through here would also join a
+				// contradictory "old text not found" error for this patch.)
+				patchErr = errors.Join(patchErr, fmt.Errorf("old text not unique:\n%s", patch.OldText))
+				continue
+			default:
+				// TODO: return an error instead of using agentPatch
+				slog.ErrorContext(ctx, "unique returned unexpected count", "count", count)
+				patchErr = errors.Join(patchErr, fmt.Errorf("internal error"))
+				continue
+			}
+
+			// The following recovery mechanisms are heuristic.
+			// They aren't perfect, but they appear safe,
+			// and the cases they cover appear with some regularity.
+
+			// Try adjusting the whitespace prefix.
+			spec, ok := patchkit.UniqueDedent(origStr, patch.OldText, patch.NewText)
+			if ok {
+				slog.DebugContext(ctx, "patch_applied", "method", "unique_dedent")
+				spec.ApplyToEditBuf(buf)
+				continue
+			}
+
+			// Try ignoring leading/trailing whitespace in a semantically safe way.
+			spec, ok = patchkit.UniqueInValidGo(origStr, patch.OldText, patch.NewText)
+			if ok {
+				slog.DebugContext(ctx, "patch_applied", "method", "unique_in_valid_go")
+				spec.ApplyToEditBuf(buf)
+				continue
+			}
+
+			// Try ignoring semantically insignificant whitespace.
+			spec, ok = patchkit.UniqueGoTokens(origStr, patch.OldText, patch.NewText)
+			if ok {
+				slog.DebugContext(ctx, "patch_applied", "method", "unique_go_tokens")
+				spec.ApplyToEditBuf(buf)
+				continue
+			}
+
+			// Try trimming the first line of the patch, if we can do so safely.
+			spec, ok = patchkit.UniqueTrim(origStr, patch.OldText, patch.NewText)
+			if ok {
+				slog.DebugContext(ctx, "patch_applied", "method", "unique_trim")
+				spec.ApplyToEditBuf(buf)
+				continue
+			}
+
+			// No dice.
+			patchErr = errors.Join(patchErr, fmt.Errorf("old text not found:\n%s", patch.OldText))
+			continue
+		default:
+			return "", fmt.Errorf("unrecognized operation %q", patch.Operation)
+		}
+	}
+
+	if patchErr != nil {
+		sendTelemetry(ctx, "patch_error", map[string]any{
+			"orig":    origStr,
+			"patches": input.Patches,
+			"errors":  patchErr,
+		})
+		return "", patchErr
+	}
+
+	patched, err := buf.Bytes()
+	if err != nil {
+		return "", err
+	}
+	if err := os.MkdirAll(filepath.Dir(input.Path), 0o700); err != nil {
+		return "", fmt.Errorf("failed to create directory %q: %w", filepath.Dir(input.Path), err)
+	}
+	if err := os.WriteFile(input.Path, patched, 0o600); err != nil {
+		return "", fmt.Errorf("failed to write patched contents to file %q: %w", input.Path, err)
+	}
+
+	response := new(strings.Builder)
+	fmt.Fprintf(response, "- Applied all patches\n")
+
+	if parsed {
+		// The original parsed cleanly; make sure the patches didn't break it.
+		// NOTE(review): the file has already been written at this point, so a
+		// parse failure is reported but not rolled back — TODO confirm intent.
+		parseErr := parseGo(patched)
+		if parseErr != nil {
+			return "", fmt.Errorf("after applying all patches, the file no longer parses:\n%w", parseErr)
+		}
+	}
+
+	if autogenerated {
+		fmt.Fprintf(response, "- WARNING: %q appears to be autogenerated. Patches were applied anyway.\n", input.Path)
+	}
+
+	// TODO: maybe report the patch result to the model, i.e. some/all of the new code after the patches and formatting.
+	return response.String(), nil
+}
+
+// parseGo reports whether buf is syntactically valid Go,
+// returning the parse error (nil on success).
+func parseGo(buf []byte) error {
+	fset := token.NewFileSet()
+	// SkipObjectResolution: only syntax matters here, not name bindings.
+	_, err := parser.ParseFile(fset, "", buf, parser.SkipObjectResolution)
+	return err
+}
+
+// isAutogeneratedGoFile reports whether buf appears to be a generated
+// (rather than hand-written) Go source file, based on heuristic signals:
+// known byte patterns anywhere in the file, or generated-code markers in
+// comments near the top of the file.
+func isAutogeneratedGoFile(buf []byte) bool {
+	for _, sig := range autogeneratedSignals {
+		if bytes.Contains(buf, []byte(sig)) {
+			return true
+		}
+	}
+
+	// https://pkg.go.dev/cmd/go#hdr-Generate_Go_files_by_processing_source
+	// "This line must appear before the first non-comment, non-blank text in the file."
+	// Approximate that by looking for it at the top of the file, before the last of the imports.
+	// (Sometimes people put it after the package declaration, because of course they do.)
+	// At least in the imports region we know it's not part of their actual code;
+	// we don't want to ignore the generator (which also includes these strings!),
+	// just the generated code.
+	fset := token.NewFileSet()
+	f, err := parser.ParseFile(fset, "x.go", buf, parser.ImportsOnly|parser.ParseComments)
+	if err == nil {
+		for _, cg := range f.Comments {
+			// cg.Text() strips comment markers; compare in lowercase
+			// to tolerate authors who vary the canonical capitalization.
+			t := strings.ToLower(cg.Text())
+			for _, sig := range autogeneratedHeaderSignals {
+				if strings.Contains(t, sig) {
+					return true
+				}
+			}
+		}
+	}
+
+	return false
+}
+
+// autogeneratedSignals are signals that a file is autogenerated, when present anywhere in the file.
+var autogeneratedSignals = [][]byte{
+	[]byte("\nfunc bindataRead("), // pre-embed bindata packed file
+}
+
+// autogeneratedHeaderSignals are signals that a file is autogenerated, when present at the top of the file.
+// Entries must be lowercase, because they are matched against lowercased comment
+// text in isAutogeneratedGoFile; the strings.ToLower calls make that invariant
+// explicit even for literals that are already lowercase.
+var autogeneratedHeaderSignals = []string{
+	// canonical would be `(?m)^// Code generated .* DO NOT EDIT\.$`
+	// but people screw it up, a lot, so be more lenient
+	strings.ToLower("generate"),
+	strings.ToLower("DO NOT EDIT"),
+	strings.ToLower("export by"),
+}
diff --git a/claudetool/patchkit/patchkit.go b/claudetool/patchkit/patchkit.go
new file mode 100644
index 0000000..c7235e4
--- /dev/null
+++ b/claudetool/patchkit/patchkit.go
@@ -0,0 +1,415 @@
+package patchkit
+
+import (
+ "fmt"
+ "go/scanner"
+ "go/token"
+ "slices"
+ "strings"
+ "unicode"
+
+ "sketch.dev/claudetool/editbuf"
+)
+
+// A Spec specifies a single patch: replace the Len bytes of Src starting at
+// Off (which hold Old) with New.
+type Spec struct {
+	Off int    // byte offset in Src at which to apply the replacement
+	Len int    // number of bytes to replace (length of Old)
+	Src string // original full text the patch applies to (for debugging)
+	Old string // search string: the text at Src[Off : Off+Len]
+	New string // replacement string
+}
+
+// Unique generates a patch spec to apply op, given a unique occurrence of needle in haystack and replacement text replace.
+// It reports the number of matches found for needle in haystack: 0, 1, or 2 (for any value > 1).
+// The spec is non-nil only when exactly one match was found.
+func Unique(haystack, needle, replace string) (*Spec, int) {
+	idx := strings.Index(haystack, needle)
+	if idx < 0 {
+		return nil, 0
+	}
+	if strings.Contains(haystack[idx+len(needle):], needle) {
+		return nil, 2 // ambiguous: at least two occurrences
+	}
+	return &Spec{
+		Off: idx,
+		Len: len(needle),
+		Src: haystack,
+		Old: needle,
+		New: replace,
+	}, 1
+}
+
+// minimize shrinks the patch by dropping any prefix and suffix shared by
+// Old and New, narrowing the replacement to the bytes that actually differ.
+func (s *Spec) minimize() {
+	if n := commonPrefixLen(s.Old, s.New); n > 0 {
+		s.Off += n
+		s.Len -= n
+		s.Old, s.New = s.Old[n:], s.New[n:]
+	}
+	if n := commonSuffixLen(s.Old, s.New); n > 0 {
+		s.Len -= n
+		s.Old = s.Old[:len(s.Old)-n]
+		s.New = s.New[:len(s.New)-n]
+	}
+}
+
+// ApplyToEditBuf applies the patch to the given edit buffer,
+// minimizing it first so only the differing byte range is replaced.
+func (s *Spec) ApplyToEditBuf(buf *editbuf.Buffer) {
+	s.minimize()
+	buf.Replace(s.Off, s.Off+s.Len, s.New)
+}
+
+// UniqueDedent is Unique, but with flexibility around consistent whitespace prefix changes.
+// Unlike Unique, which returns a count of matches,
+// UniqueDedent returns a boolean indicating whether a unique match was found.
+// It is for LLMs that have a hard time reliably reproducing uniform whitespace prefixes.
+// For example, they may generate 8 spaces instead of 6 for all relevant lines.
+// UniqueDedent adjusts the needle's whitespace prefix to match the haystack's
+// and then replaces the unique instance of needle in haystack with replacement.
+func UniqueDedent(haystack, needle, replace string) (*Spec, bool) {
+	// TODO: this all definitely admits of some optimization
+	haystackLines := slices.Collect(strings.Lines(haystack))
+	needleLines := slices.Collect(strings.Lines(needle))
+	// Find the sole whitespace-insensitive line-wise match; bail if absent or ambiguous.
+	match := uniqueTrimmedLineMatch(haystackLines, needleLines)
+	if match == -1 {
+		return nil, false
+	}
+	// We now systematically adjust needle's whitespace prefix to match haystack.
+	// The first line gets special treatment, because its leading whitespace is irrelevant,
+	// and models often skip past it (or part of it).
+	if len(needleLines) == 0 {
+		return nil, false
+	}
+	// First line: cut leading whitespace and make corresponding fixes to replacement.
+	// The leading whitespace will come out in the wash in Unique.
+	// We need to make corresponding fixes to the replacement.
+	nl0 := needleLines[0]
+	noWS := strings.TrimLeftFunc(nl0, unicode.IsSpace)
+	ws0, _ := strings.CutSuffix(nl0, noWS) // can't fail
+	rest, ok := strings.CutPrefix(replace, ws0)
+	if ok {
+		// Adjust needle and replacement in tandem.
+		nl0 = noWS
+		replace = rest
+	}
+	// Calculate common whitespace prefixes for the rest.
+	haystackPrefix := commonWhitespacePrefix(haystackLines[match : match+len(needleLines)])
+	needlePrefix := commonWhitespacePrefix(needleLines[1:])
+	// Rebuild the needle line by line with the haystack's indentation.
+	nbuf := new(strings.Builder)
+	for i, line := range needleLines {
+		if i == 0 {
+			nbuf.WriteString(nl0)
+			continue
+		}
+		// Allow empty (newline-only) lines not to be prefixed.
+		if strings.TrimRight(line, "\n\r") == "" {
+			nbuf.WriteString(line)
+			continue
+		}
+		// Swap in haystackPrefix for needlePrefix.
+		// (needlePrefix is common to all non-blank needle lines by construction.)
+		nbuf.WriteString(haystackPrefix)
+		nbuf.WriteString(line[len(needlePrefix):])
+	}
+	// Do a replacement with our new-and-improved needle.
+	needle = nbuf.String()
+	spec, count := Unique(haystack, needle, replace)
+	if count != 1 {
+		return nil, false
+	}
+	return spec, true
+}
+
+// tok is a single scanned Go token: its position, kind, and literal text
+// (when the scanner provides one).
+type tok struct {
+	pos token.Position // position within the tokenized source
+	tok token.Token    // token kind
+	lit string         // literal text, when the scanner provides one
+}
+
+// String renders the token for debugging, e.g. ADD or IDENT("foo").
+func (t tok) String() string {
+	if t.lit == "" {
+		return t.tok.String()
+	}
+	return fmt.Sprintf("%s(%q)", t.tok, t.lit)
+}
+
+// tokenize scans code as Go source, returning its tokens (comments included)
+// and whether scanning completed without errors.
+// On any scan error it returns (nil, false): the input is invalid Go,
+// or not Go at all.
+func tokenize(code string) ([]tok, bool) {
+	var s scanner.Scanner
+	fset := token.NewFileSet()
+	file := fset.AddFile("", fset.Base(), len(code))
+	s.Init(file, []byte(code), nil, scanner.ScanComments)
+	var tokens []tok
+	for {
+		pos, t, lit := s.Scan()
+		if s.ErrorCount > 0 {
+			return nil, false // invalid Go code (or not Go code at all)
+		}
+		if t == token.EOF {
+			return tokens, true
+		}
+		tokens = append(tokens, tok{pos: fset.PositionFor(pos, false), tok: t, lit: lit})
+	}
+}
+
+// tokensEqual reports whether a and b contain the same token kinds and
+// literals, element by element. Token positions are ignored: they
+// necessarily differ between haystack and needle.
+func tokensEqual(a, b []tok) bool {
+	if len(a) != len(b) {
+		return false
+	}
+	for i, at := range a {
+		if bt := b[i]; at.tok != bt.tok || at.lit != bt.lit {
+			return false
+		}
+	}
+	return true
+}
+
+// tokensUniqueMatch returns the index in haystack of the sole occurrence of
+// needle, or -1 if there are zero occurrences or more than one.
+func tokensUniqueMatch(haystack, needle []tok) int {
+	// TODO: optimize
+	match := -1
+	for i := range haystack {
+		if len(haystack)-i < len(needle) {
+			break // remaining window too short to hold needle
+		}
+		if !tokensEqual(haystack[i:i+len(needle)], needle) {
+			continue
+		}
+		if match >= 0 {
+			return -1 // ambiguous: more than one occurrence
+		}
+		match = i
+	}
+	return match
+}
+
+// UniqueGoTokens is Unique, but with flexibility around all insignificant whitespace.
+// Like UniqueDedent, it returns a boolean indicating whether a unique match was found.
+// It is safe (enough) because it ensures that the needle alterations occurs only in places
+// where whitespace is not semantically significant.
+// In practice, this appears safe.
+func UniqueGoTokens(haystack, needle, replace string) (*Spec, bool) {
+	nt, ok := tokenize(needle)
+	if !ok {
+		return nil, false
+	}
+	ht, ok := tokenize(haystack)
+	if !ok {
+		return nil, false
+	}
+	// Find the sole token-level occurrence of the needle.
+	match := tokensUniqueMatch(ht, nt)
+	if match == -1 {
+		return nil, false
+	}
+	matchEnd := match + len(nt) - 1
+	// Re-derive the needle text from the haystack itself: from the start of the
+	// first matched token through the start of the token after the last match.
+	start := ht[match].pos.Offset
+	needle = haystack[start:]
+	if matchEnd+1 < len(ht) {
+		// todo: handle match at very end of file
+		end := ht[matchEnd+1].pos.Offset
+		needle = needle[:end-start]
+	}
+	// OK, declare this very fuzzy match to be our new needle.
+	spec, count := Unique(haystack, needle, replace)
+	if count != 1 {
+		return nil, false
+	}
+	return spec, true
+}
+
+// UniqueInValidGo is Unique, but with flexibility around all leading and trailing whitespace.
+// Like UniqueDedent, it returns a boolean indicating whether a unique match was found.
+// It is safe (enough) because it ensures that the needle alterations occurs only in places
+// where whitespace is not semantically significant.
+// In practice, this appears safe.
+func UniqueInValidGo(haystack, needle, replace string) (*Spec, bool) {
+	haystackLines := slices.Collect(strings.Lines(haystack))
+	needleLines := slices.Collect(strings.Lines(needle))
+	matchStart := uniqueTrimmedLineMatch(haystackLines, needleLines)
+	if matchStart == -1 {
+		return nil, false
+	}
+	needle, replace = improveNeedle(haystack, needle, replace, matchStart)
+	// matchEnd is the exclusive end of the matched line range.
+	// A needle ending in "\n" spans exactly Count(needle, "\n") lines;
+	// a needle whose final line is unterminated (a match at EOF, which
+	// improveNeedle cannot fix when the haystack itself lacks the trailing
+	// newline) spans one more.
+	matchEnd := matchStart + strings.Count(needle, "\n")
+	if needle != "" && !strings.HasSuffix(needle, "\n") {
+		matchEnd++
+	}
+	// Ensure that none of the lines that we fuzzy-matched involve a multiline comment or string literal.
+	var s scanner.Scanner
+	fset := token.NewFileSet()
+	file := fset.AddFile("", fset.Base(), len(haystack))
+	s.Init(file, []byte(haystack), nil, scanner.ScanComments)
+	for {
+		pos, tok, lit := s.Scan()
+		if s.ErrorCount > 0 {
+			return nil, false // invalid Go code (or not Go code at all)
+		}
+		if tok == token.EOF {
+			break
+		}
+		if tok == token.SEMICOLON || !strings.Contains(lit, "\n") {
+			continue
+		}
+		// In a token that spans multiple lines,
+		// so not perfectly matching whitespace might be unsafe.
+		p := fset.Position(pos)
+		tokenStart := p.Line - 1 // 1-based to 0-based
+		tokenEnd := tokenStart + strings.Count(lit, "\n")
+		// Check whether [matchStart, matchEnd) overlaps [tokenStart, tokenEnd].
+		// TODO: think more about edge conditions here. Any off-by-one errors?
+		// For example, leading whitespace and trailing whitespace
+		// on this token's lines are not semantically significant.
+		if tokenStart <= matchEnd && matchStart <= tokenEnd {
+			return nil, false // this token overlaps the range we're replacing, not safe
+		}
+	}
+
+	// TODO: restore this sanity check? it's mildly expensive and i've never seen it fail.
+	// replaced := strings.Join(haystackLines[:matchStart], "") + replacement + strings.Join(haystackLines[matchEnd:], "")
+	// _, err := format.Source([]byte(replaced))
+	// if err != nil {
+	// 	return nil, false
+	// }
+
+	// OK, declare this very fuzzy match to be our new needle.
+	needle = strings.Join(haystackLines[matchStart:matchEnd], "")
+	spec, count := Unique(haystack, needle, replace)
+	if count != 1 {
+		return nil, false
+	}
+	return spec, true
+}
+
+// UniqueTrim is Unique, but with flexibility to shrink old/replace in tandem.
+// LLMs appear to particularly struggle with the first line of a patch: when
+// needle and replace share an identical first line, dropping that line from
+// both and uniquely matching the remainder produces the same edit.
+func UniqueTrim(haystack, needle, replace string) (*Spec, bool) {
+	needleHead, needleTail, okN := strings.Cut(needle, "\n")
+	replaceHead, replaceTail, okR := strings.Cut(replace, "\n")
+	if !okN || !okR || needleHead != replaceHead {
+		return nil, false
+	}
+	if spec, count := Unique(haystack, needleTail, replaceTail); count == 1 {
+		return spec, true
+	}
+	return nil, false
+}
+
+// uniqueTrimmedLineMatch returns the index of the line in haystackLines at
+// which the line sequence needleLines matches, comparing lines with leading
+// and trailing whitespace ignored.
+// uniqueTrimmedLineMatch returns -1 if there is no match, or more than one.
+func uniqueTrimmedLineMatch(haystackLines, needleLines []string) int {
+	// TODO: optimize
+	trimmedHaystackLines := trimSpaceAll(haystackLines)
+	trimmedNeedleLines := trimSpaceAll(needleLines)
+	match := -1
+	for i := range trimmedHaystackLines {
+		rest := trimmedHaystackLines[i:]
+		if len(rest) < len(trimmedNeedleLines) {
+			break
+		}
+		rest = rest[:len(trimmedNeedleLines)]
+		if !slices.Equal(rest, trimmedNeedleLines) {
+			continue
+		}
+		if match != -1 {
+			return -1 // multiple matches
+		}
+		match = i
+	}
+	return match
+}
+
+// trimSpaceAll returns a copy of x with leading and trailing whitespace
+// removed from every element.
+func trimSpaceAll(x []string) []string {
+	out := make([]string, len(x))
+	for i := range x {
+		out[i] = strings.TrimSpace(x[i])
+	}
+	return out
+}
+
+// improveNeedle adjusts both needle and replacement in tandem to better match haystack.
+// matchLine is the 0-based haystack line at which needle's lines matched.
+// Note that we adjust search and replace together, so the resulting edit is unchanged.
+func improveNeedle(haystack string, needle, replacement string, matchLine int) (string, string) {
+	// TODO: we make new slices too much
+	needleLines := slices.Collect(strings.Lines(needle))
+	if len(needleLines) == 0 {
+		return needle, replacement
+	}
+	haystackLines := slices.Collect(strings.Lines(haystack))
+	if matchLine+len(needleLines) > len(haystackLines) {
+		// should be impossible, but just in case
+		return needle, replacement
+	}
+	// Add trailing last-line newline if needed to better match haystack.
+	if !strings.HasSuffix(needle, "\n") && strings.HasSuffix(haystackLines[matchLine+len(needleLines)-1], "\n") {
+		needle += "\n"
+		replacement += "\n"
+	}
+	// Add leading first-line prefix if needed to better match haystack
+	// (the model may have started the needle mid-line).
+	rest, ok := strings.CutSuffix(haystackLines[matchLine], needleLines[0])
+	if ok {
+		needle = rest + needle
+		replacement = rest + replacement
+	}
+	return needle, replacement
+}
+
+// isNonSpace reports whether r is not Unicode whitespace; it is the negation
+// of unicode.IsSpace, for use with strings.IndexFunc and strings.TrimRightFunc.
+func isNonSpace(r rune) bool {
+	return !unicode.IsSpace(r)
+}
+
+// whitespacePrefix returns the run of leading whitespace in s.
+// An all-whitespace (or empty) s yields "".
+func whitespacePrefix(s string) string {
+	i := strings.IndexFunc(s, isNonSpace)
+	if i < 0 {
+		return "" // no non-space rune found
+	}
+	return s[:i]
+}
+
+// commonWhitespacePrefix returns the longest common whitespace prefix of the
+// elements of x, somewhat flexibly: newline-only (blank) lines are ignored,
+// since editors often leave them unindented.
+func commonWhitespacePrefix(x []string) string {
+	var pre string
+	first := true
+	for _, s := range x {
+		// Ignore line endings; this is just for prefixes.
+		s = strings.TrimRight(s, "\n\r")
+		if s == "" {
+			continue // blank lines don't constrain the prefix
+		}
+		ws := whitespacePrefix(s)
+		if first {
+			// Seed with the first non-blank line's whitespace prefix only.
+			// (Seeding with the whole line would let non-whitespace leak into
+			// the result for single-line inputs.)
+			pre = ws
+			first = false
+			continue
+		}
+		n := commonPrefixLen(pre, ws)
+		if n == 0 {
+			return ""
+		}
+		pre = pre[:n]
+	}
+	return pre
+}
+
+// commonPrefixLen returns the length of the common prefix of two strings.
+// TODO: optimize, see e.g. https://go-review.googlesource.com/c/go/+/408116
+func commonPrefixLen(a, b string) int {
+	n := min(len(a), len(b))
+	i := 0
+	for i < n && a[i] == b[i] {
+		i++
+	}
+	return i
+}
+
+// commonSuffixLen returns the length of the common suffix of two strings.
+// TODO: optimize
+func commonSuffixLen(a, b string) int {
+	n := min(len(a), len(b))
+	i := 0
+	for i < n && a[len(a)-1-i] == b[len(b)-1-i] {
+		i++
+	}
+	return i
+}
diff --git a/claudetool/shared.go b/claudetool/shared.go
new file mode 100644
index 0000000..83048b9
--- /dev/null
+++ b/claudetool/shared.go
@@ -0,0 +1,72 @@
+// Package claudetool provides tools for Claude AI models.
+//
+// When adding, removing, or modifying tools in this package,
+// remember to update the tool display template in termui/termui.go
+// to ensure proper tool output formatting.
+package claudetool
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "fmt"
+ "log/slog"
+ "net/http"
+ "os"
+ "time"
+)
+
+// workingDirCtxKeyType is an unexported type for the working-directory
+// context key, preventing collisions with context keys from other packages.
+type workingDirCtxKeyType string
+
+// workingDirCtxKey is the context key under which the working directory is stored.
+const workingDirCtxKey workingDirCtxKeyType = "workingDir"
+
+// WithWorkingDir returns a copy of ctx that carries wd as the working
+// directory for tools to use; retrieve it with WorkingDir.
+func WithWorkingDir(ctx context.Context, wd string) context.Context {
+	return context.WithValue(ctx, workingDirCtxKey, wd)
+}
+
+// WorkingDir returns the working directory stored in ctx by WithWorkingDir,
+// or "" when none was set.
+func WorkingDir(ctx context.Context) string {
+	// If cmd.Dir is empty, it uses the current working directory,
+	// so we can use that as a fallback.
+	wd, _ := ctx.Value(workingDirCtxKey).(string)
+	return wd
+}
+
+// sendTelemetry posts debug data to an internal logging server.
+// It is meant for use by people developing sketch and is disabled by default:
+// it does nothing unless SKETCH_TELEMETRY_ENDPOINT is set.
+// This is a best-effort operation; errors are logged but not returned.
+func sendTelemetry(ctx context.Context, typ string, data any) {
+	endpoint := os.Getenv("SKETCH_TELEMETRY_ENDPOINT")
+	if endpoint == "" {
+		return // telemetry disabled
+	}
+	if err := doPostTelemetry(ctx, endpoint, typ, data); err != nil {
+		slog.DebugContext(ctx, "failed to send JSON to server", "type", typ, "error", err)
+	}
+}
+
+// doPostTelemetry marshals data as JSON and POSTs it to
+// telemetryEndpoint/<typ>_<unix-timestamp>.json.
+// Any marshaling, transport, or non-2xx response is returned as an error.
+func doPostTelemetry(ctx context.Context, telemetryEndpoint, typ string, data any) error {
+	jsonData, err := json.Marshal(data)
+	if err != nil {
+		return fmt.Errorf("failed to marshal %#v as JSON: %w", data, err)
+	}
+	timestamp := time.Now().Unix()
+	url := fmt.Sprintf(telemetryEndpoint+"/%s_%d.json", typ, timestamp)
+	// Tie the request to ctx so cancellation aborts the POST.
+	req, err := http.NewRequestWithContext(ctx, "POST", url, bytes.NewBuffer(jsonData))
+	if err != nil {
+		return fmt.Errorf("failed to create HTTP request for %s: %w", typ, err)
+	}
+	req.Header.Set("Content-Type", "application/json")
+
+	resp, err := http.DefaultClient.Do(req)
+	if err != nil {
+		return fmt.Errorf("failed to send %s JSON to server: %w", typ, err)
+	}
+	defer resp.Body.Close()
+
+	if resp.StatusCode/100 != 2 {
+		return fmt.Errorf("server returned non-success status for %s: %d", typ, resp.StatusCode)
+	}
+	slog.DebugContext(ctx, "successfully sent JSON to server", "file_type", typ, "url", url)
+	return nil
+}
diff --git a/claudetool/think.go b/claudetool/think.go
new file mode 100644
index 0000000..293cc0b
--- /dev/null
+++ b/claudetool/think.go
@@ -0,0 +1,39 @@
+package claudetool
+
+import (
+ "context"
+ "encoding/json"
+
+ "sketch.dev/ant"
+)
+
+// The Think tool provides space to think: it records the model's thoughts,
+// notes, and plans, and has no external effects (see thinkRun).
+var Think = &ant.Tool{
+	Name:        thinkName,
+	Description: thinkDescription,
+	InputSchema: ant.MustSchema(thinkInputSchema),
+	Run:         thinkRun,
+}
+
+const (
+	// thinkName and thinkDescription identify the tool to the model.
+	thinkName        = "think"
+	thinkDescription = `Think out loud, take notes, form plans. Has no external effects.`
+
+	// thinkInputSchema is the JSON Schema for the tool's input.
+	// If you modify this, update the termui template for prettier rendering.
+	thinkInputSchema = `
+{
+  "type": "object",
+  "required": ["thoughts"],
+  "properties": {
+    "thoughts": {
+      "type": "string",
+      "description": "The thoughts, notes, or plans to record"
+    }
+  }
+}
+`
+)
+
+// thinkRun implements the think tool. Thinking is its own reward: the input
+// is deliberately unused and a fixed acknowledgement is returned.
+func thinkRun(_ context.Context, _ json.RawMessage) (string, error) {
+	return "recorded", nil
+}
diff --git a/cmd/sketch/main.go b/cmd/sketch/main.go
new file mode 100644
index 0000000..6ef8049
--- /dev/null
+++ b/cmd/sketch/main.go
@@ -0,0 +1,358 @@
+package main
+
+import (
+ "bytes"
+ "context"
+ "flag"
+ "fmt"
+ "io"
+ "log/slog"
+ "math/rand/v2"
+ "net"
+ "net/http"
+ "os"
+ "os/exec"
+ "runtime"
+ "runtime/debug"
+ "strings"
+
+ "github.com/richardlehane/crock32"
+ "sketch.dev/ant"
+ "sketch.dev/dockerimg"
+ "sketch.dev/httprr"
+ "sketch.dev/loop"
+ "sketch.dev/loop/server"
+ "sketch.dev/skabandclient"
+ "sketch.dev/skribe"
+ "sketch.dev/termui"
+)
+
+// main runs the sketch CLI, printing the error and exiting nonzero on failure.
+func main() {
+	if err := run(); err != nil {
+		fmt.Fprintf(os.Stderr, "%v: %v\n", os.Args[0], err)
+		os.Exit(1)
+	}
+}
+
+// run parses flags and executes sketch. By default it relaunches itself
+// inside a docker container and returns when the container exits; with
+// -unsafe it runs the agent loop directly in the current process instead.
+func run() error {
+	addr := flag.String("addr", "localhost:0", "local debug HTTP server address")
+	skabandAddr := flag.String("skaband-addr", "https://sketch.dev", "URL of the skaband server")
+	unsafe := flag.Bool("unsafe", false, "run directly without a docker container")
+	openBrowser := flag.Bool("open", false, "open sketch URL in system browser")
+	httprrFile := flag.String("httprr", "", "if set, record HTTP interactions to file")
+	record := flag.Bool("httprecord", true, "Record trace (if httprr is set)")
+	maxIterations := flag.Uint64("max-iterations", 0, "maximum number of iterations the agent should perform per turn, 0 to disable limit")
+	maxWallTime := flag.Duration("max-wall-time", 0, "maximum time the agent should run per turn, 0 to disable limit")
+	maxDollars := flag.Float64("max-dollars", 5.0, "maximum dollars the agent should spend per turn, 0 to disable limit")
+	one := flag.Bool("one", false, "run a single iteration and exit without termui")
+	sessionID := flag.String("session-id", newSessionID(), "unique session-id for a sketch process")
+	gitUsername := flag.String("git-username", "", "username for git commits")
+	gitEmail := flag.String("git-email", "", "email for git commits")
+	verbose := flag.Bool("verbose", false, "enable verbose output")
+	version := flag.Bool("version", false, "print the version and exit")
+	noCleanup := flag.Bool("nocleanup", false, "do not clean up docker containers on exit")
+	containerLogDest := flag.String("save-container-logs", "", "host path to save container logs to on exit")
+	sketchBinaryLinux := flag.String("sketch-binary-linux", "", "path to a pre-built sketch binary for linux")
+	workingDir := flag.String("C", "", "when set, change to this directory before running")
+	flag.Parse()
+
+	if *version {
+		bi, ok := debug.ReadBuildInfo()
+		if ok {
+			fmt.Printf("%s@%v\n", bi.Path, bi.Main.Version)
+		}
+		return nil
+	}
+
+	// Any remaining args are sent to the agent as its first user message.
+	firstMessage := flag.Args()
+
+	// Add a global "session_id" to all logs using this context.
+	// A "session" is a single full run of the agent.
+	ctx := skribe.ContextWithAttr(context.Background(), slog.String("session_id", *sessionID))
+
+	var slogHandler slog.Handler
+	var err error
+	var logFile *os.File
+	if !*one {
+		// Log to a file
+		logFile, err = os.CreateTemp("", "sketch-cli-log-*")
+		if err != nil {
+			return fmt.Errorf("cannot create log file: %v", err)
+		}
+		fmt.Printf("structured logs: %v\n", logFile.Name())
+		defer logFile.Close()
+		slogHandler = slog.NewJSONHandler(logFile, &slog.HandlerOptions{Level: slog.LevelDebug})
+		slogHandler = skribe.AttrsWrap(slogHandler)
+	} else {
+		// Log straight to stdout, no task_id
+		// TODO: verbosity controls?
+		slogHandler = slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: slog.LevelDebug})
+		// TODO: we skipped "AttrsWrap" here because it adds a bunch of line noise. do we want it anyway?
+	}
+	slog.SetDefault(slog.New(slogHandler))
+
+	if *workingDir != "" {
+		if err := os.Chdir(*workingDir); err != nil {
+			return fmt.Errorf("sketch: cannot change directory to %q: %v", *workingDir, err)
+		}
+	}
+
+	if *gitUsername == "" {
+		*gitUsername = defaultGitUsername()
+	}
+	if *gitEmail == "" {
+		*gitEmail = defaultGitEmail()
+	}
+
+	// Detect whether we are already running inside the sketch container.
+	inDocker := false
+	if _, err := os.Stat("/.dockerenv"); err == nil {
+		inDocker = true
+	}
+
+	if !inDocker {
+		msgs, err := hostReqsCheck()
+		if *verbose {
+			fmt.Println("Host requirement checks:")
+			for _, m := range msgs {
+				fmt.Println(m)
+			}
+		}
+		if err != nil {
+			return err
+		}
+	}
+
+	if *one && len(firstMessage) == 0 {
+		return fmt.Errorf("-one flag requires a message to send to the agent")
+	}
+
+	// Resolve credentials: either direct Anthropic API access or via skaband.
+	var pubKey, antURL, apiKey string
+	if *skabandAddr == "" {
+		apiKey = os.Getenv("ANTHROPIC_API_KEY")
+		if apiKey == "" {
+			return fmt.Errorf("ANTHROPIC_API_KEY environment variable is not set")
+		}
+	} else {
+		if inDocker {
+			apiKey = os.Getenv("ANTHROPIC_API_KEY")
+			pubKey = os.Getenv("SKETCH_PUB_KEY")
+			antURL, err = skabandclient.LocalhostToDockerInternal(os.Getenv("ANT_URL"))
+			if err != nil {
+				return err
+			}
+		} else {
+			privKey, err := skabandclient.LoadOrCreatePrivateKey(skabandclient.DefaultKeyPath())
+			if err != nil {
+				return err
+			}
+			pubKey, antURL, apiKey, err = skabandclient.Login(os.Stdout, privKey, *skabandAddr, *sessionID)
+			if err != nil {
+				return err
+			}
+		}
+	}
+
+	// Safe mode (default): relaunch sketch inside a docker container and return.
+	if !*unsafe {
+		cwd, err := os.Getwd()
+		if err != nil {
+			return fmt.Errorf("sketch: cannot determine current working directory: %v", err)
+		}
+		// TODO: this is a bit of a mess.
+		// The "stdout" and "stderr" used here are "just" for verbose logs from LaunchContainer.
+		// LaunchContainer has to attach the termui, and does that directly to os.Stdout/os.Stderr
+		// regardless of what is attached here.
+		// This is probably wrong. Instead of having a big "if verbose" switch here, the verbosity
+		// switches should be inside LaunchContainer and os.Stdout/os.Stderr should be passed in
+		// here (with the parameters being kept for future testing).
+		var stdout, stderr io.Writer
+		var outbuf, errbuf *bytes.Buffer
+		if *verbose {
+			stdout, stderr = os.Stdout, os.Stderr
+		} else {
+			outbuf, errbuf = &bytes.Buffer{}, &bytes.Buffer{}
+			stdout, stderr = outbuf, errbuf
+		}
+		fmt.Printf("launching container...\n")
+		config := dockerimg.ContainerConfig{
+			SessionID:         *sessionID,
+			LocalAddr:         *addr,
+			SkabandAddr:       *skabandAddr,
+			AntURL:            antURL,
+			AntAPIKey:         apiKey,
+			Path:              cwd,
+			GitUsername:       *gitUsername,
+			GitEmail:          *gitEmail,
+			OpenBrowser:       *openBrowser,
+			NoCleanup:         *noCleanup,
+			ContainerLogDest:  *containerLogDest,
+			SketchBinaryLinux: *sketchBinaryLinux,
+			SketchPubKey:      pubKey,
+			ForceRebuild:      false,
+		}
+		if err := dockerimg.LaunchContainer(ctx, stdout, stderr, config); err != nil {
+			if *verbose {
+				fmt.Fprintf(os.Stderr, "dockerimg.LaunchContainer failed: %v\ndockerimg.LaunchContainer stderr:\n%s\ndockerimg.LaunchContainer stdout:\n%s\n", err, errbuf.String(), outbuf.String())
+			}
+			return err
+		}
+		return nil
+	}
+
+	// Optionally record/replay HTTP traffic for testing.
+	var client *http.Client
+	if *httprrFile != "" {
+		var err error
+		var rr *httprr.RecordReplay
+		if *record {
+			rr, err = httprr.OpenForRecording(*httprrFile, http.DefaultTransport)
+		} else {
+			rr, err = httprr.Open(*httprrFile, http.DefaultTransport)
+		}
+		if err != nil {
+			return fmt.Errorf("httprr: %v", err)
+		}
+		// Scrub API keys from requests for security
+		rr.ScrubReq(func(req *http.Request) error {
+			req.Header.Del("x-api-key")
+			req.Header.Del("anthropic-api-key")
+			return nil
+		})
+		client = rr.Client()
+	}
+	wd, err := os.Getwd()
+	if err != nil {
+		return err
+	}
+
+	agentConfig := loop.AgentConfig{
+		Context:          ctx,
+		AntURL:           antURL,
+		APIKey:           apiKey,
+		HTTPC:            client,
+		Budget:           ant.Budget{MaxResponses: *maxIterations, MaxWallTime: *maxWallTime, MaxDollars: *maxDollars},
+		GitUsername:      *gitUsername,
+		GitEmail:         *gitEmail,
+		SessionID:        *sessionID,
+		ClientGOOS:       runtime.GOOS,
+		ClientGOARCH:     runtime.GOARCH,
+		UseAnthropicEdit: os.Getenv("SKETCH_ANTHROPIC_EDIT") == "1",
+	}
+	agent := loop.NewAgent(agentConfig)
+
+	srv, err := server.New(agent, logFile)
+	if err != nil {
+		return err
+	}
+
+	if !inDocker {
+		ini := loop.AgentInit{
+			WorkingDir: wd,
+		}
+		if err = agent.Init(ini); err != nil {
+			return fmt.Errorf("failed to initialize agent: %v", err)
+		}
+	}
+
+	// Start the agent
+	go agent.Loop(ctx)
+
+	// Start the local HTTP server.
+	ln, err := net.Listen("tcp", *addr)
+	if err != nil {
+		return fmt.Errorf("cannot create debug server listener: %v", err)
+	}
+	go (&http.Server{Handler: srv}).Serve(ln)
+	var ps1URL string
+	if *skabandAddr != "" {
+		ps1URL = fmt.Sprintf("%s/s/%s", *skabandAddr, *sessionID)
+	} else if !inDocker {
+		// Do not tell users about the port inside the container, let the
+		// process running on the host report this.
+		ps1URL = fmt.Sprintf("http://%s", ln.Addr())
+	}
+
+	if len(firstMessage) > 0 {
+		agent.UserMessage(ctx, strings.Join(firstMessage, " "))
+	}
+
+	if inDocker {
+		<-agent.Ready()
+		if ps1URL == "" {
+			ps1URL = agent.URL()
+		}
+	}
+
+	// Open the debug URL in the system browser if requested
+	if *openBrowser {
+		dockerimg.OpenBrowser(ctx, ps1URL)
+	}
+
+	// Create the termui instance
+	s := termui.New(agent, ps1URL)
+	defer func() {
+		// Restore the terminal even if we are panicking, then re-panic.
+		r := recover()
+		if err := s.RestoreOldState(); err != nil {
+			fmt.Fprintf(os.Stderr, "couldn't restore old terminal state: %s\n", err)
+		}
+		if r != nil {
+			panic(r)
+		}
+	}()
+
+	// Start skaband connection loop if needed
+	if *skabandAddr != "" {
+		connectFn := func(connected bool) {
+			if connected {
+				s.AppendSystemMessage("skaband connected")
+			} else {
+				s.AppendSystemMessage("skaband disconnected")
+			}
+		}
+		go skabandclient.DialAndServeLoop(ctx, *skabandAddr, *sessionID, pubKey, srv, connectFn)
+	}
+
+	// -one: stream messages until the end of the turn, report cost, and exit.
+	if *one {
+		for {
+			m := agent.WaitForMessage(ctx)
+			if m.Content != "" {
+				fmt.Printf("š¬ %s %s: %s\n", m.Timestamp.Format("15:04:05"), m.Type, m.Content)
+			}
+			if m.EndOfTurn && m.ParentConversationID == nil {
+				fmt.Printf("Total cost: $%0.2f\n", agent.TotalUsage().TotalCostUSD)
+				return nil
+			}
+		}
+	}
+
+	if err := s.Run(ctx); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// newSessionID generates a new 10-byte (80-bit) random Session ID,
+// Crockford-base32 encoded and formatted as four dash-separated groups
+// of four characters.
+func newSessionID() string {
+	u1, u2 := rand.Uint64(), rand.Uint64N(1<<16)
+	s := crock32.Encode(u1) + crock32.Encode(uint64(u2))
+	// Pad to 16 characters, since encodings of small values can be shorter.
+	if len(s) < 16 {
+		s += strings.Repeat("0", 16-len(s))
+	}
+	return s[0:4] + "-" + s[4:8] + "-" + s[8:12] + "-" + s[12:16]
+}
+
+func defaultGitUsername() string {
+ out, err := exec.Command("git", "config", "user.name").CombinedOutput()
+ if err != nil {
+ return "Sketchš“ļø" // TODO: what should this be?
+ }
+ return strings.TrimSpace(string(out))
+}
+
+func defaultGitEmail() string {
+ out, err := exec.Command("git", "config", "user.email").CombinedOutput()
+ if err != nil {
+ return "skallywag@sketch.dev" // TODO: what should this be?
+ }
+ return strings.TrimSpace(string(out))
+}
diff --git a/cmd/sketch/reqchecks.go b/cmd/sketch/reqchecks.go
new file mode 100644
index 0000000..2a1210f
--- /dev/null
+++ b/cmd/sketch/reqchecks.go
@@ -0,0 +1,76 @@
+package main
+
+import (
+ "fmt"
+ "os/exec"
+ "strings"
+ "sync"
+
+ "golang.org/x/sync/errgroup"
+)
+
+// verify that the following are installed on the host: docker colima npm
+// TODO: check versions
+
+// checkDocker verifies that the `docker` CLI is installed and runs,
+// returning "<path> <version output>" on success.
+func checkDocker() (string, error) {
+	path, err := exec.LookPath("docker")
+	if err != nil {
+		return "", fmt.Errorf("cannot find `docker` binary; run: brew install docker colima && colima start")
+	}
+	cmd := exec.Command(path, "-v")
+	output, err := cmd.CombinedOutput()
+	if err != nil {
+		// Error strings should not end with punctuation or newlines.
+		return "", fmt.Errorf("docker version check failed: %w\n%s", err, strings.TrimSpace(string(output)))
+	}
+	return fmt.Sprintf("%s %s", path, strings.TrimSpace(string(output))), nil
+}
+
+// checkColima verifies that the `colima` CLI is installed and runs,
+// returning "<path> <version output>" on success.
+func checkColima() (string, error) {
+	path, err := exec.LookPath("colima")
+	if err != nil {
+		return "", fmt.Errorf("cannot find `colima` binary; run: brew install docker colima && colima start")
+	}
+	cmd := exec.Command(path, "version")
+	output, err := cmd.CombinedOutput()
+	if err != nil {
+		// Error strings should not end with punctuation or newlines.
+		return "", fmt.Errorf("colima version check failed: %w\n%s", err, strings.TrimSpace(string(output)))
+	}
+	return fmt.Sprintf("%s %s", path, strings.TrimSpace(string(output))), nil
+}
+
+// checkNPM verifies that the `npm` CLI is installed and runs,
+// returning "<path> <version output>" on success.
+func checkNPM() (string, error) {
+	path, err := exec.LookPath("npm")
+	if err != nil {
+		return "", fmt.Errorf("cannot find `npm` binary; run: brew install npm")
+	}
+	// Use npm's documented --version flag (the single-dash form is not canonical).
+	cmd := exec.Command(path, "--version")
+	output, err := cmd.CombinedOutput()
+	if err != nil {
+		// Error strings should not end with punctuation or newlines.
+		return "", fmt.Errorf("npm version check failed: %w\n%s", err, strings.TrimSpace(string(output)))
+	}
+	return fmt.Sprintf("%s %s", path, strings.TrimSpace(string(output))), nil
+}
+
+type reqCheckFunc func() (string, error)
+
+func hostReqsCheck() ([]string, error) {
+ var mu sync.Mutex
+ ret := []string{}
+ eg := errgroup.Group{}
+ cfs := []reqCheckFunc{checkDocker, checkColima, checkNPM}
+ eg.SetLimit(len(cfs))
+ for _, f := range cfs {
+ eg.Go(func() error {
+ msg, err := f()
+ mu.Lock()
+ defer mu.Unlock()
+ if err == nil {
+ ret = append(ret, msg)
+ } else {
+ ret = append(ret, err.Error())
+ }
+ return err
+ })
+ }
+ return ret, eg.Wait()
+}
diff --git a/dockerimg/createdockerfile.go b/dockerimg/createdockerfile.go
new file mode 100644
index 0000000..e9e01b6
--- /dev/null
+++ b/dockerimg/createdockerfile.go
@@ -0,0 +1,240 @@
+package dockerimg
+
+import (
+ "bytes"
+ "context"
+ "crypto/sha256"
+ "encoding/hex"
+ "encoding/json"
+ "fmt"
+ "io/fs"
+ "maps"
+ "net/http"
+ "slices"
+ "strings"
+ "text/template"
+
+ "sketch.dev/ant"
+)
+
+func hashInitFiles(initFiles map[string]string) string {
+ h := sha256.New()
+ for _, path := range slices.Sorted(maps.Keys(initFiles)) {
+ fmt.Fprintf(h, "%s\n%s\n\n", path, initFiles[path])
+ }
+ return hex.EncodeToString(h.Sum(nil))
+}
+
+// createDockerfile creates a Dockerfile for a git repo.
+// It expects the relevant initFiles to have been provided.
+// If the sketch binary is being executed in a sub-directory of the repository,
+// the relative path is provided on subPathWorkingDir.
+func createDockerfile(ctx context.Context, httpc *http.Client, antURL, antAPIKey string, initFiles map[string]string, subPathWorkingDir string) (string, error) {
+ if subPathWorkingDir == "." {
+ subPathWorkingDir = ""
+ } else if subPathWorkingDir != "" && subPathWorkingDir[0] != '/' {
+ subPathWorkingDir = "/" + subPathWorkingDir
+ }
+ toolCalled := false
+ var dockerfileFROM, dockerfileExtraCmds string
+ runDockerfile := func(ctx context.Context, input json.RawMessage) (string, error) {
+ // TODO: unmarshal straight into a struct
+ var m map[string]any
+ if err := json.Unmarshal(input, &m); err != nil {
+ return "", fmt.Errorf(`input=%[1]v (%[1]T), wanted a map[string]any, got: %w`, input, err)
+ }
+ var ok bool
+ dockerfileFROM, ok = m["from"].(string)
+ if !ok {
+ return "", fmt.Errorf(`input["from"]=%[1]v (%[1]T), wanted a string`, m["path"])
+ }
+ dockerfileExtraCmds, ok = m["extra_cmds"].(string)
+ if !ok {
+ return "", fmt.Errorf(`input["extra_cmds"]=%[1]v (%[1]T), wanted a string`, m["path"])
+ }
+ toolCalled = true
+ return "OK", nil
+ }
+ convo := ant.NewConvo(ctx, antAPIKey)
+ if httpc != nil {
+ convo.HTTPC = httpc
+ }
+ if antURL != "" {
+ convo.URL = antURL
+ }
+ convo.Tools = []*ant.Tool{{
+ Name: "dockerfile",
+ Description: "Helps define a Dockerfile that sets up a dev environment for this project.",
+ Run: runDockerfile,
+ InputSchema: ant.MustSchema(`{
+ "type": "object",
+ "required": ["from", "extra_cmds"],
+ "properties": {
+ "from": {
+ "type": "string",
+ "description": "The alpine base image provided to the dockerfile FROM command"
+ },
+ "extra_cmds": {
+ "type": "string",
+ "description": "Extra commands to add to the dockerfile."
+ }
+ }
+}`),
+ }}
+
+ // TODO: add semgrep, prettier -- they require node/npm/etc which is more complicated than apk
+ // If/when we do this, add them into the list of available tools in bash.go.
+ const dockerfileBase = `FROM {{.From}}
+
+RUN apk add bash git make jq sqlite gcc musl-dev linux-headers npm nodejs go github-cli ripgrep fzf
+
+ENV GOTOOLCHAIN=auto
+ENV GOPATH=/go
+ENV PATH="$GOPATH/bin:$PATH"
+
+RUN go install golang.org/x/tools/cmd/goimports@latest
+RUN go install golang.org/x/tools/gopls@latest
+RUN go install mvdan.cc/gofumpt@latest
+
+{{.ExtraCmds}}
+
+ARG GIT_USER_EMAIL
+ARG GIT_USER_NAME
+
+RUN git config --global user.email "$GIT_USER_EMAIL" && \
+ git config --global user.name "$GIT_USER_NAME"
+
+LABEL sketch_context="{{.InitFilesHash}}"
+COPY . /app
+
+WORKDIR /app{{.SubDir}}
+RUN if [ -f go.mod ]; then go mod download; fi
+
+CMD ["/bin/sketch"]`
+
+ // TODO: it's basically impossible to one-shot a python env. We need an agent loop for that.
+ // Right now the prompt contains a set of half-baked workarounds.
+
+ // If you want to edit the model prompt, run:
+ //
+ // go test ./sketch/dockerimg -httprecord ".*" -rewritewant
+ //
+ // Then look at the changes with:
+ //
+ // git diff sketch/dockerimg/testdata/*.dockerfile
+ //
+ // If the dockerfile changes are a strict improvement, commit all the changes.
+ msg := ant.Message{
+ Role: ant.MessageRoleUser,
+ Content: []ant.Content{{
+ Type: ant.ContentTypeText,
+ Text: `
+Call the dockerfile tool to create a Dockerfile.
+The parameters to dockerfile fill out the From and ExtraCmds
+template variables in the following Go template:
+
+` + "```\n" + dockerfileBase + "\n```" + `
+
+In particular:
+- Assume it is primarily a Go project. For a minimal env, prefer 1.24.2-alpine3.21 as a base image.
+- If any python is needed at all, switch to using a python alpine image as a the base and apk add go.
+ Favor using uv, and use one of these base images, depending on the preferred python version:
+ ghcr.io/astral-sh/uv:python3.13-alpine
+ ghcr.io/astral-sh/uv:python3.12-alpine
+ ghcr.io/astral-sh/uv:python3.11-alpine
+- When using pip to install packages, use: uv pip install --system.
+- Python env setup is challenging and often no required, so any RUN commands involving python tooling should be written to let docker build continue if there is a failure.
+- Include any tools particular to this repository that can be inferred from the given context.
+- Append || true to any apk add commands in case the package does not exist.
+- Do not expose any ports.
+`,
+ }},
+ }
+ if len(initFiles) > 0 {
+ msg.Content[0].Text += "Here is the content of several files from the repository that may be relevant:\n\n"
+ }
+
+ for _, name := range slices.Sorted(maps.Keys(initFiles)) {
+ msg.Content = append(msg.Content, ant.Content{
+ Type: ant.ContentTypeText,
+ Text: fmt.Sprintf("Here is the contents %s:\n<file>\n%s\n</file>\n\n", name, initFiles[name]),
+ })
+ }
+ msg.Content = append(msg.Content, ant.Content{
+ Type: ant.ContentTypeText,
+ Text: "Now call the dockerfile tool.",
+ })
+ res, err := convo.SendMessage(msg)
+ if err != nil {
+ return "", err
+ }
+ if res.StopReason != ant.StopReasonToolUse {
+ return "", fmt.Errorf("expected stop reason %q, got %q", ant.StopReasonToolUse, res.StopReason)
+ }
+ if _, err := convo.ToolResultContents(context.TODO(), res); err != nil {
+ return "", err
+ }
+ if !toolCalled {
+ return "", fmt.Errorf("no dockerfile returned")
+ }
+
+ buf := new(bytes.Buffer)
+ err = template.Must(template.New("dockerfile").Parse(dockerfileBase)).Execute(buf, map[string]string{
+ "From": dockerfileFROM,
+ "ExtraCmds": dockerfileExtraCmds,
+ "InitFilesHash": hashInitFiles(initFiles),
+ "SubDir": subPathWorkingDir,
+ })
+ if err != nil {
+ return "", fmt.Errorf("dockerfile template failed: %w", err)
+ }
+
+ return buf.String(), nil
+}
+
+// For future reference: we can find the current git branch/checkout with: git symbolic-ref -q --short HEAD || git describe --tags --exact-match 2>/dev/null || git rev-parse HEAD
+
+func readInitFiles(fsys fs.FS) (map[string]string, error) {
+ result := make(map[string]string)
+
+ err := fs.WalkDir(fsys, ".", func(path string, d fs.DirEntry, err error) error {
+ if err != nil {
+ return err
+ }
+ if d.IsDir() && (d.Name() == ".git" || d.Name() == "node_modules") {
+ return fs.SkipDir
+ }
+ if !d.Type().IsRegular() {
+ return nil
+ }
+
+ // Case 1: Check for README files
+ // TODO: find README files between the .git root (where we start)
+ // and the dir that sketch was initialized. This needs more info
+ // plumbed to this function.
+ if strings.HasPrefix(strings.ToLower(path), "readme") {
+ content, err := fs.ReadFile(fsys, path)
+ if err != nil {
+ return err
+ }
+ result[path] = string(content)
+ return nil
+ }
+
+ // Case 2: Check for GitHub workflow files
+ if strings.HasPrefix(path, ".github/workflows/") {
+ content, err := fs.ReadFile(fsys, path)
+ if err != nil {
+ return err
+ }
+ result[path] = string(content)
+ return nil
+ }
+
+ return nil
+ })
+ if err != nil {
+ return nil, err
+ }
+ return result, nil
+}
diff --git a/dockerimg/dockerimg.go b/dockerimg/dockerimg.go
new file mode 100644
index 0000000..72110da
--- /dev/null
+++ b/dockerimg/dockerimg.go
@@ -0,0 +1,626 @@
+// Package dockerimg
+package dockerimg
+
+import (
+ "bytes"
+ "context"
+ "crypto/sha256"
+ "encoding/hex"
+ "encoding/json"
+ "fmt"
+ "io"
+ "log/slog"
+ "net"
+ "net/http"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "runtime"
+ "strings"
+ "time"
+
+ "sketch.dev/skribe"
+)
+
// ContainerConfig holds all configuration for launching a container
// via LaunchContainer.
type ContainerConfig struct {
	// SessionID is the unique identifier for this session.
	SessionID string

	// LocalAddr is the initial host address to use (though it may be
	// overwritten later once docker assigns the actual port).
	LocalAddr string

	// SkabandAddr is the address of the skaband service, if available;
	// empty disables it.
	SkabandAddr string

	// AntURL is the URL of the LLM service.
	AntURL string

	// AntAPIKey is the API key for the LLM service.
	AntAPIKey string

	// Path is the local filesystem path to use (the directory sketch
	// was started from; expected to be inside a git repo).
	Path string

	// GitUsername is the username to use for git operations inside the container.
	GitUsername string

	// GitEmail is the email to use for git operations inside the container.
	GitEmail string

	// OpenBrowser determines whether to open a browser automatically
	// once the container's web UI is reachable.
	OpenBrowser bool

	// NoCleanup prevents container kill/rm on exit when set to true.
	NoCleanup bool

	// ForceRebuild forces rebuilding of the Docker image even if a
	// cached image with a matching context hash exists.
	ForceRebuild bool

	// ContainerLogDest is a host directory to copy container logs into;
	// "" disables log copying.
	ContainerLogDest string

	// SketchBinaryLinux is the path to a pre-built linux sketch binary;
	// "" means build a fresh one.
	SketchBinaryLinux string

	// SketchPubKey is the sketch client public key.
	SketchPubKey string
}
+
// LaunchContainer creates a docker container for a project, installs sketch and opens a connection to it.
// It writes status to stdout.
//
// High-level sequence: verify docker works, find/build the per-repo
// image, cross-compile the in-container sketch binary, start a local
// git HTTP server for the container to pull/push against, create and
// start the container, POST its initial config, then attach to it.
func LaunchContainer(ctx context.Context, stdout, stderr io.Writer, config ContainerConfig) error {
	if _, err := exec.LookPath("docker"); err != nil {
		return fmt.Errorf("cannot find `docker` binary; run: brew install docker colima && colima start")
	}

	if out, err := combinedOutput(ctx, "docker", "ps"); err != nil {
		// `docker ps` provides a good error message here that can be
		// easily chatgpt'ed by users, so send it to the user as-is:
		// Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?
		return fmt.Errorf("docker ps: %s (%w)", out, err)
	}

	_, hostPort, err := net.SplitHostPort(config.LocalAddr)
	if err != nil {
		return err
	}

	gitRoot, err := findGitRoot(ctx, config.Path)
	if err != nil {
		return err
	}

	// Image name and cache validity are derived from the repo root and
	// its init files; see findOrBuildDockerImage.
	imgName, err := findOrBuildDockerImage(ctx, stdout, stderr, config.Path, gitRoot, config.AntURL, config.AntAPIKey, config.ForceRebuild)
	if err != nil {
		return err
	}

	// Use the pre-built linux binary if provided, otherwise build one
	// now and clean it up on exit.
	linuxSketchBin := config.SketchBinaryLinux
	if linuxSketchBin == "" {
		linuxSketchBin, err = buildLinuxSketchBin(ctx, config.Path)
		if err != nil {
			return err
		}
		defer os.Remove(linuxSketchBin)
	}

	cntrName := imgName + "-" + config.SessionID
	// Best-effort teardown; errors ignored (container may have exited).
	defer func() {
		if config.NoCleanup {
			return
		}
		if out, err := combinedOutput(ctx, "docker", "kill", cntrName); err != nil {
			// TODO: print in verbose mode? fmt.Fprintf(os.Stderr, "docker kill: %s: %v\n", out, err)
			_ = out
		}
		if out, err := combinedOutput(ctx, "docker", "rm", cntrName); err != nil {
			// TODO: print in verbose mode? fmt.Fprintf(os.Stderr, "docker kill: %s: %v\n", out, err)
			_ = out
		}
	}()

	// errCh receives errors from operations that this function calls in separate goroutines.
	errCh := make(chan error)

	// Start the git server
	gitSrv, err := newGitServer(gitRoot)
	if err != nil {
		return fmt.Errorf("failed to start git server: %w", err)
	}
	defer gitSrv.shutdown(ctx)

	go func() {
		errCh <- gitSrv.serve(ctx)
	}()

	// Get the current host git commit
	// NOTE(review): combinedOutput does not set cmd.Dir, so these git
	// commands run in the process's cwd, not config.Path — confirm the
	// process cwd is always inside the repo.
	var commit string
	if out, err := combinedOutput(ctx, "git", "rev-parse", "HEAD"); err != nil {
		return fmt.Errorf("git rev-parse HEAD: %w", err)
	} else {
		commit = strings.TrimSpace(string(out))
	}
	if out, err := combinedOutput(ctx, "git", "config", "http.receivepack", "true"); err != nil {
		return fmt.Errorf("git config http.receivepack true: %s: %w", out, err)
	}

	relPath, err := filepath.Rel(gitRoot, config.Path)
	if err != nil {
		return err
	}

	// Create the sketch container
	if err := createDockerContainer(ctx, cntrName, hostPort, relPath, imgName, config); err != nil {
		return err
	}

	// Copy the sketch linux binary into the container
	if out, err := combinedOutput(ctx, "docker", "cp", linuxSketchBin, cntrName+":/bin/sketch"); err != nil {
		return fmt.Errorf("docker cp: %s, %w", out, err)
	}

	fmt.Printf("starting container %s\ncommits made by the agent will be pushed to \033[1msketch/*\033[0m\n", cntrName)

	// Start the sketch container
	if out, err := combinedOutput(ctx, "docker", "start", cntrName); err != nil {
		return fmt.Errorf("docker start: %s, %w", out, err)
	}

	// Copies structured logs from the container to the host.
	// The container announces its log files on stdout as lines of the
	// form "structured logs: <path>"; each is docker-cp'ed out.
	copyLogs := func() {
		if config.ContainerLogDest == "" {
			return
		}
		out, err := combinedOutput(ctx, "docker", "logs", cntrName)
		if err != nil {
			fmt.Fprintf(os.Stderr, "docker logs failed: %v\n", err)
			return
		}
		logLines := strings.Split(string(out), "\n")
		for _, logLine := range logLines {
			if !strings.HasPrefix(logLine, "structured logs:") {
				continue
			}
			logFile := strings.TrimSpace(strings.TrimPrefix(logLine, "structured logs:"))
			srcPath := fmt.Sprintf("%s:%s", cntrName, logFile)
			logFileName := filepath.Base(logFile)
			dstPath := filepath.Join(config.ContainerLogDest, logFileName)
			_, err := combinedOutput(ctx, "docker", "cp", srcPath, dstPath)
			if err != nil {
				fmt.Fprintf(os.Stderr, "docker cp %s %s failed: %v\n", srcPath, dstPath, err)
			}
			fmt.Fprintf(os.Stderr, "\ncopied container log %s to %s\n", srcPath, dstPath)
		}
	}

	// NOTE: we want to see what the internal sketch binary prints
	// regardless of the setting of the verbosity flag on the external
	// binary, so reading "docker logs", which is the stdout/stderr of
	// the internal binary is not conditional on the verbose flag.
	appendInternalErr := func(err error) error {
		if err == nil {
			return nil
		}
		out, logsErr := combinedOutput(ctx, "docker", "logs", cntrName)
		if err != nil {
			return fmt.Errorf("%w; and docker logs failed: %s, %v", err, out, logsErr)
		}
		out = bytes.TrimSpace(out)
		if len(out) > 0 {
			return fmt.Errorf("docker logs: %s;\n%w", out, err)
		}
		return err
	}

	// Get the sketch server port from the container
	localAddr, err := getContainerPort(ctx, cntrName)
	if err != nil {
		return appendInternalErr(err)
	}

	// Tell the sketch container which git server port and commit to initialize with.
	go func() {
		// TODO: Why is this called in a goroutine? I have found that when I pull this out
		// of the goroutine and call it inline, then the terminal UI clears itself and all
		// the scrollback (which is not good, but also not fatal). I can't see why it does this
		// though, since none of the calls in postContainerInitConfig obviously write to stdout
		// or stderr.
		if err := postContainerInitConfig(ctx, localAddr, commit, gitSrv.gitPort); err != nil {
			slog.ErrorContext(ctx, "LaunchContainer.postContainerInitConfig", slog.String("err", err.Error()))
			errCh <- appendInternalErr(err)
		}
	}()

	if config.OpenBrowser {
		OpenBrowser(ctx, "http://"+localAddr)
	}

	go func() {
		cmd := exec.CommandContext(ctx, "docker", "attach", cntrName)
		cmd.Stdin = os.Stdin
		cmd.Stdout = os.Stdout
		cmd.Stderr = os.Stderr
		errCh <- run(ctx, "docker attach", cmd)
	}()

	defer copyLogs()

	// Block until the context is cancelled or any of the goroutines
	// above (git server, init POST, docker attach) reports a result;
	// the first value received decides the return.
	for {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case err := <-errCh:
			if err != nil {
				return appendInternalErr(fmt.Errorf("container process: %w", err))
			}
			return nil
		}
	}
}
+
+func combinedOutput(ctx context.Context, cmdName string, args ...string) ([]byte, error) {
+ cmd := exec.CommandContext(ctx, cmdName, args...)
+ // Really only needed for the "go build" command for the linux sketch binary
+ cmd.Env = append(os.Environ(), "GOOS=linux", "CGO_ENABLED=0")
+ start := time.Now()
+
+ out, err := cmd.CombinedOutput()
+ if err != nil {
+ slog.ErrorContext(ctx, cmdName, slog.Duration("elapsed", time.Now().Sub(start)), slog.String("err", err.Error()), slog.String("path", cmd.Path), slog.String("args", fmt.Sprintf("%v", skribe.Redact(cmd.Args))))
+ } else {
+ slog.DebugContext(ctx, cmdName, slog.Duration("elapsed", time.Now().Sub(start)), slog.String("path", cmd.Path), slog.String("args", fmt.Sprintf("%v", skribe.Redact(cmd.Args))))
+ }
+ return out, err
+}
+
+func run(ctx context.Context, cmdName string, cmd *exec.Cmd) error {
+ start := time.Now()
+ err := cmd.Run()
+ if err != nil {
+ slog.ErrorContext(ctx, cmdName, slog.Duration("elapsed", time.Now().Sub(start)), slog.String("err", err.Error()), slog.String("path", cmd.Path), slog.String("args", fmt.Sprintf("%v", skribe.Redact(cmd.Args))))
+ } else {
+ slog.DebugContext(ctx, cmdName, slog.Duration("elapsed", time.Now().Sub(start)), slog.String("path", cmd.Path), slog.String("args", fmt.Sprintf("%v", skribe.Redact(cmd.Args))))
+ }
+ return err
+}
+
// gitServer serves the host git repository over HTTP so the container
// can fetch from and push to it (via host.docker.internal:gitPort).
type gitServer struct {
	gitLn   net.Listener // tcp4 listener the server accepts on
	gitPort string       // port gitLn is bound to, as a string
	srv     *http.Server // handler is gitHTTP (git http-backend over CGI)
}

// shutdown gracefully stops the HTTP server and closes the listener.
// Errors are deliberately ignored: this runs during teardown.
func (gs *gitServer) shutdown(ctx context.Context) {
	gs.srv.Shutdown(ctx)
	gs.gitLn.Close()
}

// Serve a git remote from the host for the container to fetch from and push to.
// serve blocks until the server is shut down or fails.
func (gs *gitServer) serve(ctx context.Context) error {
	slog.DebugContext(ctx, "starting git server", slog.String("git_remote_addr", "http://host.docker.internal:"+gs.gitPort+"/.git"))
	return gs.srv.Serve(gs.gitLn)
}
+
+func newGitServer(gitRoot string) (*gitServer, error) {
+ ret := &gitServer{}
+ gitLn, err := net.Listen("tcp4", ":0")
+ if err != nil {
+ return nil, fmt.Errorf("git listen: %w", err)
+ }
+ ret.gitLn = gitLn
+
+ srv := http.Server{
+ Handler: &gitHTTP{gitRepoRoot: gitRoot},
+ }
+ ret.srv = &srv
+
+ _, gitPort, err := net.SplitHostPort(gitLn.Addr().String())
+ if err != nil {
+ return nil, fmt.Errorf("git port: %w", err)
+ }
+ ret.gitPort = gitPort
+ return ret, nil
+}
+
+func createDockerContainer(ctx context.Context, cntrName, hostPort, relPath, imgName string, config ContainerConfig) error {
+ //, config.SessionID, config.GitUsername, config.GitEmail, config.SkabandAddr
+ // sessionID, gitUsername, gitEmail, skabandAddr string
+ cmdArgs := []string{"create",
+ "-it",
+ "--name", cntrName,
+ "-p", hostPort + ":80", // forward container port 80 to a host port
+ "-e", "ANTHROPIC_API_KEY=" + config.AntAPIKey,
+ }
+ if config.AntURL != "" {
+ cmdArgs = append(cmdArgs, "-e", "ANT_URL="+config.AntURL)
+ }
+ if config.SketchPubKey != "" {
+ cmdArgs = append(cmdArgs, "-e", "SKETCH_PUB_KEY="+config.SketchPubKey)
+ }
+ if relPath != "." {
+ cmdArgs = append(cmdArgs, "-w", "/app/"+relPath)
+ }
+ cmdArgs = append(
+ cmdArgs,
+ imgName,
+ "/bin/sketch",
+ "-unsafe",
+ "-addr=:80",
+ "-session-id="+config.SessionID,
+ "-git-username="+config.GitUsername, "-git-email="+config.GitEmail,
+ )
+ if config.SkabandAddr != "" {
+ cmdArgs = append(cmdArgs, "-skaband-addr="+config.SkabandAddr)
+ }
+ if out, err := combinedOutput(ctx, "docker", cmdArgs...); err != nil {
+ return fmt.Errorf("docker create: %s, %w", out, err)
+ }
+ return nil
+}
+
+func buildLinuxSketchBin(ctx context.Context, path string) (string, error) {
+ start := time.Now()
+ linuxSketchBin := filepath.Join(path, "tmp-sketch-binary-linux")
+ cmd := exec.CommandContext(ctx, "go", "build", "-o", linuxSketchBin, "sketch.dev/cmd/sketch")
+ cmd.Env = append(os.Environ(), "GOOS=linux", "CGO_ENABLED=0")
+
+ fmt.Printf("building linux agent binary...\n")
+ out, err := cmd.CombinedOutput()
+ if err != nil {
+ slog.ErrorContext(ctx, "go", slog.Duration("elapsed", time.Now().Sub(start)), slog.String("err", err.Error()), slog.String("path", cmd.Path), slog.String("args", fmt.Sprintf("%v", skribe.Redact(cmd.Args))))
+ return "", fmt.Errorf("failed to build linux sketch binary: %s: %w", out, err)
+ } else {
+ slog.DebugContext(ctx, "go", slog.Duration("elapsed", time.Now().Sub(start)), slog.String("path", cmd.Path), slog.String("args", fmt.Sprintf("%v", skribe.Redact(cmd.Args))))
+ }
+
+ fmt.Printf("built linux agent binary in %s\n", time.Since(start).Round(100*time.Millisecond))
+
+ return linuxSketchBin, nil
+}
+
+func getContainerPort(ctx context.Context, cntrName string) (string, error) {
+ localAddr := ""
+ if out, err := combinedOutput(ctx, "docker", "port", cntrName, "80"); err != nil {
+ return "", fmt.Errorf("failed to find container port: %s: %v", out, err)
+ } else {
+ v4, _, found := strings.Cut(string(out), "\n")
+ if !found {
+ return "", fmt.Errorf("failed to find container port: %s: %v", out, err)
+ }
+ localAddr = v4
+ if strings.HasPrefix(localAddr, "0.0.0.0") {
+ localAddr = "127.0.0.1" + strings.TrimPrefix(localAddr, "0.0.0.0")
+ }
+ }
+ return localAddr, nil
+}
+
+// Contact the container and configure it.
+func postContainerInitConfig(ctx context.Context, localAddr, commit, gitPort string) error {
+ localURL := "http://" + localAddr
+ initMsg, err := json.Marshal(map[string]string{
+ "commit": commit,
+ "git_remote_addr": "http://host.docker.internal:" + gitPort + "/.git",
+ "host_addr": localAddr,
+ })
+ if err != nil {
+ return fmt.Errorf("init msg: %w", err)
+ }
+
+ slog.DebugContext(ctx, "/init POST", slog.String("initMsg", string(initMsg)))
+
+ // Note: this /init POST is handled in loop/server/loophttp.go:
+ initMsgByteReader := bytes.NewReader(initMsg)
+ req, err := http.NewRequest("POST", localURL+"/init", initMsgByteReader)
+ if err != nil {
+ return err
+ }
+
+ var res *http.Response
+ for i := 0; ; i++ {
+ time.Sleep(100 * time.Millisecond)
+ // If you DON'T reset this byteReader, then subsequent retries may end up sending 0 bytes.
+ initMsgByteReader.Reset(initMsg)
+ res, err = http.DefaultClient.Do(req)
+ if err != nil {
+ // In addition to "connection refused", we also occasionally see "EOF" errors that can succeed on retries.
+ if i < 100 && (strings.Contains(err.Error(), "connection refused") || strings.Contains(err.Error(), "EOF")) {
+ slog.DebugContext(ctx, "postContainerInitConfig retrying", slog.Int("retry", i), slog.String("err", err.Error()))
+ continue
+ }
+ return fmt.Errorf("failed to %s/init sketch in container, NOT retrying: err: %v", localURL, err)
+ }
+ break
+ }
+ resBytes, _ := io.ReadAll(res.Body)
+ if res.StatusCode != http.StatusOK {
+ return fmt.Errorf("failed to initialize sketch in container, response status code %d: %s", res.StatusCode, resBytes)
+ }
+ return nil
+}
+
// findOrBuildDockerImage returns the name of a docker image for the
// repository rooted at gitRoot, building (or rebuilding) it when the
// cached image's "sketch_context" label no longer matches the hash of
// the inputs that generated its Dockerfile, or when forceRebuild is set.
func findOrBuildDockerImage(ctx context.Context, stdout, stderr io.Writer, cwd, gitRoot, antURL, antAPIKey string, forceRebuild bool) (imgName string, err error) {
	// The image name is derived from the repo root path, giving each
	// checkout a stable, distinct image name.
	h := sha256.Sum256([]byte(gitRoot))
	imgName = "sketch-" + hex.EncodeToString(h[:6])

	// Read the context hash recorded on any existing image ("" if the
	// image does not exist yet).
	var curImgInitFilesHash string
	if out, err := combinedOutput(ctx, "docker", "inspect", "--format", "{{json .Config.Labels}}", imgName); err != nil {
		if strings.Contains(string(out), "No such object") {
			// Image does not exist, continue and build it.
			curImgInitFilesHash = ""
		} else {
			return "", fmt.Errorf("docker inspect failed: %s, %v", out, err)
		}
	} else {
		m := map[string]string{}
		if err := json.Unmarshal(bytes.TrimSpace(out), &m); err != nil {
			return "", fmt.Errorf("docker inspect output unparsable: %s, %v", out, err)
		}
		curImgInitFilesHash = m["sketch_context"]
	}

	candidates, err := findRepoDockerfiles(cwd, gitRoot)
	if err != nil {
		return "", fmt.Errorf("find dockerfile: %w", err)
	}

	var initFiles map[string]string
	var dockerfilePath string

	// Branch 1: the repo supplies exactly one plain "Dockerfile" — use
	// it directly, keyed by the hash of its contents.
	// TODO: prefer a "Dockerfile.sketch" so users can tailor any env to this tool.
	if len(candidates) == 1 && strings.ToLower(filepath.Base(candidates[0])) == "dockerfile" {
		dockerfilePath = candidates[0]
		contents, err := os.ReadFile(dockerfilePath)
		if err != nil {
			return "", err
		}
		fmt.Printf("using %s as dev env\n", candidates[0])
		if hashInitFiles(map[string]string{dockerfilePath: string(contents)}) == curImgInitFilesHash && !forceRebuild {
			fmt.Printf("using existing docker image %s\n", imgName)
			return imgName, nil
		}
	} else {
		// Branch 2: generate a Dockerfile from the repo's init files
		// (READMEs, CI workflows) via the LLM, keyed by their hash.
		initFiles, err = readInitFiles(os.DirFS(gitRoot))
		if err != nil {
			return "", err
		}
		subPathWorkingDir, err := filepath.Rel(gitRoot, cwd)
		if err != nil {
			return "", err
		}
		initFileHash := hashInitFiles(initFiles)
		if curImgInitFilesHash == initFileHash && !forceRebuild {
			fmt.Printf("using existing docker image %s\n", imgName)
			return imgName, nil
		}

		start := time.Now()
		dockerfile, err := createDockerfile(ctx, http.DefaultClient, antURL, antAPIKey, initFiles, subPathWorkingDir)
		if err != nil {
			return "", fmt.Errorf("create dockerfile: %w", err)
		}
		// Write the generated Dockerfile to a temp file for `docker build`;
		// removed when this function returns.
		dockerfilePath = filepath.Join(cwd, "tmp-sketch-dockerfile")
		if err := os.WriteFile(dockerfilePath, []byte(dockerfile), 0o666); err != nil {
			return "", err
		}
		defer os.Remove(dockerfilePath)

		fmt.Fprintf(stderr, "generated Dockerfile in %s:\n\t%s\n\n", time.Since(start).Round(time.Millisecond), strings.Replace(dockerfile, "\n", "\n\t", -1))
	}

	// The Dockerfile bakes the host's git identity in via build args.
	var gitUserEmail, gitUserName string
	if out, err := combinedOutput(ctx, "git", "config", "--get", "user.email"); err != nil {
		return "", fmt.Errorf("git config: %s: %v", out, err)
	} else {
		gitUserEmail = strings.TrimSpace(string(out))
	}
	if out, err := combinedOutput(ctx, "git", "config", "--get", "user.name"); err != nil {
		return "", fmt.Errorf("git config: %s: %v", out, err)
	} else {
		gitUserName = strings.TrimSpace(string(out))
	}

	start := time.Now()
	cmd := exec.CommandContext(ctx,
		"docker", "build",
		"-t", imgName,
		"-f", dockerfilePath,
		"--build-arg", "GIT_USER_EMAIL="+gitUserEmail,
		"--build-arg", "GIT_USER_NAME="+gitUserName,
		".",
	)
	// Build from the repo root so the Dockerfile's COPY . /app sees the whole repo.
	cmd.Dir = gitRoot
	cmd.Stdout = stdout
	cmd.Stderr = stderr
	fmt.Printf("building docker image %s...\n", imgName)

	err = run(ctx, "docker build", cmd)
	if err != nil {
		return "", fmt.Errorf("docker build failed: %v", err)
	}
	fmt.Printf("built docker image %s in %s\n", imgName, time.Since(start).Round(time.Millisecond))
	return imgName, nil
}
+
+func findRepoDockerfiles(cwd, gitRoot string) ([]string, error) {
+ files, err := findDirDockerfiles(cwd)
+ if err != nil {
+ return nil, err
+ }
+ if len(files) > 0 {
+ return files, nil
+ }
+
+ path := cwd
+ for path != gitRoot {
+ path = filepath.Dir(path)
+ files, err := findDirDockerfiles(path)
+ if err != nil {
+ return nil, err
+ }
+ if len(files) > 0 {
+ return files, nil
+ }
+ }
+ return files, nil
+}
+
+// findDirDockerfiles finds all "Dockerfile*" files in a directory.
+func findDirDockerfiles(root string) (res []string, err error) {
+ err = filepath.Walk(root, func(path string, info os.FileInfo, err error) error {
+ if err != nil {
+ return err
+ }
+ if info.IsDir() && root != path {
+ return filepath.SkipDir
+ }
+ name := strings.ToLower(info.Name())
+ if name == "dockerfile" || strings.HasPrefix(name, "dockerfile.") {
+ res = append(res, path)
+ }
+ return nil
+ })
+ if err != nil {
+ return nil, err
+ }
+ return res, nil
+}
+
+func findGitRoot(ctx context.Context, path string) (string, error) {
+ cmd := exec.CommandContext(ctx, "git", "rev-parse", "--git-common-dir")
+ cmd.Dir = path
+ out, err := cmd.CombinedOutput()
+ if err != nil {
+ if strings.Contains(string(out), "not a git repository") {
+ return "", fmt.Errorf(`sketch needs to run from within a git repo, but %s is not part of a git repo.
+Consider one of the following options:
+ - cd to a different dir that is already part of a git repo first, or
+ - to create a new git repo from this directory (%s), run this command:
+
+ git init . && git commit --allow-empty -m "initial commit"
+
+and try running sketch again.
+`, path, path)
+ }
+ return "", fmt.Errorf("git rev-parse --git-common-dir: %s: %w", out, err)
+ }
+ gitDir := strings.TrimSpace(string(out)) // location of .git dir, often as a relative path
+ absGitDir := filepath.Join(path, gitDir)
+ return filepath.Dir(absGitDir), err
+}
+
// OpenBrowser makes a best-effort attempt to open url in the user's
// default browser using the platform's opener command. Failures are
// reported to stderr rather than returned.
func OpenBrowser(ctx context.Context, url string) {
	var name string
	var args []string
	switch runtime.GOOS {
	case "darwin":
		name, args = "open", []string{url}
	case "windows":
		name, args = "cmd", []string{"/c", "start", url}
	default: // Linux and other Unix-like systems
		name, args = "xdg-open", []string{url}
	}
	cmd := exec.CommandContext(ctx, name, args...)
	if b, err := cmd.CombinedOutput(); err != nil {
		fmt.Fprintf(os.Stderr, "failed to open browser: %v: %s\n", err, b)
	}
}
diff --git a/dockerimg/dockerimg_test.go b/dockerimg/dockerimg_test.go
new file mode 100644
index 0000000..cef5173
--- /dev/null
+++ b/dockerimg/dockerimg_test.go
@@ -0,0 +1,193 @@
+package dockerimg
+
+import (
+ "context"
+ "flag"
+ "io/fs"
+ "net/http"
+ "os"
+ "strings"
+ "testing"
+ "testing/fstest"
+
+ "github.com/google/go-cmp/cmp"
+ "sketch.dev/httprr"
+)
+
// flagRewriteWant (-rewritewant) regenerates the golden *.dockerfile
// files under testdata/ instead of comparing results against them.
var flagRewriteWant = flag.Bool("rewritewant", false, "rewrite the dockerfiles we want from the model")
+
// TestCreateDockerfile exercises createDockerfile against recorded
// (httprr) LLM conversations for several synthetic repositories and
// compares the generated Dockerfile with golden files in testdata/.
// Re-record and regenerate with: -httprecord ".*" -rewritewant
func TestCreateDockerfile(t *testing.T) {
	ctx := context.Background()

	tests := []struct {
		name string
		fsys fs.FS
	}{
		{
			name: "Basic repo with README",
			fsys: fstest.MapFS{
				"README.md": &fstest.MapFile{Data: []byte("# Test Project\nA Go project for testing.")},
			},
		},
		{
			// TODO: this looks bogus.
			name: "Repo with README and workflow",
			fsys: fstest.MapFS{
				"README.md": &fstest.MapFile{Data: []byte("# Test Project\nA Go project for testing.")},
				".github/workflows/test.yml": &fstest.MapFile{Data: []byte(`name: Test
on: [push]
jobs:
 test:
 runs-on: ubuntu-latest
 steps:
 - uses: actions/checkout@v2
 - uses: actions/setup-node@v3
 with:
 node-version: '18'
 - name: Install and activate corepack
 run: |
 npm install -g corepack
 corepack enable
 - run: go test ./...`)},
			},
		},
		{
			name: "mention a devtool in the readme",
			fsys: fstest.MapFS{
				"readme.md": &fstest.MapFile{Data: []byte("# Test Project\nYou must install `dot` to run the tests.")},
			},
		},
		{
			name: "empty repo",
			fsys: fstest.MapFS{
				"main.go": &fstest.MapFile{Data: []byte("package main\n\nfunc main() {}")},
			},
		},
		{
			name: "python misery",
			fsys: fstest.MapFS{
				"README.md": &fstest.MapFile{Data: []byte("# Our amazing repo\n\nTo use this project you need python 3.11 and the dvc tool")},
			},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Recording and golden-file paths are derived from the test name.
			basePath := "testdata/" + strings.ToLower(strings.Replace(t.Name(), "/", "_", -1))
			rrPath := basePath + ".httprr"
			rr, err := httprr.Open(rrPath, http.DefaultTransport)
			if err != nil && !os.IsNotExist(err) {
				t.Fatal(err)
			}
			// NOTE(review): when rrPath does not exist, the error is
			// swallowed above and rr is used anyway — confirm httprr.Open
			// returns a usable recorder in that case; otherwise this panics.
			rr.ScrubReq(func(req *http.Request) error {
				// Keep API keys out of the recordings.
				req.Header.Del("x-api-key")
				return nil
			})
			initFiles, err := readInitFiles(tt.fsys)
			if err != nil {
				t.Fatal(err)
			}
			result, err := createDockerfile(ctx, rr.Client(), "", os.Getenv("ANTHROPIC_API_KEY"), initFiles, "")
			if err != nil {
				t.Fatal(err)
			}

			wantPath := basePath + ".dockerfile"

			// In rewrite mode, overwrite the golden file and stop.
			if *flagRewriteWant {
				if err := os.WriteFile(wantPath, []byte(result), 0o666); err != nil {
					t.Fatal(err)
				}
				return
			}

			wantBytes, err := os.ReadFile(wantPath)
			if err != nil {
				t.Fatal(err)
			}
			want := string(wantBytes)
			if diff := cmp.Diff(want, result); diff != "" {
				t.Errorf("dockerfile does not match. got:\n----\n%s\n----\n\ndiff: %s", result, diff)
			}
		})
	}
}
+
+func TestReadInitFiles(t *testing.T) {
+ testFS := fstest.MapFS{
+ "README.md": &fstest.MapFile{Data: []byte("# Test Repo")},
+ ".github/workflows/test.yml": &fstest.MapFile{Data: []byte("name: Test Workflow")},
+ "main.go": &fstest.MapFile{Data: []byte("package main")},
+ ".git/HEAD": &fstest.MapFile{Data: []byte("ref: refs/heads/main")},
+ "random/README.md": &fstest.MapFile{Data: []byte("ignore me")},
+ }
+
+ files, err := readInitFiles(testFS)
+ if err != nil {
+ t.Fatalf("readInitFiles failed: %v", err)
+ }
+
+ // Should have 2 files: README.md and .github/workflows/test.yml
+ if len(files) != 2 {
+ t.Errorf("Expected 2 files, got %d", len(files))
+ }
+
+ if content, ok := files["README.md"]; !ok {
+ t.Error("README.md not found")
+ } else if content != "# Test Repo" {
+ t.Errorf("README.md has incorrect content: %q", content)
+ }
+
+ if content, ok := files[".github/workflows/test.yml"]; !ok {
+ t.Error(".github/workflows/test.yml not found")
+ } else if content != "name: Test Workflow" {
+ t.Errorf("Workflow file has incorrect content: %q", content)
+ }
+
+ if _, ok := files["main.go"]; ok {
+ t.Error("main.go should not be included")
+ }
+
+ if _, ok := files[".git/HEAD"]; ok {
+ t.Error(".git/HEAD should not be included")
+ }
+}
+
+func TestReadInitFilesWithSubdir(t *testing.T) {
+ // Create a file system with files in a subdirectory
+ testFS := fstest.MapFS{
+ "subdir/README.md": &fstest.MapFile{Data: []byte("# Test Repo")},
+ "subdir/.github/workflows/test.yml": &fstest.MapFile{Data: []byte("name: Test Workflow")},
+ "subdir/main.go": &fstest.MapFile{Data: []byte("package main")},
+ }
+
+ // Use fs.Sub to get a sub-filesystem
+ subFS, err := fs.Sub(testFS, "subdir")
+ if err != nil {
+ t.Fatalf("fs.Sub failed: %v", err)
+ }
+
+ files, err := readInitFiles(subFS)
+ if err != nil {
+ t.Fatalf("readInitFiles failed: %v", err)
+ }
+
+ // Should have 2 files: README.md and .github/workflows/test.yml
+ if len(files) != 2 {
+ t.Errorf("Expected 2 files, got %d", len(files))
+ }
+
+ // Verify README.md was found
+ if content, ok := files["README.md"]; !ok {
+ t.Error("README.md not found")
+ } else if content != "# Test Repo" {
+ t.Errorf("README.md has incorrect content: %q", content)
+ }
+
+ // Verify workflow file was found
+ if content, ok := files[".github/workflows/test.yml"]; !ok {
+ t.Error(".github/workflows/test.yml not found")
+ } else if content != "name: Test Workflow" {
+ t.Errorf("Workflow file has incorrect content: %q", content)
+ }
+}
diff --git a/dockerimg/githttp.go b/dockerimg/githttp.go
new file mode 100644
index 0000000..d618ab6
--- /dev/null
+++ b/dockerimg/githttp.go
@@ -0,0 +1,53 @@
+package dockerimg
+
+import (
+ "fmt"
+ "log/slog"
+ "net/http"
+ "net/http/cgi"
+ "os/exec"
+ "strings"
+)
+
+type gitHTTP struct {
+ gitRepoRoot string
+}
+
+func (g *gitHTTP) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ defer func() {
+ if err := recover(); err != nil {
+ slog.ErrorContext(r.Context(), "gitHTTP.ServeHTTP panic", slog.Any("recovered_err", err))
+
+ // Return an error response to the client
+ http.Error(w, fmt.Sprintf("panic: %v\n", err), http.StatusInternalServerError)
+ }
+ }()
+ if !strings.HasPrefix(r.RemoteAddr, "127.0.0.1:") {
+ slog.InfoContext(r.Context(), "githttp: denied", "remote addr", r.RemoteAddr)
+ http.Error(w, "no", http.StatusUnauthorized)
+ return
+ }
+ gitBin, err := exec.LookPath("git")
+ if err != nil {
+ http.Error(w, "no git: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ w.Header().Set("Cache-Control", "no-cache")
+ h := &cgi.Handler{
+ Path: gitBin,
+ Args: []string{"http-backend"},
+ Dir: g.gitRepoRoot,
+ Env: []string{
+ "GIT_PROJECT_ROOT=" + g.gitRepoRoot,
+ "PATH_INFO=" + r.URL.Path,
+ "QUERY_STRING=" + r.URL.RawQuery,
+ "REQUEST_METHOD=" + r.Method,
+ "GIT_HTTP_EXPORT_ALL=true",
+ "GIT_HTTP_ALLOW_REPACK=true",
+ "GIT_HTTP_ALLOW_PUSH=true",
+ "GIT_HTTP_VERBOSE=1",
+ },
+ }
+ h.ServeHTTP(w, r)
+}
diff --git a/dockerimg/testdata/dockerfile_convo.httprr b/dockerimg/testdata/dockerfile_convo.httprr
new file mode 100644
index 0000000..0470323
--- /dev/null
+++ b/dockerimg/testdata/dockerfile_convo.httprr
@@ -0,0 +1,30 @@
+HTTP/1.1 200 OK
+Date: Wed, 05 Mar 2025 10:00:00 GMT
+Content-Type: application/json
+Content-Length: 987
+
+{
+ "id": "msg_0123456789abcdef",
+ "type": "message",
+ "role": "assistant",
+ "model": "claude-3-opus-20240229",
+ "stop_reason": "tool_use",
+ "stop_sequence": null,
+ "usage": {
+ "input_tokens": 256,
+ "output_tokens": 180
+ },
+ "content": [
+ {
+ "type": "text",
+ "text": "I'll create a Dockerfile for this Go project."
+ }
+ ],
+ "tool_use": {
+ "id": "tu_0123456789abcdef",
+ "name": "dockerfile",
+ "input": {
+ "contents": "FROM golang:1.21-alpine\n\nWORKDIR /app\n\n# Install git for dependencies\nRUN apk add --no-cache git\n\n# Pre-copy/cache go.mod for efficient Docker caching\nCOPY go.mod go.sum* ./\nRUN go mod download\n\n# Copy the source code\nCOPY . .\n\n# Build the application\nRUN go build -o app ./cmd/sketch\n\n# Environment setup\nENV CGO_ENABLED=0\n\nCMD [\"./app\"]"
+ }
+ }
+}
diff --git a/dockerimg/testdata/testcreatedockerfile_basic_repo_with_readme.dockerfile b/dockerimg/testdata/testcreatedockerfile_basic_repo_with_readme.dockerfile
new file mode 100644
index 0000000..128c852
--- /dev/null
+++ b/dockerimg/testdata/testcreatedockerfile_basic_repo_with_readme.dockerfile
@@ -0,0 +1,27 @@
+FROM golang:1.24.2-alpine3.21
+
+RUN apk add bash git make jq sqlite gcc musl-dev linux-headers npm nodejs go github-cli ripgrep fzf
+
+ENV GOTOOLCHAIN=auto
+ENV GOPATH=/go
+ENV PATH="$GOPATH/bin:$PATH"
+
+RUN go install golang.org/x/tools/cmd/goimports@latest
+RUN go install golang.org/x/tools/gopls@latest
+RUN go install mvdan.cc/gofumpt@latest
+
+RUN apk add --no-cache curl || true
+
+ARG GIT_USER_EMAIL
+ARG GIT_USER_NAME
+
+RUN git config --global user.email "$GIT_USER_EMAIL" && \
+ git config --global user.name "$GIT_USER_NAME"
+
+LABEL sketch_context="72646c3fcb61b4bb1017a3b5b76d5b2126e9f563ce7da5d58a379539595f0344"
+COPY . /app
+
+WORKDIR /app
+RUN if [ -f go.mod ]; then go mod download; fi
+
+CMD ["/bin/sketch"]
\ No newline at end of file
diff --git a/dockerimg/testdata/testcreatedockerfile_basic_repo_with_readme.httprr b/dockerimg/testdata/testcreatedockerfile_basic_repo_with_readme.httprr
new file mode 100644
index 0000000..1c3ee06
--- /dev/null
+++ b/dockerimg/testdata/testcreatedockerfile_basic_repo_with_readme.httprr
@@ -0,0 +1,81 @@
+httprr trace v1
+3037 1681
+POST https://api.anthropic.com/v1/messages HTTP/1.1
+Host: api.anthropic.com
+User-Agent: Go-http-client/1.1
+Content-Length: 2840
+Anthropic-Version: 2023-06-01
+Content-Type: application/json
+
+{
+ "model": "claude-3-7-sonnet-20250219",
+ "messages": [
+ {
+ "role": "user",
+ "content": [
+ {
+ "type": "text",
+ "text": "\nCall the dockerfile tool to create a Dockerfile.\nThe parameters to dockerfile fill out the From and ExtraCmds\ntemplate variables in the following Go template:\n\n```\nFROM {{.From}}\n\nRUN apk add bash git make jq sqlite gcc musl-dev linux-headers npm nodejs go github-cli ripgrep fzf\n\nENV GOTOOLCHAIN=auto\nENV GOPATH=/go\nENV PATH=\"$GOPATH/bin:$PATH\"\n\nRUN go install golang.org/x/tools/cmd/goimports@latest\nRUN go install golang.org/x/tools/gopls@latest\nRUN go install mvdan.cc/gofumpt@latest\n\n{{.ExtraCmds}}\n\nARG GIT_USER_EMAIL\nARG GIT_USER_NAME\n\nRUN git config --global user.email \"$GIT_USER_EMAIL\" \u0026\u0026 \\\n git config --global user.name \"$GIT_USER_NAME\"\n\nLABEL sketch_context=\"{{.InitFilesHash}}\"\nCOPY . /app\n\nWORKDIR /app{{.SubDir}}\nRUN if [ -f go.mod ]; then go mod download; fi\n\nCMD [\"/bin/sketch\"]\n```\n\nIn particular:\n- Assume it is primarily a Go project. For a minimal env, prefer 1.24.2-alpine3.21 as a base image.\n- If any python is needed at all, switch to using a python alpine image as a the base and apk add go.\n Favor using uv, and use one of these base images, depending on the preferred python version:\n ghcr.io/astral-sh/uv:python3.13-alpine\n ghcr.io/astral-sh/uv:python3.12-alpine\n ghcr.io/astral-sh/uv:python3.11-alpine\n- When using pip to install packages, use: uv pip install --system.\n- Python env setup is challenging and often no required, so any RUN commands involving python tooling should be written to let docker build continue if there is a failure.\n- Include any tools particular to this repository that can be inferred from the given context.\n- Append || true to any apk add commands in case the package does not exist.\n- Do not expose any ports.\nHere is the content of several files from the repository that may be relevant:\n\n"
+ },
+ {
+ "type": "text",
+ "text": "Here is the contents README.md:\n\u003cfile\u003e\n# Test Project\nA Go project for testing.\n\u003c/file\u003e\n\n"
+ },
+ {
+ "type": "text",
+ "text": "Now call the dockerfile tool.",
+ "cache_control": {
+ "type": "ephemeral"
+ }
+ }
+ ]
+ }
+ ],
+ "max_tokens": 8192,
+ "tools": [
+ {
+ "name": "dockerfile",
+ "description": "Helps define a Dockerfile that sets up a dev environment for this project.",
+ "input_schema": {
+ "type": "object",
+ "required": [
+ "from",
+ "extra_cmds"
+ ],
+ "properties": {
+ "from": {
+ "type": "string",
+ "description": "The alpine base image provided to the dockerfile FROM command"
+ },
+ "extra_cmds": {
+ "type": "string",
+ "description": "Extra commands to add to the dockerfile."
+ }
+ }
+ }
+ }
+ ]
+}HTTP/2.0 200 OK
+Anthropic-Organization-Id: 3c473a21-7208-450a-a9f8-80aebda45c1b
+Anthropic-Ratelimit-Input-Tokens-Limit: 200000
+Anthropic-Ratelimit-Input-Tokens-Remaining: 200000
+Anthropic-Ratelimit-Input-Tokens-Reset: 2025-04-05T00:17:28Z
+Anthropic-Ratelimit-Output-Tokens-Limit: 80000
+Anthropic-Ratelimit-Output-Tokens-Remaining: 80000
+Anthropic-Ratelimit-Output-Tokens-Reset: 2025-04-05T00:17:30Z
+Anthropic-Ratelimit-Requests-Limit: 4000
+Anthropic-Ratelimit-Requests-Remaining: 3999
+Anthropic-Ratelimit-Requests-Reset: 2025-04-05T00:17:28Z
+Anthropic-Ratelimit-Tokens-Limit: 280000
+Anthropic-Ratelimit-Tokens-Remaining: 280000
+Anthropic-Ratelimit-Tokens-Reset: 2025-04-05T00:17:28Z
+Cf-Cache-Status: DYNAMIC
+Cf-Ray: 92b4dcd6de42943a-SJC
+Content-Type: application/json
+Date: Sat, 05 Apr 2025 00:17:30 GMT
+Request-Id: req_01ABvmpoBii9QTziayoGGv2a
+Server: cloudflare
+Via: 1.1 google
+X-Robots-Tag: none
+
+{"id":"msg_01TCvHPyXog5ysYCPaETR9JD","type":"message","role":"assistant","model":"claude-3-7-sonnet-20250219","content":[{"type":"text","text":"I'll help you create a Dockerfile for this Go project.\n\nBased on the information provided, this appears to be a straightforward Go project without any Python requirements. The README confirms it's \"A Go project for testing.\" Therefore, I'll use the Go alpine image as the base."},{"type":"tool_use","id":"toolu_01MYYXLM3H8esGKeDGsjcrSY","name":"dockerfile","input":{"from":"golang:1.24.2-alpine3.21","extra_cmds":"RUN apk add --no-cache curl || true"}}],"stop_reason":"tool_use","stop_sequence":null,"usage":{"input_tokens":1014,"cache_creation_input_tokens":0,"cache_read_input_tokens":0,"output_tokens":153}}
\ No newline at end of file
diff --git a/dockerimg/testdata/testcreatedockerfile_empty_repo.dockerfile b/dockerimg/testdata/testcreatedockerfile_empty_repo.dockerfile
new file mode 100644
index 0000000..a346cd3
--- /dev/null
+++ b/dockerimg/testdata/testcreatedockerfile_empty_repo.dockerfile
@@ -0,0 +1,27 @@
+FROM golang:1.24.2-alpine3.21
+
+RUN apk add bash git make jq sqlite gcc musl-dev linux-headers npm nodejs go github-cli ripgrep fzf
+
+ENV GOTOOLCHAIN=auto
+ENV GOPATH=/go
+ENV PATH="$GOPATH/bin:$PATH"
+
+RUN go install golang.org/x/tools/cmd/goimports@latest
+RUN go install golang.org/x/tools/gopls@latest
+RUN go install mvdan.cc/gofumpt@latest
+
+RUN echo "Go development environment ready"
+
+ARG GIT_USER_EMAIL
+ARG GIT_USER_NAME
+
+RUN git config --global user.email "$GIT_USER_EMAIL" && \
+ git config --global user.name "$GIT_USER_NAME"
+
+LABEL sketch_context="e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
+COPY . /app
+
+WORKDIR /app
+RUN if [ -f go.mod ]; then go mod download; fi
+
+CMD ["/bin/sketch"]
\ No newline at end of file
diff --git a/dockerimg/testdata/testcreatedockerfile_empty_repo.httprr b/dockerimg/testdata/testcreatedockerfile_empty_repo.httprr
new file mode 100644
index 0000000..abc3dbf
--- /dev/null
+++ b/dockerimg/testdata/testcreatedockerfile_empty_repo.httprr
@@ -0,0 +1,77 @@
+httprr trace v1
+2790 1642
+POST https://api.anthropic.com/v1/messages HTTP/1.1
+Host: api.anthropic.com
+User-Agent: Go-http-client/1.1
+Content-Length: 2593
+Anthropic-Version: 2023-06-01
+Content-Type: application/json
+
+{
+ "model": "claude-3-7-sonnet-20250219",
+ "messages": [
+ {
+ "role": "user",
+ "content": [
+ {
+ "type": "text",
+ "text": "\nCall the dockerfile tool to create a Dockerfile.\nThe parameters to dockerfile fill out the From and ExtraCmds\ntemplate variables in the following Go template:\n\n```\nFROM {{.From}}\n\nRUN apk add bash git make jq sqlite gcc musl-dev linux-headers npm nodejs go github-cli ripgrep fzf\n\nENV GOTOOLCHAIN=auto\nENV GOPATH=/go\nENV PATH=\"$GOPATH/bin:$PATH\"\n\nRUN go install golang.org/x/tools/cmd/goimports@latest\nRUN go install golang.org/x/tools/gopls@latest\nRUN go install mvdan.cc/gofumpt@latest\n\n{{.ExtraCmds}}\n\nARG GIT_USER_EMAIL\nARG GIT_USER_NAME\n\nRUN git config --global user.email \"$GIT_USER_EMAIL\" \u0026\u0026 \\\n git config --global user.name \"$GIT_USER_NAME\"\n\nLABEL sketch_context=\"{{.InitFilesHash}}\"\nCOPY . /app\n\nWORKDIR /app{{.SubDir}}\nRUN if [ -f go.mod ]; then go mod download; fi\n\nCMD [\"/bin/sketch\"]\n```\n\nIn particular:\n- Assume it is primarily a Go project. For a minimal env, prefer 1.24.2-alpine3.21 as a base image.\n- If any python is needed at all, switch to using a python alpine image as a the base and apk add go.\n Favor using uv, and use one of these base images, depending on the preferred python version:\n ghcr.io/astral-sh/uv:python3.13-alpine\n ghcr.io/astral-sh/uv:python3.12-alpine\n ghcr.io/astral-sh/uv:python3.11-alpine\n- When using pip to install packages, use: uv pip install --system.\n- Python env setup is challenging and often no required, so any RUN commands involving python tooling should be written to let docker build continue if there is a failure.\n- Include any tools particular to this repository that can be inferred from the given context.\n- Append || true to any apk add commands in case the package does not exist.\n- Do not expose any ports.\n"
+ },
+ {
+ "type": "text",
+ "text": "Now call the dockerfile tool.",
+ "cache_control": {
+ "type": "ephemeral"
+ }
+ }
+ ]
+ }
+ ],
+ "max_tokens": 8192,
+ "tools": [
+ {
+ "name": "dockerfile",
+ "description": "Helps define a Dockerfile that sets up a dev environment for this project.",
+ "input_schema": {
+ "type": "object",
+ "required": [
+ "from",
+ "extra_cmds"
+ ],
+ "properties": {
+ "from": {
+ "type": "string",
+ "description": "The alpine base image provided to the dockerfile FROM command"
+ },
+ "extra_cmds": {
+ "type": "string",
+ "description": "Extra commands to add to the dockerfile."
+ }
+ }
+ }
+ }
+ ]
+}HTTP/2.0 200 OK
+Anthropic-Organization-Id: 3c473a21-7208-450a-a9f8-80aebda45c1b
+Anthropic-Ratelimit-Input-Tokens-Limit: 200000
+Anthropic-Ratelimit-Input-Tokens-Remaining: 200000
+Anthropic-Ratelimit-Input-Tokens-Reset: 2025-04-05T00:17:37Z
+Anthropic-Ratelimit-Output-Tokens-Limit: 80000
+Anthropic-Ratelimit-Output-Tokens-Remaining: 80000
+Anthropic-Ratelimit-Output-Tokens-Reset: 2025-04-05T00:17:39Z
+Anthropic-Ratelimit-Requests-Limit: 4000
+Anthropic-Ratelimit-Requests-Remaining: 3999
+Anthropic-Ratelimit-Requests-Reset: 2025-04-05T00:17:36Z
+Anthropic-Ratelimit-Tokens-Limit: 280000
+Anthropic-Ratelimit-Tokens-Remaining: 280000
+Anthropic-Ratelimit-Tokens-Reset: 2025-04-05T00:17:37Z
+Cf-Cache-Status: DYNAMIC
+Cf-Ray: 92b4dd0b09c6943a-SJC
+Content-Type: application/json
+Date: Sat, 05 Apr 2025 00:17:39 GMT
+Request-Id: req_01CT8x3PBEoaYUopedMVuEqp
+Server: cloudflare
+Via: 1.1 google
+X-Robots-Tag: none
+
+{"id":"msg_01SFTkFTza8Sagk4GWC5g1SJ","type":"message","role":"assistant","model":"claude-3-7-sonnet-20250219","content":[{"type":"text","text":"I'll help you create a Dockerfile using the dockerfile tool. Based on your requirements, I'll use the Go 1.24.2-alpine3.21 base image since you indicated it's primarily a Go project and no specific Python requirements were mentioned."},{"type":"tool_use","id":"toolu_01EN6SxM4w1B9onGtpLGnwNF","name":"dockerfile","input":{"from":"golang:1.24.2-alpine3.21","extra_cmds":"RUN echo \"Go development environment ready\""}}],"stop_reason":"tool_use","stop_sequence":null,"usage":{"input_tokens":970,"cache_creation_input_tokens":0,"cache_read_input_tokens":0,"output_tokens":145}}
\ No newline at end of file
diff --git a/dockerimg/testdata/testcreatedockerfile_mention_a_devtool_in_the_readme.dockerfile b/dockerimg/testdata/testcreatedockerfile_mention_a_devtool_in_the_readme.dockerfile
new file mode 100644
index 0000000..c1a8bd6
--- /dev/null
+++ b/dockerimg/testdata/testcreatedockerfile_mention_a_devtool_in_the_readme.dockerfile
@@ -0,0 +1,27 @@
+FROM golang:1.24.2-alpine3.21
+
+RUN apk add bash git make jq sqlite gcc musl-dev linux-headers npm nodejs go github-cli ripgrep fzf
+
+ENV GOTOOLCHAIN=auto
+ENV GOPATH=/go
+ENV PATH="$GOPATH/bin:$PATH"
+
+RUN go install golang.org/x/tools/cmd/goimports@latest
+RUN go install golang.org/x/tools/gopls@latest
+RUN go install mvdan.cc/gofumpt@latest
+
+RUN apk add graphviz || true
+
+ARG GIT_USER_EMAIL
+ARG GIT_USER_NAME
+
+RUN git config --global user.email "$GIT_USER_EMAIL" && \
+ git config --global user.name "$GIT_USER_NAME"
+
+LABEL sketch_context="077dd6d8e701af79c72b77ea9f851278a82eb35ea0c63e1999a2ef78272ce284"
+COPY . /app
+
+WORKDIR /app
+RUN if [ -f go.mod ]; then go mod download; fi
+
+CMD ["/bin/sketch"]
\ No newline at end of file
diff --git a/dockerimg/testdata/testcreatedockerfile_mention_a_devtool_in_the_readme.httprr b/dockerimg/testdata/testcreatedockerfile_mention_a_devtool_in_the_readme.httprr
new file mode 100644
index 0000000..f4542e9
--- /dev/null
+++ b/dockerimg/testdata/testcreatedockerfile_mention_a_devtool_in_the_readme.httprr
@@ -0,0 +1,81 @@
+httprr trace v1
+3052 1634
+POST https://api.anthropic.com/v1/messages HTTP/1.1
+Host: api.anthropic.com
+User-Agent: Go-http-client/1.1
+Content-Length: 2855
+Anthropic-Version: 2023-06-01
+Content-Type: application/json
+
+{
+ "model": "claude-3-7-sonnet-20250219",
+ "messages": [
+ {
+ "role": "user",
+ "content": [
+ {
+ "type": "text",
+ "text": "\nCall the dockerfile tool to create a Dockerfile.\nThe parameters to dockerfile fill out the From and ExtraCmds\ntemplate variables in the following Go template:\n\n```\nFROM {{.From}}\n\nRUN apk add bash git make jq sqlite gcc musl-dev linux-headers npm nodejs go github-cli ripgrep fzf\n\nENV GOTOOLCHAIN=auto\nENV GOPATH=/go\nENV PATH=\"$GOPATH/bin:$PATH\"\n\nRUN go install golang.org/x/tools/cmd/goimports@latest\nRUN go install golang.org/x/tools/gopls@latest\nRUN go install mvdan.cc/gofumpt@latest\n\n{{.ExtraCmds}}\n\nARG GIT_USER_EMAIL\nARG GIT_USER_NAME\n\nRUN git config --global user.email \"$GIT_USER_EMAIL\" \u0026\u0026 \\\n git config --global user.name \"$GIT_USER_NAME\"\n\nLABEL sketch_context=\"{{.InitFilesHash}}\"\nCOPY . /app\n\nWORKDIR /app{{.SubDir}}\nRUN if [ -f go.mod ]; then go mod download; fi\n\nCMD [\"/bin/sketch\"]\n```\n\nIn particular:\n- Assume it is primarily a Go project. For a minimal env, prefer 1.24.2-alpine3.21 as a base image.\n- If any python is needed at all, switch to using a python alpine image as a the base and apk add go.\n Favor using uv, and use one of these base images, depending on the preferred python version:\n ghcr.io/astral-sh/uv:python3.13-alpine\n ghcr.io/astral-sh/uv:python3.12-alpine\n ghcr.io/astral-sh/uv:python3.11-alpine\n- When using pip to install packages, use: uv pip install --system.\n- Python env setup is challenging and often no required, so any RUN commands involving python tooling should be written to let docker build continue if there is a failure.\n- Include any tools particular to this repository that can be inferred from the given context.\n- Append || true to any apk add commands in case the package does not exist.\n- Do not expose any ports.\nHere is the content of several files from the repository that may be relevant:\n\n"
+ },
+ {
+ "type": "text",
+ "text": "Here is the contents readme.md:\n\u003cfile\u003e\n# Test Project\nYou must install `dot` to run the tests.\n\u003c/file\u003e\n\n"
+ },
+ {
+ "type": "text",
+ "text": "Now call the dockerfile tool.",
+ "cache_control": {
+ "type": "ephemeral"
+ }
+ }
+ ]
+ }
+ ],
+ "max_tokens": 8192,
+ "tools": [
+ {
+ "name": "dockerfile",
+ "description": "Helps define a Dockerfile that sets up a dev environment for this project.",
+ "input_schema": {
+ "type": "object",
+ "required": [
+ "from",
+ "extra_cmds"
+ ],
+ "properties": {
+ "from": {
+ "type": "string",
+ "description": "The alpine base image provided to the dockerfile FROM command"
+ },
+ "extra_cmds": {
+ "type": "string",
+ "description": "Extra commands to add to the dockerfile."
+ }
+ }
+ }
+ }
+ ]
+}HTTP/2.0 200 OK
+Anthropic-Organization-Id: 3c473a21-7208-450a-a9f8-80aebda45c1b
+Anthropic-Ratelimit-Input-Tokens-Limit: 200000
+Anthropic-Ratelimit-Input-Tokens-Remaining: 200000
+Anthropic-Ratelimit-Input-Tokens-Reset: 2025-04-05T00:17:34Z
+Anthropic-Ratelimit-Output-Tokens-Limit: 80000
+Anthropic-Ratelimit-Output-Tokens-Remaining: 80000
+Anthropic-Ratelimit-Output-Tokens-Reset: 2025-04-05T00:17:36Z
+Anthropic-Ratelimit-Requests-Limit: 4000
+Anthropic-Ratelimit-Requests-Remaining: 3999
+Anthropic-Ratelimit-Requests-Reset: 2025-04-05T00:17:33Z
+Anthropic-Ratelimit-Tokens-Limit: 280000
+Anthropic-Ratelimit-Tokens-Remaining: 280000
+Anthropic-Ratelimit-Tokens-Reset: 2025-04-05T00:17:34Z
+Cf-Cache-Status: DYNAMIC
+Cf-Ray: 92b4dcf9cae5943a-SJC
+Content-Type: application/json
+Date: Sat, 05 Apr 2025 00:17:36 GMT
+Request-Id: req_018EvH3HMhPpXWH9gksCthDn
+Server: cloudflare
+Via: 1.1 google
+X-Robots-Tag: none
+
+{"id":"msg_0178hjcqP4yFcpvD8FSm9LRp","type":"message","role":"assistant","model":"claude-3-7-sonnet-20250219","content":[{"type":"text","text":"Based on the information you've provided, I'll create a Dockerfile for your Go project. From the readme.md, I can see that the project requires the `dot` command, which is part of the Graphviz package, so I'll include this in the Dockerfile."},{"type":"tool_use","id":"toolu_01AGSztHxramwDkeKSevXYHz","name":"dockerfile","input":{"from":"golang:1.24.2-alpine3.21","extra_cmds":"RUN apk add graphviz || true"}}],"stop_reason":"tool_use","stop_sequence":null,"usage":{"input_tokens":1019,"cache_creation_input_tokens":0,"cache_read_input_tokens":0,"output_tokens":151}}
\ No newline at end of file
diff --git a/dockerimg/testdata/testcreatedockerfile_python_misery.dockerfile b/dockerimg/testdata/testcreatedockerfile_python_misery.dockerfile
new file mode 100644
index 0000000..c27fb1a
--- /dev/null
+++ b/dockerimg/testdata/testcreatedockerfile_python_misery.dockerfile
@@ -0,0 +1,33 @@
+FROM ghcr.io/astral-sh/uv:python3.11-alpine
+
+RUN apk add bash git make jq sqlite gcc musl-dev linux-headers npm nodejs go github-cli ripgrep fzf
+
+ENV GOTOOLCHAIN=auto
+ENV GOPATH=/go
+ENV PATH="$GOPATH/bin:$PATH"
+
+RUN go install golang.org/x/tools/cmd/goimports@latest
+RUN go install golang.org/x/tools/gopls@latest
+RUN go install mvdan.cc/gofumpt@latest
+
+RUN apk add go || true
+
+# Install DVC (Data Version Control)
+RUN uv pip install --system dvc || true
+
+# Additional Python setup
+RUN uv pip install --system pytest pytest-cov || true
+
+ARG GIT_USER_EMAIL
+ARG GIT_USER_NAME
+
+RUN git config --global user.email "$GIT_USER_EMAIL" && \
+ git config --global user.name "$GIT_USER_NAME"
+
+LABEL sketch_context="5908dbf564085457e184c617549809359247c4f6e45aa8789e94122cecd538fb"
+COPY . /app
+
+WORKDIR /app
+RUN if [ -f go.mod ]; then go mod download; fi
+
+CMD ["/bin/sketch"]
\ No newline at end of file
diff --git a/dockerimg/testdata/testcreatedockerfile_python_misery.httprr b/dockerimg/testdata/testcreatedockerfile_python_misery.httprr
new file mode 100644
index 0000000..4498293
--- /dev/null
+++ b/dockerimg/testdata/testcreatedockerfile_python_misery.httprr
@@ -0,0 +1,81 @@
+httprr trace v1
+3075 1793
+POST https://api.anthropic.com/v1/messages HTTP/1.1
+Host: api.anthropic.com
+User-Agent: Go-http-client/1.1
+Content-Length: 2878
+Anthropic-Version: 2023-06-01
+Content-Type: application/json
+
+{
+ "model": "claude-3-7-sonnet-20250219",
+ "messages": [
+ {
+ "role": "user",
+ "content": [
+ {
+ "type": "text",
+ "text": "\nCall the dockerfile tool to create a Dockerfile.\nThe parameters to dockerfile fill out the From and ExtraCmds\ntemplate variables in the following Go template:\n\n```\nFROM {{.From}}\n\nRUN apk add bash git make jq sqlite gcc musl-dev linux-headers npm nodejs go github-cli ripgrep fzf\n\nENV GOTOOLCHAIN=auto\nENV GOPATH=/go\nENV PATH=\"$GOPATH/bin:$PATH\"\n\nRUN go install golang.org/x/tools/cmd/goimports@latest\nRUN go install golang.org/x/tools/gopls@latest\nRUN go install mvdan.cc/gofumpt@latest\n\n{{.ExtraCmds}}\n\nARG GIT_USER_EMAIL\nARG GIT_USER_NAME\n\nRUN git config --global user.email \"$GIT_USER_EMAIL\" \u0026\u0026 \\\n git config --global user.name \"$GIT_USER_NAME\"\n\nLABEL sketch_context=\"{{.InitFilesHash}}\"\nCOPY . /app\n\nWORKDIR /app{{.SubDir}}\nRUN if [ -f go.mod ]; then go mod download; fi\n\nCMD [\"/bin/sketch\"]\n```\n\nIn particular:\n- Assume it is primarily a Go project. For a minimal env, prefer 1.24.2-alpine3.21 as a base image.\n- If any python is needed at all, switch to using a python alpine image as a the base and apk add go.\n Favor using uv, and use one of these base images, depending on the preferred python version:\n ghcr.io/astral-sh/uv:python3.13-alpine\n ghcr.io/astral-sh/uv:python3.12-alpine\n ghcr.io/astral-sh/uv:python3.11-alpine\n- When using pip to install packages, use: uv pip install --system.\n- Python env setup is challenging and often no required, so any RUN commands involving python tooling should be written to let docker build continue if there is a failure.\n- Include any tools particular to this repository that can be inferred from the given context.\n- Append || true to any apk add commands in case the package does not exist.\n- Do not expose any ports.\nHere is the content of several files from the repository that may be relevant:\n\n"
+ },
+ {
+ "type": "text",
+ "text": "Here is the contents README.md:\n\u003cfile\u003e\n# Our amazing repo\n\nTo use this project you need python 3.11 and the dvc tool\n\u003c/file\u003e\n\n"
+ },
+ {
+ "type": "text",
+ "text": "Now call the dockerfile tool.",
+ "cache_control": {
+ "type": "ephemeral"
+ }
+ }
+ ]
+ }
+ ],
+ "max_tokens": 8192,
+ "tools": [
+ {
+ "name": "dockerfile",
+ "description": "Helps define a Dockerfile that sets up a dev environment for this project.",
+ "input_schema": {
+ "type": "object",
+ "required": [
+ "from",
+ "extra_cmds"
+ ],
+ "properties": {
+ "from": {
+ "type": "string",
+ "description": "The alpine base image provided to the dockerfile FROM command"
+ },
+ "extra_cmds": {
+ "type": "string",
+ "description": "Extra commands to add to the dockerfile."
+ }
+ }
+ }
+ }
+ ]
+}HTTP/2.0 200 OK
+Anthropic-Organization-Id: 3c473a21-7208-450a-a9f8-80aebda45c1b
+Anthropic-Ratelimit-Input-Tokens-Limit: 200000
+Anthropic-Ratelimit-Input-Tokens-Remaining: 200000
+Anthropic-Ratelimit-Input-Tokens-Reset: 2025-04-05T00:17:39Z
+Anthropic-Ratelimit-Output-Tokens-Limit: 80000
+Anthropic-Ratelimit-Output-Tokens-Remaining: 80000
+Anthropic-Ratelimit-Output-Tokens-Reset: 2025-04-05T00:17:42Z
+Anthropic-Ratelimit-Requests-Limit: 4000
+Anthropic-Ratelimit-Requests-Remaining: 3999
+Anthropic-Ratelimit-Requests-Reset: 2025-04-05T00:17:39Z
+Anthropic-Ratelimit-Tokens-Limit: 280000
+Anthropic-Ratelimit-Tokens-Remaining: 280000
+Anthropic-Ratelimit-Tokens-Reset: 2025-04-05T00:17:39Z
+Cf-Cache-Status: DYNAMIC
+Cf-Ray: 92b4dd1b7f3d943a-SJC
+Content-Type: application/json
+Date: Sat, 05 Apr 2025 00:17:42 GMT
+Request-Id: req_01TzHQukmmrk6cTHsyxuMSuZ
+Server: cloudflare
+Via: 1.1 google
+X-Robots-Tag: none
+
+{"id":"msg_01DVLCsdrDvzRHv7221La4Mw","type":"message","role":"assistant","model":"claude-3-7-sonnet-20250219","content":[{"type":"text","text":"Based on the README.md file, I can see that this project requires Python 3.11 and the DVC tool. Given this information, I'll create a Dockerfile that uses a Python 3.11 Alpine image as the base and includes the necessary tools."},{"type":"tool_use","id":"toolu_012FnmKYSqbhwr4xkAySuwgV","name":"dockerfile","input":{"from":"ghcr.io/astral-sh/uv:python3.11-alpine","extra_cmds":"RUN apk add go || true\n\n# Install DVC (Data Version Control)\nRUN uv pip install --system dvc || true\n\n# Additional Python setup\nRUN uv pip install --system pytest pytest-cov || true"}}],"stop_reason":"tool_use","stop_sequence":null,"usage":{"input_tokens":1026,"cache_creation_input_tokens":0,"cache_read_input_tokens":0,"output_tokens":199}}
\ No newline at end of file
diff --git a/dockerimg/testdata/testcreatedockerfile_repo_with_readme_and_workflow.dockerfile b/dockerimg/testdata/testcreatedockerfile_repo_with_readme_and_workflow.dockerfile
new file mode 100644
index 0000000..a6acdb7
--- /dev/null
+++ b/dockerimg/testdata/testcreatedockerfile_repo_with_readme_and_workflow.dockerfile
@@ -0,0 +1,28 @@
+FROM golang:1.24.2-alpine3.21
+
+RUN apk add bash git make jq sqlite gcc musl-dev linux-headers npm nodejs go github-cli ripgrep fzf
+
+ENV GOTOOLCHAIN=auto
+ENV GOPATH=/go
+ENV PATH="$GOPATH/bin:$PATH"
+
+RUN go install golang.org/x/tools/cmd/goimports@latest
+RUN go install golang.org/x/tools/gopls@latest
+RUN go install mvdan.cc/gofumpt@latest
+
+RUN npm install -g corepack && \
+ corepack enable || true
+
+ARG GIT_USER_EMAIL
+ARG GIT_USER_NAME
+
+RUN git config --global user.email "$GIT_USER_EMAIL" && \
+ git config --global user.name "$GIT_USER_NAME"
+
+LABEL sketch_context="b38624baa0989968c26bf1a8b3f1c322e80ae65fa51c57743bd34eac2d5e0529"
+COPY . /app
+
+WORKDIR /app
+RUN if [ -f go.mod ]; then go mod download; fi
+
+CMD ["/bin/sketch"]
\ No newline at end of file
diff --git a/dockerimg/testdata/testcreatedockerfile_repo_with_readme_and_workflow.httprr b/dockerimg/testdata/testcreatedockerfile_repo_with_readme_and_workflow.httprr
new file mode 100644
index 0000000..32887bf
--- /dev/null
+++ b/dockerimg/testdata/testcreatedockerfile_repo_with_readme_and_workflow.httprr
@@ -0,0 +1,85 @@
+httprr trace v1
+3524 1724
+POST https://api.anthropic.com/v1/messages HTTP/1.1
+Host: api.anthropic.com
+User-Agent: Go-http-client/1.1
+Content-Length: 3327
+Anthropic-Version: 2023-06-01
+Content-Type: application/json
+
+{
+ "model": "claude-3-7-sonnet-20250219",
+ "messages": [
+ {
+ "role": "user",
+ "content": [
+ {
+ "type": "text",
+ "text": "\nCall the dockerfile tool to create a Dockerfile.\nThe parameters to dockerfile fill out the From and ExtraCmds\ntemplate variables in the following Go template:\n\n```\nFROM {{.From}}\n\nRUN apk add bash git make jq sqlite gcc musl-dev linux-headers npm nodejs go github-cli ripgrep fzf\n\nENV GOTOOLCHAIN=auto\nENV GOPATH=/go\nENV PATH=\"$GOPATH/bin:$PATH\"\n\nRUN go install golang.org/x/tools/cmd/goimports@latest\nRUN go install golang.org/x/tools/gopls@latest\nRUN go install mvdan.cc/gofumpt@latest\n\n{{.ExtraCmds}}\n\nARG GIT_USER_EMAIL\nARG GIT_USER_NAME\n\nRUN git config --global user.email \"$GIT_USER_EMAIL\" \u0026\u0026 \\\n git config --global user.name \"$GIT_USER_NAME\"\n\nLABEL sketch_context=\"{{.InitFilesHash}}\"\nCOPY . /app\n\nWORKDIR /app{{.SubDir}}\nRUN if [ -f go.mod ]; then go mod download; fi\n\nCMD [\"/bin/sketch\"]\n```\n\nIn particular:\n- Assume it is primarily a Go project. For a minimal env, prefer 1.24.2-alpine3.21 as a base image.\n- If any python is needed at all, switch to using a python alpine image as a the base and apk add go.\n Favor using uv, and use one of these base images, depending on the preferred python version:\n ghcr.io/astral-sh/uv:python3.13-alpine\n ghcr.io/astral-sh/uv:python3.12-alpine\n ghcr.io/astral-sh/uv:python3.11-alpine\n- When using pip to install packages, use: uv pip install --system.\n- Python env setup is challenging and often no required, so any RUN commands involving python tooling should be written to let docker build continue if there is a failure.\n- Include any tools particular to this repository that can be inferred from the given context.\n- Append || true to any apk add commands in case the package does not exist.\n- Do not expose any ports.\nHere is the content of several files from the repository that may be relevant:\n\n"
+ },
+ {
+ "type": "text",
+ "text": "Here is the contents .github/workflows/test.yml:\n\u003cfile\u003e\nname: Test\non: [push]\njobs:\n test:\n runs-on: ubuntu-latest\n steps:\n - uses: actions/checkout@v2\n - uses: actions/setup-node@v3\n with:\n node-version: '18'\n - name: Install and activate corepack\n run: |\n npm install -g corepack\n corepack enable\n - run: go test ./...\n\u003c/file\u003e\n\n"
+ },
+ {
+ "type": "text",
+ "text": "Here is the contents README.md:\n\u003cfile\u003e\n# Test Project\nA Go project for testing.\n\u003c/file\u003e\n\n"
+ },
+ {
+ "type": "text",
+ "text": "Now call the dockerfile tool.",
+ "cache_control": {
+ "type": "ephemeral"
+ }
+ }
+ ]
+ }
+ ],
+ "max_tokens": 8192,
+ "tools": [
+ {
+ "name": "dockerfile",
+ "description": "Helps define a Dockerfile that sets up a dev environment for this project.",
+ "input_schema": {
+ "type": "object",
+ "required": [
+ "from",
+ "extra_cmds"
+ ],
+ "properties": {
+ "from": {
+ "type": "string",
+ "description": "The alpine base image provided to the dockerfile FROM command"
+ },
+ "extra_cmds": {
+ "type": "string",
+ "description": "Extra commands to add to the dockerfile."
+ }
+ }
+ }
+ }
+ ]
+}HTTP/2.0 200 OK
+Anthropic-Organization-Id: 3c473a21-7208-450a-a9f8-80aebda45c1b
+Anthropic-Ratelimit-Input-Tokens-Limit: 200000
+Anthropic-Ratelimit-Input-Tokens-Remaining: 200000
+Anthropic-Ratelimit-Input-Tokens-Reset: 2025-04-05T00:17:31Z
+Anthropic-Ratelimit-Output-Tokens-Limit: 80000
+Anthropic-Ratelimit-Output-Tokens-Remaining: 80000
+Anthropic-Ratelimit-Output-Tokens-Reset: 2025-04-05T00:17:33Z
+Anthropic-Ratelimit-Requests-Limit: 4000
+Anthropic-Ratelimit-Requests-Remaining: 3999
+Anthropic-Ratelimit-Requests-Reset: 2025-04-05T00:17:31Z
+Anthropic-Ratelimit-Tokens-Limit: 280000
+Anthropic-Ratelimit-Tokens-Remaining: 280000
+Anthropic-Ratelimit-Tokens-Reset: 2025-04-05T00:17:31Z
+Cf-Cache-Status: DYNAMIC
+Cf-Ray: 92b4dce8bbcf943a-SJC
+Content-Type: application/json
+Date: Sat, 05 Apr 2025 00:17:33 GMT
+Request-Id: req_01HPsqm46UTtSfiiJ5GY6Fvg
+Server: cloudflare
+Via: 1.1 google
+X-Robots-Tag: none
+
+{"id":"msg_01HFWnuUBbvGmusGRZJbCKq2","type":"message","role":"assistant","model":"claude-3-7-sonnet-20250219","content":[{"type":"text","text":"Based on the provided files and requirements, I'll create a Dockerfile for this Go project.\n\nThe `.github/workflows/test.yml` indicates this is primarily a Go project with some Node.js dependencies. The workflow shows it uses Node.js 18 and corepack, so I'll include these in the extra commands."},{"type":"tool_use","id":"toolu_01AAUCno3BKmEJcZb4XWzQJ4","name":"dockerfile","input":{"from":"golang:1.24.2-alpine3.21","extra_cmds":"RUN npm install -g corepack && \\\n corepack enable || true"}}],"stop_reason":"tool_use","stop_sequence":null,"usage":{"input_tokens":4,"cache_creation_input_tokens":1139,"cache_read_input_tokens":0,"output_tokens":171}}
\ No newline at end of file
diff --git a/dockerimg/update_tests.sh b/dockerimg/update_tests.sh
new file mode 100755
index 0000000..5a42f7b
--- /dev/null
+++ b/dockerimg/update_tests.sh
@@ -0,0 +1,11 @@
#!/bin/bash
# Re-records the httprr test traces for this package.
#
# It must be run from the directory containing this script so that
# `go test` picks up the package in the current directory.

SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
CURRENT_DIR=$(pwd)

# Refuse to run from anywhere but the script's own directory.
if [ "$SCRIPT_DIR" != "$CURRENT_DIR" ]; then
 echo "Error: This script must be run from its own directory: $SCRIPT_DIR" >&2
 exit 1
fi

# -httprecord ".*" re-records every httprr trace; -rewritewant is
# presumably a flag defined by this package's tests — confirm there.
go test -httprecord ".*" -rewritewant
diff --git a/go.mod b/go.mod
new file mode 100644
index 0000000..7c3c857
--- /dev/null
+++ b/go.mod
@@ -0,0 +1,24 @@
+module sketch.dev
+
+go 1.24.2
+
+require (
+ github.com/creack/pty v1.1.24
+ github.com/evanw/esbuild v0.25.2
+ github.com/fatih/color v1.18.0
+ github.com/google/go-cmp v0.7.0
+ github.com/richardlehane/crock32 v1.0.1
+ golang.org/x/net v0.38.0
+ golang.org/x/sync v0.12.0
+ golang.org/x/term v0.30.0
+ golang.org/x/tools v0.31.0
+ mvdan.cc/sh/v3 v3.11.0
+)
+
+require (
+ github.com/mattn/go-colorable v0.1.13 // indirect
+ github.com/mattn/go-isatty v0.0.20 // indirect
+ golang.org/x/mod v0.24.0 // indirect
+ golang.org/x/sys v0.31.0 // indirect
+ golang.org/x/text v0.23.0 // indirect
+)
diff --git a/go.sum b/go.sum
new file mode 100644
index 0000000..5c23fba
--- /dev/null
+++ b/go.sum
@@ -0,0 +1,42 @@
+github.com/creack/pty v1.1.24 h1:bJrF4RRfyJnbTJqzRLHzcGaZK1NeM5kTC9jGgovnR1s=
+github.com/creack/pty v1.1.24/go.mod h1:08sCNb52WyoAwi2QDyzUCTgcvVFhUzewun7wtTfvcwE=
+github.com/evanw/esbuild v0.25.2 h1:ublSEmZSjzOc6jLO1OTQy/vHc1wiqyDF4oB3hz5sM6s=
+github.com/evanw/esbuild v0.25.2/go.mod h1:D2vIQZqV/vIf/VRHtViaUtViZmG7o+kKmlBfVQuRi48=
+github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM=
+github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU=
+github.com/go-quicktest/qt v1.101.0 h1:O1K29Txy5P2OK0dGo59b7b0LR6wKfIhttaAhHUyn7eI=
+github.com/go-quicktest/qt v1.101.0/go.mod h1:14Bz/f7NwaXPtdYEgzsx46kqSxVwTbzVZsDC26tQJow=
+github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
+github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
+github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
+github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
+github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
+github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
+github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
+github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
+github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
+github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
+github.com/richardlehane/crock32 v1.0.1 h1:GV9EqtAr7RminQ8oGrDt3gYXkzDDPJ5fROaO1Mux14g=
+github.com/richardlehane/crock32 v1.0.1/go.mod h1:xUIlLABtHBgs1bNIBdUQR9F2xtRzS0TujtbR68hmEWU=
+github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
+github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
+golang.org/x/mod v0.24.0 h1:ZfthKaKaT4NrhGVZHO1/WDTwGES4De8KtWO0SIbNJMU=
+golang.org/x/mod v0.24.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww=
+golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8=
+golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
+golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw=
+golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
+golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik=
+golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
+golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y=
+golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g=
+golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY=
+golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4=
+golang.org/x/tools v0.31.0 h1:0EedkvKDbh+qistFTd0Bcwe/YLh4vHwWEkiI0toFIBU=
+golang.org/x/tools v0.31.0/go.mod h1:naFTU+Cev749tSJRXJlna0T3WxKvb1kWEx15xA4SdmQ=
+mvdan.cc/sh/v3 v3.11.0 h1:q5h+XMDRfUGUedCqFFsjoFjrhwf2Mvtt1rkMvVz0blw=
+mvdan.cc/sh/v3 v3.11.0/go.mod h1:LRM+1NjoYCzuq/WZ6y44x14YNAI0NK7FLPeQSaFagGg=
diff --git a/httprr/LICENSE b/httprr/LICENSE
new file mode 100644
index 0000000..0aa5c13
--- /dev/null
+++ b/httprr/LICENSE
@@ -0,0 +1,27 @@
+Copyright 2024 The Go Authors
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google LLC nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/httprr/rr.go b/httprr/rr.go
new file mode 100644
index 0000000..6acde80
--- /dev/null
+++ b/httprr/rr.go
@@ -0,0 +1,394 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package httprr implements HTTP record and replay, mainly for use in tests.
+//
+// [Open] creates a new [RecordReplay]. Whether it is recording or replaying
+// is controlled by the -httprecord flag, which is defined by this package
+// only in test programs (built by “go test”).
+// See the [Open] documentation for more details.
+package httprr
+
+import (
+ "bufio"
+ "bytes"
+ "cmp"
+ "context"
+ "flag"
+ "fmt"
+ "io"
+ "net/http"
+ "os"
+ "regexp"
+ "strconv"
+ "strings"
+ "sync"
+ "testing"
+)
+
// record points at the -httprecord flag value. It starts as a pointer to
// the empty string so *record is always safe to dereference, even in
// non-test binaries where the flag is never registered.
var record = new(string)

func init() {
	// Register the -httprecord flag only inside "go test" binaries, so
	// importing this package does not add flags to ordinary programs.
	if testing.Testing() {
		record = flag.String("httprecord", "", "re-record traces for files matching `regexp`")
	}
}
+
// A RecordReplay is an [http.RoundTripper] that can operate in two modes: record and replay.
//
// In record mode, the RecordReplay invokes another RoundTripper
// and logs the (request, response) pairs to a file.
//
// In replay mode, the RecordReplay responds to requests by finding
// an identical request in the log and sending the logged response.
//
// Exactly one of replay and record is non-nil, which is how the mode
// is distinguished at runtime.
type RecordReplay struct {
	file string            // file being read or written
	real http.RoundTripper // real HTTP connection; used only in record mode

	mu        sync.Mutex                  // guards writeErr and writes to record
	reqScrub  []func(*http.Request) error // scrubbers for logging requests
	respScrub []func(*bytes.Buffer) error // scrubbers for logging responses
	replay    map[string]string           // if replaying, the log
	record    *os.File                    // if recording, the file being written
	writeErr  error                       // if recording, any write error encountered
}
+
+// ScrubReq adds new request scrubbing functions to rr.
+//
+// Before using a request as a lookup key or saving it in the record/replay log,
+// the RecordReplay calls each scrub function, in the order they were registered,
+// to canonicalize non-deterministic parts of the request and remove secrets.
+// Scrubbing only applies to a copy of the request used in the record/replay log;
+// the unmodified original request is sent to the actual server in recording mode.
+// A scrub function can assume that if req.Body is not nil, then it has type [*Body].
+//
+// Calling ScrubReq adds to the list of registered request scrubbing functions;
+// it does not replace those registered by earlier calls.
+func (rr *RecordReplay) ScrubReq(scrubs ...func(req *http.Request) error) {
+ rr.reqScrub = append(rr.reqScrub, scrubs...)
+}
+
+// ScrubResp adds new response scrubbing functions to rr.
+//
+// Before using a response as a lookup key or saving it in the record/replay log,
+// the RecordReplay calls each scrub function on a byte representation of the
+// response, in the order they were registered, to canonicalize non-deterministic
+// parts of the response and remove secrets.
+//
+// Calling ScrubResp adds to the list of registered response scrubbing functions;
+// it does not replace those registered by earlier calls.
+//
+// Clients should be careful when loading the bytes into [*http.Response] using
+// [http.ReadResponse]. This function can set [http.Response].Close to true even
+// when the original response had it false. See code in go/src/net/http.Response.Write
+// and go/src/net/http.Write for more info.
+func (rr *RecordReplay) ScrubResp(scrubs ...func(*bytes.Buffer) error) {
+ rr.respScrub = append(rr.respScrub, scrubs...)
+}
+
+// Recording reports whether the rr is in recording mode.
+func (rr *RecordReplay) Recording() bool {
+ return rr.record != nil
+}
+
+// Open opens a new record/replay log in the named file and
+// returns a [RecordReplay] backed by that file.
+//
+// By default Open expects the file to exist and contain a
+// previously-recorded log of (request, response) pairs,
+// which [RecordReplay.RoundTrip] consults to prepare its responses.
+//
+// If the command-line flag -httprecord is set to a non-empty
+// regular expression that matches file, then Open creates
+// the file as a new log. In that mode, [RecordReplay.RoundTrip]
+// makes actual HTTP requests using rt but then logs the requests and
+// responses to the file for replaying in a future run.
+func Open(file string, rt http.RoundTripper) (*RecordReplay, error) {
+ record, err := Recording(file)
+ if err != nil {
+ return nil, err
+ }
+ if record {
+ return create(file, rt)
+ }
+ return open(file, rt)
+}
+
+// OpenForRecording opens the file for recording.
+func OpenForRecording(file string, rt http.RoundTripper) (*RecordReplay, error) {
+ return create(file, rt)
+}
+
+// Recording reports whether the "-httprecord" flag is set
+// for the given file.
+// It return an error if the flag is set to an invalid value.
+func Recording(file string) (bool, error) {
+ if *record != "" {
+ re, err := regexp.Compile(*record)
+ if err != nil {
+ return false, fmt.Errorf("invalid -httprecord flag: %v", err)
+ }
+ if re.MatchString(file) {
+ return true, nil
+ }
+ }
+ return false, nil
+}
+
+// creates creates a new record-mode RecordReplay in the file.
+func create(file string, rt http.RoundTripper) (*RecordReplay, error) {
+ f, err := os.Create(file)
+ if err != nil {
+ return nil, err
+ }
+
+ // Write header line.
+ // Each round-trip will write a new request-response record.
+ if _, err := fmt.Fprintf(f, "httprr trace v1\n"); err != nil {
+ // unreachable unless write error immediately after os.Create
+ f.Close()
+ return nil, err
+ }
+ rr := &RecordReplay{
+ file: file,
+ real: rt,
+ record: f,
+ }
+ return rr, nil
+}
+
// open opens a replay-mode RecordReplay using the data in the file.
func open(file string, rt http.RoundTripper) (*RecordReplay, error) {
	// Note: To handle larger traces without storing entirely in memory,
	// could instead read the file incrementally, storing a map[hash]offsets
	// and then reread the relevant part of the file during RoundTrip.
	bdata, err := os.ReadFile(file)
	if err != nil {
		return nil, err
	}

	// Trace begins with header line.
	data := string(bdata)
	line, data, ok := strings.Cut(data, "\n")
	if !ok || line != "httprr trace v1" {
		return nil, fmt.Errorf("read %s: not an httprr trace", file)
	}

	// replay maps a wire-format request to its wire-format response.
	replay := make(map[string]string)
	for data != "" {
		// Each record starts with a line of the form "n1 n2\n"
		// followed by n1 bytes of request encoding and
		// n2 bytes of response encoding.
		line, data, ok = strings.Cut(data, "\n")
		f1, f2, _ := strings.Cut(line, " ")
		n1, err1 := strconv.Atoi(f1)
		n2, err2 := strconv.Atoi(f2)
		// Reject a malformed size line or sizes that overrun the
		// remaining data; either means the trace is corrupt.
		if !ok || err1 != nil || err2 != nil || n1 > len(data) || n2 > len(data[n1:]) {
			return nil, fmt.Errorf("read %s: corrupt httprr trace", file)
		}
		var req, resp string
		req, resp, data = data[:n1], data[n1:n1+n2], data[n1+n2:]
		replay[req] = resp
	}

	rr := &RecordReplay{
		file:   file,
		real:   rt,
		replay: replay,
	}
	return rr, nil
}
+
+// Client returns an http.Client using rr as its transport.
+// It is a shorthand for:
+//
+// return &http.Client{Transport: rr}
+//
+// For more complicated uses, use rr or the [RecordReplay.RoundTrip] method directly.
+func (rr *RecordReplay) Client() *http.Client {
+ return &http.Client{Transport: rr}
+}
+
+// A Body is an io.ReadCloser used as an HTTP request body.
+// In a Scrubber, if req.Body != nil, then req.Body is guaranteed
+// to have type *Body, making it easy to access the body to change it.
+type Body struct {
+ Data []byte
+ ReadOffset int
+}
+
+// Read reads from the body, implementing io.Reader.
+func (b *Body) Read(p []byte) (int, error) {
+ n := copy(p, b.Data[b.ReadOffset:])
+ if n == 0 {
+ return 0, io.EOF
+ }
+ b.ReadOffset += n
+ return n, nil
+}
+
+// Close is a no-op, implementing io.Closer.
+func (b *Body) Close() error {
+ return nil
+}
+
// RoundTrip implements [http.RoundTripper].
//
// If rr has been opened in record mode, RoundTrip passes the requests on to
// the RoundTripper specified in the call to [Open] and then logs the
// (request, response) pair to the underlying file.
//
// If rr has been opened in replay mode, RoundTrip looks up the request in the log
// and then responds with the previously logged response.
// If the log does not contain req, RoundTrip returns an error.
func (rr *RecordReplay) RoundTrip(req *http.Request) (*http.Response, error) {
	// Compute the scrubbed wire-format key first: it is needed in both
	// modes, and reqWire also leaves req.Body as a replayable *Body.
	reqWire, err := rr.reqWire(req)
	if err != nil {
		return nil, err
	}

	// If we're in replay mode, replay a response.
	if rr.replay != nil {
		return rr.replayRoundTrip(req, reqWire)
	}

	// Otherwise run a real round trip and save the request-response pair.
	// But if we've had a log write error already, don't bother.
	if err := rr.writeError(); err != nil {
		return nil, err
	}
	resp, err := rr.real.RoundTrip(req)
	if err != nil {
		return nil, err
	}

	// Encode resp and decode to get a copy for our caller.
	// (Serializing consumes resp.Body, so respWire replaces *resp
	// with an equivalent re-read copy before we return it.)
	respWire, err := rr.respWire(resp)
	if err != nil {
		return nil, err
	}
	if err := rr.writeLog(reqWire, respWire); err != nil {
		return nil, err
	}
	return resp, nil
}
+
// reqWire returns the wire-format HTTP request key to be
// used for request when saving to the log or looking up in a
// previously written log. It consumes the original req.Body
// but modifies req.Body to be an equivalent [*Body].
func (rr *RecordReplay) reqWire(req *http.Request) (string, error) {
	// rkey is the scrubbed request used as a lookup key.
	// Clone req including req.Body.
	rkey := req.Clone(context.Background())
	if req.Body != nil {
		// Read the body exactly once, then give req and rkey their own
		// independent *Body copies so each can be consumed separately.
		body, err := io.ReadAll(req.Body)
		req.Body.Close()
		if err != nil {
			return "", err
		}
		req.Body = &Body{Data: body}
		rkey.Body = &Body{Data: bytes.Clone(body)}
	}

	// Canonicalize and scrub request key.
	// Scrubbers see only rkey, never the request actually sent.
	for _, scrub := range rr.reqScrub {
		if err := scrub(rkey); err != nil {
			return "", err
		}
	}

	// Now that scrubbers are done potentially modifying body, set length.
	if rkey.Body != nil {
		rkey.ContentLength = int64(len(rkey.Body.(*Body).Data))
	}

	// Serialize rkey to produce the log entry.
	// Use WriteProxy instead of Write to preserve the URL's scheme.
	var key strings.Builder
	if err := rkey.WriteProxy(&key); err != nil {
		return "", err
	}
	return key.String(), nil
}
+
// respWire returns the wire-format HTTP response log entry.
// It modifies resp but leaves an equivalent response in its place.
func (rr *RecordReplay) respWire(resp *http.Response) (string, error) {
	// Serializing consumes resp.Body, so re-read the serialized bytes
	// to rebuild an equivalent response for the caller.
	var key bytes.Buffer
	if err := resp.Write(&key); err != nil {
		return "", err
	}
	resp2, err := http.ReadResponse(bufio.NewReader(bytes.NewReader(key.Bytes())), resp.Request)
	if err != nil {
		// unreachable unless resp.Write does not round-trip with http.ReadResponse
		return "", err
	}
	*resp = *resp2

	// Scrubbers run after *resp has been replaced, so they affect only
	// the logged bytes, never the response handed back to the caller.
	for _, scrub := range rr.respScrub {
		if err := scrub(&key); err != nil {
			return "", err
		}
	}
	return key.String(), nil
}
+
+// replayRoundTrip implements RoundTrip using the replay log.
+func (rr *RecordReplay) replayRoundTrip(req *http.Request, reqLog string) (*http.Response, error) {
+ respLog, ok := rr.replay[reqLog]
+ if !ok {
+ return nil, fmt.Errorf("cached HTTP response not found for:\n%s", reqLog)
+ }
+ resp, err := http.ReadResponse(bufio.NewReader(strings.NewReader(respLog)), req)
+ if err != nil {
+ return nil, fmt.Errorf("read %s: corrupt httprr trace: %v", rr.file, err)
+ }
+ return resp, nil
+}
+
+// writeError reports any previous log write error.
+func (rr *RecordReplay) writeError() error {
+ rr.mu.Lock()
+ defer rr.mu.Unlock()
+ return rr.writeErr
+}
+
// writeLog writes the request-response pair to the log.
// If a write fails, writeLog records the error so that future calls
// to writeError report it, and deletes the underlying log file,
// since it is now incomplete.
func (rr *RecordReplay) writeLog(reqWire, respWire string) error {
	rr.mu.Lock()
	defer rr.mu.Unlock()

	if rr.writeErr != nil {
		// Unreachable unless concurrent I/O error.
		// Caller should have checked already.
		return rr.writeErr
	}

	// Framing: a "n1 n2\n" size line followed by the raw request and
	// response bytes; this matches the parser in open.
	_, err1 := fmt.Fprintf(rr.record, "%d %d\n", len(reqWire), len(respWire))
	_, err2 := rr.record.WriteString(reqWire)
	_, err3 := rr.record.WriteString(respWire)
	if err := cmp.Or(err1, err2, err3); err != nil {
		rr.writeErr = err
		rr.record.Close()
		os.Remove(rr.file)
		return err
	}

	return nil
}
+
+// Close closes the RecordReplay.
+// It is a no-op in replay mode.
+func (rr *RecordReplay) Close() error {
+ if rr.writeErr != nil {
+ return rr.writeErr
+ }
+ if rr.record != nil {
+ return rr.record.Close()
+ }
+ return nil
+}
diff --git a/httprr/rr_test.go b/httprr/rr_test.go
new file mode 100644
index 0000000..b20bc7d
--- /dev/null
+++ b/httprr/rr_test.go
@@ -0,0 +1,336 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package httprr
+
+import (
+ "bytes"
+ "errors"
+ "io"
+ "net/http"
+ "net/http/httptest"
+ "os"
+ "strings"
+ "testing"
+ "testing/iotest"
+)
+
+func handler(w http.ResponseWriter, r *http.Request) {
+ if strings.HasSuffix(r.URL.Path, "/redirect") {
+ http.Error(w, "redirect me!", 304)
+ return
+ }
+ if r.Method == "GET" {
+ if r.Header.Get("Secret") != "key" {
+ http.Error(w, "missing secret", 666)
+ return
+ }
+ }
+ if r.Method == "POST" {
+ data, err := io.ReadAll(r.Body)
+ if err != nil {
+ panic(err)
+ }
+ if !strings.Contains(string(data), "my Secret") {
+ http.Error(w, "missing body secret", 667)
+ return
+ }
+ }
+}
+
+func always555(w http.ResponseWriter, r *http.Request) {
+ http.Error(w, "should not be making HTTP requests", 555)
+}
+
+func dropPort(r *http.Request) error {
+ if r.URL.Port() != "" {
+ r.URL.Host = r.URL.Host[:strings.LastIndex(r.URL.Host, ":")]
+ r.Host = r.Host[:strings.LastIndex(r.Host, ":")]
+ }
+ return nil
+}
+
+func dropSecretHeader(r *http.Request) error {
+ r.Header.Del("Secret")
+ return nil
+}
+
+func hideSecretBody(r *http.Request) error {
+ if r.Body != nil {
+ body := r.Body.(*Body)
+ body.Data = []byte("redacted")
+ }
+ return nil
+}
+
+func doNothing(b *bytes.Buffer) error {
+ return nil
+}
+
+func doRefresh(b *bytes.Buffer) error {
+ s := b.String()
+ b.Reset()
+ _, _ = b.WriteString(s)
+ return nil
+}
+
// TestRecordReplay exercises the full lifecycle against one trace file:
// record it (via create, and via Open with a matching -httprecord),
// replay it (via open, and via Open with the flag unset), and finally
// verify that the scrubbers kept every secret out of the file.
func TestRecordReplay(t *testing.T) {
	dir := t.TempDir()
	file := dir + "/rr"

	// 4 passes:
	// 0: create
	// 1: open
	// 2: Open with -httprecord="r+"
	// 3: Open with -httprecord=""
	for pass := range 4 {
		// Default to replay mode against a server that always fails,
		// proving that replay never makes real HTTP requests.
		start := open
		h := always555
		*record = ""
		switch pass {
		case 0:
			start = create
			h = handler
		case 2:
			start = Open
			*record = "r+" // matches "rr", so Open records
			h = handler
		case 3:
			start = Open
		}
		rr, err := start(file, http.DefaultTransport)
		if err != nil {
			t.Fatal(err)
		}
		if rr.Recording() {
			t.Log("RECORDING")
		} else {
			t.Log("REPLAYING")
		}
		rr.ScrubReq(dropPort, dropSecretHeader)
		rr.ScrubReq(hideSecretBody)
		rr.ScrubResp(doNothing, doRefresh)

		mustNewRequest := func(method, url string, body io.Reader) *http.Request {
			req, err := http.NewRequest(method, url, body)
			if err != nil {
				t.Helper()
				t.Fatal(err)
			}
			return req
		}

		mustDo := func(req *http.Request, status int) {
			resp, err := rr.Client().Do(req)
			if err != nil {
				t.Helper()
				t.Fatal(err)
			}
			body, _ := io.ReadAll(resp.Body)
			resp.Body.Close()
			if resp.StatusCode != status {
				t.Helper()
				t.Fatalf("%v: %s\n%s", req.URL, resp.Status, body)
			}
		}

		// NOTE: defer inside the loop delays Close until the whole test
		// returns, so all four servers stay up for the test's duration.
		srv := httptest.NewServer(http.HandlerFunc(h))
		defer srv.Close()

		req := mustNewRequest("GET", srv.URL+"/myrequest", nil)
		req.Header.Set("Secret", "key")
		mustDo(req, 200)

		req = mustNewRequest("POST", srv.URL+"/myrequest", strings.NewReader("my Secret"))
		mustDo(req, 200)

		req = mustNewRequest("GET", srv.URL+"/redirect", nil)
		mustDo(req, 304)

		// In replay mode, a request absent from the trace must fail.
		if !rr.Recording() {
			req = mustNewRequest("GET", srv.URL+"/uncached", nil)
			resp, err := rr.Client().Do(req)
			if err == nil {
				body, _ := io.ReadAll(resp.Body)
				t.Fatalf("%v: %s\n%s", req.URL, resp.Status, body)
			}
		}

		if err := rr.Close(); err != nil {
			t.Fatal(err)
		}
	}

	// The scrubbers must have kept both header and body secrets out of
	// the recorded trace.
	data, err := os.ReadFile(file)
	if err != nil {
		t.Fatal(err)
	}
	if strings.Contains(string(data), "Secret") {
		t.Fatalf("rr file contains Secret:\n%s", data)
	}
}
+
// badResponseTrace is a structurally valid trace — the "92 75" line
// gives the request/response byte lengths, matching the framing written
// by writeLog — whose stored response begins with the bogus protocol
// "HZZP/1.1", so replaying it must fail inside http.ReadResponse.
var badResponseTrace = []byte("httprr trace v1\n" +
	"92 75\n" +
	"GET http://127.0.0.1/myrequest HTTP/1.1\r\n" +
	"Host: 127.0.0.1\r\n" +
	"User-Agent: Go-http-client/1.1\r\n" +
	"\r\n" +
	"HZZP/1.1 200 OK\r\n" +
	"Date: Wed, 12 Jun 2024 13:55:02 GMT\r\n" +
	"Content-Length: 0\r\n" +
	"\r\n")
+
// TestErrors walks the package's error paths one by one: flag parsing,
// trace validation, file creation/reading, body and scrubber failures,
// serialization errors, log write failures, and transport errors.
func TestErrors(t *testing.T) {
	dir := t.TempDir()

	makeTmpFile := func() string {
		f, err := os.CreateTemp(dir, "TestErrors")
		if err != nil {
			t.Fatalf("failed to create tmp file for test: %v", err)
		}
		name := f.Name()
		f.Close()
		return name
	}

	// -httprecord regexp parsing ("+" is an invalid pattern)
	*record = "+"
	if _, err := Open(makeTmpFile(), nil); err == nil || !strings.Contains(err.Error(), "invalid -httprecord flag") {
		t.Errorf("did not diagnose bad -httprecord: err = %v", err)
	}
	*record = ""

	// invalid httprr trace (empty file has no header line)
	if _, err := Open(makeTmpFile(), nil); err == nil || !strings.Contains(err.Error(), "not an httprr trace") {
		t.Errorf("did not diagnose invalid httprr trace: err = %v", err)
	}

	// corrupt httprr trace (good header, unparseable record framing)
	corruptTraceFile := makeTmpFile()
	os.WriteFile(corruptTraceFile, []byte("httprr trace v1\ngarbage\n"), 0o666)
	if _, err := Open(corruptTraceFile, nil); err == nil || !strings.Contains(err.Error(), "corrupt httprr trace") {
		t.Errorf("did not diagnose invalid httprr trace: err = %v", err)
	}

	// os.Create error creating trace (NUL byte is invalid in a file name)
	if _, err := create("invalid\x00file", nil); err == nil {
		t.Errorf("did not report failure from os.Create: err = %v", err)
	}

	// os.ReadAll error reading trace
	if _, err := open("nonexistent", nil); err == nil {
		t.Errorf("did not report failure from os.ReadFile: err = %v", err)
	}

	// error reading body
	rr, err := create(makeTmpFile(), nil)
	if err != nil {
		t.Fatal(err)
	}
	if _, err := rr.Client().Post("http://127.0.0.1/nonexist", "x/error", iotest.ErrReader(errors.New("MY ERROR"))); err == nil || !strings.Contains(err.Error(), "MY ERROR") {
		t.Errorf("did not report failure from io.ReadAll(body): err = %v", err)
	}

	// error during request scrub
	rr.ScrubReq(func(*http.Request) error { return errors.New("SCRUB ERROR") })
	if _, err := rr.Client().Get("http://127.0.0.1/nonexist"); err == nil || !strings.Contains(err.Error(), "SCRUB ERROR") {
		t.Errorf("did not report failure from scrub: err = %v", err)
	}
	rr.Close()

	// error during response scrub
	// NOTE(review): the failing request scrubber registered above still
	// fires first in reqWire, so this case may never actually reach the
	// response-scrub path — confirm and consider a fresh RecordReplay.
	rr.ScrubResp(func(*bytes.Buffer) error { return errors.New("SCRUB ERROR") })
	if _, err := rr.Client().Get("http://127.0.0.1/nonexist"); err == nil || !strings.Contains(err.Error(), "SCRUB ERROR") {
		t.Errorf("did not report failure from scrub: err = %v", err)
	}
	rr.Close()

	// error during rkey.WriteProxy (no Host and no URL to write)
	rr, err = create(makeTmpFile(), nil)
	if err != nil {
		t.Fatal(err)
	}
	rr.ScrubReq(func(req *http.Request) error {
		req.URL = nil
		req.Host = ""
		return nil
	})
	rr.ScrubResp(func(b *bytes.Buffer) error {
		b.Reset()
		return nil
	})
	if _, err := rr.Client().Get("http://127.0.0.1/nonexist"); err == nil || !strings.Contains(err.Error(), "no Host or URL set") {
		t.Errorf("did not report failure from rkey.WriteProxy: err = %v", err)
	}
	rr.Close()

	// error during resp.Write (response body fails to read)
	rr, err = create(makeTmpFile(), badRespTransport{})
	if err != nil {
		t.Fatal(err)
	}
	if _, err := rr.Client().Get("http://127.0.0.1/nonexist"); err == nil || !strings.Contains(err.Error(), "TRANSPORT ERROR") {
		t.Errorf("did not report failure from resp.Write: err = %v", err)
	}
	rr.Close()

	// error during Write logging request
	srv := httptest.NewServer(http.HandlerFunc(always555))
	defer srv.Close()
	rr, err = create(makeTmpFile(), http.DefaultTransport)
	if err != nil {
		t.Fatal(err)
	}
	rr.ScrubReq(dropPort)
	rr.record.Close() // cause write error
	if _, err := rr.Client().Get(srv.URL + "/redirect"); err == nil || !strings.Contains(err.Error(), "file already closed") {
		t.Errorf("did not report failure from record write: err = %v", err)
	}
	rr.writeErr = errors.New("BROKEN ERROR")
	if _, err := rr.Client().Get(srv.URL + "/redirect"); err == nil || !strings.Contains(err.Error(), "BROKEN ERROR") {
		t.Errorf("did not report previous write failure: err = %v", err)
	}
	if err := rr.Close(); err == nil || !strings.Contains(err.Error(), "BROKEN ERROR") {
		t.Errorf("did not report write failure during close: err = %v", err)
	}

	// error during RoundTrip
	rr, err = create(makeTmpFile(), errTransport{errors.New("TRANSPORT ERROR")})
	if err != nil {
		t.Fatal(err)
	}
	if _, err := rr.Client().Get(srv.URL); err == nil || !strings.Contains(err.Error(), "TRANSPORT ERROR") {
		t.Errorf("did not report failure from transport: err = %v", err)
	}
	rr.Close()

	// error during http.ReadResponse: trace is structurally okay but has malformed response inside
	tmpFile := makeTmpFile()
	if err := os.WriteFile(tmpFile, badResponseTrace, 0o666); err != nil {
		t.Fatal(err)
	}
	rr, err = Open(tmpFile, nil)
	if err != nil {
		t.Fatal(err)
	}
	if _, err := rr.Client().Get("http://127.0.0.1/myrequest"); err == nil || !strings.Contains(err.Error(), "corrupt httprr trace:") {
		t.Errorf("did not diagnose invalid httprr trace: err = %v", err)
	}
	rr.Close()
}
+
+type errTransport struct{ err error }
+
+func (e errTransport) RoundTrip(req *http.Request) (*http.Response, error) {
+ return nil, e.err
+}
+
+type badRespTransport struct{}
+
+func (badRespTransport) RoundTrip(req *http.Request) (*http.Response, error) {
+ resp := new(http.Response)
+ resp.Body = io.NopCloser(iotest.ErrReader(errors.New("TRANSPORT ERROR")))
+ return resp, nil
+}
diff --git a/loop/agent.go b/loop/agent.go
new file mode 100644
index 0000000..ce362e6
--- /dev/null
+++ b/loop/agent.go
@@ -0,0 +1,1124 @@
+package loop
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "log/slog"
+ "net/http"
+ "os"
+ "os/exec"
+ "runtime/debug"
+ "slices"
+ "strings"
+ "sync"
+ "time"
+
+ "sketch.dev/ant"
+ "sketch.dev/claudetool"
+)
+
const (
	// userCancelMessage is surfaced to the user when they cancel the
	// agent's inner loop mid-turn.
	userCancelMessage = "user requested agent to stop handling responses"
)
+
// CodingAgent is the interface between a frontend (HTTP API, terminal UI,
// ...) and the underlying LLM-driven coding agent.
type CodingAgent interface {
	// Init initializes an agent inside a docker container.
	Init(AgentInit) error

	// Ready returns a channel that is closed after Init is successfully called.
	Ready() <-chan struct{}

	// URL reports the HTTP URL of this agent.
	URL() string

	// UserMessage enqueues a message to the agent and returns immediately.
	UserMessage(ctx context.Context, msg string)

	// WaitForMessage blocks until the agent has a response to give.
	// Use AgentMessage.EndOfTurn to help determine if you want to
	// drain the agent.
	WaitForMessage(ctx context.Context) AgentMessage

	// Loop begins the agent loop and returns only when ctx is cancelled.
	Loop(ctx context.Context)

	// CancelInnerLoop cancels the current inner loop (in-flight tool calls
	// and LLM round-trips) without stopping the outer Loop.
	CancelInnerLoop(cause error)

	// CancelToolUse cancels a single in-flight tool call by its ID.
	CancelToolUse(toolUseID string, cause error) error

	// Messages returns a subset of the agent's message history.
	Messages(start int, end int) []AgentMessage

	// MessageCount returns the current number of messages in the history.
	MessageCount() int

	// TotalUsage reports cumulative usage for the whole conversation.
	TotalUsage() ant.CumulativeUsage
	// OriginalBudget reports the budget the agent was configured with.
	OriginalBudget() ant.Budget

	// WaitForMessageCount returns when the agent has more than greaterThan
	// messages or the context is done.
	WaitForMessageCount(ctx context.Context, greaterThan int)

	// WorkingDir reports the directory in which the agent edits code.
	WorkingDir() string

	// Diff returns a unified diff of changes made since the agent was instantiated.
	// If commit is non-nil, it shows the diff for just that specific commit.
	Diff(commit *string) (string, error)

	// InitialCommit returns the Git commit hash that was saved when the agent was instantiated.
	InitialCommit() string

	// Title returns the current title of the conversation.
	Title() string

	// OS returns the operating system of the client.
	OS() string
}
+
// CodingAgentMessageType classifies who/what produced an AgentMessage.
type CodingAgentMessageType string

const (
	UserMessageType  CodingAgentMessageType = "user"
	AgentMessageType CodingAgentMessageType = "agent"
	ErrorMessageType CodingAgentMessageType = "error"
	BudgetMessageType CodingAgentMessageType = "budget" // dedicated for "out of budget" errors
	ToolUseMessageType CodingAgentMessageType = "tool"
	CommitMessageType CodingAgentMessageType = "commit" // for displaying git commits
	AutoMessageType CodingAgentMessageType = "auto" // for automated notifications like autoformatting

	// cancelToolUseMessage is sent to the LLM when the user cancels
	// in-flight tool use, telling it to stand down until the next prompt.
	cancelToolUseMessage = "Stop responding to my previous message. Wait for me to ask you something else before attempting to use any more tools."
)
+
// AgentMessage is a single entry in the agent's message history, shared
// between the agent and its frontends (serialized as JSON).
type AgentMessage struct {
	Type CodingAgentMessageType `json:"type"`
	// EndOfTurn indicates that the AI is done working and is ready for the next user input.
	EndOfTurn bool `json:"end_of_turn"`

	Content    string `json:"content"`
	ToolName   string `json:"tool_name,omitempty"`
	ToolInput  string `json:"input,omitempty"`
	ToolResult string `json:"tool_result,omitempty"`
	ToolError  bool   `json:"tool_error,omitempty"`
	ToolCallId string `json:"tool_call_id,omitempty"`

	// ToolCalls is a list of all tool calls requested in this message (name and input pairs)
	ToolCalls []ToolCall `json:"tool_calls,omitempty"`

	// Commits is a list of git commits for a commit message
	Commits []*GitCommit `json:"commits,omitempty"`

	Timestamp            time.Time  `json:"timestamp"`
	ConversationID       string     `json:"conversation_id"`
	ParentConversationID *string    `json:"parent_conversation_id,omitempty"`
	Usage                *ant.Usage `json:"usage,omitempty"`

	// Message timing information
	StartTime *time.Time     `json:"start_time,omitempty"`
	EndTime   *time.Time     `json:"end_time,omitempty"`
	Elapsed   *time.Duration `json:"elapsed,omitempty"`

	// Turn duration - the time taken for a complete agent turn
	TurnDuration *time.Duration `json:"turnDuration,omitempty"`

	// Idx is this message's position in the agent's history slice.
	Idx int `json:"idx"`
}
+
// GitCommit represents a single git commit for a commit message
type GitCommit struct {
	Hash         string `json:"hash"`                    // Full commit hash
	Subject      string `json:"subject"`                 // Commit subject line
	Body         string `json:"body"`                    // Full commit message body
	PushedBranch string `json:"pushed_branch,omitempty"` // If set, this commit was pushed to this branch
}

// ToolCall represents a single tool call within an agent message
type ToolCall struct {
	Name       string `json:"name"`
	Input      string `json:"input"`
	ToolCallId string `json:"tool_call_id"`
}
+
+func (a *AgentMessage) Attr() slog.Attr {
+ var attrs []any = []any{
+ slog.String("type", string(a.Type)),
+ }
+ if a.EndOfTurn {
+ attrs = append(attrs, slog.Bool("end_of_turn", a.EndOfTurn))
+ }
+ if a.Content != "" {
+ attrs = append(attrs, slog.String("content", a.Content))
+ }
+ if a.ToolName != "" {
+ attrs = append(attrs, slog.String("tool_name", a.ToolName))
+ }
+ if a.ToolInput != "" {
+ attrs = append(attrs, slog.String("tool_input", a.ToolInput))
+ }
+ if a.Elapsed != nil {
+ attrs = append(attrs, slog.Int64("elapsed", a.Elapsed.Nanoseconds()))
+ }
+ if a.TurnDuration != nil {
+ attrs = append(attrs, slog.Int64("turnDuration", a.TurnDuration.Nanoseconds()))
+ }
+ if a.ToolResult != "" {
+ attrs = append(attrs, slog.String("tool_result", a.ToolResult))
+ }
+ if a.ToolError {
+ attrs = append(attrs, slog.Bool("tool_error", a.ToolError))
+ }
+ if len(a.ToolCalls) > 0 {
+ toolCallAttrs := make([]any, 0, len(a.ToolCalls))
+ for i, tc := range a.ToolCalls {
+ toolCallAttrs = append(toolCallAttrs, slog.Group(
+ fmt.Sprintf("tool_call_%d", i),
+ slog.String("name", tc.Name),
+ slog.String("input", tc.Input),
+ ))
+ }
+ attrs = append(attrs, slog.Group("tool_calls", toolCallAttrs...))
+ }
+ if a.ConversationID != "" {
+ attrs = append(attrs, slog.String("convo_id", a.ConversationID))
+ }
+ if a.ParentConversationID != nil {
+ attrs = append(attrs, slog.String("parent_convo_id", *a.ParentConversationID))
+ }
+ if a.Usage != nil && !a.Usage.IsZero() {
+ attrs = append(attrs, a.Usage.Attr())
+ }
+ // TODO: timestamp, convo ids, idx?
+ return slog.Group("agent_message", attrs...)
+}
+
// errorMessage wraps err in an error-typed AgentMessage. When the DEBUG
// environment variable is "1", the current stack trace is appended to the
// content to aid debugging.
func errorMessage(err error) AgentMessage {
	// It's somewhat unknowable whether error messages are "end of turn" or not, but it seems like the best approach.
	if os.Getenv(("DEBUG")) == "1" {
		return AgentMessage{Type: ErrorMessageType, Content: err.Error() + " Stacktrace: " + string(debug.Stack()), EndOfTurn: true}
	}

	return AgentMessage{Type: ErrorMessageType, Content: err.Error(), EndOfTurn: true}
}

// budgetMessage wraps err in a budget-typed AgentMessage, used for
// "out of budget" notifications.
func budgetMessage(err error) AgentMessage {
	return AgentMessage{Type: BudgetMessageType, Content: err.Error(), EndOfTurn: true}
}
+
// ConvoInterface defines the interface for conversation interactions.
// It abstracts *ant.Convo so the agent can be tested with a fake.
type ConvoInterface interface {
	CumulativeUsage() ant.CumulativeUsage
	ResetBudget(ant.Budget)
	OverBudget() error
	SendMessage(message ant.Message) (*ant.MessageResponse, error)
	SendUserTextMessage(s string, otherContents ...ant.Content) (*ant.MessageResponse, error)
	ToolResultContents(ctx context.Context, resp *ant.MessageResponse) ([]ant.Content, error)
	ToolResultCancelContents(resp *ant.MessageResponse) ([]ant.Content, error)
	CancelToolUse(toolUseID string, cause error) error
}
+
// Agent is the concrete CodingAgent implementation. It owns the LLM
// conversation, the message history, and the inbox/outbox channels that
// connect it to frontends. It must not be copied (contains mutexes).
type Agent struct {
	convo          ConvoInterface
	config         AgentConfig // config for this agent
	workingDir     string
	repoRoot       string // workingDir may be a subdir of repoRoot
	url            string
	lastHEAD       string // hash of the last HEAD that was pushed to the host (only when under docker)
	initialCommit  string // hash of the Git HEAD when the agent was instantiated or Init()
	gitRemoteAddr  string // HTTP URL of the host git repo (only when under docker)
	ready          chan struct{} // closed when the agent is initialized (only when under docker)
	startedAt      time.Time
	originalBudget ant.Budget
	title          string
	codereview     *claudetool.CodeReviewer

	// Time when the current turn started (reset at the beginning of InnerLoop)
	startOfTurn time.Time

	// Inbox - for messages from the user to the agent.
	// sent on by UserMessage
	// . e.g. when user types into the chat textarea
	// read from by GatherMessages
	inbox chan string

	// Outbox
	// sent on by pushToOutbox
	// via OnToolResult and OnResponse callbacks
	// read from by WaitForMessage
	// called by termui inside its repl loop.
	outbox chan AgentMessage

	// protects cancelInnerLoop
	cancelInnerLoopMu sync.Mutex
	// cancels potentially long-running tool_use calls or chains of them
	cancelInnerLoop context.CancelCauseFunc

	// protects following
	mu sync.Mutex

	// Stores all messages for this agent
	history []AgentMessage

	// listeners are one-shot channels closed (and discarded) whenever the
	// agent state changes (new message, title change).
	listeners []chan struct{}

	// Track git commits we've already seen (by hash)
	seenCommits map[string]bool
}
+
// URL reports the HTTP URL of this agent (set during Init when under docker).
func (a *Agent) URL() string { return a.url }

// Title returns the current title of the conversation.
// If no title has been set, returns an empty string.
func (a *Agent) Title() string {
	a.mu.Lock()
	defer a.mu.Unlock()
	return a.title
}

// OS returns the operating system of the client.
func (a *Agent) OS() string {
	return a.config.ClientGOOS
}
+
// SetTitle sets the title of the conversation and wakes all registered
// state-change listeners (e.g. WaitForMessageCount waiters).
func (a *Agent) SetTitle(title string) {
	a.mu.Lock()
	defer a.mu.Unlock()
	a.title = title
	// Notify all listeners that the state has changed; listeners are
	// one-shot, so clear the slice after closing them.
	for _, ch := range a.listeners {
		close(ch)
	}
	a.listeners = a.listeners[:0]
}
+
// OnToolResult implements ant.Listener. It converts a completed tool call
// into a tool-typed AgentMessage and pushes it to the outbox/history.
func (a *Agent) OnToolResult(ctx context.Context, convo *ant.Convo, toolName string, toolInput json.RawMessage, content ant.Content, result *string, err error) {
	m := AgentMessage{
		Type:       ToolUseMessageType,
		Content:    content.Text,
		ToolResult: content.ToolResult,
		ToolError:  content.ToolError,
		ToolName:   toolName,
		ToolInput:  string(toolInput),
		ToolCallId: content.ToolUseID,
		StartTime:  content.StartTime,
		EndTime:    content.EndTime,
	}

	// Calculate the elapsed time if both start and end times are set
	if content.StartTime != nil && content.EndTime != nil {
		elapsed := content.EndTime.Sub(*content.StartTime)
		m.Elapsed = &elapsed
	}

	m.ConversationID = convo.ID
	if convo.Parent != nil {
		m.ParentConversationID = &convo.Parent.ID
	}
	a.pushToOutbox(ctx, m)
}
+
// OnRequest implements ant.Listener.
// No-op.
// We already get tool results from the above. We send user messages to the outbox in the agent loop.
func (a *Agent) OnRequest(ctx context.Context, convo *ant.Convo, msg *ant.Message) {
}
+
// OnResponse implements ant.Listener. Responses contain messages from the LLM
// that need to be displayed (as well as tool calls that we send along when
// they're done). (It would be reasonable to also mention tool calls when they're
// started, but we don't do that yet.)
func (a *Agent) OnResponse(ctx context.Context, convo *ant.Convo, resp *ant.MessageResponse) {
	// Any stop reason other than tool_use means the model is done with
	// its turn and is waiting for user input.
	endOfTurn := false
	if resp.StopReason != ant.StopReasonToolUse {
		endOfTurn = true
	}
	m := AgentMessage{
		Type:      AgentMessageType,
		Content:   collectTextContent(resp),
		EndOfTurn: endOfTurn,
		Usage:     &resp.Usage,
		StartTime: resp.StartTime,
		EndTime:   resp.EndTime,
	}

	// Extract any tool calls from the response
	if resp.StopReason == ant.StopReasonToolUse {
		var toolCalls []ToolCall
		for _, part := range resp.Content {
			if part.Type == "tool_use" {
				toolCalls = append(toolCalls, ToolCall{
					Name:       part.ToolName,
					Input:      string(part.ToolInput),
					ToolCallId: part.ID,
				})
			}
		}
		m.ToolCalls = toolCalls
	}

	// Calculate the elapsed time if both start and end times are set
	if resp.StartTime != nil && resp.EndTime != nil {
		elapsed := resp.EndTime.Sub(*resp.StartTime)
		m.Elapsed = &elapsed
	}

	m.ConversationID = convo.ID
	if convo.Parent != nil {
		m.ParentConversationID = &convo.Parent.ID
	}
	a.pushToOutbox(ctx, m)
}
+
// WorkingDir implements CodingAgent.
func (a *Agent) WorkingDir() string {
	return a.workingDir
}

// MessageCount implements CodingAgent. It reports the current length of
// the message history.
func (a *Agent) MessageCount() int {
	a.mu.Lock()
	defer a.mu.Unlock()
	return len(a.history)
}
+
+// Messages implements CodingAgent.
+func (a *Agent) Messages(start int, end int) []AgentMessage {
+ a.mu.Lock()
+ defer a.mu.Unlock()
+ return slices.Clone(a.history[start:end])
+}
+
// OriginalBudget implements CodingAgent. It reports the budget the agent
// was configured with at construction time.
func (a *Agent) OriginalBudget() ant.Budget {
	return a.originalBudget
}
+
// AgentConfig contains configuration for creating a new Agent.
type AgentConfig struct {
	Context      context.Context // lifetime context for the agent's conversation
	AntURL       string          // override for the Anthropic API URL (optional)
	APIKey       string
	HTTPC        *http.Client // HTTP client override (optional)
	Budget       ant.Budget
	GitUsername  string
	GitEmail     string
	SessionID    string
	ClientGOOS   string // client's GOOS, reported via OS() and in the system prompt
	ClientGOARCH string
	UseAnthropicEdit bool // use Anthropic's str_replace_editor tool instead of the patch tool
}
+
// NewAgent creates a new Agent.
// It is not usable until Init() is called.
func NewAgent(config AgentConfig) *Agent {
	agent := &Agent{
		config:         config,
		ready:          make(chan struct{}),
		// Buffered so producers don't block on a momentarily slow consumer.
		inbox:          make(chan string, 100),
		outbox:         make(chan AgentMessage, 100),
		startedAt:      time.Now(),
		originalBudget: config.Budget,
		seenCommits:    make(map[string]bool),
	}
	return agent
}
+
// AgentInit carries the parameters for Agent.Init.
type AgentInit struct {
	WorkingDir string
	NoGit      bool // only for testing

	InDocker      bool   // true when the agent runs inside a docker container
	Commit        string // commit to check out when under docker
	GitRemoteAddr string // HTTP URL of the host git repo (docker only)
	HostAddr      string // host address used to build the agent URL (docker only)
}
+
+func (a *Agent) Init(ini AgentInit) error {
+ ctx := a.config.Context
+ if ini.InDocker {
+ cmd := exec.CommandContext(ctx, "git", "stash")
+ cmd.Dir = ini.WorkingDir
+ if out, err := cmd.CombinedOutput(); err != nil {
+ return fmt.Errorf("git stash: %s: %v", out, err)
+ }
+ cmd = exec.CommandContext(ctx, "git", "fetch", ini.GitRemoteAddr)
+ cmd.Dir = ini.WorkingDir
+ if out, err := cmd.CombinedOutput(); err != nil {
+ return fmt.Errorf("git fetch: %s: %w", out, err)
+ }
+ cmd = exec.CommandContext(ctx, "git", "checkout", "-f", ini.Commit)
+ cmd.Dir = ini.WorkingDir
+ if out, err := cmd.CombinedOutput(); err != nil {
+ return fmt.Errorf("git checkout %s: %s: %w", ini.Commit, out, err)
+ }
+ a.lastHEAD = ini.Commit
+ a.gitRemoteAddr = ini.GitRemoteAddr
+ a.initialCommit = ini.Commit
+ if ini.HostAddr != "" {
+ a.url = "http://" + ini.HostAddr
+ }
+ }
+ a.workingDir = ini.WorkingDir
+
+ if !ini.NoGit {
+ repoRoot, err := repoRoot(ctx, a.workingDir)
+ if err != nil {
+ return fmt.Errorf("repoRoot: %w", err)
+ }
+ a.repoRoot = repoRoot
+
+ commitHash, err := resolveRef(ctx, a.repoRoot, "HEAD")
+ if err != nil {
+ return fmt.Errorf("resolveRef: %w", err)
+ }
+ a.initialCommit = commitHash
+
+ codereview, err := claudetool.NewCodeReviewer(ctx, a.repoRoot, a.initialCommit)
+ if err != nil {
+ return fmt.Errorf("Agent.Init: claudetool.NewCodeReviewer: %w", err)
+ }
+ a.codereview = codereview
+ }
+ a.lastHEAD = a.initialCommit
+ a.convo = a.initConvo()
+ close(a.ready)
+ return nil
+}
+
// initConvo initializes the conversation.
// It must not be called until all agent fields are initialized,
// particularly workingDir and git.
func (a *Agent) initConvo() *ant.Convo {
	ctx := a.config.Context
	convo := ant.NewConvo(ctx, a.config.APIKey)
	if a.config.HTTPC != nil {
		convo.HTTPC = a.config.HTTPC
	}
	if a.config.AntURL != "" {
		convo.URL = a.config.AntURL
	}
	convo.PromptCaching = true
	convo.Budget = a.config.Budget

	// Pick the edit instructions that match the configured edit tool.
	var editPrompt string
	if a.config.UseAnthropicEdit {
		editPrompt = "Then use the str_replace_editor tool to make those edits. For short complete file replacements, you may use the bash tool with cat and heredoc stdin."
	} else {
		editPrompt = "Then use the patch tool to make those edits. Combine all edits to any given file into a single patch tool call."
	}

	convo.SystemPrompt = fmt.Sprintf(`
You are an expert coding assistant and architect, with a specialty in Go.
You are assisting the user to achieve their goals.

Start by asking concise clarifying questions as needed.
Once the intent is clear, work autonomously.

Call the title tool early in the conversation to provide a brief summary of
what the chat is about.

Break down the overall goal into a series of smaller steps.
(The first step is often: "Make a plan.")
Then execute each step using tools.
Update the plan if you have encountered problems or learned new information.

When in doubt about a step, follow this broad workflow:

- Think about how the current step fits into the overall plan.
- Do research. Good tool choices: bash, think, keyword_search
- Make edits.
- Repeat.

To make edits reliably and efficiently, first think about the intent of the edit,
and what set of patches will achieve that intent.
%s

For renames or refactors, consider invoking gopls (via bash).

The done tool provides a checklist of items you MUST verify and
review before declaring that you are done. Before executing
the done tool, run all the tools the done tool checklist asks
for, including creating a git commit. Do not forget to run tests.

<platform>
%s/%s
</platform>
<pwd>
%v
</pwd>
<git_root>
%v
</git_root>
`, editPrompt, a.config.ClientGOOS, a.config.ClientGOARCH, a.workingDir, a.repoRoot)

	// Register all tools with the conversation
	// When adding, removing, or modifying tools here, double-check that the termui tool display
	// template in termui/termui.go has pretty-printing support for all tools.
	convo.Tools = []*ant.Tool{
		claudetool.Bash, claudetool.Keyword,
		claudetool.Think, a.titleTool(), makeDoneTool(a.codereview, a.config.GitUsername, a.config.GitEmail),
		a.codereview.Tool(),
	}
	if a.config.UseAnthropicEdit {
		convo.Tools = append(convo.Tools, claudetool.AnthropicEditTool)
	} else {
		convo.Tools = append(convo.Tools, claudetool.Patch)
	}
	// The agent receives OnRequest/OnResponse/OnToolResult callbacks.
	convo.Listener = a
	return convo
}
+
// titleTool creates the title tool that sets the conversation title.
// Its Run callback unmarshals the {"title": ...} input and stores the
// title on the agent via SetTitle.
func (a *Agent) titleTool() *ant.Tool {
	title := &ant.Tool{
		Name:        "title",
		Description: `Use this tool early in the conversation, BEFORE MAKING ANY GIT COMMITS, to summarize what the chat is about briefly.`,
		InputSchema: json.RawMessage(`{
  "type": "object",
  "properties": {
    "title": {
      "type": "string",
      "description": "A brief title summarizing what this chat is about"
    }
  },
  "required": ["title"]
}`),
		Run: func(ctx context.Context, input json.RawMessage) (string, error) {
			var params struct {
				Title string `json:"title"`
			}
			if err := json.Unmarshal(input, &params); err != nil {
				return "", err
			}
			a.SetTitle(params.Title)
			return fmt.Sprintf("Title set to: %s", params.Title), nil
		},
	}
	return title
}
+
// Ready implements CodingAgent; the returned channel is closed by Init.
func (a *Agent) Ready() <-chan struct{} {
	return a.ready
}

// UserMessage echoes the user's message to the outbox (so it appears in
// the history) and enqueues it for the agent loop.
// NOTE(review): this can block if the inbox buffer (100) fills — confirm
// callers tolerate that.
func (a *Agent) UserMessage(ctx context.Context, msg string) {
	a.pushToOutbox(ctx, AgentMessage{Type: UserMessageType, Content: msg})
	a.inbox <- msg
}

// WaitForMessage blocks until the agent emits a message, or until ctx is
// done, in which case an error message wrapping ctx.Err() is returned.
func (a *Agent) WaitForMessage(ctx context.Context) AgentMessage {
	// TODO: Should this drain any outbox messages in case there are multiple?
	select {
	case msg := <-a.outbox:
		return msg
	case <-ctx.Done():
		return errorMessage(ctx.Err())
	}
}
+
// CancelToolUse cancels a single in-flight tool call; it delegates to the
// conversation.
func (a *Agent) CancelToolUse(toolUseID string, cause error) error {
	return a.convo.CancelToolUse(toolUseID, cause)
}

// CancelInnerLoop cancels the context of the currently running InnerLoop
// (if any) with the given cause. Safe to call from any goroutine.
func (a *Agent) CancelInnerLoop(cause error) {
	a.cancelInnerLoopMu.Lock()
	defer a.cancelInnerLoopMu.Unlock()
	if a.cancelInnerLoop != nil {
		a.cancelInnerLoop(cause)
	}
}
+
// Loop runs InnerLoop repeatedly (one InnerLoop call per user turn) until
// ctxOuter is cancelled. Each iteration gets a fresh cancellable context
// so the user can abort a single turn without stopping the agent.
func (a *Agent) Loop(ctxOuter context.Context) {
	for {
		select {
		case <-ctxOuter.Done():
			return
		default:
			ctxInner, cancel := context.WithCancelCause(ctxOuter)
			a.cancelInnerLoopMu.Lock()
			// Set .cancelInnerLoop so the user can cancel whatever is happening
			// inside InnerLoop(ctxInner) without canceling this outer Loop execution.
			// This CancelInnerLoop func is intended be called from other goroutines,
			// hence the mutex.
			a.cancelInnerLoop = cancel
			a.cancelInnerLoopMu.Unlock()
			a.InnerLoop(ctxInner)
			// Release the context's resources once the turn is over.
			cancel(nil)
		}
	}
}
+
// pushToOutbox stamps the message (timestamp, history index, turn
// duration for end-of-turn agent messages), appends it to the history,
// sends it to the outbox, and wakes all state-change listeners.
// NOTE(review): the outbox send happens while holding a.mu; if the outbox
// buffer (100) fills because no one is draining it, this blocks with the
// lock held — confirm consumers always drain.
func (a *Agent) pushToOutbox(ctx context.Context, m AgentMessage) {
	if m.Timestamp.IsZero() {
		m.Timestamp = time.Now()
	}

	// If this is an end-of-turn message, calculate the turn duration and add it to the message
	if m.EndOfTurn && m.Type == AgentMessageType {
		turnDuration := time.Since(a.startOfTurn)
		m.TurnDuration = &turnDuration
		slog.InfoContext(ctx, "Turn completed", "turnDuration", turnDuration)
	}

	slog.InfoContext(ctx, "agent message", m.Attr())

	a.mu.Lock()
	defer a.mu.Unlock()
	m.Idx = len(a.history)
	a.history = append(a.history, m)
	a.outbox <- m

	// Notify all listeners (one-shot channels), then clear the list:
	for _, ch := range a.listeners {
		close(ch)
	}
	a.listeners = a.listeners[:0]
}
+
// GatherMessages collects pending user messages from the inbox as text
// contents. If block is true it waits for at least one message (or ctx
// cancellation); it then drains whatever else is immediately available
// without blocking.
func (a *Agent) GatherMessages(ctx context.Context, block bool) ([]ant.Content, error) {
	var m []ant.Content
	if block {
		select {
		case <-ctx.Done():
			return m, ctx.Err()
		case msg := <-a.inbox:
			m = append(m, ant.Content{Type: "text", Text: msg})
		}
	}
	// Non-blocking drain of any queued messages.
	for {
		select {
		case msg := <-a.inbox:
			m = append(m, ant.Content{Type: "text", Text: msg})
		default:
			return m, nil
		}
	}
}
+
// InnerLoop runs one complete user turn: it waits for user input, sends it
// to the LLM, and then loops running requested tools and feeding their
// results back until the model stops asking for tools, the budget runs
// out, or the user cancels via ctx.
func (a *Agent) InnerLoop(ctx context.Context) {
	// Reset the start of turn time
	a.startOfTurn = time.Now()

	// Wait for at least one message from the user.
	msgs, err := a.GatherMessages(ctx, true)
	if err != nil { // e.g. the context was canceled while blocking in GatherMessages
		return
	}
	// We do this as we go, but let's also do it at the end of the turn
	defer func() {
		if _, err := a.handleGitCommits(ctx); err != nil {
			// Just log the error, don't stop execution
			slog.WarnContext(ctx, "Failed to check for new git commits", "error", err)
		}
	}()

	userMessage := ant.Message{
		Role:    "user",
		Content: msgs,
	}
	// convo.SendMessage does the actual network call to send this to anthropic. This blocks until the response is ready.
	// TODO: pass ctx to SendMessage, and figure out how to square that ctx with convo's own .Ctx. Who owns the scope of this call?
	resp, err := a.convo.SendMessage(userMessage)
	if err != nil {
		a.pushToOutbox(ctx, errorMessage(err))
		return
	}
	// Tool-use loop: keep executing tools and replying until the model
	// ends its turn.
	for {
		// TODO: here and below where we check the budget,
		// we should review the UX: is it clear what happened?
		// is it clear how to resume?
		// should we let the user set a new budget?
		if err := a.overBudget(ctx); err != nil {
			return
		}
		if resp.StopReason != ant.StopReasonToolUse {
			break
		}
		var results []ant.Content
		cancelled := false
		select {
		case <-ctx.Done():
			// Don't actually run any of the tools, but rather build a response
			// for each tool_use message letting the LLM know that user canceled it.
			results, err = a.convo.ToolResultCancelContents(resp)
			if err != nil {
				a.pushToOutbox(ctx, errorMessage(err))
			}
			cancelled = true
		default:
			ctx = claudetool.WithWorkingDir(ctx, a.workingDir)
			// fall-through, when the user has not canceled the inner loop:
			results, err = a.convo.ToolResultContents(ctx, resp)
			if ctx.Err() != nil { // e.g. the user canceled the operation
				cancelled = true
			} else if err != nil {
				a.pushToOutbox(ctx, errorMessage(err))
			}
		}

		// Check for git commits. Currently we do this here, after we collect
		// tool results, since that's when we know commits could have happened.
		// We could instead do this when the turn ends, but I think it makes sense
		// to do this as we go.
		newCommits, err := a.handleGitCommits(ctx)
		if err != nil {
			// Just log the error, don't stop execution
			slog.WarnContext(ctx, "Failed to check for new git commits", "error", err)
		}
		// If exactly one new commit landed, run autoformatters and, if they
		// changed files, ask the model to amend its commit.
		var autoqualityMessages []string
		if len(newCommits) == 1 {
			formatted := a.codereview.Autoformat(ctx)
			if len(formatted) > 0 {
				// The [1:] strips the leading newline of the raw literal.
				msg := fmt.Sprintf(`
I ran autoformatters and they updated these files:

%s

Please amend your latest git commit with these changes and then continue with what you were doing.`,
					strings.Join(formatted, "\n"),
				)[1:]
				a.pushToOutbox(ctx, AgentMessage{
					Type:      AutoMessageType,
					Content:   msg,
					Timestamp: time.Now(),
				})
				autoqualityMessages = append(autoqualityMessages, msg)
			}
		}

		if err := a.overBudget(ctx); err != nil {
			return
		}

		// Include, along with the tool results (which must go first for whatever reason),
		// any messages that the user has sent along while the tool_use was executing concurrently.
		msgs, err = a.GatherMessages(ctx, false)
		if err != nil {
			return
		}
		// Inject any auto-generated messages from quality checks.
		for _, msg := range autoqualityMessages {
			msgs = append(msgs, ant.Content{Type: "text", Text: msg})
		}
		if cancelled {
			msgs = append(msgs, ant.Content{Type: "text", Text: cancelToolUseMessage})
			// EndOfTurn is false here so that the client of this agent keeps processing
			// messages from WaitForMessage() and gets the response from the LLM (usually
			// something like "okay, I'll wait further instructions", but the user should
			// be made aware of it regardless).
			a.pushToOutbox(ctx, AgentMessage{Type: ErrorMessageType, Content: userCancelMessage, EndOfTurn: false})
		} else if err := a.convo.OverBudget(); err != nil {
			budgetMsg := "We've exceeded our budget. Please ask the user to confirm before continuing by ending the turn."
			msgs = append(msgs, ant.Content{Type: "text", Text: budgetMsg})
			a.pushToOutbox(ctx, budgetMessage(fmt.Errorf("warning: %w (ask to keep trying, if you'd like)", err)))
		}
		results = append(results, msgs...)
		resp, err = a.convo.SendMessage(ant.Message{
			Role:    "user",
			Content: results,
		})
		if err != nil {
			a.pushToOutbox(ctx, errorMessage(fmt.Errorf("error: failed to continue conversation: %s", err.Error())))
			break
		}
		if cancelled {
			return
		}
	}
}
+
+func (a *Agent) overBudget(ctx context.Context) error {
+ if err := a.convo.OverBudget(); err != nil {
+ m := budgetMessage(err)
+ m.Content = m.Content + "\n\nBudget reset."
+ a.pushToOutbox(ctx, budgetMessage(err))
+ a.convo.ResetBudget(a.originalBudget)
+ return err
+ }
+ return nil
+}
+
+func collectTextContent(msg *ant.MessageResponse) string {
+ // Collect all text content
+ var allText strings.Builder
+ for _, content := range msg.Content {
+ if content.Type == "text" && content.Text != "" {
+ if allText.Len() > 0 {
+ allText.WriteString("\n\n")
+ }
+ allText.WriteString(content.Text)
+ }
+ }
+ return allText.String()
+}
+
// TotalUsage implements CodingAgent; it reports the conversation's
// cumulative usage. The agent lock is held across the convo call.
func (a *Agent) TotalUsage() ant.CumulativeUsage {
	a.mu.Lock()
	defer a.mu.Unlock()
	return a.convo.CumulativeUsage()
}
+
+// WaitForMessageCount returns when the agent has at more than clientMessageCount messages or the context is done.
+func (a *Agent) WaitForMessageCount(ctx context.Context, greaterThan int) {
+ for a.MessageCount() <= greaterThan {
+ a.mu.Lock()
+ ch := make(chan struct{})
+ // Deletion happens when we notify.
+ a.listeners = append(a.listeners, ch)
+ a.mu.Unlock()
+
+ select {
+ case <-ctx.Done():
+ return
+ case <-ch:
+ continue
+ }
+ }
+}
+
// Diff returns a unified diff of changes made since the agent was instantiated.
// If commit is non-nil and non-empty, it instead returns `git show` output
// for just that commit (after validating it looks like a git SHA).
func (a *Agent) Diff(commit *string) (string, error) {
	if a.initialCommit == "" {
		return "", fmt.Errorf("no initial commit reference available")
	}

	// Git commands are not tied to the agent's lifetime context here.
	ctx := context.Background()

	// If a specific commit hash is provided, show just that commit's changes
	if commit != nil && *commit != "" {
		// Validate that the commit looks like a valid git SHA
		if !isValidGitSHA(*commit) {
			return "", fmt.Errorf("invalid git commit SHA format: %s", *commit)
		}

		// Get the diff for just this commit
		cmd := exec.CommandContext(ctx, "git", "show", "--unified=10", *commit)
		cmd.Dir = a.repoRoot
		output, err := cmd.CombinedOutput()
		if err != nil {
			return "", fmt.Errorf("failed to get diff for commit %s: %w - %s", *commit, err, string(output))
		}
		return string(output), nil
	}

	// Otherwise, get the diff between the initial commit and the current state using exec.Command
	cmd := exec.CommandContext(ctx, "git", "diff", "--unified=10", a.initialCommit)
	cmd.Dir = a.repoRoot
	output, err := cmd.CombinedOutput()
	if err != nil {
		return "", fmt.Errorf("failed to get diff: %w - %s", err, string(output))
	}

	return string(output), nil
}
+
// InitialCommit returns the Git commit hash that was saved when the agent was instantiated.
func (a *Agent) InitialCommit() string {
	return a.initialCommit
}
+
// handleGitCommits() highlights new commits to the user. When running
// under docker, new HEADs are pushed to a branch according to the title.
// It returns the commits that are new since the last call (deduplicated
// via a.seenCommits, except that a changed HEAD is always included).
func (a *Agent) handleGitCommits(ctx context.Context) ([]*GitCommit, error) {
	if a.repoRoot == "" {
		return nil, nil
	}

	head, err := resolveRef(ctx, a.repoRoot, "HEAD")
	if err != nil {
		return nil, err
	}
	if head == a.lastHEAD {
		return nil, nil // nothing to do
	}
	// Record the new HEAD even if a later step fails.
	defer func() {
		a.lastHEAD = head
	}()

	// Get new commits. Because it's possible that the agent does rebases, fixups, and
	// so forth, we use, as our fixed point, the "initialCommit", and we limit ourselves
	// to the last 100 commits.
	var commits []*GitCommit

	// Get commits since the initial commit
	// Format: <hash>\0<subject>\0<body>\0
	// This uses NULL bytes as separators to avoid issues with newlines in commit messages
	// Limit to 100 commits to avoid overwhelming the user
	cmd := exec.CommandContext(ctx, "git", "log", "-n", "100", "--pretty=format:%H%x00%s%x00%b%x00", "^"+a.initialCommit, head)
	cmd.Dir = a.repoRoot
	output, err := cmd.Output()
	if err != nil {
		return nil, fmt.Errorf("failed to get git log: %w", err)
	}

	// Parse git log output and filter out already seen commits
	parsedCommits := parseGitLog(string(output))

	var headCommit *GitCommit

	// Filter out commits we've already seen
	// NOTE(review): &commit takes the address of the range variable; this
	// relies on Go 1.22+ per-iteration loop variables — confirm go.mod.
	for _, commit := range parsedCommits {
		if commit.Hash == head {
			headCommit = &commit
		}

		// Skip if we've seen this commit before. If our head has changed, always include that.
		if a.seenCommits[commit.Hash] && commit.Hash != head {
			continue
		}

		// Mark this commit as seen
		a.seenCommits[commit.Hash] = true

		// Add to our list of new commits
		commits = append(commits, &commit)
	}

	if a.gitRemoteAddr != "" {
		if headCommit == nil {
			// I think this can only happen if we have a bug or if there's a race.
			headCommit = &GitCommit{}
			headCommit.Hash = head
			headCommit.Subject = "unknown"
			commits = append(commits, headCommit)
		}

		// Derive the push branch from the conversation title, falling back
		// to the session ID when no usable title exists.
		cleanTitle := titleToBranch(a.title)
		if cleanTitle == "" {
			cleanTitle = a.config.SessionID
		}
		branch := "sketch/" + cleanTitle

		// TODO: I don't love the force push here. We could see if the push is a fast-forward, and,
		// if it's not, we could make a backup with a unique name (perhaps append a timestamp) and
		// then use push with lease to replace.
		cmd = exec.Command("git", "push", "--force", a.gitRemoteAddr, "HEAD:refs/heads/"+branch)
		cmd.Dir = a.workingDir
		if out, err := cmd.CombinedOutput(); err != nil {
			a.pushToOutbox(ctx, errorMessage(fmt.Errorf("git push to host: %s: %v", out, err)))
		} else {
			headCommit.PushedBranch = branch
		}
	}

	// If we found new commits, create a message
	if len(commits) > 0 {
		msg := AgentMessage{
			Type:      CommitMessageType,
			Timestamp: time.Now(),
			Commits:   commits,
		}
		a.pushToOutbox(ctx, msg)
	}
	return commits, nil
}
+
// titleToBranch converts a conversation title into a branch-name-safe
// slug: lowercase, spaces become hyphens, and every character other than
// a-z and '-' is dropped.
func titleToBranch(s string) string {
	var b strings.Builder
	for _, r := range strings.ToLower(s) {
		switch {
		case r >= 'a' && r <= 'z', r == '-':
			b.WriteRune(r)
		case r == ' ':
			b.WriteByte('-')
		}
	}
	return b.String()
}
+
+// parseGitLog parses the output of git log with format '%H%x00%s%x00%b%x00'
+// and returns an array of GitCommit structs.
+func parseGitLog(output string) []GitCommit {
+ var commits []GitCommit
+
+ // No output means no commits
+ if len(output) == 0 {
+ return commits
+ }
+
+ // Split by NULL byte
+ parts := strings.Split(output, "\x00")
+
+ // Process in triplets (hash, subject, body)
+ for i := 0; i < len(parts); i++ {
+ // Skip empty parts
+ if parts[i] == "" {
+ continue
+ }
+
+ // This should be a hash
+ hash := strings.TrimSpace(parts[i])
+
+ // Make sure we have at least a subject part available
+ if i+1 >= len(parts) {
+ break // No more parts available
+ }
+
+ // Get the subject
+ subject := strings.TrimSpace(parts[i+1])
+
+ // Get the body if available
+ body := ""
+ if i+2 < len(parts) {
+ body = strings.TrimSpace(parts[i+2])
+ }
+
+ // Skip to the next triplet
+ i += 2
+
+ commits = append(commits, GitCommit{
+ Hash: hash,
+ Subject: subject,
+ Body: body,
+ })
+ }
+
+ return commits
+}
+
// repoRoot reports the top-level directory of the git repository that
// contains dir, via `git rev-parse --show-toplevel`.
func repoRoot(ctx context.Context, dir string) (string, error) {
	var errBuf strings.Builder
	cmd := exec.CommandContext(ctx, "git", "rev-parse", "--show-toplevel")
	cmd.Dir = dir
	cmd.Stderr = &errBuf
	out, err := cmd.Output()
	if err != nil {
		// Include captured stderr for diagnosis.
		return "", fmt.Errorf("git rev-parse failed: %w\n%s", err, &errBuf)
	}
	return strings.TrimSpace(string(out)), nil
}
+
// resolveRef resolves refName (e.g. "HEAD") to a commit hash in the git
// repository at dir, via `git rev-parse`.
func resolveRef(ctx context.Context, dir, refName string) (string, error) {
	var errBuf strings.Builder
	cmd := exec.CommandContext(ctx, "git", "rev-parse", refName)
	cmd.Dir = dir
	cmd.Stderr = &errBuf
	out, err := cmd.Output()
	if err != nil {
		// Include captured stderr for diagnosis.
		return "", fmt.Errorf("git rev-parse failed: %w\n%s", err, &errBuf)
	}
	// TODO: validate that out is valid hex
	return strings.TrimSpace(string(out)), nil
}
+
// isValidGitSHA validates if a string looks like a valid git SHA hash:
// 4 to 40 hexadecimal characters (either case).
func isValidGitSHA(sha string) bool {
	if len(sha) < 4 || len(sha) > 40 {
		return false
	}
	for _, c := range sha {
		switch {
		case c >= '0' && c <= '9':
		case c >= 'a' && c <= 'f':
		case c >= 'A' && c <= 'F':
		default:
			return false
		}
	}
	return true
}
diff --git a/loop/agent_git_test.go b/loop/agent_git_test.go
new file mode 100644
index 0000000..399943b
--- /dev/null
+++ b/loop/agent_git_test.go
@@ -0,0 +1,263 @@
+package loop
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "strings"
+ "testing"
+ "time"
+)
+
// TestGitCommitTracking tests the git commit tracking functionality
// end to end: it builds a real git repository in a temp directory,
// commits to it, and verifies that Agent.handleGitCommits reports new
// commits on the agent's outbox, including the -n 100 git log cap.
func TestGitCommitTracking(t *testing.T) {
	// Create a temporary directory for our test git repo
	tempDir := t.TempDir() // Automatically cleaned up when the test completes

	// Initialize a git repo in the temp directory
	cmd := exec.Command("git", "init")
	cmd.Dir = tempDir
	if err := cmd.Run(); err != nil {
		t.Fatalf("Failed to initialize git repo: %v", err)
	}

	// Configure git user for commits
	cmd = exec.Command("git", "config", "user.name", "Test User")
	cmd.Dir = tempDir
	if err := cmd.Run(); err != nil {
		t.Fatalf("Failed to configure git user name: %v", err)
	}

	cmd = exec.Command("git", "config", "user.email", "test@example.com")
	cmd.Dir = tempDir
	if err := cmd.Run(); err != nil {
		t.Fatalf("Failed to configure git user email: %v", err)
	}

	// Make an initial commit
	testFile := filepath.Join(tempDir, "test.txt")
	if err := os.WriteFile(testFile, []byte("initial content\n"), 0o644); err != nil {
		t.Fatalf("Failed to write file: %v", err)
	}

	cmd = exec.Command("git", "add", "test.txt")
	cmd.Dir = tempDir
	if err := cmd.Run(); err != nil {
		t.Fatalf("Failed to add file: %v", err)
	}

	cmd = exec.Command("git", "commit", "-m", "Initial commit")
	cmd.Dir = tempDir
	if err := cmd.Run(); err != nil {
		t.Fatalf("Failed to create initial commit: %v", err)
	}

	// Get the initial commit hash
	cmd = exec.Command("git", "rev-parse", "HEAD")
	cmd.Dir = tempDir
	initialCommitOutput, err := cmd.Output()
	if err != nil {
		t.Fatalf("Failed to get initial commit hash: %v", err)
	}
	initialCommit := strings.TrimSpace(string(initialCommitOutput))

	// Create agent with the temp repo.
	// NOTE(review): the Agent is constructed directly rather than via
	// NewAgent, so only the fields that handleGitCommits reads are set.
	agent := &Agent{
		workingDir:    tempDir,
		repoRoot:      tempDir, // Set repoRoot to same as workingDir for this test
		outbox:        make(chan AgentMessage, 100),
		seenCommits:   make(map[string]bool),
		initialCommit: initialCommit,
	}

	// Make a new commit
	if err := os.WriteFile(testFile, []byte("updated content\n"), 0o644); err != nil {
		t.Fatalf("Failed to update file: %v", err)
	}

	cmd = exec.Command("git", "add", "test.txt")
	cmd.Dir = tempDir
	if err := cmd.Run(); err != nil {
		t.Fatalf("Failed to add updated file: %v", err)
	}

	// Multi-line message: the subject and body must survive parsing separately.
	cmd = exec.Command("git", "commit", "-m", "Second commit\n\nThis commit has a multi-line message\nwith details about the changes.")
	cmd.Dir = tempDir
	if err := cmd.Run(); err != nil {
		t.Fatalf("Failed to create second commit: %v", err)
	}

	// Call handleGitCommits and verify we get a commit message
	ctx := context.Background()
	_, err = agent.handleGitCommits(ctx)
	if err != nil {
		t.Fatalf("handleGitCommits failed: %v", err)
	}

	// Check if we received a commit message
	var commitMsg AgentMessage
	select {
	case commitMsg = <-agent.outbox:
		// We got a message
	case <-time.After(500 * time.Millisecond):
		t.Fatal("Timed out waiting for commit message")
	}

	// Verify the commit message
	if commitMsg.Type != CommitMessageType {
		t.Errorf("Expected message type %s, got %s", CommitMessageType, commitMsg.Type)
	}

	if len(commitMsg.Commits) < 1 {
		t.Fatalf("Expected at least 1 commit, got %d", len(commitMsg.Commits))
	}

	// Find the second commit
	var commit *GitCommit
	found := false
	for _, c := range commitMsg.Commits {
		if strings.HasPrefix(c.Subject, "Second commit") {
			commit = c
			found = true
			break
		}
	}

	if !found {
		t.Fatalf("Could not find 'Second commit' in commits")
	}
	if !strings.HasPrefix(commit.Subject, "Second commit") {
		t.Errorf("Expected commit subject 'Second commit', got '%s'", commit.Subject)
	}

	if !strings.Contains(commit.Body, "multi-line message") {
		t.Errorf("Expected body to contain 'multi-line message', got '%s'", commit.Body)
	}

	// Test with many commits
	if testing.Short() {
		t.Skip("Skipping multiple commits test in short mode")
	}

	// Make multiple commits (more than 100) to exercise the git log cap.
	for i := 0; i < 110; i++ {
		newContent := []byte(fmt.Sprintf("content update %d\n", i))
		if err := os.WriteFile(testFile, newContent, 0o644); err != nil {
			t.Fatalf("Failed to update file: %v", err)
		}

		cmd = exec.Command("git", "add", "test.txt")
		cmd.Dir = tempDir
		if err := cmd.Run(); err != nil {
			t.Fatalf("Failed to add updated file: %v", err)
		}

		cmd = exec.Command("git", "commit", "-m", fmt.Sprintf("Commit %d", i+3))
		cmd.Dir = tempDir
		if err := cmd.Run(); err != nil {
			t.Fatalf("Failed to create commit %d: %v", i+3, err)
		}
	}

	// Reset the outbox channel and seen commits map so every commit
	// appears "new" to handleGitCommits again.
	agent.outbox = make(chan AgentMessage, 100)
	agent.seenCommits = make(map[string]bool)

	// Call handleGitCommits again - it should still work but only show at most 100 commits
	_, err = agent.handleGitCommits(ctx)
	if err != nil {
		t.Fatalf("handleGitCommits failed: %v", err)
	}

	// Check if we received a commit message
	select {
	case commitMsg = <-agent.outbox:
		// We got a message
	case <-time.After(500 * time.Millisecond):
		t.Fatal("Timed out waiting for commit message")
	}

	// Should have at most 100 commits due to the -n 100 limit in git log
	if len(commitMsg.Commits) > 100 {
		t.Errorf("Expected at most 100 commits, got %d", len(commitMsg.Commits))
	}

	if len(commitMsg.Commits) < 50 {
		t.Errorf("Expected at least 50 commits, but only got %d", len(commitMsg.Commits))
	}

	t.Logf("Received %d commits out of 112 total", len(commitMsg.Commits))
}
+
+// TestParseGitLog tests the parseGitLog function
+func TestParseGitLog(t *testing.T) {
+ tests := []struct {
+ name string
+ input string
+ expected []GitCommit
+ }{
+ {
+ name: "Empty input",
+ input: "",
+ expected: []GitCommit{},
+ },
+ {
+ name: "Single commit",
+ input: "abcdef1234567890\x00Initial commit\x00This is the first commit\x00",
+ expected: []GitCommit{
+ {Hash: "abcdef1234567890", Subject: "Initial commit", Body: "This is the first commit"},
+ },
+ },
+ {
+ name: "Multiple commits",
+ input: "abcdef1234567890\x00Initial commit\x00This is the first commit\x00" +
+ "fedcba0987654321\x00Second commit\x00This is the second commit\x00" +
+ "123456abcdef7890\x00Third commit\x00This is the third commit\x00",
+ expected: []GitCommit{
+ {Hash: "abcdef1234567890", Subject: "Initial commit", Body: "This is the first commit"},
+ {Hash: "fedcba0987654321", Subject: "Second commit", Body: "This is the second commit"},
+ {Hash: "123456abcdef7890", Subject: "Third commit", Body: "This is the third commit"},
+ },
+ },
+ {
+ name: "Commit with multi-line body",
+ input: "abcdef1234567890\x00Commit with multi-line body\x00This is a commit\nwith a multi-line\nbody message\x00",
+ expected: []GitCommit{
+ {Hash: "abcdef1234567890", Subject: "Commit with multi-line body", Body: "This is a commit\nwith a multi-line\nbody message"},
+ },
+ },
+ {
+ name: "Commit with empty body",
+ input: "abcdef1234567890\x00Commit with empty body\x00\x00",
+ expected: []GitCommit{
+ {Hash: "abcdef1234567890", Subject: "Commit with empty body", Body: ""},
+ },
+ },
+ {
+ name: "Empty parts removed",
+ input: "\x00abcdef1234567890\x00Initial commit\x00This is the first commit\x00\x00",
+ expected: []GitCommit{
+ {Hash: "abcdef1234567890", Subject: "Initial commit", Body: "This is the first commit"},
+ },
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ actual := parseGitLog(tt.input)
+
+ if len(actual) != len(tt.expected) {
+ t.Fatalf("Expected %d commits, got %d", len(tt.expected), len(actual))
+ }
+
+ for i, commit := range actual {
+ expected := tt.expected[i]
+ if commit.Hash != expected.Hash || commit.Subject != expected.Subject || commit.Body != expected.Body {
+ t.Errorf("Commit %d doesn't match:\nExpected: %+v\nGot: %+v", i, expected, commit)
+ }
+ }
+ })
+ }
+}
diff --git a/loop/agent_test.go b/loop/agent_test.go
new file mode 100644
index 0000000..b9f9994
--- /dev/null
+++ b/loop/agent_test.go
@@ -0,0 +1,154 @@
+package loop
+
+import (
+ "context"
+ "net/http"
+ "os"
+ "strings"
+ "testing"
+ "time"
+
+ "sketch.dev/ant"
+ "sketch.dev/httprr"
+)
+
+// TestAgentLoop tests that the Agent loop functionality works correctly.
+// It uses the httprr package to record HTTP interactions for replay in tests.
+// When failing, rebuild with "go test ./sketch/loop -run TestAgentLoop -httprecord .*agent_loop.*"
+// as necessary.
+func TestAgentLoop(t *testing.T) {
+ ctx := context.Background()
+
+ // Setup httprr recorder
+ rrPath := "testdata/agent_loop.httprr"
+ rr, err := httprr.Open(rrPath, http.DefaultTransport)
+ if err != nil && !os.IsNotExist(err) {
+ t.Fatal(err)
+ }
+
+ if rr.Recording() {
+ // Skip the test if API key is not available
+ if os.Getenv("ANTHROPIC_API_KEY") == "" {
+ t.Fatal("ANTHROPIC_API_KEY not set, required for HTTP recording")
+ }
+ }
+
+ // Create HTTP client
+ var client *http.Client
+ if rr != nil {
+ // Scrub API keys from requests for security
+ rr.ScrubReq(func(req *http.Request) error {
+ req.Header.Del("x-api-key")
+ req.Header.Del("anthropic-api-key")
+ return nil
+ })
+ client = rr.Client()
+ } else {
+ client = &http.Client{Transport: http.DefaultTransport}
+ }
+
+ // Create a new agent with the httprr client
+ origWD, err := os.Getwd()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err := os.Chdir("/"); err != nil {
+ t.Fatal(err)
+ }
+ budget := ant.Budget{MaxResponses: 100}
+ wd, err := os.Getwd()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ cfg := AgentConfig{
+ Context: ctx,
+ APIKey: os.Getenv("ANTHROPIC_API_KEY"),
+ HTTPC: client,
+ Budget: budget,
+ GitUsername: "Test Agent",
+ GitEmail: "totallyhuman@sketch.dev",
+ SessionID: "test-session-id",
+ ClientGOOS: "linux",
+ ClientGOARCH: "amd64",
+ }
+ agent := NewAgent(cfg)
+ if err := os.Chdir(origWD); err != nil {
+ t.Fatal(err)
+ }
+ err = agent.Init(AgentInit{WorkingDir: wd, NoGit: true})
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Setup a test message that will trigger a simple, predictable response
+ userMessage := "What tools are available to you? Please just list them briefly."
+
+ // Send the message to the agent
+ agent.UserMessage(ctx, userMessage)
+
+ // Process a single loop iteration to avoid long-running tests
+ agent.InnerLoop(ctx)
+
+ // Collect responses with a timeout
+ var responses []AgentMessage
+ timeout := time.After(10 * time.Second)
+ done := false
+
+ for !done {
+ select {
+ case <-timeout:
+ t.Log("Timeout reached while waiting for agent responses")
+ done = true
+ default:
+ select {
+ case msg := <-agent.outbox:
+ t.Logf("Received message: Type=%s, EndOfTurn=%v, Content=%q", msg.Type, msg.EndOfTurn, msg.Content)
+ responses = append(responses, msg)
+ if msg.EndOfTurn {
+ done = true
+ }
+ default:
+ // No more messages available right now
+ time.Sleep(100 * time.Millisecond)
+ }
+ }
+ }
+
+ // Verify we got at least one response
+ if len(responses) == 0 {
+ t.Fatal("No responses received from agent")
+ }
+
+ // Log the received responses for debugging
+ t.Logf("Received %d responses", len(responses))
+
+ // Find the final agent response (with EndOfTurn=true)
+ var finalResponse *AgentMessage
+ for i := range responses {
+ if responses[i].Type == AgentMessageType && responses[i].EndOfTurn {
+ finalResponse = &responses[i]
+ break
+ }
+ }
+
+ // Verify we got a final agent response
+ if finalResponse == nil {
+ t.Fatal("No final agent response received")
+ }
+
+ // Check that the response contains tools information
+ if !strings.Contains(strings.ToLower(finalResponse.Content), "tool") {
+ t.Error("Expected response to mention tools")
+ }
+
+ // Count how many tool use messages we received
+ toolUseCount := 0
+ for _, msg := range responses {
+ if msg.Type == ToolUseMessageType {
+ toolUseCount++
+ }
+ }
+
+ t.Logf("Agent used %d tools in its response", toolUseCount)
+}
diff --git a/loop/agent_user_cancel_test.go b/loop/agent_user_cancel_test.go
new file mode 100644
index 0000000..f79e73e
--- /dev/null
+++ b/loop/agent_user_cancel_test.go
@@ -0,0 +1,605 @@
+//go:build goexperiment.synctest
+
+package loop
+
+import (
+ "context"
+ "fmt"
+ "strings"
+ "testing"
+ "testing/synctest"
+
+ "sketch.dev/ant"
+)
+
+func TestLoop_OneTurn_Basic(t *testing.T) {
+ synctest.Run(func() {
+ mockConvo := NewMockConvo(t)
+
+ agent := &Agent{
+ convo: mockConvo,
+ inbox: make(chan string, 1),
+ outbox: make(chan AgentMessage, 1),
+ }
+ userMsg := ant.Message{
+ Role: "user",
+ Content: []ant.Content{
+ {Type: "text", Text: "hi"},
+ },
+ }
+ userMsgResponse := &ant.MessageResponse{}
+ mockConvo.ExpectCall("SendMessage", userMsg).Return(userMsgResponse, nil)
+
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ go agent.Loop(ctx)
+
+ agent.UserMessage(ctx, "hi")
+
+ // This makes sure the SendMessage call happens before we assert the expectations.
+ synctest.Wait()
+
+ // Verify results
+ mockConvo.AssertExpectations(t)
+ })
+}
+
// TestLoop_ToolCall_Basic verifies the happy-path tool-use round trip:
// the model requests a tool, the agent runs it and sends the tool
// results back, and the model ends the turn.
func TestLoop_ToolCall_Basic(t *testing.T) {
	synctest.Run(func() {
		mockConvo := NewMockConvo(t)

		agent := &Agent{
			convo:  mockConvo,
			inbox:  make(chan string, 1),
			outbox: make(chan AgentMessage, 1),
		}
		userMsg := ant.Message{
			Role: "user",
			Content: []ant.Content{
				{Type: "text", Text: "hi"},
			},
		}
		// First model response: a tool_use request for "test_tool".
		userMsgResponse := &ant.MessageResponse{
			StopReason: ant.StopReasonToolUse,
			Content: []ant.Content{
				{
					Type:      ant.ContentTypeToolUse,
					ID:        "tool1",
					ToolName:  "test_tool",
					ToolInput: []byte(`{"param":"value"}`),
				},
			},
			Usage: ant.Usage{
				InputTokens:  100,
				OutputTokens: 200,
			},
		}

		// The tool results the agent is expected to forward to the model.
		toolUseContents := []ant.Content{
			{
				Type:       ant.ContentTypeToolResult,
				ToolUseID:  "tool1",
				Text:       "",
				ToolResult: "This is a tool result",
				ToolError:  false,
			},
		}
		toolUseResultsMsg := ant.Message{
			Role:    "user",
			Content: toolUseContents,
		}
		// Second model response: the results are accepted and the turn ends.
		toolUseResponse := &ant.MessageResponse{
			StopReason: ant.StopReasonEndTurn,
			Content: []ant.Content{
				{
					Type: ant.ContentTypeText,
					Text: "tool_use contents accepted",
				},
			},
			Usage: ant.Usage{
				InputTokens:  50,
				OutputTokens: 75,
			},
		}

		// Set up the mock response for tool results
		mockConvo.ExpectCall("SendMessage", userMsg).Return(userMsgResponse, nil)
		mockConvo.ExpectCall("ToolResultContents", userMsgResponse).Return(toolUseContents, nil)
		mockConvo.ExpectCall("SendMessage", toolUseResultsMsg).Return(toolUseResponse, nil)

		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()

		go agent.Loop(ctx)

		agent.UserMessage(ctx, "hi")

		// This makes sure the SendMessage call happens before we assert the expectations.
		synctest.Wait()

		// Verify results
		mockConvo.AssertExpectations(t)
	})
}
+
// TestLoop_ToolCall_UserCancelsDuringToolResultContents verifies that when
// the user cancels while a tool is still running (ToolResultContents is
// blocked), the agent reports the cancellation to the model with
// cancelToolUseMessage instead of the tool's results.
func TestLoop_ToolCall_UserCancelsDuringToolResultContents(t *testing.T) {
	synctest.Run(func() {
		mockConvo := NewMockConvo(t)

		agent := &Agent{
			convo:  mockConvo,
			inbox:  make(chan string, 1),
			outbox: make(chan AgentMessage, 10), // don't let anything block on outbox.
		}
		userMsg := ant.Message{
			Role: "user",
			Content: []ant.Content{
				{Type: "text", Text: "hi"},
			},
		}
		// First model response: a tool_use request that will be canceled mid-run.
		userMsgResponse := &ant.MessageResponse{
			StopReason: ant.StopReasonToolUse,
			Content: []ant.Content{
				{
					Type:      ant.ContentTypeToolUse,
					ID:        "tool1",
					ToolName:  "test_tool",
					ToolInput: []byte(`{"param":"value"}`),
				},
			},
			Usage: ant.Usage{
				InputTokens:  100,
				OutputTokens: 200,
			},
		}
		// After cancellation, the agent should send the cancel notice
		// rather than any tool results.
		toolUseResultsMsg := ant.Message{
			Role: "user",
			Content: []ant.Content{
				{Type: "text", Text: cancelToolUseMessage},
			},
		}
		toolUseResponse := &ant.MessageResponse{
			StopReason: ant.StopReasonEndTurn,
			Content: []ant.Content{
				{
					Type: ant.ContentTypeText,
					Text: "tool_use contents accepted",
				},
			},
			Usage: ant.Usage{
				InputTokens:  50,
				OutputTokens: 75,
			},
		}

		// Set up the mock response for tool results

		userCancelError := fmt.Errorf("user canceled")
		// This allows the test to block the InnerLoop goroutine that invokes ToolResultsContents so
		// we can force its context to cancel while it's blocked.
		waitForToolResultContents := make(chan any, 1)

		mockConvo.ExpectCall("SendMessage", userMsg).Return(userMsgResponse, nil)
		mockConvo.ExpectCall("ToolResultContents",
			userMsgResponse).BlockAndReturn(waitForToolResultContents, []ant.Content{}, userCancelError)
		mockConvo.ExpectCall("SendMessage", toolUseResultsMsg).Return(toolUseResponse, nil)

		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()

		go agent.Loop(ctx)

		// This puts one message into agent.inbox, which should un-block the GatherMessages call
		// at the top of agent.InnerLoop.
		agent.UserMessage(ctx, "hi")

		// This makes sure the first SendMessage call happens before we proceed with the cancel.
		synctest.Wait()

		// The goroutine executing ToolResultContents call should be blocked, simulating a long
		// running operation that the user wishes to cancel while it's still in progress.
		// This call invokes that InnerLoop context's cancel() func.
		agent.CancelInnerLoop(userCancelError)

		// This tells the goroutine that's in mockConvo.ToolResultContents to proceed.
		waitForToolResultContents <- nil

		// This makes sure the final SendMessage call happens before we assert the expectations.
		synctest.Wait()

		// Verify results
		mockConvo.AssertExpectations(t)
	})
}
+
// TestLoop_ToolCall_UserCancelsDuringToolResultContents_AndContinuesToChat
// extends the cancellation scenario: after the user cancels a running tool,
// the conversation must remain usable — a follow-up user message should
// produce a normal SendMessage round trip.
func TestLoop_ToolCall_UserCancelsDuringToolResultContents_AndContinuesToChat(t *testing.T) {
	synctest.Run(func() {
		mockConvo := NewMockConvo(t)

		agent := &Agent{
			convo:  mockConvo,
			inbox:  make(chan string, 1),
			outbox: make(chan AgentMessage, 10), // don't let anything block on outbox.
		}
		userMsg := ant.Message{
			Role: "user",
			Content: []ant.Content{
				{Type: "text", Text: "hi"},
			},
		}
		// First model response: a tool_use request that will be canceled mid-run.
		userMsgResponse := &ant.MessageResponse{
			StopReason: ant.StopReasonToolUse,
			Content: []ant.Content{
				{
					Type:      ant.ContentTypeToolUse,
					ID:        "tool1",
					ToolName:  "test_tool",
					ToolInput: []byte(`{"param":"value"}`),
				},
			},
			Usage: ant.Usage{
				InputTokens:  100,
				OutputTokens: 200,
			},
		}
		// After cancellation, the agent sends the cancel notice instead of results.
		toolUseResultsMsg := ant.Message{
			Role: "user",
			Content: []ant.Content{
				{Type: "text", Text: cancelToolUseMessage},
			},
		}
		toolUseResultResponse := &ant.MessageResponse{
			StopReason: ant.StopReasonEndTurn,
			Content: []ant.Content{
				{
					Type: ant.ContentTypeText,
					Text: "awaiting further instructions",
				},
			},
			Usage: ant.Usage{
				InputTokens:  50,
				OutputTokens: 75,
			},
		}
		// The post-cancel follow-up exchange that proves the loop still works.
		userFollowUpMsg := ant.Message{
			Role: "user",
			Content: []ant.Content{
				{Type: "text", Text: "that was the wrong thing to do"},
			},
		}
		userFollowUpResponse := &ant.MessageResponse{
			StopReason: ant.StopReasonEndTurn,
			Content: []ant.Content{
				{
					Type: ant.ContentTypeText,
					Text: "sorry about that",
				},
			},
			Usage: ant.Usage{
				InputTokens:  100,
				OutputTokens: 200,
			},
		}
		// Set up the mock response for tool results

		userCancelError := fmt.Errorf("user canceled")
		// This allows the test to block the InnerLoop goroutine that invokes ToolResultsContents so
		// we can force its context to cancel while it's blocked.
		waitForToolResultContents := make(chan any, 1)

		mockConvo.ExpectCall("SendMessage", userMsg).Return(userMsgResponse, nil)
		mockConvo.ExpectCall("ToolResultContents",
			userMsgResponse).BlockAndReturn(waitForToolResultContents, []ant.Content{}, userCancelError)
		mockConvo.ExpectCall("SendMessage", toolUseResultsMsg).Return(toolUseResultResponse, nil)

		mockConvo.ExpectCall("SendMessage", userFollowUpMsg).Return(userFollowUpResponse, nil)

		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()

		go agent.Loop(ctx)

		// This puts one message into agent.inbox, which should un-block the GatherMessages call
		// at the top of agent.InnerLoop.
		agent.UserMessage(ctx, "hi")

		// This makes sure the first SendMessage call happens before we proceed with the cancel.
		synctest.Wait()

		// The goroutine executing ToolResultContents call should be blocked, simulating a long
		// running operation that the user wishes to cancel while it's still in progress.
		// This call invokes that InnerLoop context's cancel() func.
		agent.CancelInnerLoop(userCancelError)

		// This tells the goroutine that's in mockConvo.ToolResultContents to proceed.
		waitForToolResultContents <- nil

		// Allow InnerLoop to handle the cancellation logic before continuing the conversation.
		synctest.Wait()

		agent.UserMessage(ctx, "that was the wrong thing to do")

		synctest.Wait()

		// Verify results
		mockConvo.AssertExpectations(t)
	})
}
+
// TestInnerLoop_UserCancels drives InnerLoop directly (no outer Loop) and
// cancels its context while SendMessage is blocked, then verifies the
// outbox carries either a cancellation error message or a tool result
// marked as canceled.
func TestInnerLoop_UserCancels(t *testing.T) {
	synctest.Run(func() {
		mockConvo := NewMockConvo(t)

		agent := &Agent{
			convo:  mockConvo,
			inbox:  make(chan string, 1),
			outbox: make(chan AgentMessage, 10), // don't block on outbox
		}

		// Define test message
		// This simulates something that would result in claude responding with tool_use responses.
		userMsg := ant.Message{
			Role: "user",
			Content: []ant.Content{
				{Type: "text", Text: "use test_tool for something"},
			},
		}
		// Mock initial response with tool use
		userMsgResponse := &ant.MessageResponse{
			StopReason: ant.StopReasonToolUse,
			Content: []ant.Content{
				{
					Type:      ant.ContentTypeToolUse,
					ID:        "tool1",
					ToolName:  "test_tool",
					ToolInput: []byte(`{"param":"value"}`),
				},
			},
			Usage: ant.Usage{
				InputTokens:  100,
				OutputTokens: 200,
			},
		}
		// The canceled-tool result the agent should produce instead of
		// actually running the tool.
		canceledToolUseContents := []ant.Content{
			{
				Type:       ant.ContentTypeToolResult,
				ToolUseID:  "tool1",
				ToolError:  true,
				ToolResult: "user canceled this tool_use",
			},
		}
		canceledToolUseMsg := ant.Message{
			Role: "user",
			Content: append(canceledToolUseContents, ant.Content{
				Type: ant.ContentTypeText,
				Text: cancelToolUseMessage,
			}),
		}
		// Set up expected behaviors
		waitForSendMessage := make(chan any)
		mockConvo.ExpectCall("SendMessage", userMsg).BlockAndReturn(waitForSendMessage, userMsgResponse, nil)

		mockConvo.ExpectCall("ToolResultCancelContents", userMsgResponse).Return(canceledToolUseContents, nil)
		mockConvo.ExpectCall("SendMessage", canceledToolUseMsg).Return(
			&ant.MessageResponse{
				StopReason: ant.StopReasonToolUse,
			}, nil)

		ctx, cancel := context.WithCancelCause(context.Background())

		// Run one iteration of InnerLoop
		go agent.InnerLoop(ctx)

		// Send a message to the agent's inbox
		agent.UserMessage(ctx, "use test_tool for something")

		synctest.Wait()

		// Cancel the context while the InnerLoop goroutine is still blocked
		// inside SendMessage, so the cancellation is already .Done() by the
		// time the call returns and InnerLoop next checks it.
		cancel(fmt.Errorf("user canceled"))

		// unblock the InnerLoop goroutine's SendMessage call
		waitForSendMessage <- nil

		synctest.Wait()

		// Verify results
		mockConvo.AssertExpectations(t)

		// Get all messages from outbox and verify their types/content
		var messages []AgentMessage

		// Collect messages until outbox is empty or we have 10 messages
		for i := 0; i < 10; i++ {
			select {
			case msg := <-agent.outbox:
				messages = append(messages, msg)
			default:
				// No more messages
				i = 10 // Exit the loop
			}
		}

		// Print out the messages we got for debugging
		t.Logf("Received %d messages from outbox", len(messages))
		for i, msg := range messages {
			t.Logf("Message %d: Type=%s, Content=%s, EndOfTurn=%t", i, msg.Type, msg.Content, msg.EndOfTurn)
			if msg.ToolName != "" {
				t.Logf("  Tool: Name=%s, Input=%s, Result=%s, Error=%v",
					msg.ToolName, msg.ToolInput, msg.ToolResult, msg.ToolError)
			}
		}

		// Basic checks
		if len(messages) < 1 {
			t.Errorf("Should have at least one message, got %d", len(messages))
		}

		// The main thing we want to verify: when user cancels, the response processing stops
		// and appropriate messages are sent

		// Check if we have an error message about cancellation
		hasCancelErrorMessage := false
		for _, msg := range messages {
			if msg.Type == ErrorMessageType && msg.Content == userCancelMessage {
				hasCancelErrorMessage = true
				break
			}
		}

		// Check if we have a tool message with error
		hasToolError := false
		for _, msg := range messages {
			if msg.Type == ToolUseMessageType &&
				msg.ToolError && strings.Contains(msg.ToolResult, "user canceled") {
				hasToolError = true
				break
			}
		}

		// We should have at least one of these messages
		if !(hasCancelErrorMessage || hasToolError) {
			t.Errorf("Should have either an error message or a tool with error about cancellation")
		}
	})
}
+
// TestInnerLoop_UserDoesNotCancel is the control case for the cancellation
// tests: a full tool-use round trip with no cancellation, asserting that
// no cancellation-related messages appear on the outbox.
func TestInnerLoop_UserDoesNotCancel(t *testing.T) {
	mockConvo := NewMockConvo(t)

	agent := &Agent{
		convo:  mockConvo,
		inbox:  make(chan string, 100),
		outbox: make(chan AgentMessage, 100),
	}

	// Define test message
	// This simulates something that would result in claude
	// responding with tool_use responses.
	testMsg := "use test_tool for something"

	// Mock initial response with tool use
	initialResponse := &ant.MessageResponse{
		StopReason: ant.StopReasonToolUse,
		Content: []ant.Content{
			{
				Type:      ant.ContentTypeToolUse,
				ID:        "tool1",
				ToolName:  "test_tool",
				ToolInput: []byte(`{"param":"value"}`),
			},
		},
		Usage: ant.Usage{
			InputTokens:  100,
			OutputTokens: 200,
		},
	}

	// Set up expected behaviors.
	// A nil expected arg matches any message (see mock arg matching).
	mockConvo.ExpectCall("SendMessage", nil).Return(initialResponse, nil)

	toolUseContents := []ant.Content{
		{
			Type:       ant.ContentTypeToolResult,
			ToolUseID:  "tool1",
			Text:       "",
			ToolResult: "This is a tool result",
			ToolError:  false,
		},
	}
	toolUseResponse := &ant.MessageResponse{
		// StopReason: ant.StopReasonEndTurn,
		Content: []ant.Content{
			{
				Type: ant.ContentTypeText,
				Text: "tool_use contents accepted",
			},
		},
		Usage: ant.Usage{
			InputTokens:  50,
			OutputTokens: 75,
		},
	}

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Setting up the mock response for tool results
	mockConvo.ExpectCall("ToolResultContents", initialResponse).Return(toolUseContents, nil)
	mockConvo.ExpectCall("SendMessage", nil).Return(toolUseResponse, nil)
	// mockConvo, as a mock, isn't able to run the loop in ant.Convo that makes this agent.OnToolResult callback.
	// So we "mock" it out here by calling it explicitly, in order to make sure it calls .pushToOutbox with this message.
	// This is not a good situation.
	// ant.Convo and loop.Agent seem to be excessively coupled, and aware of each others' internal details.
	// TODO: refactor (or clarify in docs somewhere) the boundary between what ant.Convo is responsible
	// for vs what loop.Agent is responsible for.
	antConvo := &ant.Convo{}
	res := ""
	agent.OnToolResult(ctx, antConvo, "tool1", nil, toolUseContents[0], &res, nil)

	// Send a message to the agent's inbox
	agent.UserMessage(ctx, testMsg)

	// Run one iteration of InnerLoop
	agent.InnerLoop(ctx)

	// Verify results
	mockConvo.AssertExpectations(t)

	// Get all messages from outbox and verify their types/content
	var messages []AgentMessage

	// Collect messages until outbox is empty or we have 10 messages
	for i := 0; i < 10; i++ {
		select {
		case msg := <-agent.outbox:
			messages = append(messages, msg)
		default:
			// No more messages
			i = 10 // Exit the loop
		}
	}

	// Print out the messages we got for debugging
	t.Logf("Received %d messages from outbox", len(messages))
	for i, msg := range messages {
		t.Logf("Message %d: Type=%s, Content=%s, EndOfTurn=%t", i, msg.Type, msg.Content, msg.EndOfTurn)
		if msg.ToolName != "" {
			t.Logf("  Tool: Name=%s, Input=%s, Result=%s, Error=%v",
				msg.ToolName, msg.ToolInput, msg.ToolResult, msg.ToolError)
		}
	}

	// Basic checks
	if len(messages) < 1 {
		t.Errorf("Should have at least one message, got %d", len(messages))
	}

	// Since the user did not cancel, no cancellation artifacts should be
	// present on the outbox.

	// Check if we have an error message about cancellation
	hasCancelErrorMessage := false
	for _, msg := range messages {
		if msg.Type == ErrorMessageType && msg.Content == userCancelMessage {
			hasCancelErrorMessage = true
			break
		}
	}

	// Check if we have a tool message with error
	hasToolError := false
	for _, msg := range messages {
		if msg.Type == ToolUseMessageType &&
			msg.ToolError && strings.Contains(msg.ToolResult, "user canceled") {
			hasToolError = true
			break
		}
	}

	if hasCancelErrorMessage || hasToolError {
		t.Errorf("Should not have either an error message nor a tool with error about cancellation")
	}
}
diff --git a/loop/donetool.go b/loop/donetool.go
new file mode 100644
index 0000000..c02e5a5
--- /dev/null
+++ b/loop/donetool.go
@@ -0,0 +1,112 @@
+package loop
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+
+ "sketch.dev/ant"
+ "sketch.dev/claudetool"
+)
+
+// makeDoneTool creates a tool that provides a checklist to the agent. There
+// are some duplicative instructions here and in the system prompt, and it's
+// not as reliable as it could be. Historically, we've found that Claude ignores
+// the tool results here, so we don't tell the tool to say "hey, really check this"
+// at the moment, though we've tried.
+func makeDoneTool(codereview *claudetool.CodeReviewer, gitUsername, gitEmail string) *ant.Tool {
+ return &ant.Tool{
+ Name: "done",
+ Description: `Use this tool when you have achieved the user's goal. The parameters form a checklist which you should evaluate.`,
+ InputSchema: json.RawMessage(doneChecklistJSONSchema(gitUsername, gitEmail)),
+ Run: func(ctx context.Context, input json.RawMessage) (string, error) {
+ // Cannot be done with a messy git.
+ if err := codereview.RequireNormalGitState(ctx); err != nil {
+ return "", err
+ }
+ if err := codereview.RequireNoUncommittedChanges(ctx); err != nil {
+ return "", err
+ }
+ // Ensure that the current commit has been reviewed.
+ head, err := codereview.CurrentCommit(ctx)
+ if err == nil {
+ needsReview := !codereview.IsInitialCommit(head) && !codereview.HasReviewed(head)
+ if needsReview {
+ return "", fmt.Errorf("codereview tool has not been run for commit %v", head)
+ }
+ }
+ return `Please ask the user to review your work. Be concise - users are more likely to read shorter comments.`, nil
+ },
+ }
+}
+
+func doneChecklistJSONSchema(gitUsername, gitEmail string) string {
+ gitCommitDescription := fmt.Sprintf(`Create git commits for any code changes you made. Match the style of recent commit messages. Include 'Co-Authored-By: sketch' and the original user prompt. Use GIT_AUTHOR_NAME="%s" GIT_AUTHOR_EMAIL="%s" (not git config).`,
+ gitUsername, gitEmail)
+ desc, err := json.Marshal(gitCommitDescription)
+ if err != nil {
+ panic(err)
+ }
+ return doneChecklistJSONSchemaPrefix + string(desc) + doneChecklistJSONSchemaSuffix
+}
+
// TODO: this is ugly, maybe JSON-encode a deeply nested map[string]any instead? also ugly.
const (
	// doneChecklistJSONSchemaPrefix is the schema text up to and including
	// the git_commit item's "description": key. doneChecklistJSONSchema
	// splices a JSON-encoded description string between this prefix and
	// doneChecklistJSONSchemaSuffix.
	doneChecklistJSONSchemaPrefix = `{
  "$schema": "http://json-schema.org/draft-07/schema#",
  "title": "Checklist",
  "description": "A schema for tracking checklist items with status and comments",
  "type": "object",
  "required": ["checklist_items"],
  "properties": {
    "checklist_items": {
      "type": "object",
      "description": "Collection of checklist items",
      "properties": {
        "wrote_tests": {
          "$ref": "#/definitions/checklistItem",
          "description": "If code was changed, tests were written or updated."
        },
        "passes_tests": {
          "$ref": "#/definitions/checklistItem",
          "description": "If any commits were made, tests pass."
        },
        "code_reviewed": {
          "$ref": "#/definitions/checklistItem",
          "description": "If any commits were made, the codereview tool was run and its output was addressed."
        },
        "git_commit": {
          "$ref": "#/definitions/checklistItem",
          "description": `

	// doneChecklistJSONSchemaSuffix closes the git_commit item and the rest
	// of the schema, including the shared checklistItem definition that all
	// items reference.
	doneChecklistJSONSchemaSuffix = `
        }
      },
      "additionalProperties": {
        "$ref": "#/definitions/checklistItem"
      }
    }
  },
  "definitions": {
    "checklistItem": {
      "type": "object",
      "required": ["status"],
      "properties": {
        "status": {
          "type": "string",
          "description": "Current status of the checklist item",
          "enum": ["yes", "no", "not applicable", "other"]
        },
        "description": {
          "type": "string",
          "description": "Description of what this checklist item verifies"
        },
        "comments": {
          "type": "string",
          "description": "Additional comments or context for this checklist item"
        }
      }
    }
  }
}`
)
diff --git a/loop/mocks.go b/loop/mocks.go
new file mode 100644
index 0000000..264c6bc
--- /dev/null
+++ b/loop/mocks.go
@@ -0,0 +1,209 @@
+package loop
+
+import (
+ "context"
+ "reflect"
+ "sync"
+ "testing"
+
+ "sketch.dev/ant"
+)
+
// MockConvo is a custom mock for ant.Convo interface. It records every
// method call and matches calls against pre-registered expectations,
// failing the test on unexpected calls. Safe for concurrent use.
type MockConvo struct {
	mu sync.Mutex // guards calls and expectations
	t  *testing.T // test to fail when an unexpected call arrives

	// Maps method name to a list of calls with arguments and return values
	calls map[string][]*mockCall
	// Maps method name to expected calls; matched expectations are removed
	expectations map[string][]*mockExpectation
}
+
// mockCall records a single invocation of a mocked method.
type mockCall struct {
	args   []interface{} // arguments the method was invoked with
	result []interface{} // NOTE(review): never populated in this file — confirm it is needed
}
+
// mockExpectation describes one expected call: the arguments to match
// (nil entries match anything, per matchArgs) and the values to return.
type mockExpectation struct {
	until  chan any      // if non-nil, the matched call blocks receiving from this channel first
	args   []interface{} // expected arguments; nil entries act as wildcards
	result []interface{} // values the mocked method returns
}
+
+// Return sets up return values for an expectation
+func (e *mockExpectation) Return(values ...interface{}) {
+ e.result = values
+}
+
+// Return sets up return values for an expectation
+func (e *mockExpectation) BlockAndReturn(until chan any, values ...interface{}) {
+ e.until = until
+ e.result = values
+}
+
+// NewMockConvo creates a new mock Convo
+func NewMockConvo(t *testing.T) *MockConvo {
+ return &MockConvo{
+ t: t,
+ mu: sync.Mutex{},
+ calls: make(map[string][]*mockCall),
+ expectations: make(map[string][]*mockExpectation),
+ }
+}
+
+// ExpectCall sets up an expectation for a method call
+func (m *MockConvo) ExpectCall(method string, args ...interface{}) *mockExpectation {
+ m.mu.Lock()
+ defer m.mu.Unlock()
+ expectation := &mockExpectation{args: args}
+ if _, ok := m.expectations[method]; !ok {
+ m.expectations[method] = []*mockExpectation{}
+ }
+ m.expectations[method] = append(m.expectations[method], expectation)
+ return expectation
+}
+
+// findMatchingExpectation finds a matching expectation for a method call
+func (m *MockConvo) findMatchingExpectation(method string, args ...interface{}) (*mockExpectation, bool) {
+ m.mu.Lock()
+ defer m.mu.Unlock()
+ expectations, ok := m.expectations[method]
+ if !ok {
+ return nil, false
+ }
+
+ for i, exp := range expectations {
+ if matchArgs(exp.args, args) {
+ if exp.until != nil {
+ <-exp.until
+ }
+ // Remove the matched expectation
+ m.expectations[method] = append(expectations[:i], expectations[i+1:]...)
+ return exp, true
+ }
+ }
+ return nil, false
+}
+
// matchArgs reports whether the actual call arguments satisfy the expected
// ones. Lengths must agree; a nil expected entry matches any actual value,
// and everything else is compared with reflect.DeepEqual.
func matchArgs(expected, actual []interface{}) bool {
	if len(expected) != len(actual) {
		return false
	}
	for i := range expected {
		want := expected[i]
		if want == nil {
			// nil acts as a wildcard.
			continue
		}
		if !reflect.DeepEqual(want, actual[i]) {
			return false
		}
	}
	return true
}
+
+// recordCall records a method call
+func (m *MockConvo) recordCall(method string, args ...interface{}) {
+ m.mu.Lock()
+ defer m.mu.Unlock()
+ if _, ok := m.calls[method]; !ok {
+ m.calls[method] = []*mockCall{}
+ }
+ m.calls[method] = append(m.calls[method], &mockCall{args: args})
+}
+
+func (m *MockConvo) SendMessage(message ant.Message) (*ant.MessageResponse, error) {
+ m.recordCall("SendMessage", message)
+ exp, ok := m.findMatchingExpectation("SendMessage", message)
+ if !ok {
+ m.t.Errorf("unexpected call to SendMessage: %+v", message)
+ m.t.FailNow()
+ }
+ var retErr error
+ m.mu.Lock()
+ defer m.mu.Unlock()
+ if err, ok := exp.result[1].(error); ok {
+ retErr = err
+ }
+ return exp.result[0].(*ant.MessageResponse), retErr
+}
+
+func (m *MockConvo) SendUserTextMessage(message string, otherContents ...ant.Content) (*ant.MessageResponse, error) {
+ m.recordCall("SendUserTextMessage", message, otherContents)
+ exp, ok := m.findMatchingExpectation("SendUserTextMessage", message, otherContents)
+ if !ok {
+ m.t.Error("unexpected call to SendUserTextMessage")
+ m.t.FailNow()
+ }
+ var retErr error
+ m.mu.Lock()
+ defer m.mu.Unlock()
+ if err, ok := exp.result[1].(error); ok {
+ retErr = err
+ }
+ return exp.result[0].(*ant.MessageResponse), retErr
+}
+
+func (m *MockConvo) ToolResultContents(ctx context.Context, resp *ant.MessageResponse) ([]ant.Content, error) {
+ m.recordCall("ToolResultContents", resp)
+ exp, ok := m.findMatchingExpectation("ToolResultContents", resp)
+ if !ok {
+ m.t.Error("unexpected call to ToolResultContents")
+ m.t.FailNow()
+ }
+ m.mu.Lock()
+ defer m.mu.Unlock()
+ var retErr error
+ if err, ok := exp.result[1].(error); ok {
+ retErr = err
+ }
+
+ return exp.result[0].([]ant.Content), retErr
+}
+
+func (m *MockConvo) ToolResultCancelContents(resp *ant.MessageResponse) ([]ant.Content, error) {
+ m.recordCall("ToolResultCancelContents", resp)
+ exp, ok := m.findMatchingExpectation("ToolResultCancelContents", resp)
+ if !ok {
+ m.t.Error("unexpected call to ToolResultCancelContents")
+ m.t.FailNow()
+ }
+ var retErr error
+ m.mu.Lock()
+ defer m.mu.Unlock()
+ if err, ok := exp.result[1].(error); ok {
+ retErr = err
+ }
+
+ return exp.result[0].([]ant.Content), retErr
+}
+
+func (m *MockConvo) CumulativeUsage() ant.CumulativeUsage {
+ m.recordCall("CumulativeUsage")
+ return ant.CumulativeUsage{}
+}
+
// OverBudget mocks ant.Convo.OverBudget; it records the call and always
// reports the budget as not exceeded.
func (m *MockConvo) OverBudget() error {
	m.recordCall("OverBudget")
	return nil
}
+
// ResetBudget mocks ant.Convo.ResetBudget; it records the call and
// discards the supplied budget.
func (m *MockConvo) ResetBudget(_ ant.Budget) {
	m.recordCall("ResetBudget")
}
+
+// AssertExpectations checks that all expectations were met
+func (m *MockConvo) AssertExpectations(t *testing.T) {
+ m.mu.Lock()
+ defer m.mu.Unlock()
+
+ for method, expectations := range m.expectations {
+ if len(expectations) > 0 {
+ t.Errorf("not all expectations were met for method %s:", method)
+ }
+ }
+}
diff --git a/loop/server/loophttp.go b/loop/server/loophttp.go
new file mode 100644
index 0000000..0fd7ee1
--- /dev/null
+++ b/loop/server/loophttp.go
@@ -0,0 +1,753 @@
+// Package server provides HTTP server functionality for the sketch loop.
+package server
+
+import (
+ "encoding/base64"
+ "encoding/json"
+ "fmt"
+ "html"
+ "io"
+ "io/fs"
+ "log/slog"
+ "net/http"
+ "net/http/pprof"
+ "os"
+ "os/exec"
+ "strconv"
+ "strings"
+ "sync"
+ "syscall"
+ "time"
+
+ "github.com/creack/pty"
+ "sketch.dev/ant"
+ "sketch.dev/loop"
+ "sketch.dev/loop/webui"
+)
+
// terminalSession represents a terminal session: the shell process, its
// PTY, and the set of per-client channels receiving its output as events.
type terminalSession struct {
	pty                *os.File             // master side of the pseudo-terminal
	eventsClients      map[chan []byte]bool // output channels of connected SSE clients
	lastEventClientID  int                  // last ID handed to a connecting client
	eventsClientsMutex sync.Mutex           // guards eventsClients and lastEventClientID
	cmd                *exec.Cmd            // the shell process attached to the PTY
}
+
// TerminalMessage represents a control message sent from the client for
// terminal resize events (Type == "resize").
type TerminalMessage struct {
	Type string `json:"type"`
	Cols uint16 `json:"cols"` // new terminal width in columns
	Rows uint16 `json:"rows"` // new terminal height in rows
}
+
// TerminalResponse represents the response for a new terminal creation.
// NOTE(review): not referenced in this file — presumably consumed by the
// web UI client; confirm before removing.
type TerminalResponse struct {
	SessionID string `json:"sessionId"`
}
+
// Server serves sketch HTTP. Server implements http.Handler.
type Server struct {
	mux      *http.ServeMux
	agent    loop.CodingAgent // agent whose state and controls this server exposes
	hostname string           // cached hostname, computed once at construction
	logFile  *os.File         // optional log file served at /logs; may be nil
	// Mutex to protect terminalSessions
	ptyMutex         sync.Mutex
	terminalSessions map[string]*terminalSession // live sessions keyed by terminal ID ("1"-"9")
}
+
// ServeHTTP implements http.Handler by delegating to the server's mux.
func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	s.mux.ServeHTTP(w, r)
}
+
+// New creates a new HTTP server.
+func New(agent loop.CodingAgent, logFile *os.File) (*Server, error) {
+ s := &Server{
+ mux: http.NewServeMux(),
+ agent: agent,
+ hostname: getHostname(),
+ logFile: logFile,
+ terminalSessions: make(map[string]*terminalSession),
+ }
+
+ webBundle, err := webui.Build()
+ if err != nil {
+ return nil, fmt.Errorf("failed to build web bundle, did you run 'go generate sketch.dev/loop/...'?: %w", err)
+ }
+
+ s.mux.HandleFunc("/diff", func(w http.ResponseWriter, r *http.Request) {
+ // Check if a specific commit hash was requested
+ commit := r.URL.Query().Get("commit")
+
+ // Get the diff, optionally for a specific commit
+ var diff string
+ var err error
+ if commit != "" {
+ // Validate the commit hash format
+ if !isValidGitSHA(commit) {
+ http.Error(w, fmt.Sprintf("Invalid git commit SHA format: %s", commit), http.StatusBadRequest)
+ return
+ }
+
+ diff, err = agent.Diff(&commit)
+ } else {
+ diff, err = agent.Diff(nil)
+ }
+
+ if err != nil {
+ http.Error(w, fmt.Sprintf("Error generating diff: %v", err), http.StatusInternalServerError)
+ return
+ }
+
+ w.Header().Set("Content-Type", "text/plain")
+ w.Write([]byte(diff))
+ })
+
+ // Handler for initialization called by host sketch binary when inside docker.
+ s.mux.HandleFunc("/init", func(w http.ResponseWriter, r *http.Request) {
+ defer func() {
+ if err := recover(); err != nil {
+ slog.ErrorContext(r.Context(), "/init panic", slog.Any("recovered_err", err))
+
+ // Return an error response to the client
+ http.Error(w, fmt.Sprintf("panic: %v\n", err), http.StatusInternalServerError)
+ }
+ }()
+
+ if r.Method != "POST" {
+ http.Error(w, "POST required", http.StatusBadRequest)
+ return
+ }
+
+ body, err := io.ReadAll(r.Body)
+ r.Body.Close()
+ if err != nil {
+ http.Error(w, "failed to read request body: "+err.Error(), http.StatusBadRequest)
+ return
+ }
+ m := make(map[string]string)
+ if err := json.Unmarshal(body, &m); err != nil {
+ http.Error(w, "bad request body: "+err.Error(), http.StatusBadRequest)
+ return
+ }
+ hostAddr := m["host_addr"]
+ gitRemoteAddr := m["git_remote_addr"]
+ commit := m["commit"]
+ ini := loop.AgentInit{
+ WorkingDir: "/app",
+ InDocker: true,
+ Commit: commit,
+ GitRemoteAddr: gitRemoteAddr,
+ HostAddr: hostAddr,
+ }
+ if err := agent.Init(ini); err != nil {
+ http.Error(w, "init failed: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+ w.Header().Set("Content-Type", "application/json")
+ io.WriteString(w, "{}\n")
+ })
+
+ // Handler for /messages?start=N&end=M (start/end are optional)
+ s.mux.HandleFunc("/messages", func(w http.ResponseWriter, r *http.Request) {
+ w.Header().Set("Content-Type", "application/json")
+
+ // Extract query parameters for range
+ var start, end int
+ var err error
+
+ currentCount := agent.MessageCount()
+
+ startParam := r.URL.Query().Get("start")
+ if startParam != "" {
+ start, err = strconv.Atoi(startParam)
+ if err != nil {
+ http.Error(w, "Invalid 'start' parameter", http.StatusBadRequest)
+ return
+ }
+ }
+
+ endParam := r.URL.Query().Get("end")
+ if endParam != "" {
+ end, err = strconv.Atoi(endParam)
+ if err != nil {
+ http.Error(w, "Invalid 'end' parameter", http.StatusBadRequest)
+ return
+ }
+ } else {
+ end = currentCount
+ }
+
+ if start < 0 || start > end || end > currentCount {
+ http.Error(w, fmt.Sprintf("Invalid range: start %d end %d currentCount %d", start, end, currentCount), http.StatusBadRequest)
+ return
+ }
+
+ start = max(0, start)
+ end = min(agent.MessageCount(), end)
+ messages := agent.Messages(start, end)
+
+ // Create a JSON encoder with indentation for pretty-printing
+ encoder := json.NewEncoder(w)
+ encoder.SetIndent("", " ") // Two spaces for each indentation level
+
+ err = encoder.Encode(messages)
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ }
+ })
+
+ // Handler for /logs - displays the contents of the log file
+ s.mux.HandleFunc("/logs", func(w http.ResponseWriter, r *http.Request) {
+ if s.logFile == nil {
+ http.Error(w, "log file not set", http.StatusNotFound)
+ return
+ }
+ logContents, err := os.ReadFile(s.logFile.Name())
+ if err != nil {
+ http.Error(w, "error reading log file: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+ w.Header().Set("Content-Type", "text/html; charset=utf-8")
+ fmt.Fprintf(w, "<!DOCTYPE html>\n<html>\n<head>\n<title>Sketchy Log File</title>\n</head>\n<body>\n")
+ fmt.Fprintf(w, "<pre>%s</pre>\n", html.EscapeString(string(logContents)))
+ fmt.Fprintf(w, "</body>\n</html>")
+ })
+
+ // Handler for /download - downloads both messages and status as a JSON file
+ s.mux.HandleFunc("/download", func(w http.ResponseWriter, r *http.Request) {
+ // Set headers for file download
+ w.Header().Set("Content-Type", "application/octet-stream")
+
+ // Generate filename with format: sketch-YYYYMMDD-HHMMSS.json
+ timestamp := time.Now().Format("20060102-150405")
+ filename := fmt.Sprintf("sketch-%s.json", timestamp)
+
+ w.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=\"%s\"", filename))
+
+ // Get all messages
+ messageCount := agent.MessageCount()
+ messages := agent.Messages(0, messageCount)
+
+ // Get status information (usage and other metadata)
+ totalUsage := agent.TotalUsage()
+ hostname := getHostname()
+ workingDir := getWorkingDir()
+
+ // Create a combined structure with all information
+ downloadData := struct {
+ Messages []loop.AgentMessage `json:"messages"`
+ MessageCount int `json:"message_count"`
+ TotalUsage ant.CumulativeUsage `json:"total_usage"`
+ Hostname string `json:"hostname"`
+ WorkingDir string `json:"working_dir"`
+ DownloadTime string `json:"download_time"`
+ }{
+ Messages: messages,
+ MessageCount: messageCount,
+ TotalUsage: totalUsage,
+ Hostname: hostname,
+ WorkingDir: workingDir,
+ DownloadTime: time.Now().Format(time.RFC3339),
+ }
+
+ // Marshal the JSON with indentation for better readability
+ jsonData, err := json.MarshalIndent(downloadData, "", " ")
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ return
+ }
+ w.Write(jsonData)
+ })
+
+ // The latter doesn't return until the number of messages has changed (from seen
+ // or from when this was called.)
+ s.mux.HandleFunc("/state", func(w http.ResponseWriter, r *http.Request) {
+ pollParam := r.URL.Query().Get("poll")
+ seenParam := r.URL.Query().Get("seen")
+
+ // Get the client's current message count (if provided)
+ clientMessageCount := -1
+ var err error
+ if seenParam != "" {
+ clientMessageCount, err = strconv.Atoi(seenParam)
+ if err != nil {
+ http.Error(w, "Invalid 'seen' parameter", http.StatusBadRequest)
+ return
+ }
+ }
+
+ serverMessageCount := agent.MessageCount()
+
+ // Let lazy clients not have to specify this.
+ if clientMessageCount == -1 {
+ clientMessageCount = serverMessageCount
+ }
+
+ if pollParam == "true" {
+ ch := make(chan string)
+ go func() {
+ // This is your blocking operation
+ agent.WaitForMessageCount(r.Context(), clientMessageCount)
+ close(ch)
+ }()
+ select {
+ case <-r.Context().Done():
+ slog.DebugContext(r.Context(), "abandoned poll request")
+ return
+ case <-time.After(90 * time.Second):
+ // Let the user call /state again to get the latest to limit how long our long polls hang out.
+ slog.DebugContext(r.Context(), "longish poll request")
+ break
+ case <-ch:
+ break
+ }
+ }
+
+ serverMessageCount = agent.MessageCount()
+ totalUsage := agent.TotalUsage()
+
+ w.Header().Set("Content-Type", "application/json")
+
+ state := struct {
+ MessageCount int `json:"message_count"`
+ TotalUsage ant.CumulativeUsage `json:"total_usage"`
+ Hostname string `json:"hostname"`
+ WorkingDir string `json:"working_dir"`
+ InitialCommit string `json:"initial_commit"`
+ Title string `json:"title"`
+ OS string `json:"os"`
+ }{
+ MessageCount: serverMessageCount,
+ TotalUsage: totalUsage,
+ Hostname: s.hostname,
+ WorkingDir: getWorkingDir(),
+ InitialCommit: agent.InitialCommit(),
+ Title: agent.Title(),
+ OS: agent.OS(),
+ }
+
+ // Create a JSON encoder with indentation for pretty-printing
+ encoder := json.NewEncoder(w)
+ encoder.SetIndent("", " ") // Two spaces for each indentation level
+
+ err = encoder.Encode(state)
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ }
+ })
+
+ s.mux.Handle("/static/", http.StripPrefix("/static/", http.FileServerFS(webBundle)))
+
+ // Terminal WebSocket handler
+ // Terminal endpoints - predefined terminals 1-9
+ // TODO: The UI doesn't actually know how to use terminals 2-9!
+ s.mux.HandleFunc("/terminal/events/", func(w http.ResponseWriter, r *http.Request) {
+ if r.Method != http.MethodGet {
+ http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
+ return
+ }
+ pathParts := strings.Split(r.URL.Path, "/")
+ if len(pathParts) < 4 {
+ http.Error(w, "Invalid terminal ID", http.StatusBadRequest)
+ return
+ }
+
+ sessionID := pathParts[3]
+ // Validate that the terminal ID is between 1-9
+ if len(sessionID) != 1 || sessionID[0] < '1' || sessionID[0] > '9' {
+ http.Error(w, "Terminal ID must be between 1 and 9", http.StatusBadRequest)
+ return
+ }
+
+ s.handleTerminalEvents(w, r, sessionID)
+ })
+
+ s.mux.HandleFunc("/terminal/input/", func(w http.ResponseWriter, r *http.Request) {
+ if r.Method != http.MethodPost {
+ http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
+ return
+ }
+ pathParts := strings.Split(r.URL.Path, "/")
+ if len(pathParts) < 4 {
+ http.Error(w, "Invalid terminal ID", http.StatusBadRequest)
+ return
+ }
+ sessionID := pathParts[3]
+ s.handleTerminalInput(w, r, sessionID)
+ })
+
+ s.mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
+ // Serve the timeline.html file directly from the embedded filesystem
+ data, err := fs.ReadFile(webBundle, "timeline.html")
+ if err != nil {
+ http.Error(w, "File not found", http.StatusNotFound)
+ return
+ }
+ w.Header().Set("Content-Type", "text/html")
+ w.Write(data)
+ })
+
+ // Handler for POST /chat
+ s.mux.HandleFunc("/chat", func(w http.ResponseWriter, r *http.Request) {
+ if r.Method != http.MethodPost {
+ http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
+ return
+ }
+
+ // Parse the request body
+ var requestBody struct {
+ Message string `json:"message"`
+ }
+
+ decoder := json.NewDecoder(r.Body)
+ if err := decoder.Decode(&requestBody); err != nil {
+ http.Error(w, "Invalid request body: "+err.Error(), http.StatusBadRequest)
+ return
+ }
+ defer r.Body.Close()
+
+ if requestBody.Message == "" {
+ http.Error(w, "Message cannot be empty", http.StatusBadRequest)
+ return
+ }
+
+ agent.UserMessage(r.Context(), requestBody.Message)
+
+ w.WriteHeader(http.StatusOK)
+ })
+
+ // Handler for /cancel - cancels the current inner loop in progress
+ s.mux.HandleFunc("/cancel", func(w http.ResponseWriter, r *http.Request) {
+ if r.Method != http.MethodPost {
+ http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
+ return
+ }
+
+ // Parse the request body (optional)
+ var requestBody struct {
+ Reason string `json:"reason"`
+ ToolCallID string `json:"tool_call_id"`
+ }
+
+ decoder := json.NewDecoder(r.Body)
+ if err := decoder.Decode(&requestBody); err != nil && err != io.EOF {
+ http.Error(w, "Invalid request body: "+err.Error(), http.StatusBadRequest)
+ return
+ }
+ defer r.Body.Close()
+
+ cancelReason := "user requested cancellation"
+ if requestBody.Reason != "" {
+ cancelReason = requestBody.Reason
+ }
+
+ if requestBody.ToolCallID != "" {
+ err := agent.CancelToolUse(requestBody.ToolCallID, fmt.Errorf("%s", cancelReason))
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusBadRequest)
+ return
+ }
+ // Return a success response
+ w.Header().Set("Content-Type", "application/json")
+ json.NewEncoder(w).Encode(map[string]string{
+ "status": "cancelled",
+ "too_use_id": requestBody.ToolCallID,
+ "reason": cancelReason})
+ return
+ }
+ // Call the CancelInnerLoop method
+ agent.CancelInnerLoop(fmt.Errorf("%s", cancelReason))
+ // Return a success response
+ w.Header().Set("Content-Type", "application/json")
+ json.NewEncoder(w).Encode(map[string]string{"status": "cancelled", "reason": cancelReason})
+ })
+
+ debugMux := initDebugMux()
+ s.mux.HandleFunc("/debug/", func(w http.ResponseWriter, r *http.Request) {
+ debugMux.ServeHTTP(w, r)
+ })
+
+ return s, nil
+}
+
// Utility functions

// getHostname returns the machine's hostname, or "unknown" if it cannot
// be determined.
func getHostname() string {
	if hostname, err := os.Hostname(); err == nil {
		return hostname
	}
	return "unknown"
}
+
// getWorkingDir returns the process's current working directory, or
// "unknown" if it cannot be determined.
func getWorkingDir() string {
	if wd, err := os.Getwd(); err == nil {
		return wd
	}
	return "unknown"
}
+
+// createTerminalSession creates a new terminal session with the given ID
+func (s *Server) createTerminalSession(sessionID string) (*terminalSession, error) {
+ // Start a new shell process
+ shellPath := getShellPath()
+ cmd := exec.Command(shellPath)
+
+ // Get working directory from the agent if possible
+ workDir := getWorkingDir()
+ cmd.Dir = workDir
+
+ // Set up environment
+ cmd.Env = append(os.Environ(), "TERM=xterm-256color")
+
+ // Start the command with a pty
+ ptmx, err := pty.Start(cmd)
+ if err != nil {
+ slog.Error("Failed to start pty", "error", err)
+ return nil, err
+ }
+
+ // Create the terminal session
+ session := &terminalSession{
+ pty: ptmx,
+ eventsClients: make(map[chan []byte]bool),
+ cmd: cmd,
+ }
+
+ // Start goroutine to read from pty and broadcast to all connected SSE clients
+ go s.readFromPtyAndBroadcast(sessionID, session)
+
+ return session, nil
+} // handleTerminalEvents handles SSE connections for terminal output
+func (s *Server) handleTerminalEvents(w http.ResponseWriter, r *http.Request, sessionID string) {
+ // Check if the session exists, if not, create it
+ s.ptyMutex.Lock()
+ session, exists := s.terminalSessions[sessionID]
+
+ if !exists {
+ // Create a new terminal session
+ var err error
+ session, err = s.createTerminalSession(sessionID)
+ if err != nil {
+ s.ptyMutex.Unlock()
+ http.Error(w, fmt.Sprintf("Failed to create terminal: %v", err), http.StatusInternalServerError)
+ return
+ }
+
+ // Store the new session
+ s.terminalSessions[sessionID] = session
+ }
+ s.ptyMutex.Unlock()
+
+ // Set headers for SSE
+ w.Header().Set("Content-Type", "text/event-stream")
+ w.Header().Set("Cache-Control", "no-cache")
+ w.Header().Set("Connection", "keep-alive")
+ w.Header().Set("Access-Control-Allow-Origin", "*")
+
+ // Create a channel for this client
+ events := make(chan []byte, 4096) // Buffer to prevent blocking
+
+ // Register this client's channel
+ session.eventsClientsMutex.Lock()
+ clientID := session.lastEventClientID + 1
+ session.lastEventClientID = clientID
+ session.eventsClients[events] = true
+ session.eventsClientsMutex.Unlock()
+
+ // When the client disconnects, remove their channel
+ defer func() {
+ session.eventsClientsMutex.Lock()
+ delete(session.eventsClients, events)
+ close(events)
+ session.eventsClientsMutex.Unlock()
+ }()
+
+ // Flush to send headers to client immediately
+ if f, ok := w.(http.Flusher); ok {
+ f.Flush()
+ }
+
+ // Send events to the client as they arrive
+ for {
+ select {
+ case <-r.Context().Done():
+ return
+ case data := <-events:
+ // Format as SSE with base64 encoding
+ fmt.Fprintf(w, "data: %s\n\n", base64.StdEncoding.EncodeToString(data))
+
+ // Flush the data immediately
+ if f, ok := w.(http.Flusher); ok {
+ f.Flush()
+ }
+ }
+ }
+}
+
+// handleTerminalInput processes input to the terminal
+func (s *Server) handleTerminalInput(w http.ResponseWriter, r *http.Request, sessionID string) {
+ // Check if the session exists
+ s.ptyMutex.Lock()
+ session, exists := s.terminalSessions[sessionID]
+ s.ptyMutex.Unlock()
+
+ if !exists {
+ http.Error(w, "Terminal session not found", http.StatusNotFound)
+ return
+ }
+
+ // Read the request body (terminal input or resize command)
+ body, err := io.ReadAll(r.Body)
+ if err != nil {
+ http.Error(w, "Failed to read request body", http.StatusBadRequest)
+ return
+ }
+
+ // Check if it's a resize message
+ if len(body) > 0 && body[0] == '{' {
+ var msg TerminalMessage
+ if err := json.Unmarshal(body, &msg); err == nil && msg.Type == "resize" {
+ if msg.Cols > 0 && msg.Rows > 0 {
+ pty.Setsize(session.pty, &pty.Winsize{
+ Cols: msg.Cols,
+ Rows: msg.Rows,
+ })
+
+ // Respond with success
+ w.WriteHeader(http.StatusOK)
+ return
+ }
+ }
+ }
+
+ // Regular terminal input
+ _, err = session.pty.Write(body)
+ if err != nil {
+ slog.Error("Failed to write to pty", "error", err)
+ http.Error(w, "Failed to write to terminal", http.StatusInternalServerError)
+ return
+ }
+
+ // Respond with success
+ w.WriteHeader(http.StatusOK)
+}
+
// readFromPtyAndBroadcast reads output from the PTY and broadcasts it to all
// connected clients. It runs until a PTY read fails (e.g. the shell exits),
// then tears the session down: it unregisters the session, closes the PTY,
// terminates the shell process, and closes all client channels.
func (s *Server) readFromPtyAndBroadcast(sessionID string, session *terminalSession) {
	buf := make([]byte, 4096)
	defer func() {
		// Clean up when done
		s.ptyMutex.Lock()
		delete(s.terminalSessions, sessionID)
		s.ptyMutex.Unlock()

		// Close the PTY
		session.pty.Close()

		// Ensure process is terminated: SIGTERM first, then a hard Kill
		// shortly afterwards.
		if session.cmd.Process != nil {
			session.cmd.Process.Signal(syscall.SIGTERM)
			time.Sleep(100 * time.Millisecond)
			session.cmd.Process.Kill()
		}

		// Close all client channels. Holding eventsClientsMutex keeps this
		// serialized with registration/unregistration in the SSE handlers.
		session.eventsClientsMutex.Lock()
		for ch := range session.eventsClients {
			delete(session.eventsClients, ch)
			close(ch)
		}
		session.eventsClientsMutex.Unlock()
	}()

	for {
		n, err := session.pty.Read(buf)
		if err != nil {
			// EOF is the normal end-of-session signal; anything else is logged.
			if err != io.EOF {
				slog.Error("Failed to read from pty", "error", err)
			}
			break
		}

		// Make a copy of the data for each client; buf is reused on the
		// next Read.
		data := make([]byte, n)
		copy(data, buf[:n])

		// Broadcast to all connected clients
		session.eventsClientsMutex.Lock()
		for ch := range session.eventsClients {
			// Try to send, but don't block if channel is full
			select {
			case ch <- data:
			default:
				// Channel is full, drop the message for this client
			}
		}
		session.eventsClientsMutex.Unlock()
	}
}
+
// getShellPath picks the shell for new terminal sessions: $SHELL if set,
// otherwise /bin/bash when present, otherwise /bin/sh.
func getShellPath() string {
	// Prefer the user's configured shell.
	if shell := os.Getenv("SHELL"); shell != "" {
		return shell
	}
	// Default to bash on Unix-like systems when available.
	if _, err := os.Stat("/bin/bash"); err == nil {
		return "/bin/bash"
	}
	// Last resort.
	return "/bin/sh"
}
+
// initDebugMux builds the mux served under /debug/: a small index page plus
// the net/http/pprof handlers. (Uses Go 1.22+ method-qualified patterns.)
// The previous index page linked to /debug/metrics, which has no registered
// handler on this mux and always 404ed; the dead link is removed.
func initDebugMux() *http.ServeMux {
	mux := http.NewServeMux()
	mux.HandleFunc("GET /debug/{$}", func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "text/html; charset=utf-8")
		fmt.Fprintf(w, `<!doctype html>
	<html><head><title>sketch debug</title></head><body>
	<h1>sketch debug</h1>
	<ul>
	<li><a href="/debug/pprof/cmdline">pprof/cmdline</a></li>
	<li><a href="/debug/pprof/profile">pprof/profile</a></li>
	<li><a href="/debug/pprof/symbol">pprof/symbol</a></li>
	<li><a href="/debug/pprof/trace">pprof/trace</a></li>
	<li><a href="/debug/pprof/goroutine?debug=1">pprof/goroutine?debug=1</a></li>
	</ul>
	</body>
	</html>
	`)
	})
	mux.HandleFunc("GET /debug/pprof/", pprof.Index)
	mux.HandleFunc("GET /debug/pprof/cmdline", pprof.Cmdline)
	mux.HandleFunc("GET /debug/pprof/profile", pprof.Profile)
	mux.HandleFunc("GET /debug/pprof/symbol", pprof.Symbol)
	mux.HandleFunc("GET /debug/pprof/trace", pprof.Trace)
	return mux
}
+
// isValidGitSHA reports whether sha looks like a valid (possibly
// abbreviated) git SHA hash: a hexadecimal string of 4 to 40 characters.
// Git SHAs are typically 7, 8, or 40 characters long.
func isValidGitSHA(sha string) bool {
	// Length window: at least 4, at most the full 40-character SHA-1.
	if len(sha) < 4 || len(sha) > 40 {
		return false
	}

	// Every byte must be a hex digit (either case).
	for i := 0; i < len(sha); i++ {
		switch c := sha[i]; {
		case c >= '0' && c <= '9':
		case c >= 'a' && c <= 'f':
		case c >= 'A' && c <= 'F':
		default:
			return false
		}
	}

	return true
}
diff --git a/loop/testdata/agent_loop.httprr b/loop/testdata/agent_loop.httprr
new file mode 100644
index 0000000..d762fc6
--- /dev/null
+++ b/loop/testdata/agent_loop.httprr
@@ -0,0 +1,260 @@
+httprr trace v1
+8492 1723
+POST https://api.anthropic.com/v1/messages HTTP/1.1
+Host: api.anthropic.com
+User-Agent: Go-http-client/1.1
+Content-Length: 8295
+Anthropic-Version: 2023-06-01
+Content-Type: application/json
+
+{
+ "model": "claude-3-7-sonnet-20250219",
+ "messages": [
+ {
+ "role": "user",
+ "content": [
+ {
+ "type": "text",
+ "text": "What tools are available to you? Please just list them briefly.",
+ "cache_control": {
+ "type": "ephemeral"
+ }
+ }
+ ]
+ }
+ ],
+ "max_tokens": 8192,
+ "tools": [
+ {
+ "name": "bash",
+ "description": "Executes a shell command using bash -c with an optional timeout, returning combined stdout and stderr.\n\nExecutables pre-installed in this environment include:\n- standard unix tools\n- go\n- git\n- rg\n- jq\n- gopls\n- sqlite\n- fzf\n- gh\n- python3",
+ "input_schema": {
+ "type": "object",
+ "required": [
+ "command"
+ ],
+ "properties": {
+ "command": {
+ "type": "string",
+ "description": "Shell script to execute"
+ },
+ "timeout": {
+ "type": "string",
+ "description": "Timeout as a Go duration string, defaults to '1m'"
+ }
+ }
+ }
+ },
+ {
+ "name": "keyword_search",
+ "description": "\nkeyword_search locates files with a search-and-filter approach.\nUse when navigating unfamiliar codebases with only conceptual understanding or vague user questions.\n\nEffective use:\n- Provide a detailed query for accurate relevance ranking\n- Include extensive but uncommon keywords to ensure comprehensive results\n- Order keywords by importance (most important first) - less important keywords may be dropped if there are too many results\n\nIMPORTANT: Do NOT use this tool if you have precise information like log lines, error messages, filenames, symbols, or package names. Use direct approaches (grep, cat, go doc, etc.) instead.\n",
+ "input_schema": {
+ "type": "object",
+ "required": [
+ "query",
+ "keywords"
+ ],
+ "properties": {
+ "query": {
+ "type": "string",
+ "description": "A detailed statement of what you're trying to find or learn."
+ },
+ "keywords": {
+ "type": "array",
+ "items": {
+ "type": "string"
+ },
+ "description": "List of keywords in descending order of importance."
+ }
+ }
+ }
+ },
+ {
+ "name": "think",
+ "description": "Think out loud, take notes, form plans. Has no external effects.",
+ "input_schema": {
+ "type": "object",
+ "required": [
+ "thoughts"
+ ],
+ "properties": {
+ "thoughts": {
+ "type": "string",
+ "description": "The thoughts, notes, or plans to record"
+ }
+ }
+ }
+ },
+ {
+ "name": "title",
+ "description": "Use this tool early in the conversation, BEFORE MAKING ANY GIT COMMITS, to summarize what the chat is about briefly.",
+ "input_schema": {
+ "type": "object",
+ "properties": {
+ "title": {
+ "type": "string",
+ "description": "A brief title summarizing what this chat is about"
+ }
+ },
+ "required": [
+ "title"
+ ]
+ }
+ },
+ {
+ "name": "done",
+ "description": "Use this tool when you have achieved the user's goal. The parameters form a checklist which you should evaluate.",
+ "input_schema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Checklist",
+ "description": "A schema for tracking checklist items with status and comments",
+ "type": "object",
+ "required": [
+ "checklist_items"
+ ],
+ "properties": {
+ "checklist_items": {
+ "type": "object",
+ "description": "Collection of checklist items",
+ "properties": {
+ "wrote_tests": {
+ "$ref": "#/definitions/checklistItem",
+ "description": "If code was changed, tests were written or updated."
+ },
+ "passes_tests": {
+ "$ref": "#/definitions/checklistItem",
+ "description": "If any commits were made, tests pass."
+ },
+ "code_reviewed": {
+ "$ref": "#/definitions/checklistItem",
+ "description": "If any commits were made, the codereview tool was run and its output was addressed."
+ },
+ "git_commit": {
+ "$ref": "#/definitions/checklistItem",
+ "description": "Create git commits for any code changes you made. Match the style of recent commit messages. Include 'Co-Authored-By: sketch' and the original user prompt. Use GIT_AUTHOR_NAME=\"Test Agent\" GIT_AUTHOR_EMAIL=\"totallyhuman@sketch.dev\" (not git config)."
+ }
+ },
+ "additionalProperties": {
+ "$ref": "#/definitions/checklistItem"
+ }
+ }
+ },
+ "definitions": {
+ "checklistItem": {
+ "type": "object",
+ "required": [
+ "status"
+ ],
+ "properties": {
+ "status": {
+ "type": "string",
+ "description": "Current status of the checklist item",
+ "enum": [
+ "yes",
+ "no",
+ "not applicable",
+ "other"
+ ]
+ },
+ "description": {
+ "type": "string",
+ "description": "Description of what this checklist item verifies"
+ },
+ "comments": {
+ "type": "string",
+ "description": "Additional comments or context for this checklist item"
+ }
+ }
+ }
+ }
+ }
+ },
+ {
+ "name": "codereview",
+ "description": "Run an automated code review.",
+ "input_schema": {
+ "type": "object"
+ }
+ },
+ {
+ "name": "patch",
+ "description": "File modification tool for precise text edits.\n\nOperations:\n- replace: Substitute text with new content\n- append_eof: Append new text at the end of the file\n- prepend_bof: Insert new text at the beginning of the file\n- overwrite: Replace the entire file with new content (automatically creates the file)\n\nUsage notes:\n- All inputs are interpreted literally (no automatic newline or whitespace handling)\n- For replace operations, oldText must appear EXACTLY ONCE in the file",
+ "input_schema": {
+ "type": "object",
+ "required": [
+ "path",
+ "patches"
+ ],
+ "properties": {
+ "path": {
+ "type": "string",
+ "description": "Absolute path to the file to patch"
+ },
+ "patches": {
+ "type": "array",
+ "description": "List of patch requests to apply",
+ "items": {
+ "type": "object",
+ "required": [
+ "operation",
+ "newText"
+ ],
+ "properties": {
+ "operation": {
+ "type": "string",
+ "enum": [
+ "replace",
+ "append_eof",
+ "prepend_bof",
+ "overwrite"
+ ],
+ "description": "Type of operation to perform"
+ },
+ "oldText": {
+ "type": "string",
+ "description": "Text to locate for the operation (must be unique in file, required for replace)"
+ },
+ "newText": {
+ "type": "string",
+ "description": "The new text to use (empty for deletions)"
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ ],
+ "system": [
+ {
+ "text": "\nYou are an expert coding assistant and architect, with a specialty in Go.\nYou are assisting the user to achieve their goals.\n\nStart by asking concise clarifying questions as needed.\nOnce the intent is clear, work autonomously.\n\nCall the title tool early in the conversation to provide a brief summary of\nwhat the chat is about.\n\nBreak down the overall goal into a series of smaller steps.\n(The first step is often: \"Make a plan.\")\nThen execute each step using tools.\nUpdate the plan if you have encountered problems or learned new information.\n\nWhen in doubt about a step, follow this broad workflow:\n\n- Think about how the current step fits into the overall plan.\n- Do research. Good tool choices: bash, think, keyword_search\n- Make edits.\n- Repeat.\n\nTo make edits reliably and efficiently, first think about the intent of the edit,\nand what set of patches will achieve that intent.\nThen use the patch tool to make those edits. Combine all edits to any given file into a single patch tool call.\n\nFor renames or refactors, consider invoking gopls (via bash).\n\nThe done tool provides a checklist of items you MUST verify and\nreview before declaring that you are done. Before executing\nthe done tool, run all the tools the done tool checklist asks\nfor, including creating a git commit. Do not forget to run tests.\n\n\u003cplatform\u003e\nlinux/amd64\n\u003c/platform\u003e\n\u003cpwd\u003e\n/\n\u003c/pwd\u003e\n\u003cgit_root\u003e\n\n\u003c/git_root\u003e\n",
+ "type": "text",
+ "cache_control": {
+ "type": "ephemeral"
+ }
+ }
+ ]
+}HTTP/2.0 200 OK
+Anthropic-Organization-Id: 3c473a21-7208-450a-a9f8-80aebda45c1b
+Anthropic-Ratelimit-Input-Tokens-Limit: 200000
+Anthropic-Ratelimit-Input-Tokens-Remaining: 200000
+Anthropic-Ratelimit-Input-Tokens-Reset: 2025-04-05T23:01:05Z
+Anthropic-Ratelimit-Output-Tokens-Limit: 80000
+Anthropic-Ratelimit-Output-Tokens-Remaining: 80000
+Anthropic-Ratelimit-Output-Tokens-Reset: 2025-04-05T23:01:08Z
+Anthropic-Ratelimit-Requests-Limit: 4000
+Anthropic-Ratelimit-Requests-Remaining: 3999
+Anthropic-Ratelimit-Requests-Reset: 2025-04-05T23:01:05Z
+Anthropic-Ratelimit-Tokens-Limit: 280000
+Anthropic-Ratelimit-Tokens-Remaining: 280000
+Anthropic-Ratelimit-Tokens-Reset: 2025-04-05T23:01:05Z
+Cf-Cache-Status: DYNAMIC
+Cf-Ray: 92bcaa52a9e4cfc8-SJC
+Content-Type: application/json
+Date: Sat, 05 Apr 2025 23:01:08 GMT
+Request-Id: req_01GT1wg7toE5VY2k2b2WiX13
+Server: cloudflare
+Via: 1.1 google
+X-Robots-Tag: none
+
+{"id":"msg_01N9DMvaYosVobjJaYB2SiDd","type":"message","role":"assistant","model":"claude-3-7-sonnet-20250219","content":[{"type":"text","text":"Here's a brief list of tools available to me:\n\n1. bash - Execute shell commands\n2. keyword_search - Search for files using keywords\n3. think - Record thoughts or plans (no external effects)\n4. title - Summarize conversation topic\n5. done - Mark when user's goal is achieved\n6. codereview - Run automated code review\n7. patch - Make precise text edits to files\n\nThese tools allow me to navigate codebases, execute commands, make code changes, and help you achieve your goals effectively."}],"stop_reason":"end_turn","stop_sequence":null,"usage":{"input_tokens":4,"cache_creation_input_tokens":2074,"cache_read_input_tokens":0,"output_tokens":119}}
\ No newline at end of file
diff --git a/loop/update_tests.sh b/loop/update_tests.sh
new file mode 100755
index 0000000..f6be384
--- /dev/null
+++ b/loop/update_tests.sh
@@ -0,0 +1,4 @@
+#!/bin/bash
+
+go test -httprecord .
+go test
diff --git a/loop/webui/Makefile b/loop/webui/Makefile
new file mode 100644
index 0000000..51b4bb7
--- /dev/null
+++ b/loop/webui/Makefile
@@ -0,0 +1,19 @@
+all: install check build-tailwind
+
+install:
+ npm ci
+
+# TypeScript type checking
+# Note: The actual esbuild bundling happens in esbuild.go
+check:
+ npx tsc --noEmit
+
+build-tailwind:
+ npx postcss ./src/input.css -o ./src/tailwind.css
+
+watch-tailwind:
+ npx postcss -i ./src/input.css -o ./src/tailwind.css --watch
+
+clean:
+ rm -rf node_modules
+ -rm -f ./src/tailwind.css
diff --git a/loop/webui/esbuild.go b/loop/webui/esbuild.go
new file mode 100644
index 0000000..968127c
--- /dev/null
+++ b/loop/webui/esbuild.go
@@ -0,0 +1,239 @@
+// Package webui provides the web interface for the sketch loop.
+// It bundles typescript files into JavaScript using esbuild.
+//
+// This is substantially the same mechanism as /esbuild.go in this repo as well.
+package webui
+
+import (
+ "crypto/sha256"
+ "embed"
+ "encoding/hex"
+ "fmt"
+ "io"
+ "io/fs"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "strings"
+
+ esbuildcli "github.com/evanw/esbuild/pkg/cli"
+)
+
+//go:embed package.json package-lock.json src tsconfig.json postcss.config.js tailwind.config.js
+var embedded embed.FS
+
+func embeddedHash() (string, error) {
+ h := sha256.New()
+ err := fs.WalkDir(embedded, ".", func(path string, d fs.DirEntry, err error) error {
+ if d.IsDir() {
+ return nil
+ }
+ f, err := embedded.Open(path)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+ if _, err := io.Copy(h, f); err != nil {
+ return fmt.Errorf("%s: %w", path, err)
+ }
+ return nil
+ })
+ if err != nil {
+ return "", fmt.Errorf("embedded hash: %w", err)
+ }
+ return hex.EncodeToString(h.Sum(nil)), nil
+}
+
+func cleanBuildDir(buildDir string) error {
+ err := fs.WalkDir(os.DirFS(buildDir), ".", func(path string, d fs.DirEntry, err error) error {
+ if d.Name() == "." {
+ return nil
+ }
+ if d.Name() == "node_modules" {
+ return fs.SkipDir
+ }
+ osPath := filepath.Join(buildDir, path)
+ fmt.Printf("removing %s\n", osPath)
+ os.RemoveAll(osPath)
+ if d.IsDir() {
+ return fs.SkipDir
+ }
+ return nil
+ })
+ if err != nil {
+ return fmt.Errorf("clean build dir: %w", err)
+ }
+ return nil
+}
+
+func unpackFS(out string, srcFS fs.FS) error {
+ err := fs.WalkDir(srcFS, ".", func(path string, d fs.DirEntry, err error) error {
+ if d.Name() == "." {
+ return nil
+ }
+ if d.IsDir() {
+ if err := os.Mkdir(filepath.Join(out, path), 0o777); err != nil {
+ return err
+ }
+ return nil
+ }
+ f, err := srcFS.Open(path)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+ dst, err := os.Create(filepath.Join(out, path))
+ if err != nil {
+ return err
+ }
+ defer dst.Close()
+ if _, err := io.Copy(dst, f); err != nil {
+ return err
+ }
+ if err := dst.Close(); err != nil {
+ return err
+ }
+ return nil
+ })
+ if err != nil {
+ return fmt.Errorf("unpack fs into out dir %s: %w", out, err)
+ }
+ return nil
+}
+
+// Build unpacks and esbuild's all bundleTs typescript files
+func Build() (fs.FS, error) {
+ homeDir, err := os.UserHomeDir()
+ if err != nil {
+ return nil, err
+ }
+ cacheDir := filepath.Join(homeDir, ".cache", "sketch", "webui")
+ buildDir := filepath.Join(cacheDir, "build")
+ if err := os.MkdirAll(buildDir, 0o777); err != nil { // make sure .cache/sketch/build exists
+ return nil, err
+ }
+ hash, err := embeddedHash()
+ if err != nil {
+ return nil, err
+ }
+ finalHashDir := filepath.Join(cacheDir, hash)
+ if _, err := os.Stat(finalHashDir); err == nil {
+ // Build already done, serve it out.
+ return os.DirFS(finalHashDir), nil
+ }
+
+ // We need to do a build.
+
+ // Clear everything out of the build directory except node_modules.
+ if err := cleanBuildDir(buildDir); err != nil {
+ return nil, err
+ }
+ tmpHashDir := filepath.Join(buildDir, "out")
+ if err := os.Mkdir(tmpHashDir, 0o777); err != nil {
+ return nil, err
+ }
+
+ // Unpack everything from embedded into build dir.
+ if err := unpackFS(buildDir, embedded); err != nil {
+ return nil, err
+ }
+
+ // Do the build.
+ cmd := exec.Command("npm", "ci")
+ cmd.Dir = buildDir
+ if out, err := cmd.CombinedOutput(); err != nil {
+ return nil, fmt.Errorf("npm ci: %s: %v", out, err)
+ }
+ cmd = exec.Command("npx", "postcss", filepath.Join(buildDir, "./src/input.css"), "-o", filepath.Join(tmpHashDir, "tailwind.css"))
+ cmd.Dir = buildDir
+ if out, err := cmd.CombinedOutput(); err != nil {
+ return nil, fmt.Errorf("npm postcss: %s: %v", out, err)
+ }
+ bundleTs := []string{"src/timeline.ts"}
+ for _, tsName := range bundleTs {
+ if err := esbuildBundle(tmpHashDir, filepath.Join(buildDir, tsName)); err != nil {
+ return nil, fmt.Errorf("esbuild: %s: %w", tsName, err)
+ }
+ }
+
+ // Copy src files used directly into the new hash output dir.
+ err = fs.WalkDir(embedded, "src", func(path string, d fs.DirEntry, err error) error {
+ if d.IsDir() {
+ return nil
+ }
+ if strings.HasSuffix(path, ".html") || strings.HasSuffix(path, ".css") || strings.HasSuffix(path, ".js") {
+ b, err := embedded.ReadFile(path)
+ if err != nil {
+ return err
+ }
+ dstPath := filepath.Join(tmpHashDir, strings.TrimPrefix(path, "src/"))
+ if err := os.WriteFile(dstPath, b, 0o777); err != nil {
+ return err
+ }
+ return nil
+ }
+ return nil
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // Copy xterm.css from node_modules
+ const xtermCssPath = "node_modules/@xterm/xterm/css/xterm.css"
+ xtermCss, err := os.ReadFile(filepath.Join(buildDir, xtermCssPath))
+ if err != nil {
+ return nil, fmt.Errorf("failed to read xterm.css: %w", err)
+ }
+ if err := os.WriteFile(filepath.Join(tmpHashDir, "xterm.css"), xtermCss, 0o666); err != nil {
+ return nil, fmt.Errorf("failed to write xterm.css: %w", err)
+ }
+
+ // Everything succeeded, so we move tmpHashDir to finalHashDir
+ if err := os.Rename(tmpHashDir, finalHashDir); err != nil {
+ return nil, err
+ }
+ return os.DirFS(finalHashDir), nil
+}
+
+// unpackTS unpacks all the typescript-relevant files from the embedded filesystem into tmpDir.
+func unpackTS(outDir string, embedded fs.FS) error {
+ return fs.WalkDir(embedded, ".", func(path string, d fs.DirEntry, err error) error {
+ if err != nil {
+ return err
+ }
+ tgt := filepath.Join(outDir, path)
+ if d.IsDir() {
+ if err := os.MkdirAll(tgt, 0o777); err != nil {
+ return err
+ }
+ return nil
+ }
+ if strings.HasSuffix(path, ".html") || strings.HasSuffix(path, ".md") || strings.HasSuffix(path, ".css") {
+ return nil
+ }
+ data, err := fs.ReadFile(embedded, path)
+ if err != nil {
+ return err
+ }
+ if err := os.WriteFile(tgt, data, 0o666); err != nil {
+ return err
+ }
+ return nil
+ })
+}
+
+func esbuildBundle(outDir, src string) error {
+ ret := esbuildcli.Run([]string{
+ src,
+ "--bundle",
+ "--sourcemap",
+ "--log-level=error",
+ // Disable minification for now
+ // "--minify",
+ "--outdir=" + outDir,
+ })
+ if ret != 0 {
+ return fmt.Errorf("esbuild %s failed: %d", filepath.Base(src), ret)
+ }
+ return nil
+}
diff --git a/loop/webui/memfs.go b/loop/webui/memfs.go
new file mode 100644
index 0000000..5431862
--- /dev/null
+++ b/loop/webui/memfs.go
@@ -0,0 +1,53 @@
+package webui
+
+import (
+ "bytes"
+ "fmt"
+ "io/fs"
+ "time"
+)
+
+// memFS implements fs.FS in-memory.
+type memFS struct {
+ m map[string][]byte
+}
+
+func (m memFS) Open(name string) (fs.File, error) {
+ b, found := m.m[name]
+ if !found {
+ return nil, fmt.Errorf("esbuild.memFS(%q): %w", name, fs.ErrNotExist)
+ }
+ return &memFile{name: name, Reader: *bytes.NewReader(b)}, nil
+}
+
+func (m memFS) ReadFile(name string) ([]byte, error) {
+ b, found := m.m[name]
+ if !found {
+ return nil, fmt.Errorf("esbuild.memFS.ReadFile(%q): %w", name, fs.ErrNotExist)
+ }
+ return append(make([]byte, 0, len(b)), b...), nil
+}
+
+// memFile implements fs.File in-memory.
+type memFile struct {
+ // embedding is very important here because need more than
+ // Read, we need Seek to make http.ServeContent happy.
+ bytes.Reader
+ name string
+}
+
+func (f *memFile) Stat() (fs.FileInfo, error) { return &memFileInfo{f: f}, nil }
+func (f *memFile) Close() error { return nil }
+
+var start = time.Now()
+
+type memFileInfo struct {
+ f *memFile
+}
+
+func (i memFileInfo) Name() string { return i.f.name }
+func (i memFileInfo) Size() int64 { return i.f.Reader.Size() }
+func (i memFileInfo) Mode() fs.FileMode { return 0o444 }
+func (i memFileInfo) ModTime() time.Time { return start }
+func (i memFileInfo) IsDir() bool { return false }
+func (i memFileInfo) Sys() any { return nil }
diff --git a/loop/webui/package-lock.json b/loop/webui/package-lock.json
new file mode 100644
index 0000000..27de4c5
--- /dev/null
+++ b/loop/webui/package-lock.json
@@ -0,0 +1,3334 @@
+{
+ "name": "webui",
+ "version": "1.0.0",
+ "lockfileVersion": 3,
+ "requires": true,
+ "packages": {
+ "": {
+ "name": "webui",
+ "version": "1.0.0",
+ "license": "ISC",
+ "dependencies": {
+ "@xterm/addon-fit": "^0.10.0",
+ "@xterm/xterm": "^5.5.0",
+ "diff2html": "3.4.51",
+ "lit-html": "^3.2.1",
+ "marked": "^15.0.7",
+ "vega": "^5.33.0",
+ "vega-embed": "^6.29.0",
+ "vega-lite": "^5.23.0"
+ },
+ "devDependencies": {
+ "@types/marked": "^5.0.2",
+ "@types/node": "^22.13.14",
+ "autoprefixer": "^10.4.21",
+ "esbuild": "^0.25.1",
+ "postcss": "^8.5.3",
+ "postcss-cli": "^11.0.1",
+ "tailwindcss": "^3.4.1",
+ "typescript": "^5.8.2"
+ }
+ },
+ "node_modules/@alloc/quick-lru": {
+ "version": "5.2.0",
+ "resolved": "https://registry.npmjs.org/@alloc/quick-lru/-/quick-lru-5.2.0.tgz",
+ "integrity": "sha512-UrcABB+4bUrFABwbluTIBErXwvbsU/V7TZWfmbgJfbkwiBuziS9gxdODUyuiecfdGQ85jglMW6juS3+z5TsKLw==",
+ "dev": true,
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/@esbuild/aix-ppc64": {
+ "version": "0.25.1",
+ "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.25.1.tgz",
+ "integrity": "sha512-kfYGy8IdzTGy+z0vFGvExZtxkFlA4zAxgKEahG9KE1ScBjpQnFsNOX8KTU5ojNru5ed5CVoJYXFtoxaq5nFbjQ==",
+ "cpu": [
+ "ppc64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "aix"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/android-arm": {
+ "version": "0.25.1",
+ "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.25.1.tgz",
+ "integrity": "sha512-dp+MshLYux6j/JjdqVLnMglQlFu+MuVeNrmT5nk6q07wNhCdSnB7QZj+7G8VMUGh1q+vj2Bq8kRsuyA00I/k+Q==",
+ "cpu": [
+ "arm"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "android"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/android-arm64": {
+ "version": "0.25.1",
+ "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.25.1.tgz",
+ "integrity": "sha512-50tM0zCJW5kGqgG7fQ7IHvQOcAn9TKiVRuQ/lN0xR+T2lzEFvAi1ZcS8DiksFcEpf1t/GYOeOfCAgDHFpkiSmA==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "android"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/android-x64": {
+ "version": "0.25.1",
+ "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.25.1.tgz",
+ "integrity": "sha512-GCj6WfUtNldqUzYkN/ITtlhwQqGWu9S45vUXs7EIYf+7rCiiqH9bCloatO9VhxsL0Pji+PF4Lz2XXCES+Q8hDw==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "android"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/darwin-arm64": {
+ "version": "0.25.1",
+ "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.25.1.tgz",
+ "integrity": "sha512-5hEZKPf+nQjYoSr/elb62U19/l1mZDdqidGfmFutVUjjUZrOazAtwK+Kr+3y0C/oeJfLlxo9fXb1w7L+P7E4FQ==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "darwin"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/darwin-x64": {
+ "version": "0.25.1",
+ "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.25.1.tgz",
+ "integrity": "sha512-hxVnwL2Dqs3fM1IWq8Iezh0cX7ZGdVhbTfnOy5uURtao5OIVCEyj9xIzemDi7sRvKsuSdtCAhMKarxqtlyVyfA==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "darwin"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/freebsd-arm64": {
+ "version": "0.25.1",
+ "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.25.1.tgz",
+ "integrity": "sha512-1MrCZs0fZa2g8E+FUo2ipw6jw5qqQiH+tERoS5fAfKnRx6NXH31tXBKI3VpmLijLH6yriMZsxJtaXUyFt/8Y4A==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "freebsd"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/freebsd-x64": {
+ "version": "0.25.1",
+ "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.25.1.tgz",
+ "integrity": "sha512-0IZWLiTyz7nm0xuIs0q1Y3QWJC52R8aSXxe40VUxm6BB1RNmkODtW6LHvWRrGiICulcX7ZvyH6h5fqdLu4gkww==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "freebsd"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/linux-arm": {
+ "version": "0.25.1",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.25.1.tgz",
+ "integrity": "sha512-NdKOhS4u7JhDKw9G3cY6sWqFcnLITn6SqivVArbzIaf3cemShqfLGHYMx8Xlm/lBit3/5d7kXvriTUGa5YViuQ==",
+ "cpu": [
+ "arm"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/linux-arm64": {
+ "version": "0.25.1",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.25.1.tgz",
+ "integrity": "sha512-jaN3dHi0/DDPelk0nLcXRm1q7DNJpjXy7yWaWvbfkPvI+7XNSc/lDOnCLN7gzsyzgu6qSAmgSvP9oXAhP973uQ==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/linux-ia32": {
+ "version": "0.25.1",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.25.1.tgz",
+ "integrity": "sha512-OJykPaF4v8JidKNGz8c/q1lBO44sQNUQtq1KktJXdBLn1hPod5rE/Hko5ugKKZd+D2+o1a9MFGUEIUwO2YfgkQ==",
+ "cpu": [
+ "ia32"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/linux-loong64": {
+ "version": "0.25.1",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.25.1.tgz",
+ "integrity": "sha512-nGfornQj4dzcq5Vp835oM/o21UMlXzn79KobKlcs3Wz9smwiifknLy4xDCLUU0BWp7b/houtdrgUz7nOGnfIYg==",
+ "cpu": [
+ "loong64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/linux-mips64el": {
+ "version": "0.25.1",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.25.1.tgz",
+ "integrity": "sha512-1osBbPEFYwIE5IVB/0g2X6i1qInZa1aIoj1TdL4AaAb55xIIgbg8Doq6a5BzYWgr+tEcDzYH67XVnTmUzL+nXg==",
+ "cpu": [
+ "mips64el"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/linux-ppc64": {
+ "version": "0.25.1",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.25.1.tgz",
+ "integrity": "sha512-/6VBJOwUf3TdTvJZ82qF3tbLuWsscd7/1w+D9LH0W/SqUgM5/JJD0lrJ1fVIfZsqB6RFmLCe0Xz3fmZc3WtyVg==",
+ "cpu": [
+ "ppc64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/linux-riscv64": {
+ "version": "0.25.1",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.25.1.tgz",
+ "integrity": "sha512-nSut/Mx5gnilhcq2yIMLMe3Wl4FK5wx/o0QuuCLMtmJn+WeWYoEGDN1ipcN72g1WHsnIbxGXd4i/MF0gTcuAjQ==",
+ "cpu": [
+ "riscv64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/linux-s390x": {
+ "version": "0.25.1",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.25.1.tgz",
+ "integrity": "sha512-cEECeLlJNfT8kZHqLarDBQso9a27o2Zd2AQ8USAEoGtejOrCYHNtKP8XQhMDJMtthdF4GBmjR2au3x1udADQQQ==",
+ "cpu": [
+ "s390x"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/linux-x64": {
+ "version": "0.25.1",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.25.1.tgz",
+ "integrity": "sha512-xbfUhu/gnvSEg+EGovRc+kjBAkrvtk38RlerAzQxvMzlB4fXpCFCeUAYzJvrnhFtdeyVCDANSjJvOvGYoeKzFA==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/netbsd-arm64": {
+ "version": "0.25.1",
+ "resolved": "https://registry.npmjs.org/@esbuild/netbsd-arm64/-/netbsd-arm64-0.25.1.tgz",
+ "integrity": "sha512-O96poM2XGhLtpTh+s4+nP7YCCAfb4tJNRVZHfIE7dgmax+yMP2WgMd2OecBuaATHKTHsLWHQeuaxMRnCsH8+5g==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "netbsd"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/netbsd-x64": {
+ "version": "0.25.1",
+ "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.25.1.tgz",
+ "integrity": "sha512-X53z6uXip6KFXBQ+Krbx25XHV/NCbzryM6ehOAeAil7X7oa4XIq+394PWGnwaSQ2WRA0KI6PUO6hTO5zeF5ijA==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "netbsd"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/openbsd-arm64": {
+ "version": "0.25.1",
+ "resolved": "https://registry.npmjs.org/@esbuild/openbsd-arm64/-/openbsd-arm64-0.25.1.tgz",
+ "integrity": "sha512-Na9T3szbXezdzM/Kfs3GcRQNjHzM6GzFBeU1/6IV/npKP5ORtp9zbQjvkDJ47s6BCgaAZnnnu/cY1x342+MvZg==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "openbsd"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/openbsd-x64": {
+ "version": "0.25.1",
+ "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.25.1.tgz",
+ "integrity": "sha512-T3H78X2h1tszfRSf+txbt5aOp/e7TAz3ptVKu9Oyir3IAOFPGV6O9c2naym5TOriy1l0nNf6a4X5UXRZSGX/dw==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "openbsd"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/sunos-x64": {
+ "version": "0.25.1",
+ "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.25.1.tgz",
+ "integrity": "sha512-2H3RUvcmULO7dIE5EWJH8eubZAI4xw54H1ilJnRNZdeo8dTADEZ21w6J22XBkXqGJbe0+wnNJtw3UXRoLJnFEg==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "sunos"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/win32-arm64": {
+ "version": "0.25.1",
+ "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.25.1.tgz",
+ "integrity": "sha512-GE7XvrdOzrb+yVKB9KsRMq+7a2U/K5Cf/8grVFRAGJmfADr/e/ODQ134RK2/eeHqYV5eQRFxb1hY7Nr15fv1NQ==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "win32"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/win32-ia32": {
+ "version": "0.25.1",
+ "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.25.1.tgz",
+ "integrity": "sha512-uOxSJCIcavSiT6UnBhBzE8wy3n0hOkJsBOzy7HDAuTDE++1DJMRRVCPGisULScHL+a/ZwdXPpXD3IyFKjA7K8A==",
+ "cpu": [
+ "ia32"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "win32"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/win32-x64": {
+ "version": "0.25.1",
+ "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.25.1.tgz",
+ "integrity": "sha512-Y1EQdcfwMSeQN/ujR5VayLOJ1BHaK+ssyk0AEzPjC+t1lITgsnccPqFjb6V+LsTp/9Iov4ysfjxLaGJ9RPtkVg==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "win32"
+ ],
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@isaacs/cliui": {
+ "version": "8.0.2",
+ "resolved": "https://registry.npmjs.org/@isaacs/cliui/-/cliui-8.0.2.tgz",
+ "integrity": "sha512-O8jcjabXaleOG9DQ0+ARXWZBTfnP4WNAqzuiJK7ll44AmxGKv/J2M4TPjxjY3znBCfvBXFzucm1twdyFybFqEA==",
+ "dev": true,
+ "dependencies": {
+ "string-width": "^5.1.2",
+ "string-width-cjs": "npm:string-width@^4.2.0",
+ "strip-ansi": "^7.0.1",
+ "strip-ansi-cjs": "npm:strip-ansi@^6.0.1",
+ "wrap-ansi": "^8.1.0",
+ "wrap-ansi-cjs": "npm:wrap-ansi@^7.0.0"
+ },
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@isaacs/cliui/node_modules/ansi-regex": {
+ "version": "6.1.0",
+ "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.1.0.tgz",
+ "integrity": "sha512-7HSX4QQb4CspciLpVFwyRe79O3xsIZDDLER21kERQ71oaPodF8jL725AgJMFAYbooIqolJoRLuM81SpeUkpkvA==",
+ "dev": true,
+ "engines": {
+ "node": ">=12"
+ },
+ "funding": {
+ "url": "https://github.com/chalk/ansi-regex?sponsor=1"
+ }
+ },
+ "node_modules/@isaacs/cliui/node_modules/ansi-styles": {
+ "version": "6.2.1",
+ "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.1.tgz",
+ "integrity": "sha512-bN798gFfQX+viw3R7yrGWRqnrN2oRkEkUjjl4JNn4E8GxxbjtG3FbrEIIY3l8/hrwUwIeCZvi4QuOTP4MErVug==",
+ "dev": true,
+ "engines": {
+ "node": ">=12"
+ },
+ "funding": {
+ "url": "https://github.com/chalk/ansi-styles?sponsor=1"
+ }
+ },
+ "node_modules/@isaacs/cliui/node_modules/emoji-regex": {
+ "version": "9.2.2",
+ "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-9.2.2.tgz",
+ "integrity": "sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg==",
+ "dev": true
+ },
+ "node_modules/@isaacs/cliui/node_modules/string-width": {
+ "version": "5.1.2",
+ "resolved": "https://registry.npmjs.org/string-width/-/string-width-5.1.2.tgz",
+ "integrity": "sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA==",
+ "dev": true,
+ "dependencies": {
+ "eastasianwidth": "^0.2.0",
+ "emoji-regex": "^9.2.2",
+ "strip-ansi": "^7.0.1"
+ },
+ "engines": {
+ "node": ">=12"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/@isaacs/cliui/node_modules/strip-ansi": {
+ "version": "7.1.0",
+ "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.0.tgz",
+ "integrity": "sha512-iq6eVVI64nQQTRYq2KtEg2d2uU7LElhTJwsH4YzIHZshxlgZms/wIc4VoDQTlG/IvVIrBKG06CrZnp0qv7hkcQ==",
+ "dev": true,
+ "dependencies": {
+ "ansi-regex": "^6.0.1"
+ },
+ "engines": {
+ "node": ">=12"
+ },
+ "funding": {
+ "url": "https://github.com/chalk/strip-ansi?sponsor=1"
+ }
+ },
+ "node_modules/@isaacs/cliui/node_modules/wrap-ansi": {
+ "version": "8.1.0",
+ "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-8.1.0.tgz",
+ "integrity": "sha512-si7QWI6zUMq56bESFvagtmzMdGOtoxfR+Sez11Mobfc7tm+VkUckk9bW2UeffTGVUbOksxmSw0AA2gs8g71NCQ==",
+ "dev": true,
+ "dependencies": {
+ "ansi-styles": "^6.1.0",
+ "string-width": "^5.0.1",
+ "strip-ansi": "^7.0.1"
+ },
+ "engines": {
+ "node": ">=12"
+ },
+ "funding": {
+ "url": "https://github.com/chalk/wrap-ansi?sponsor=1"
+ }
+ },
+ "node_modules/@jridgewell/gen-mapping": {
+ "version": "0.3.8",
+ "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.8.tgz",
+ "integrity": "sha512-imAbBGkb+ebQyxKgzv5Hu2nmROxoDOXHh80evxdoXNOrvAnVx7zimzc1Oo5h9RlfV4vPXaE2iM5pOFbvOCClWA==",
+ "dev": true,
+ "dependencies": {
+ "@jridgewell/set-array": "^1.2.1",
+ "@jridgewell/sourcemap-codec": "^1.4.10",
+ "@jridgewell/trace-mapping": "^0.3.24"
+ },
+ "engines": {
+ "node": ">=6.0.0"
+ }
+ },
+ "node_modules/@jridgewell/resolve-uri": {
+ "version": "3.1.2",
+ "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.2.tgz",
+ "integrity": "sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==",
+ "dev": true,
+ "engines": {
+ "node": ">=6.0.0"
+ }
+ },
+ "node_modules/@jridgewell/set-array": {
+ "version": "1.2.1",
+ "resolved": "https://registry.npmjs.org/@jridgewell/set-array/-/set-array-1.2.1.tgz",
+ "integrity": "sha512-R8gLRTZeyp03ymzP/6Lil/28tGeGEzhx1q2k703KGWRAI1VdvPIXdG70VJc2pAMw3NA6JKL5hhFu1sJX0Mnn/A==",
+ "dev": true,
+ "engines": {
+ "node": ">=6.0.0"
+ }
+ },
+ "node_modules/@jridgewell/sourcemap-codec": {
+ "version": "1.5.0",
+ "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.0.tgz",
+ "integrity": "sha512-gv3ZRaISU3fjPAgNsriBRqGWQL6quFx04YMPW/zD8XMLsU32mhCCbfbO6KZFLjvYpCZ8zyDEgqsgf+PwPaM7GQ==",
+ "dev": true
+ },
+ "node_modules/@jridgewell/trace-mapping": {
+ "version": "0.3.25",
+ "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.25.tgz",
+ "integrity": "sha512-vNk6aEwybGtawWmy/PzwnGDOjCkLWSD2wqvjGGAgOAwCGWySYXfYoxt00IJkTF+8Lb57DwOb3Aa0o9CApepiYQ==",
+ "dev": true,
+ "dependencies": {
+ "@jridgewell/resolve-uri": "^3.1.0",
+ "@jridgewell/sourcemap-codec": "^1.4.14"
+ }
+ },
+ "node_modules/@nodelib/fs.scandir": {
+ "version": "2.1.5",
+ "resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz",
+ "integrity": "sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==",
+ "dev": true,
+ "dependencies": {
+ "@nodelib/fs.stat": "2.0.5",
+ "run-parallel": "^1.1.9"
+ },
+ "engines": {
+ "node": ">= 8"
+ }
+ },
+ "node_modules/@nodelib/fs.stat": {
+ "version": "2.0.5",
+ "resolved": "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-2.0.5.tgz",
+ "integrity": "sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==",
+ "dev": true,
+ "engines": {
+ "node": ">= 8"
+ }
+ },
+ "node_modules/@nodelib/fs.walk": {
+ "version": "1.2.8",
+ "resolved": "https://registry.npmjs.org/@nodelib/fs.walk/-/fs.walk-1.2.8.tgz",
+ "integrity": "sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==",
+ "dev": true,
+ "dependencies": {
+ "@nodelib/fs.scandir": "2.1.5",
+ "fastq": "^1.6.0"
+ },
+ "engines": {
+ "node": ">= 8"
+ }
+ },
+ "node_modules/@pkgjs/parseargs": {
+ "version": "0.11.0",
+ "resolved": "https://registry.npmjs.org/@pkgjs/parseargs/-/parseargs-0.11.0.tgz",
+ "integrity": "sha512-+1VkjdD0QBLPodGrJUeqarH8VAIvQODIbwh9XpP5Syisf7YoQgsJKPNFoqqLQlu+VQ/tVSshMR6loPMn8U+dPg==",
+ "dev": true,
+ "optional": true,
+ "engines": {
+ "node": ">=14"
+ }
+ },
+ "node_modules/@rollup/rollup-linux-x64-gnu": {
+ "version": "4.37.0",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.37.0.tgz",
+ "integrity": "sha512-pKivGpgJM5g8dwj0ywBwe/HeVAUSuVVJhUTa/URXjxvoyTT/AxsLTAbkHkDHG7qQxLoW2s3apEIl26uUe08LVQ==",
+ "cpu": [
+ "x64"
+ ],
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ]
+ },
+ "node_modules/@types/estree": {
+ "version": "1.0.7",
+ "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.7.tgz",
+ "integrity": "sha512-w28IoSUCJpidD/TGviZwwMJckNESJZXFu7NBZ5YJ4mEUnNraUn9Pm8HSZm/jDF1pDWYKspWE7oVphigUPRakIQ==",
+ "license": "MIT"
+ },
+ "node_modules/@types/geojson": {
+ "version": "7946.0.4",
+ "resolved": "https://registry.npmjs.org/@types/geojson/-/geojson-7946.0.4.tgz",
+ "integrity": "sha512-MHmwBtCb7OCv1DSivz2UNJXPGU/1btAWRKlqJ2saEhVJkpkvqHMMaOpKg0v4sAbDWSQekHGvPVMM8nQ+Jen03Q==",
+ "license": "MIT"
+ },
+ "node_modules/@types/marked": {
+ "version": "5.0.2",
+ "resolved": "https://registry.npmjs.org/@types/marked/-/marked-5.0.2.tgz",
+ "integrity": "sha512-OucS4KMHhFzhz27KxmWg7J+kIYqyqoW5kdIEI319hqARQQUTqhao3M/F+uFnDXD0Rg72iDDZxZNxq5gvctmLlg==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/@types/node": {
+ "version": "22.13.14",
+ "resolved": "https://registry.npmjs.org/@types/node/-/node-22.13.14.tgz",
+ "integrity": "sha512-Zs/Ollc1SJ8nKUAgc7ivOEdIBM8JAKgrqqUYi2J997JuKO7/tpQC+WCetQ1sypiKCQWHdvdg9wBNpUPEWZae7w==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "undici-types": "~6.20.0"
+ }
+ },
+ "node_modules/@types/trusted-types": {
+ "version": "2.0.7",
+ "resolved": "https://registry.npmjs.org/@types/trusted-types/-/trusted-types-2.0.7.tgz",
+ "integrity": "sha512-ScaPdn1dQczgbl0QFTeTOmVHFULt394XJgOQNoyVhZ6r2vLnMLJfBPd53SB52T/3G36VI1/g2MZaX0cwDuXsfw==",
+ "license": "MIT"
+ },
+ "node_modules/@xterm/addon-fit": {
+ "version": "0.10.0",
+ "resolved": "https://registry.npmjs.org/@xterm/addon-fit/-/addon-fit-0.10.0.tgz",
+ "integrity": "sha512-UFYkDm4HUahf2lnEyHvio51TNGiLK66mqP2JoATy7hRZeXaGMRDr00JiSF7m63vR5WKATF605yEggJKsw0JpMQ==",
+ "license": "MIT",
+ "peerDependencies": {
+ "@xterm/xterm": "^5.0.0"
+ }
+ },
+ "node_modules/@xterm/xterm": {
+ "version": "5.5.0",
+ "resolved": "https://registry.npmjs.org/@xterm/xterm/-/xterm-5.5.0.tgz",
+ "integrity": "sha512-hqJHYaQb5OptNunnyAnkHyM8aCjZ1MEIDTQu1iIbbTD/xops91NB5yq1ZK/dC2JDbVWtF23zUtl9JE2NqwT87A==",
+ "license": "MIT"
+ },
+ "node_modules/abbrev": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/abbrev/-/abbrev-1.1.1.tgz",
+ "integrity": "sha512-nne9/IiQ/hzIhY6pdDnbBtz7DjPTKrY00P/zvPSm5pOFkl6xuGrGnXn/VtTNNfNtAfZ9/1RtehkszU9qcTii0Q==",
+ "license": "ISC"
+ },
+ "node_modules/ansi-regex": {
+ "version": "5.0.1",
+ "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz",
+ "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==",
+ "license": "MIT",
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/ansi-styles": {
+ "version": "4.3.0",
+ "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz",
+ "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==",
+ "license": "MIT",
+ "dependencies": {
+ "color-convert": "^2.0.1"
+ },
+ "engines": {
+ "node": ">=8"
+ },
+ "funding": {
+ "url": "https://github.com/chalk/ansi-styles?sponsor=1"
+ }
+ },
+ "node_modules/any-promise": {
+ "version": "1.3.0",
+ "resolved": "https://registry.npmjs.org/any-promise/-/any-promise-1.3.0.tgz",
+ "integrity": "sha512-7UvmKalWRt1wgjL1RrGxoSJW/0QZFIegpeGvZG9kjp8vrRu55XTHbwnqq2GpXm9uLbcuhxm3IqX9OB4MZR1b2A==",
+ "dev": true
+ },
+ "node_modules/anymatch": {
+ "version": "3.1.3",
+ "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.3.tgz",
+ "integrity": "sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==",
+ "dev": true,
+ "dependencies": {
+ "normalize-path": "^3.0.0",
+ "picomatch": "^2.0.4"
+ },
+ "engines": {
+ "node": ">= 8"
+ }
+ },
+ "node_modules/arg": {
+ "version": "5.0.2",
+ "resolved": "https://registry.npmjs.org/arg/-/arg-5.0.2.tgz",
+ "integrity": "sha512-PYjyFOLKQ9y57JvQ6QLo8dAgNqswh8M1RMJYdQduT6xbWSgK36P/Z/v+p888pM69jMMfS8Xd8F6I1kQ/I9HUGg==",
+ "dev": true
+ },
+ "node_modules/autoprefixer": {
+ "version": "10.4.21",
+ "resolved": "https://registry.npmjs.org/autoprefixer/-/autoprefixer-10.4.21.tgz",
+ "integrity": "sha512-O+A6LWV5LDHSJD3LjHYoNi4VLsj/Whi7k6zG12xTYaU4cQ8oxQGckXNX8cRHK5yOZ/ppVHe0ZBXGzSV9jXdVbQ==",
+ "dev": true,
+ "funding": [
+ {
+ "type": "opencollective",
+ "url": "https://opencollective.com/postcss/"
+ },
+ {
+ "type": "tidelift",
+ "url": "https://tidelift.com/funding/github/npm/autoprefixer"
+ },
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/ai"
+ }
+ ],
+ "dependencies": {
+ "browserslist": "^4.24.4",
+ "caniuse-lite": "^1.0.30001702",
+ "fraction.js": "^4.3.7",
+ "normalize-range": "^0.1.2",
+ "picocolors": "^1.1.1",
+ "postcss-value-parser": "^4.2.0"
+ },
+ "bin": {
+ "autoprefixer": "bin/autoprefixer"
+ },
+ "engines": {
+ "node": "^10 || ^12 || >=14"
+ },
+ "peerDependencies": {
+ "postcss": "^8.1.0"
+ }
+ },
+ "node_modules/balanced-match": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz",
+ "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==",
+ "dev": true
+ },
+ "node_modules/binary-extensions": {
+ "version": "2.3.0",
+ "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.3.0.tgz",
+ "integrity": "sha512-Ceh+7ox5qe7LJuLHoY0feh3pHuUDHAcRUeyL2VYghZwfpkNIy/+8Ocg0a3UuSoYzavmylwuLWQOf3hl0jjMMIw==",
+ "dev": true,
+ "engines": {
+ "node": ">=8"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/brace-expansion": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz",
+ "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==",
+ "dev": true,
+ "dependencies": {
+ "balanced-match": "^1.0.0"
+ }
+ },
+ "node_modules/braces": {
+ "version": "3.0.3",
+ "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz",
+ "integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==",
+ "dev": true,
+ "dependencies": {
+ "fill-range": "^7.1.1"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/browserslist": {
+ "version": "4.24.4",
+ "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.24.4.tgz",
+ "integrity": "sha512-KDi1Ny1gSePi1vm0q4oxSF8b4DR44GF4BbmS2YdhPLOEqd8pDviZOGH/GsmRwoWJ2+5Lr085X7naowMwKHDG1A==",
+ "dev": true,
+ "funding": [
+ {
+ "type": "opencollective",
+ "url": "https://opencollective.com/browserslist"
+ },
+ {
+ "type": "tidelift",
+ "url": "https://tidelift.com/funding/github/npm/browserslist"
+ },
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/ai"
+ }
+ ],
+ "dependencies": {
+ "caniuse-lite": "^1.0.30001688",
+ "electron-to-chromium": "^1.5.73",
+ "node-releases": "^2.0.19",
+ "update-browserslist-db": "^1.1.1"
+ },
+ "bin": {
+ "browserslist": "cli.js"
+ },
+ "engines": {
+ "node": "^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7"
+ }
+ },
+ "node_modules/camelcase-css": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/camelcase-css/-/camelcase-css-2.0.1.tgz",
+ "integrity": "sha512-QOSvevhslijgYwRx6Rv7zKdMF8lbRmx+uQGx2+vDc+KI/eBnsy9kit5aj23AgGu3pa4t9AgwbnXWqS+iOY+2aA==",
+ "dev": true,
+ "engines": {
+ "node": ">= 6"
+ }
+ },
+ "node_modules/caniuse-lite": {
+ "version": "1.0.30001710",
+ "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001710.tgz",
+ "integrity": "sha512-B5C0I0UmaGqHgo5FuqJ7hBd4L57A4dDD+Xi+XX1nXOoxGeDdY4Ko38qJYOyqznBVJEqON5p8P1x5zRR3+rsnxA==",
+ "dev": true,
+ "funding": [
+ {
+ "type": "opencollective",
+ "url": "https://opencollective.com/browserslist"
+ },
+ {
+ "type": "tidelift",
+ "url": "https://tidelift.com/funding/github/npm/caniuse-lite"
+ },
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/ai"
+ }
+ ]
+ },
+ "node_modules/chokidar": {
+ "version": "3.6.0",
+ "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.6.0.tgz",
+ "integrity": "sha512-7VT13fmjotKpGipCW9JEQAusEPE+Ei8nl6/g4FBAmIm0GOOLMua9NDDo/DWp0ZAxCr3cPq5ZpBqmPAQgDda2Pw==",
+ "dev": true,
+ "dependencies": {
+ "anymatch": "~3.1.2",
+ "braces": "~3.0.2",
+ "glob-parent": "~5.1.2",
+ "is-binary-path": "~2.1.0",
+ "is-glob": "~4.0.1",
+ "normalize-path": "~3.0.0",
+ "readdirp": "~3.6.0"
+ },
+ "engines": {
+ "node": ">= 8.10.0"
+ },
+ "funding": {
+ "url": "https://paulmillr.com/funding/"
+ },
+ "optionalDependencies": {
+ "fsevents": "~2.3.2"
+ }
+ },
+ "node_modules/cliui": {
+ "version": "8.0.1",
+ "resolved": "https://registry.npmjs.org/cliui/-/cliui-8.0.1.tgz",
+ "integrity": "sha512-BSeNnyus75C4//NQ9gQt1/csTXyo/8Sb+afLAkzAptFuMsod9HFokGNudZpi/oQV73hnVK+sR+5PVRMd+Dr7YQ==",
+ "license": "ISC",
+ "dependencies": {
+ "string-width": "^4.2.0",
+ "strip-ansi": "^6.0.1",
+ "wrap-ansi": "^7.0.0"
+ },
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/color-convert": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz",
+ "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==",
+ "license": "MIT",
+ "dependencies": {
+ "color-name": "~1.1.4"
+ },
+ "engines": {
+ "node": ">=7.0.0"
+ }
+ },
+ "node_modules/color-name": {
+ "version": "1.1.4",
+ "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz",
+ "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==",
+ "license": "MIT"
+ },
+ "node_modules/commander": {
+ "version": "7.2.0",
+ "resolved": "https://registry.npmjs.org/commander/-/commander-7.2.0.tgz",
+ "integrity": "sha512-QrWXB+ZQSVPmIWIhtEO9H+gwHaMGYiF5ChvoJ+K9ZGHG/sVsa6yiesAD1GC/x46sET00Xlwo1u49RVVVzvcSkw==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 10"
+ }
+ },
+ "node_modules/cross-spawn": {
+ "version": "7.0.6",
+ "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.6.tgz",
+ "integrity": "sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==",
+ "dev": true,
+ "dependencies": {
+ "path-key": "^3.1.0",
+ "shebang-command": "^2.0.0",
+ "which": "^2.0.1"
+ },
+ "engines": {
+ "node": ">= 8"
+ }
+ },
+ "node_modules/cssesc": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/cssesc/-/cssesc-3.0.0.tgz",
+ "integrity": "sha512-/Tb/JcjK111nNScGob5MNtsntNM1aCNUDipB/TkwZFhyDrrE47SOx/18wF2bbjgc3ZzCSKW1T5nt5EbFoAz/Vg==",
+ "dev": true,
+ "bin": {
+ "cssesc": "bin/cssesc"
+ },
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/d3-array": {
+ "version": "3.2.4",
+ "resolved": "https://registry.npmjs.org/d3-array/-/d3-array-3.2.4.tgz",
+ "integrity": "sha512-tdQAmyA18i4J7wprpYq8ClcxZy3SC31QMeByyCFyRt7BVHdREQZ5lpzoe5mFEYZUWe+oq8HBvk9JjpibyEV4Jg==",
+ "license": "ISC",
+ "dependencies": {
+ "internmap": "1 - 2"
+ },
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-color": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/d3-color/-/d3-color-3.1.0.tgz",
+ "integrity": "sha512-zg/chbXyeBtMQ1LbD/WSoW2DpC3I0mpmPdW+ynRTj/x2DAWYrIY7qeZIHidozwV24m4iavr15lNwIwLxRmOxhA==",
+ "license": "ISC",
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-delaunay": {
+ "version": "6.0.4",
+ "resolved": "https://registry.npmjs.org/d3-delaunay/-/d3-delaunay-6.0.4.tgz",
+ "integrity": "sha512-mdjtIZ1XLAM8bm/hx3WwjfHt6Sggek7qH043O8KEjDXN40xi3vx/6pYSVTwLjEgiXQTbvaouWKynLBiUZ6SK6A==",
+ "license": "ISC",
+ "dependencies": {
+ "delaunator": "5"
+ },
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-dispatch": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/d3-dispatch/-/d3-dispatch-3.0.1.tgz",
+ "integrity": "sha512-rzUyPU/S7rwUflMyLc1ETDeBj0NRuHKKAcvukozwhshr6g6c5d8zh4c2gQjY2bZ0dXeGLWc1PF174P2tVvKhfg==",
+ "license": "ISC",
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-dsv": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/d3-dsv/-/d3-dsv-3.0.1.tgz",
+ "integrity": "sha512-UG6OvdI5afDIFP9w4G0mNq50dSOsXHJaRE8arAS5o9ApWnIElp8GZw1Dun8vP8OyHOZ/QJUKUJwxiiCCnUwm+Q==",
+ "license": "ISC",
+ "dependencies": {
+ "commander": "7",
+ "iconv-lite": "0.6",
+ "rw": "1"
+ },
+ "bin": {
+ "csv2json": "bin/dsv2json.js",
+ "csv2tsv": "bin/dsv2dsv.js",
+ "dsv2dsv": "bin/dsv2dsv.js",
+ "dsv2json": "bin/dsv2json.js",
+ "json2csv": "bin/json2dsv.js",
+ "json2dsv": "bin/json2dsv.js",
+ "json2tsv": "bin/json2dsv.js",
+ "tsv2csv": "bin/dsv2dsv.js",
+ "tsv2json": "bin/dsv2json.js"
+ },
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-force": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/d3-force/-/d3-force-3.0.0.tgz",
+ "integrity": "sha512-zxV/SsA+U4yte8051P4ECydjD/S+qeYtnaIyAs9tgHCqfguma/aAQDjo85A9Z6EKhBirHRJHXIgJUlffT4wdLg==",
+ "license": "ISC",
+ "dependencies": {
+ "d3-dispatch": "1 - 3",
+ "d3-quadtree": "1 - 3",
+ "d3-timer": "1 - 3"
+ },
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-format": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/d3-format/-/d3-format-3.1.0.tgz",
+ "integrity": "sha512-YyUI6AEuY/Wpt8KWLgZHsIU86atmikuoOmCfommt0LYHiQSPjvX2AcFc38PX0CBpr2RCyZhjex+NS/LPOv6YqA==",
+ "license": "ISC",
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-geo": {
+ "version": "3.1.1",
+ "resolved": "https://registry.npmjs.org/d3-geo/-/d3-geo-3.1.1.tgz",
+ "integrity": "sha512-637ln3gXKXOwhalDzinUgY83KzNWZRKbYubaG+fGVuc/dxO64RRljtCTnf5ecMyE1RIdtqpkVcq0IbtU2S8j2Q==",
+ "license": "ISC",
+ "dependencies": {
+ "d3-array": "2.5.0 - 3"
+ },
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-geo-projection": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/d3-geo-projection/-/d3-geo-projection-4.0.0.tgz",
+ "integrity": "sha512-p0bK60CEzph1iqmnxut7d/1kyTmm3UWtPlwdkM31AU+LW+BXazd5zJdoCn7VFxNCHXRngPHRnsNn5uGjLRGndg==",
+ "license": "ISC",
+ "dependencies": {
+ "commander": "7",
+ "d3-array": "1 - 3",
+ "d3-geo": "1.12.0 - 3"
+ },
+ "bin": {
+ "geo2svg": "bin/geo2svg.js",
+ "geograticule": "bin/geograticule.js",
+ "geoproject": "bin/geoproject.js",
+ "geoquantize": "bin/geoquantize.js",
+ "geostitch": "bin/geostitch.js"
+ },
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-hierarchy": {
+ "version": "3.1.2",
+ "resolved": "https://registry.npmjs.org/d3-hierarchy/-/d3-hierarchy-3.1.2.tgz",
+ "integrity": "sha512-FX/9frcub54beBdugHjDCdikxThEqjnR93Qt7PvQTOHxyiNCAlvMrHhclk3cD5VeAaq9fxmfRp+CnWw9rEMBuA==",
+ "license": "ISC",
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-interpolate": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/d3-interpolate/-/d3-interpolate-3.0.1.tgz",
+ "integrity": "sha512-3bYs1rOD33uo8aqJfKP3JWPAibgw8Zm2+L9vBKEHJ2Rg+viTR7o5Mmv5mZcieN+FRYaAOWX5SJATX6k1PWz72g==",
+ "license": "ISC",
+ "dependencies": {
+ "d3-color": "1 - 3"
+ },
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-path": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/d3-path/-/d3-path-3.1.0.tgz",
+ "integrity": "sha512-p3KP5HCf/bvjBSSKuXid6Zqijx7wIfNW+J/maPs+iwR35at5JCbLUT0LzF1cnjbCHWhqzQTIN2Jpe8pRebIEFQ==",
+ "license": "ISC",
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-quadtree": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/d3-quadtree/-/d3-quadtree-3.0.1.tgz",
+ "integrity": "sha512-04xDrxQTDTCFwP5H6hRhsRcb9xxv2RzkcsygFzmkSIOJy3PeRJP7sNk3VRIbKXcog561P9oU0/rVH6vDROAgUw==",
+ "license": "ISC",
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-scale": {
+ "version": "4.0.2",
+ "resolved": "https://registry.npmjs.org/d3-scale/-/d3-scale-4.0.2.tgz",
+ "integrity": "sha512-GZW464g1SH7ag3Y7hXjf8RoUuAFIqklOAq3MRl4OaWabTFJY9PN/E1YklhXLh+OQ3fM9yS2nOkCoS+WLZ6kvxQ==",
+ "license": "ISC",
+ "dependencies": {
+ "d3-array": "2.10.0 - 3",
+ "d3-format": "1 - 3",
+ "d3-interpolate": "1.2.0 - 3",
+ "d3-time": "2.1.1 - 3",
+ "d3-time-format": "2 - 4"
+ },
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-scale-chromatic": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/d3-scale-chromatic/-/d3-scale-chromatic-3.1.0.tgz",
+ "integrity": "sha512-A3s5PWiZ9YCXFye1o246KoscMWqf8BsD9eRiJ3He7C9OBaxKhAd5TFCdEx/7VbKtxxTsu//1mMJFrEt572cEyQ==",
+ "license": "ISC",
+ "dependencies": {
+ "d3-color": "1 - 3",
+ "d3-interpolate": "1 - 3"
+ },
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-shape": {
+ "version": "3.2.0",
+ "resolved": "https://registry.npmjs.org/d3-shape/-/d3-shape-3.2.0.tgz",
+ "integrity": "sha512-SaLBuwGm3MOViRq2ABk3eLoxwZELpH6zhl3FbAoJ7Vm1gofKx6El1Ib5z23NUEhF9AsGl7y+dzLe5Cw2AArGTA==",
+ "license": "ISC",
+ "dependencies": {
+ "d3-path": "^3.1.0"
+ },
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-time": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/d3-time/-/d3-time-3.1.0.tgz",
+ "integrity": "sha512-VqKjzBLejbSMT4IgbmVgDjpkYrNWUYJnbCGo874u7MMKIWsILRX+OpX/gTk8MqjpT1A/c6HY2dCA77ZN0lkQ2Q==",
+ "license": "ISC",
+ "dependencies": {
+ "d3-array": "2 - 3"
+ },
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-time-format": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/d3-time-format/-/d3-time-format-4.1.0.tgz",
+ "integrity": "sha512-dJxPBlzC7NugB2PDLwo9Q8JiTR3M3e4/XANkreKSUxF8vvXKqm1Yfq4Q5dl8budlunRVlUUaDUgFt7eA8D6NLg==",
+ "license": "ISC",
+ "dependencies": {
+ "d3-time": "1 - 3"
+ },
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-timer": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/d3-timer/-/d3-timer-3.0.1.tgz",
+ "integrity": "sha512-ndfJ/JxxMd3nw31uyKoY2naivF+r29V+Lc0svZxe1JvvIRmi8hUsrMvdOwgS1o6uBHmiz91geQ0ylPP0aj1VUA==",
+ "license": "ISC",
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/delaunator": {
+ "version": "5.0.1",
+ "resolved": "https://registry.npmjs.org/delaunator/-/delaunator-5.0.1.tgz",
+ "integrity": "sha512-8nvh+XBe96aCESrGOqMp/84b13H9cdKbG5P2ejQCh4d4sK9RL4371qou9drQjMhvnPmhWl5hnmqbEE0fXr9Xnw==",
+ "license": "ISC",
+ "dependencies": {
+ "robust-predicates": "^3.0.2"
+ }
+ },
+ "node_modules/dependency-graph": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/dependency-graph/-/dependency-graph-1.0.0.tgz",
+ "integrity": "sha512-cW3gggJ28HZ/LExwxP2B++aiKxhJXMSIt9K48FOXQkm+vuG5gyatXnLsONRJdzO/7VfjDIiaOOa/bs4l464Lwg==",
+ "dev": true,
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/didyoumean": {
+ "version": "1.2.2",
+ "resolved": "https://registry.npmjs.org/didyoumean/-/didyoumean-1.2.2.tgz",
+ "integrity": "sha512-gxtyfqMg7GKyhQmb056K7M3xszy/myH8w+B4RT+QXBQsvAOdc3XymqDDPHx1BgPgsdAA5SIifona89YtRATDzw==",
+ "dev": true
+ },
+ "node_modules/diff": {
+ "version": "7.0.0",
+ "resolved": "https://registry.npmjs.org/diff/-/diff-7.0.0.tgz",
+ "integrity": "sha512-PJWHUb1RFevKCwaFA9RlG5tCd+FO5iRh9A8HEtkmBH2Li03iJriB6m6JIN4rGz3K3JLawI7/veA1xzRKP6ISBw==",
+ "license": "BSD-3-Clause",
+ "engines": {
+ "node": ">=0.3.1"
+ }
+ },
+ "node_modules/diff2html": {
+ "version": "3.4.51",
+ "resolved": "https://registry.npmjs.org/diff2html/-/diff2html-3.4.51.tgz",
+ "integrity": "sha512-/rVCSDyokkzSCEGaGjkkElXtIRwyNDRzIa3S8VUhR6pjk25p6+AMnb1s2zGmhjl66D5m/HnV3IeZoxnWsvTy+w==",
+ "license": "MIT",
+ "dependencies": {
+ "diff": "^7.0.0",
+ "hogan.js": "3.0.2"
+ },
+ "engines": {
+ "node": ">=12"
+ },
+ "optionalDependencies": {
+ "highlight.js": "11.9.0"
+ }
+ },
+ "node_modules/dlv": {
+ "version": "1.1.3",
+ "resolved": "https://registry.npmjs.org/dlv/-/dlv-1.1.3.tgz",
+ "integrity": "sha512-+HlytyjlPKnIG8XuRG8WvmBP8xs8P71y+SKKS6ZXWoEgLuePxtDoUEiH7WkdePWrQ5JBpE6aoVqfZfJUQkjXwA==",
+ "dev": true
+ },
+ "node_modules/eastasianwidth": {
+ "version": "0.2.0",
+ "resolved": "https://registry.npmjs.org/eastasianwidth/-/eastasianwidth-0.2.0.tgz",
+ "integrity": "sha512-I88TYZWc9XiYHRQ4/3c5rjjfgkjhLyW2luGIheGERbNQ6OY7yTybanSpDXZa8y7VUP9YmDcYa+eyq4ca7iLqWA==",
+ "dev": true
+ },
+ "node_modules/electron-to-chromium": {
+ "version": "1.5.132",
+ "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.132.tgz",
+ "integrity": "sha512-QgX9EBvWGmvSRa74zqfnG7+Eno0Ak0vftBll0Pt2/z5b3bEGYL6OUXLgKPtvx73dn3dvwrlyVkjPKRRlhLYTEg==",
+ "dev": true
+ },
+ "node_modules/emoji-regex": {
+ "version": "8.0.0",
+ "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz",
+ "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==",
+ "license": "MIT"
+ },
+ "node_modules/esbuild": {
+ "version": "0.25.1",
+ "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.25.1.tgz",
+ "integrity": "sha512-BGO5LtrGC7vxnqucAe/rmvKdJllfGaYWdyABvyMoXQlfYMb2bbRuReWR5tEGE//4LcNJj9XrkovTqNYRFZHAMQ==",
+ "dev": true,
+ "hasInstallScript": true,
+ "license": "MIT",
+ "bin": {
+ "esbuild": "bin/esbuild"
+ },
+ "engines": {
+ "node": ">=18"
+ },
+ "optionalDependencies": {
+ "@esbuild/aix-ppc64": "0.25.1",
+ "@esbuild/android-arm": "0.25.1",
+ "@esbuild/android-arm64": "0.25.1",
+ "@esbuild/android-x64": "0.25.1",
+ "@esbuild/darwin-arm64": "0.25.1",
+ "@esbuild/darwin-x64": "0.25.1",
+ "@esbuild/freebsd-arm64": "0.25.1",
+ "@esbuild/freebsd-x64": "0.25.1",
+ "@esbuild/linux-arm": "0.25.1",
+ "@esbuild/linux-arm64": "0.25.1",
+ "@esbuild/linux-ia32": "0.25.1",
+ "@esbuild/linux-loong64": "0.25.1",
+ "@esbuild/linux-mips64el": "0.25.1",
+ "@esbuild/linux-ppc64": "0.25.1",
+ "@esbuild/linux-riscv64": "0.25.1",
+ "@esbuild/linux-s390x": "0.25.1",
+ "@esbuild/linux-x64": "0.25.1",
+ "@esbuild/netbsd-arm64": "0.25.1",
+ "@esbuild/netbsd-x64": "0.25.1",
+ "@esbuild/openbsd-arm64": "0.25.1",
+ "@esbuild/openbsd-x64": "0.25.1",
+ "@esbuild/sunos-x64": "0.25.1",
+ "@esbuild/win32-arm64": "0.25.1",
+ "@esbuild/win32-ia32": "0.25.1",
+ "@esbuild/win32-x64": "0.25.1"
+ }
+ },
+ "node_modules/escalade": {
+ "version": "3.2.0",
+ "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.2.0.tgz",
+ "integrity": "sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA==",
+ "license": "MIT",
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/fast-glob": {
+ "version": "3.3.3",
+ "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.3.3.tgz",
+ "integrity": "sha512-7MptL8U0cqcFdzIzwOTHoilX9x5BrNqye7Z/LuC7kCMRio1EMSyqRK3BEAUD7sXRq4iT4AzTVuZdhgQ2TCvYLg==",
+ "dev": true,
+ "dependencies": {
+ "@nodelib/fs.stat": "^2.0.2",
+ "@nodelib/fs.walk": "^1.2.3",
+ "glob-parent": "^5.1.2",
+ "merge2": "^1.3.0",
+ "micromatch": "^4.0.8"
+ },
+ "engines": {
+ "node": ">=8.6.0"
+ }
+ },
+ "node_modules/fast-json-patch": {
+ "version": "3.1.1",
+ "resolved": "https://registry.npmjs.org/fast-json-patch/-/fast-json-patch-3.1.1.tgz",
+ "integrity": "sha512-vf6IHUX2SBcA+5/+4883dsIjpBTqmfBjmYiWK1savxQmFk4JfBMLa7ynTYOs1Rolp/T1betJxHiGD3g1Mn8lUQ==",
+ "license": "MIT"
+ },
+ "node_modules/fastq": {
+ "version": "1.19.1",
+ "resolved": "https://registry.npmjs.org/fastq/-/fastq-1.19.1.tgz",
+ "integrity": "sha512-GwLTyxkCXjXbxqIhTsMI2Nui8huMPtnxg7krajPJAjnEG/iiOS7i+zCtWGZR9G0NBKbXKh6X9m9UIsYX/N6vvQ==",
+ "dev": true,
+ "dependencies": {
+ "reusify": "^1.0.4"
+ }
+ },
+ "node_modules/fill-range": {
+ "version": "7.1.1",
+ "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz",
+ "integrity": "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==",
+ "dev": true,
+ "dependencies": {
+ "to-regex-range": "^5.0.1"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/foreground-child": {
+ "version": "3.3.1",
+ "resolved": "https://registry.npmjs.org/foreground-child/-/foreground-child-3.3.1.tgz",
+ "integrity": "sha512-gIXjKqtFuWEgzFRJA9WCQeSJLZDjgJUOMCMzxtvFq/37KojM1BFGufqsCy0r4qSQmYLsZYMeyRqzIWOMup03sw==",
+ "dev": true,
+ "dependencies": {
+ "cross-spawn": "^7.0.6",
+ "signal-exit": "^4.0.1"
+ },
+ "engines": {
+ "node": ">=14"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/isaacs"
+ }
+ },
+ "node_modules/fraction.js": {
+ "version": "4.3.7",
+ "resolved": "https://registry.npmjs.org/fraction.js/-/fraction.js-4.3.7.tgz",
+ "integrity": "sha512-ZsDfxO51wGAXREY55a7la9LScWpwv9RxIrYABrlvOFBlH/ShPnrtsXeuUIfXKKOVicNxQ+o8JTbJvjS4M89yew==",
+ "dev": true,
+ "engines": {
+ "node": "*"
+ },
+ "funding": {
+ "type": "patreon",
+ "url": "https://github.com/sponsors/rawify"
+ }
+ },
+ "node_modules/fs-extra": {
+ "version": "11.3.0",
+ "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-11.3.0.tgz",
+ "integrity": "sha512-Z4XaCL6dUDHfP/jT25jJKMmtxvuwbkrD1vNSMFlo9lNLY2c5FHYSQgHPRZUjAB26TpDEoW9HCOgplrdbaPV/ew==",
+ "dev": true,
+ "dependencies": {
+ "graceful-fs": "^4.2.0",
+ "jsonfile": "^6.0.1",
+ "universalify": "^2.0.0"
+ },
+ "engines": {
+ "node": ">=14.14"
+ }
+ },
+ "node_modules/fsevents": {
+ "version": "2.3.3",
+ "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz",
+ "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==",
+ "dev": true,
+ "hasInstallScript": true,
+ "optional": true,
+ "os": [
+ "darwin"
+ ],
+ "engines": {
+ "node": "^8.16.0 || ^10.6.0 || >=11.0.0"
+ }
+ },
+ "node_modules/function-bind": {
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz",
+ "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==",
+ "dev": true,
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/get-caller-file": {
+ "version": "2.0.5",
+ "resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz",
+ "integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==",
+ "license": "ISC",
+ "engines": {
+ "node": "6.* || 8.* || >= 10.*"
+ }
+ },
+ "node_modules/glob": {
+ "version": "10.4.5",
+ "resolved": "https://registry.npmjs.org/glob/-/glob-10.4.5.tgz",
+ "integrity": "sha512-7Bv8RF0k6xjo7d4A/PxYLbUCfb6c+Vpd2/mB2yRDlew7Jb5hEXiCD9ibfO7wpk8i4sevK6DFny9h7EYbM3/sHg==",
+ "dev": true,
+ "dependencies": {
+ "foreground-child": "^3.1.0",
+ "jackspeak": "^3.1.2",
+ "minimatch": "^9.0.4",
+ "minipass": "^7.1.2",
+ "package-json-from-dist": "^1.0.0",
+ "path-scurry": "^1.11.1"
+ },
+ "bin": {
+ "glob": "dist/esm/bin.mjs"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/isaacs"
+ }
+ },
+ "node_modules/glob-parent": {
+ "version": "5.1.2",
+ "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz",
+ "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==",
+ "dev": true,
+ "dependencies": {
+ "is-glob": "^4.0.1"
+ },
+ "engines": {
+ "node": ">= 6"
+ }
+ },
+ "node_modules/graceful-fs": {
+ "version": "4.2.11",
+ "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.11.tgz",
+ "integrity": "sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==",
+ "dev": true
+ },
+ "node_modules/hasown": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz",
+ "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==",
+ "dev": true,
+ "dependencies": {
+ "function-bind": "^1.1.2"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/highlight.js": {
+ "version": "11.9.0",
+ "resolved": "https://registry.npmjs.org/highlight.js/-/highlight.js-11.9.0.tgz",
+ "integrity": "sha512-fJ7cW7fQGCYAkgv4CPfwFHrfd/cLS4Hau96JuJ+ZTOWhjnhoeN1ub1tFmALm/+lW5z4WCAuAV9bm05AP0mS6Gw==",
+ "license": "BSD-3-Clause",
+ "optional": true,
+ "engines": {
+ "node": ">=12.0.0"
+ }
+ },
+ "node_modules/hogan.js": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/hogan.js/-/hogan.js-3.0.2.tgz",
+ "integrity": "sha512-RqGs4wavGYJWE07t35JQccByczmNUXQT0E12ZYV1VKYu5UiAU9lsos/yBAcf840+zrUQQxgVduCR5/B8nNtibg==",
+ "dependencies": {
+ "mkdirp": "0.3.0",
+ "nopt": "1.0.10"
+ },
+ "bin": {
+ "hulk": "bin/hulk"
+ }
+ },
+ "node_modules/iconv-lite": {
+ "version": "0.6.3",
+ "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.6.3.tgz",
+ "integrity": "sha512-4fCk79wshMdzMp2rH06qWrJE4iolqLhCUH+OiuIgU++RB0+94NlDL81atO7GX55uUKueo0txHNtvEyI6D7WdMw==",
+ "license": "MIT",
+ "dependencies": {
+ "safer-buffer": ">= 2.1.2 < 3.0.0"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/internmap": {
+ "version": "2.0.3",
+ "resolved": "https://registry.npmjs.org/internmap/-/internmap-2.0.3.tgz",
+ "integrity": "sha512-5Hh7Y1wQbvY5ooGgPbDaL5iYLAPzMTUrjMulskHLH6wnv/A+1q5rgEaiuqEjB+oxGXIVZs1FF+R/KPN3ZSQYYg==",
+ "license": "ISC",
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/is-binary-path": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/is-binary-path/-/is-binary-path-2.1.0.tgz",
+ "integrity": "sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==",
+ "dev": true,
+ "dependencies": {
+ "binary-extensions": "^2.0.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/is-core-module": {
+ "version": "2.16.1",
+ "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.16.1.tgz",
+ "integrity": "sha512-UfoeMA6fIJ8wTYFEUjelnaGI67v6+N7qXJEvQuIGa99l4xsCruSYOVSQ0uPANn4dAzm8lkYPaKLrrijLq7x23w==",
+ "dev": true,
+ "dependencies": {
+ "hasown": "^2.0.2"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/is-extglob": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz",
+ "integrity": "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==",
+ "dev": true,
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/is-fullwidth-code-point": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz",
+ "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==",
+ "license": "MIT",
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/is-glob": {
+ "version": "4.0.3",
+ "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz",
+ "integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==",
+ "dev": true,
+ "dependencies": {
+ "is-extglob": "^2.1.1"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/is-number": {
+ "version": "7.0.0",
+ "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz",
+ "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==",
+ "dev": true,
+ "engines": {
+ "node": ">=0.12.0"
+ }
+ },
+ "node_modules/isexe": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz",
+ "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==",
+ "dev": true
+ },
+ "node_modules/jackspeak": {
+ "version": "3.4.3",
+ "resolved": "https://registry.npmjs.org/jackspeak/-/jackspeak-3.4.3.tgz",
+ "integrity": "sha512-OGlZQpz2yfahA/Rd1Y8Cd9SIEsqvXkLVoSw/cgwhnhFMDbsQFeZYoJJ7bIZBS9BcamUW96asq/npPWugM+RQBw==",
+ "dev": true,
+ "dependencies": {
+ "@isaacs/cliui": "^8.0.2"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/isaacs"
+ },
+ "optionalDependencies": {
+ "@pkgjs/parseargs": "^0.11.0"
+ }
+ },
+ "node_modules/jiti": {
+ "version": "1.21.7",
+ "resolved": "https://registry.npmjs.org/jiti/-/jiti-1.21.7.tgz",
+ "integrity": "sha512-/imKNG4EbWNrVjoNC/1H5/9GFy+tqjGBHCaSsN+P2RnPqjsLmv6UD3Ej+Kj8nBWaRAwyk7kK5ZUc+OEatnTR3A==",
+ "dev": true,
+ "bin": {
+ "jiti": "bin/jiti.js"
+ }
+ },
+ "node_modules/json-stringify-pretty-compact": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/json-stringify-pretty-compact/-/json-stringify-pretty-compact-4.0.0.tgz",
+ "integrity": "sha512-3CNZ2DnrpByG9Nqj6Xo8vqbjT4F6N+tb4Gb28ESAZjYZ5yqvmc56J+/kuIwkaAMOyblTQhUW7PxMkUb8Q36N3Q==",
+ "license": "MIT"
+ },
+ "node_modules/jsonfile": {
+ "version": "6.1.0",
+ "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.1.0.tgz",
+ "integrity": "sha512-5dgndWOriYSm5cnYaJNhalLNDKOqFwyDB/rr1E9ZsGciGvKPs8R2xYGCacuf3z6K1YKDz182fd+fY3cn3pMqXQ==",
+ "dev": true,
+ "dependencies": {
+ "universalify": "^2.0.0"
+ },
+ "optionalDependencies": {
+ "graceful-fs": "^4.1.6"
+ }
+ },
+ "node_modules/lilconfig": {
+ "version": "3.1.3",
+ "resolved": "https://registry.npmjs.org/lilconfig/-/lilconfig-3.1.3.tgz",
+ "integrity": "sha512-/vlFKAoH5Cgt3Ie+JLhRbwOsCQePABiU3tJ1egGvyQ+33R/vcwM2Zl2QR/LzjsBeItPt3oSVXapn+m4nQDvpzw==",
+ "dev": true,
+ "engines": {
+ "node": ">=14"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/antonk52"
+ }
+ },
+ "node_modules/lines-and-columns": {
+ "version": "1.2.4",
+ "resolved": "https://registry.npmjs.org/lines-and-columns/-/lines-and-columns-1.2.4.tgz",
+ "integrity": "sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg==",
+ "dev": true
+ },
+ "node_modules/lit-html": {
+ "version": "3.2.1",
+ "resolved": "https://registry.npmjs.org/lit-html/-/lit-html-3.2.1.tgz",
+ "integrity": "sha512-qI/3lziaPMSKsrwlxH/xMgikhQ0EGOX2ICU73Bi/YHFvz2j/yMCIrw4+puF2IpQ4+upd3EWbvnHM9+PnJn48YA==",
+ "license": "BSD-3-Clause",
+ "dependencies": {
+ "@types/trusted-types": "^2.0.2"
+ }
+ },
+ "node_modules/lru-cache": {
+ "version": "10.4.3",
+ "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-10.4.3.tgz",
+ "integrity": "sha512-JNAzZcXrCt42VGLuYz0zfAzDfAvJWW6AfYlDBQyDV5DClI2m5sAmK+OIO7s59XfsRsWHp02jAJrRadPRGTt6SQ==",
+ "dev": true
+ },
+ "node_modules/marked": {
+ "version": "15.0.7",
+ "resolved": "https://registry.npmjs.org/marked/-/marked-15.0.7.tgz",
+ "integrity": "sha512-dgLIeKGLx5FwziAnsk4ONoGwHwGPJzselimvlVskE9XLN4Orv9u2VA3GWw/lYUqjfA0rUT/6fqKwfZJapP9BEg==",
+ "license": "MIT",
+ "bin": {
+ "marked": "bin/marked.js"
+ },
+ "engines": {
+ "node": ">= 18"
+ }
+ },
+ "node_modules/merge2": {
+ "version": "1.4.1",
+ "resolved": "https://registry.npmjs.org/merge2/-/merge2-1.4.1.tgz",
+ "integrity": "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==",
+ "dev": true,
+ "engines": {
+ "node": ">= 8"
+ }
+ },
+ "node_modules/micromatch": {
+ "version": "4.0.8",
+ "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.8.tgz",
+ "integrity": "sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==",
+ "dev": true,
+ "dependencies": {
+ "braces": "^3.0.3",
+ "picomatch": "^2.3.1"
+ },
+ "engines": {
+ "node": ">=8.6"
+ }
+ },
+ "node_modules/minimatch": {
+ "version": "9.0.5",
+ "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.5.tgz",
+ "integrity": "sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==",
+ "dev": true,
+ "dependencies": {
+ "brace-expansion": "^2.0.1"
+ },
+ "engines": {
+ "node": ">=16 || 14 >=14.17"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/isaacs"
+ }
+ },
+ "node_modules/minipass": {
+ "version": "7.1.2",
+ "resolved": "https://registry.npmjs.org/minipass/-/minipass-7.1.2.tgz",
+ "integrity": "sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==",
+ "dev": true,
+ "engines": {
+ "node": ">=16 || 14 >=14.17"
+ }
+ },
+ "node_modules/mkdirp": {
+ "version": "0.3.0",
+ "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.3.0.tgz",
+ "integrity": "sha512-OHsdUcVAQ6pOtg5JYWpCBo9W/GySVuwvP9hueRMW7UqshC0tbfzLv8wjySTPm3tfUZ/21CE9E1pJagOA91Pxew==",
+ "deprecated": "Legacy versions of mkdirp are no longer supported. Please update to mkdirp 1.x. (Note that the API surface has changed to use Promises in 1.x.)",
+ "license": "MIT/X11",
+ "engines": {
+ "node": "*"
+ }
+ },
+ "node_modules/mz": {
+ "version": "2.7.0",
+ "resolved": "https://registry.npmjs.org/mz/-/mz-2.7.0.tgz",
+ "integrity": "sha512-z81GNO7nnYMEhrGh9LeymoE4+Yr0Wn5McHIZMK5cfQCl+NDX08sCZgUc9/6MHni9IWuFLm1Z3HTCXu2z9fN62Q==",
+ "dev": true,
+ "dependencies": {
+ "any-promise": "^1.0.0",
+ "object-assign": "^4.0.1",
+ "thenify-all": "^1.0.0"
+ }
+ },
+ "node_modules/nanoid": {
+ "version": "3.3.11",
+ "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.11.tgz",
+ "integrity": "sha512-N8SpfPUnUp1bK+PMYW8qSWdl9U+wwNWI4QKxOYDy9JAro3WMX7p2OeVRF9v+347pnakNevPmiHhNmZ2HbFA76w==",
+ "dev": true,
+ "funding": [
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/ai"
+ }
+ ],
+ "bin": {
+ "nanoid": "bin/nanoid.cjs"
+ },
+ "engines": {
+ "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1"
+ }
+ },
+ "node_modules/node-fetch": {
+ "version": "2.7.0",
+ "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.7.0.tgz",
+ "integrity": "sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A==",
+ "license": "MIT",
+ "dependencies": {
+ "whatwg-url": "^5.0.0"
+ },
+ "engines": {
+ "node": "4.x || >=6.0.0"
+ },
+ "peerDependencies": {
+ "encoding": "^0.1.0"
+ },
+ "peerDependenciesMeta": {
+ "encoding": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/node-releases": {
+ "version": "2.0.19",
+ "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.19.tgz",
+ "integrity": "sha512-xxOWJsBKtzAq7DY0J+DTzuz58K8e7sJbdgwkbMWQe8UYB6ekmsQ45q0M/tJDsGaZmbC+l7n57UV8Hl5tHxO9uw==",
+ "dev": true
+ },
+ "node_modules/nopt": {
+ "version": "1.0.10",
+ "resolved": "https://registry.npmjs.org/nopt/-/nopt-1.0.10.tgz",
+ "integrity": "sha512-NWmpvLSqUrgrAC9HCuxEvb+PSloHpqVu+FqcO4eeF2h5qYRhA7ev6KvelyQAKtegUbC6RypJnlEOhd8vloNKYg==",
+ "license": "MIT",
+ "dependencies": {
+ "abbrev": "1"
+ },
+ "bin": {
+ "nopt": "bin/nopt.js"
+ },
+ "engines": {
+ "node": "*"
+ }
+ },
+ "node_modules/normalize-path": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz",
+ "integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==",
+ "dev": true,
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/normalize-range": {
+ "version": "0.1.2",
+ "resolved": "https://registry.npmjs.org/normalize-range/-/normalize-range-0.1.2.tgz",
+ "integrity": "sha512-bdok/XvKII3nUpklnV6P2hxtMNrCboOjAcyBuQnWEhO665FwrSNRxU+AqpsyvO6LgGYPspN+lu5CLtw4jPRKNA==",
+ "dev": true,
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/object-assign": {
+ "version": "4.1.1",
+ "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz",
+ "integrity": "sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==",
+ "dev": true,
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/object-hash": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/object-hash/-/object-hash-3.0.0.tgz",
+ "integrity": "sha512-RSn9F68PjH9HqtltsSnqYC1XXoWe9Bju5+213R98cNGttag9q9yAOTzdbsqvIa7aNm5WffBZFpWYr2aWrklWAw==",
+ "dev": true,
+ "engines": {
+ "node": ">= 6"
+ }
+ },
+ "node_modules/package-json-from-dist": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/package-json-from-dist/-/package-json-from-dist-1.0.1.tgz",
+ "integrity": "sha512-UEZIS3/by4OC8vL3P2dTXRETpebLI2NiI5vIrjaD/5UtrkFX/tNbwjTSRAGC/+7CAo2pIcBaRgWmcBBHcsaCIw==",
+ "dev": true
+ },
+ "node_modules/path-key": {
+ "version": "3.1.1",
+ "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz",
+ "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==",
+ "dev": true,
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/path-parse": {
+ "version": "1.0.7",
+ "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz",
+ "integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==",
+ "dev": true
+ },
+ "node_modules/path-scurry": {
+ "version": "1.11.1",
+ "resolved": "https://registry.npmjs.org/path-scurry/-/path-scurry-1.11.1.tgz",
+ "integrity": "sha512-Xa4Nw17FS9ApQFJ9umLiJS4orGjm7ZzwUrwamcGQuHSzDyth9boKDaycYdDcZDuqYATXw4HFXgaqWTctW/v1HA==",
+ "dev": true,
+ "dependencies": {
+ "lru-cache": "^10.2.0",
+ "minipass": "^5.0.0 || ^6.0.2 || ^7.0.0"
+ },
+ "engines": {
+ "node": ">=16 || 14 >=14.18"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/isaacs"
+ }
+ },
+ "node_modules/picocolors": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.1.tgz",
+ "integrity": "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==",
+ "dev": true
+ },
+ "node_modules/picomatch": {
+ "version": "2.3.1",
+ "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz",
+ "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==",
+ "dev": true,
+ "engines": {
+ "node": ">=8.6"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/jonschlinkert"
+ }
+ },
+ "node_modules/pify": {
+ "version": "2.3.0",
+ "resolved": "https://registry.npmjs.org/pify/-/pify-2.3.0.tgz",
+ "integrity": "sha512-udgsAY+fTnvv7kI7aaxbqwWNb0AHiB0qBO89PZKPkoTmGOgdbrHDKD+0B2X4uTfJ/FT1R09r9gTsjUjNJotuog==",
+ "dev": true,
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/pirates": {
+ "version": "4.0.7",
+ "resolved": "https://registry.npmjs.org/pirates/-/pirates-4.0.7.tgz",
+ "integrity": "sha512-TfySrs/5nm8fQJDcBDuUng3VOUKsd7S+zqvbOTiGXHfxX4wK31ard+hoNuvkicM/2YFzlpDgABOevKSsB4G/FA==",
+ "dev": true,
+ "engines": {
+ "node": ">= 6"
+ }
+ },
+ "node_modules/postcss": {
+ "version": "8.5.3",
+ "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.5.3.tgz",
+ "integrity": "sha512-dle9A3yYxlBSrt8Fu+IpjGT8SY8hN0mlaA6GY8t0P5PjIOZemULz/E2Bnm/2dcUOena75OTNkHI76uZBNUUq3A==",
+ "dev": true,
+ "funding": [
+ {
+ "type": "opencollective",
+ "url": "https://opencollective.com/postcss/"
+ },
+ {
+ "type": "tidelift",
+ "url": "https://tidelift.com/funding/github/npm/postcss"
+ },
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/ai"
+ }
+ ],
+ "dependencies": {
+ "nanoid": "^3.3.8",
+ "picocolors": "^1.1.1",
+ "source-map-js": "^1.2.1"
+ },
+ "engines": {
+ "node": "^10 || ^12 || >=14"
+ }
+ },
+ "node_modules/postcss-cli": {
+ "version": "11.0.1",
+ "resolved": "https://registry.npmjs.org/postcss-cli/-/postcss-cli-11.0.1.tgz",
+ "integrity": "sha512-0UnkNPSayHKRe/tc2YGW6XnSqqOA9eqpiRMgRlV1S6HdGi16vwJBx7lviARzbV1HpQHqLLRH3o8vTcB0cLc+5g==",
+ "dev": true,
+ "dependencies": {
+ "chokidar": "^3.3.0",
+ "dependency-graph": "^1.0.0",
+ "fs-extra": "^11.0.0",
+ "picocolors": "^1.0.0",
+ "postcss-load-config": "^5.0.0",
+ "postcss-reporter": "^7.0.0",
+ "pretty-hrtime": "^1.0.3",
+ "read-cache": "^1.0.0",
+ "slash": "^5.0.0",
+ "tinyglobby": "^0.2.12",
+ "yargs": "^17.0.0"
+ },
+ "bin": {
+ "postcss": "index.js"
+ },
+ "engines": {
+ "node": ">=18"
+ },
+ "peerDependencies": {
+ "postcss": "^8.0.0"
+ }
+ },
+ "node_modules/postcss-import": {
+ "version": "15.1.0",
+ "resolved": "https://registry.npmjs.org/postcss-import/-/postcss-import-15.1.0.tgz",
+ "integrity": "sha512-hpr+J05B2FVYUAXHeK1YyI267J/dDDhMU6B6civm8hSY1jYJnBXxzKDKDswzJmtLHryrjhnDjqqp/49t8FALew==",
+ "dev": true,
+ "dependencies": {
+ "postcss-value-parser": "^4.0.0",
+ "read-cache": "^1.0.0",
+ "resolve": "^1.1.7"
+ },
+ "engines": {
+ "node": ">=14.0.0"
+ },
+ "peerDependencies": {
+ "postcss": "^8.0.0"
+ }
+ },
+ "node_modules/postcss-js": {
+ "version": "4.0.1",
+ "resolved": "https://registry.npmjs.org/postcss-js/-/postcss-js-4.0.1.tgz",
+ "integrity": "sha512-dDLF8pEO191hJMtlHFPRa8xsizHaM82MLfNkUHdUtVEV3tgTp5oj+8qbEqYM57SLfc74KSbw//4SeJma2LRVIw==",
+ "dev": true,
+ "dependencies": {
+ "camelcase-css": "^2.0.1"
+ },
+ "engines": {
+ "node": "^12 || ^14 || >= 16"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/postcss/"
+ },
+ "peerDependencies": {
+ "postcss": "^8.4.21"
+ }
+ },
+ "node_modules/postcss-load-config": {
+ "version": "5.1.0",
+ "resolved": "https://registry.npmjs.org/postcss-load-config/-/postcss-load-config-5.1.0.tgz",
+ "integrity": "sha512-G5AJ+IX0aD0dygOE0yFZQ/huFFMSNneyfp0e3/bT05a8OfPC5FUoZRPfGijUdGOJNMewJiwzcHJXFafFzeKFVA==",
+ "dev": true,
+ "funding": [
+ {
+ "type": "opencollective",
+ "url": "https://opencollective.com/postcss/"
+ },
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/ai"
+ }
+ ],
+ "dependencies": {
+ "lilconfig": "^3.1.1",
+ "yaml": "^2.4.2"
+ },
+ "engines": {
+ "node": ">= 18"
+ },
+ "peerDependencies": {
+ "jiti": ">=1.21.0",
+ "postcss": ">=8.0.9",
+ "tsx": "^4.8.1"
+ },
+ "peerDependenciesMeta": {
+ "jiti": {
+ "optional": true
+ },
+ "postcss": {
+ "optional": true
+ },
+ "tsx": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/postcss-nested": {
+ "version": "6.2.0",
+ "resolved": "https://registry.npmjs.org/postcss-nested/-/postcss-nested-6.2.0.tgz",
+ "integrity": "sha512-HQbt28KulC5AJzG+cZtj9kvKB93CFCdLvog1WFLf1D+xmMvPGlBstkpTEZfK5+AN9hfJocyBFCNiqyS48bpgzQ==",
+ "dev": true,
+ "funding": [
+ {
+ "type": "opencollective",
+ "url": "https://opencollective.com/postcss/"
+ },
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/ai"
+ }
+ ],
+ "dependencies": {
+ "postcss-selector-parser": "^6.1.1"
+ },
+ "engines": {
+ "node": ">=12.0"
+ },
+ "peerDependencies": {
+ "postcss": "^8.2.14"
+ }
+ },
+ "node_modules/postcss-reporter": {
+ "version": "7.1.0",
+ "resolved": "https://registry.npmjs.org/postcss-reporter/-/postcss-reporter-7.1.0.tgz",
+ "integrity": "sha512-/eoEylGWyy6/DOiMP5lmFRdmDKThqgn7D6hP2dXKJI/0rJSO1ADFNngZfDzxL0YAxFvws+Rtpuji1YIHj4mySA==",
+ "dev": true,
+ "funding": [
+ {
+ "type": "opencollective",
+ "url": "https://opencollective.com/postcss/"
+ },
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/ai"
+ }
+ ],
+ "dependencies": {
+ "picocolors": "^1.0.0",
+ "thenby": "^1.3.4"
+ },
+ "engines": {
+ "node": ">=10"
+ },
+ "peerDependencies": {
+ "postcss": "^8.1.0"
+ }
+ },
+ "node_modules/postcss-selector-parser": {
+ "version": "6.1.2",
+ "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-6.1.2.tgz",
+ "integrity": "sha512-Q8qQfPiZ+THO/3ZrOrO0cJJKfpYCagtMUkXbnEfmgUjwXg6z/WBeOyS9APBBPCTSiDV+s4SwQGu8yFsiMRIudg==",
+ "dev": true,
+ "dependencies": {
+ "cssesc": "^3.0.0",
+ "util-deprecate": "^1.0.2"
+ },
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/postcss-value-parser": {
+ "version": "4.2.0",
+ "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-4.2.0.tgz",
+ "integrity": "sha512-1NNCs6uurfkVbeXG4S8JFT9t19m45ICnif8zWLd5oPSZ50QnwMfK+H3jv408d4jw/7Bttv5axS5IiHoLaVNHeQ==",
+ "dev": true
+ },
+ "node_modules/pretty-hrtime": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/pretty-hrtime/-/pretty-hrtime-1.0.3.tgz",
+ "integrity": "sha512-66hKPCr+72mlfiSjlEB1+45IjXSqvVAIy6mocupoww4tBFE9R9IhwwUGoI4G++Tc9Aq+2rxOt0RFU6gPcrte0A==",
+ "dev": true,
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/queue-microtask": {
+ "version": "1.2.3",
+ "resolved": "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz",
+ "integrity": "sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==",
+ "dev": true,
+ "funding": [
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/feross"
+ },
+ {
+ "type": "patreon",
+ "url": "https://www.patreon.com/feross"
+ },
+ {
+ "type": "consulting",
+ "url": "https://feross.org/support"
+ }
+ ]
+ },
+ "node_modules/read-cache": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/read-cache/-/read-cache-1.0.0.tgz",
+ "integrity": "sha512-Owdv/Ft7IjOgm/i0xvNDZ1LrRANRfew4b2prF3OWMQLxLfu3bS8FVhCsrSCMK4lR56Y9ya+AThoTpDCTxCmpRA==",
+ "dev": true,
+ "dependencies": {
+ "pify": "^2.3.0"
+ }
+ },
+ "node_modules/readdirp": {
+ "version": "3.6.0",
+ "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.6.0.tgz",
+ "integrity": "sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==",
+ "dev": true,
+ "dependencies": {
+ "picomatch": "^2.2.1"
+ },
+ "engines": {
+ "node": ">=8.10.0"
+ }
+ },
+ "node_modules/require-directory": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz",
+ "integrity": "sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q==",
+ "license": "MIT",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/resolve": {
+ "version": "1.22.10",
+ "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.10.tgz",
+ "integrity": "sha512-NPRy+/ncIMeDlTAsuqwKIiferiawhefFJtkNSW0qZJEqMEb+qBt/77B/jGeeek+F0uOeN05CDa6HXbbIgtVX4w==",
+ "dev": true,
+ "dependencies": {
+ "is-core-module": "^2.16.0",
+ "path-parse": "^1.0.7",
+ "supports-preserve-symlinks-flag": "^1.0.0"
+ },
+ "bin": {
+ "resolve": "bin/resolve"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/reusify": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/reusify/-/reusify-1.1.0.tgz",
+ "integrity": "sha512-g6QUff04oZpHs0eG5p83rFLhHeV00ug/Yf9nZM6fLeUrPguBTkTQOdpAWWspMh55TZfVQDPaN3NQJfbVRAxdIw==",
+ "dev": true,
+ "engines": {
+ "iojs": ">=1.0.0",
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/robust-predicates": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/robust-predicates/-/robust-predicates-3.0.2.tgz",
+ "integrity": "sha512-IXgzBWvWQwE6PrDI05OvmXUIruQTcoMDzRsOd5CDvHCVLcLHMTSYvOK5Cm46kWqlV3yAbuSpBZdJ5oP5OUoStg==",
+ "license": "Unlicense"
+ },
+ "node_modules/run-parallel": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/run-parallel/-/run-parallel-1.2.0.tgz",
+ "integrity": "sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==",
+ "dev": true,
+ "funding": [
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/feross"
+ },
+ {
+ "type": "patreon",
+ "url": "https://www.patreon.com/feross"
+ },
+ {
+ "type": "consulting",
+ "url": "https://feross.org/support"
+ }
+ ],
+ "dependencies": {
+ "queue-microtask": "^1.2.2"
+ }
+ },
+ "node_modules/rw": {
+ "version": "1.3.3",
+ "resolved": "https://registry.npmjs.org/rw/-/rw-1.3.3.tgz",
+ "integrity": "sha512-PdhdWy89SiZogBLaw42zdeqtRJ//zFd2PgQavcICDUgJT5oW10QCRKbJ6bg4r0/UY2M6BWd5tkxuGFRvCkgfHQ==",
+ "license": "BSD-3-Clause"
+ },
+ "node_modules/safer-buffer": {
+ "version": "2.1.2",
+ "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz",
+ "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==",
+ "license": "MIT"
+ },
+ "node_modules/semver": {
+ "version": "7.7.1",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-7.7.1.tgz",
+ "integrity": "sha512-hlq8tAfn0m/61p4BVRcPzIGr6LKiMwo4VM6dGi6pt4qcRkmNzTcWq6eCEjEh+qXjkMDvPlOFFSGwQjoEa6gyMA==",
+ "license": "ISC",
+ "bin": {
+ "semver": "bin/semver.js"
+ },
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/shebang-command": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz",
+ "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==",
+ "dev": true,
+ "dependencies": {
+ "shebang-regex": "^3.0.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/shebang-regex": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz",
+ "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==",
+ "dev": true,
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/signal-exit": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-4.1.0.tgz",
+ "integrity": "sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw==",
+ "dev": true,
+ "engines": {
+ "node": ">=14"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/isaacs"
+ }
+ },
+ "node_modules/slash": {
+ "version": "5.1.0",
+ "resolved": "https://registry.npmjs.org/slash/-/slash-5.1.0.tgz",
+ "integrity": "sha512-ZA6oR3T/pEyuqwMgAKT0/hAv8oAXckzbkmR0UkUosQ+Mc4RxGoJkRmwHgHufaenlyAgE1Mxgpdcrf75y6XcnDg==",
+ "dev": true,
+ "engines": {
+ "node": ">=14.16"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/source-map-js": {
+ "version": "1.2.1",
+ "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.1.tgz",
+ "integrity": "sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA==",
+ "dev": true,
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/string-width": {
+ "version": "4.2.3",
+ "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz",
+ "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==",
+ "license": "MIT",
+ "dependencies": {
+ "emoji-regex": "^8.0.0",
+ "is-fullwidth-code-point": "^3.0.0",
+ "strip-ansi": "^6.0.1"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/string-width-cjs": {
+ "name": "string-width",
+ "version": "4.2.3",
+ "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz",
+ "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==",
+ "dev": true,
+ "dependencies": {
+ "emoji-regex": "^8.0.0",
+ "is-fullwidth-code-point": "^3.0.0",
+ "strip-ansi": "^6.0.1"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/strip-ansi": {
+ "version": "6.0.1",
+ "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz",
+ "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==",
+ "license": "MIT",
+ "dependencies": {
+ "ansi-regex": "^5.0.1"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/strip-ansi-cjs": {
+ "name": "strip-ansi",
+ "version": "6.0.1",
+ "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz",
+ "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==",
+ "dev": true,
+ "dependencies": {
+ "ansi-regex": "^5.0.1"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/sucrase": {
+ "version": "3.35.0",
+ "resolved": "https://registry.npmjs.org/sucrase/-/sucrase-3.35.0.tgz",
+ "integrity": "sha512-8EbVDiu9iN/nESwxeSxDKe0dunta1GOlHufmSSXxMD2z2/tMZpDMpvXQGsc+ajGo8y2uYUmixaSRUc/QPoQ0GA==",
+ "dev": true,
+ "dependencies": {
+ "@jridgewell/gen-mapping": "^0.3.2",
+ "commander": "^4.0.0",
+ "glob": "^10.3.10",
+ "lines-and-columns": "^1.1.6",
+ "mz": "^2.7.0",
+ "pirates": "^4.0.1",
+ "ts-interface-checker": "^0.1.9"
+ },
+ "bin": {
+ "sucrase": "bin/sucrase",
+ "sucrase-node": "bin/sucrase-node"
+ },
+ "engines": {
+ "node": ">=16 || 14 >=14.17"
+ }
+ },
+ "node_modules/sucrase/node_modules/commander": {
+ "version": "4.1.1",
+ "resolved": "https://registry.npmjs.org/commander/-/commander-4.1.1.tgz",
+ "integrity": "sha512-NOKm8xhkzAjzFx8B2v5OAHT+u5pRQc2UCa2Vq9jYL/31o2wi9mxBA7LIFs3sV5VSC49z6pEhfbMULvShKj26WA==",
+ "dev": true,
+ "engines": {
+ "node": ">= 6"
+ }
+ },
+ "node_modules/supports-preserve-symlinks-flag": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz",
+ "integrity": "sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==",
+ "dev": true,
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/tailwindcss": {
+ "version": "3.4.1",
+ "resolved": "https://registry.npmjs.org/tailwindcss/-/tailwindcss-3.4.1.tgz",
+ "integrity": "sha512-qAYmXRfk3ENzuPBakNK0SRrUDipP8NQnEY6772uDhflcQz5EhRdD7JNZxyrFHVQNCwULPBn6FNPp9brpO7ctcA==",
+ "dev": true,
+ "dependencies": {
+ "@alloc/quick-lru": "^5.2.0",
+ "arg": "^5.0.2",
+ "chokidar": "^3.5.3",
+ "didyoumean": "^1.2.2",
+ "dlv": "^1.1.3",
+ "fast-glob": "^3.3.0",
+ "glob-parent": "^6.0.2",
+ "is-glob": "^4.0.3",
+ "jiti": "^1.19.1",
+ "lilconfig": "^2.1.0",
+ "micromatch": "^4.0.5",
+ "normalize-path": "^3.0.0",
+ "object-hash": "^3.0.0",
+ "picocolors": "^1.0.0",
+ "postcss": "^8.4.23",
+ "postcss-import": "^15.1.0",
+ "postcss-js": "^4.0.1",
+ "postcss-load-config": "^4.0.1",
+ "postcss-nested": "^6.0.1",
+ "postcss-selector-parser": "^6.0.11",
+ "resolve": "^1.22.2",
+ "sucrase": "^3.32.0"
+ },
+ "bin": {
+ "tailwind": "lib/cli.js",
+ "tailwindcss": "lib/cli.js"
+ },
+ "engines": {
+ "node": ">=14.0.0"
+ }
+ },
+ "node_modules/tailwindcss/node_modules/glob-parent": {
+ "version": "6.0.2",
+ "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-6.0.2.tgz",
+ "integrity": "sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==",
+ "dev": true,
+ "dependencies": {
+ "is-glob": "^4.0.3"
+ },
+ "engines": {
+ "node": ">=10.13.0"
+ }
+ },
+ "node_modules/tailwindcss/node_modules/lilconfig": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/lilconfig/-/lilconfig-2.1.0.tgz",
+ "integrity": "sha512-utWOt/GHzuUxnLKxB6dk81RoOeoNeHgbrXiuGk4yyF5qlRz+iIVWu56E2fqGHFrXz0QNUhLB/8nKqvRH66JKGQ==",
+ "dev": true,
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/tailwindcss/node_modules/postcss-load-config": {
+ "version": "4.0.2",
+ "resolved": "https://registry.npmjs.org/postcss-load-config/-/postcss-load-config-4.0.2.tgz",
+ "integrity": "sha512-bSVhyJGL00wMVoPUzAVAnbEoWyqRxkjv64tUl427SKnPrENtq6hJwUojroMz2VB+Q1edmi4IfrAPpami5VVgMQ==",
+ "dev": true,
+ "funding": [
+ {
+ "type": "opencollective",
+ "url": "https://opencollective.com/postcss/"
+ },
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/ai"
+ }
+ ],
+ "dependencies": {
+ "lilconfig": "^3.0.0",
+ "yaml": "^2.3.4"
+ },
+ "engines": {
+ "node": ">= 14"
+ },
+ "peerDependencies": {
+ "postcss": ">=8.0.9",
+ "ts-node": ">=9.0.0"
+ },
+ "peerDependenciesMeta": {
+ "postcss": {
+ "optional": true
+ },
+ "ts-node": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/tailwindcss/node_modules/postcss-load-config/node_modules/lilconfig": {
+ "version": "3.1.3",
+ "resolved": "https://registry.npmjs.org/lilconfig/-/lilconfig-3.1.3.tgz",
+ "integrity": "sha512-/vlFKAoH5Cgt3Ie+JLhRbwOsCQePABiU3tJ1egGvyQ+33R/vcwM2Zl2QR/LzjsBeItPt3oSVXapn+m4nQDvpzw==",
+ "dev": true,
+ "engines": {
+ "node": ">=14"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/antonk52"
+ }
+ },
+ "node_modules/thenby": {
+ "version": "1.3.4",
+ "resolved": "https://registry.npmjs.org/thenby/-/thenby-1.3.4.tgz",
+ "integrity": "sha512-89Gi5raiWA3QZ4b2ePcEwswC3me9JIg+ToSgtE0JWeCynLnLxNr/f9G+xfo9K+Oj4AFdom8YNJjibIARTJmapQ==",
+ "dev": true
+ },
+ "node_modules/thenify": {
+ "version": "3.3.1",
+ "resolved": "https://registry.npmjs.org/thenify/-/thenify-3.3.1.tgz",
+ "integrity": "sha512-RVZSIV5IG10Hk3enotrhvz0T9em6cyHBLkH/YAZuKqd8hRkKhSfCGIcP2KUY0EPxndzANBmNllzWPwak+bheSw==",
+ "dev": true,
+ "dependencies": {
+ "any-promise": "^1.0.0"
+ }
+ },
+ "node_modules/thenify-all": {
+ "version": "1.6.0",
+ "resolved": "https://registry.npmjs.org/thenify-all/-/thenify-all-1.6.0.tgz",
+ "integrity": "sha512-RNxQH/qI8/t3thXJDwcstUO4zeqo64+Uy/+sNVRBx4Xn2OX+OZ9oP+iJnNFqplFra2ZUVeKCSa2oVWi3T4uVmA==",
+ "dev": true,
+ "dependencies": {
+ "thenify": ">= 3.1.0 < 4"
+ },
+ "engines": {
+ "node": ">=0.8"
+ }
+ },
+ "node_modules/tinyglobby": {
+ "version": "0.2.12",
+ "resolved": "https://registry.npmjs.org/tinyglobby/-/tinyglobby-0.2.12.tgz",
+ "integrity": "sha512-qkf4trmKSIiMTs/E63cxH+ojC2unam7rJ0WrauAzpT3ECNTxGRMlaXxVbfxMUC/w0LaYk6jQ4y/nGR9uBO3tww==",
+ "dev": true,
+ "dependencies": {
+ "fdir": "^6.4.3",
+ "picomatch": "^4.0.2"
+ },
+ "engines": {
+ "node": ">=12.0.0"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/SuperchupuDev"
+ }
+ },
+ "node_modules/tinyglobby/node_modules/fdir": {
+ "version": "6.4.3",
+ "resolved": "https://registry.npmjs.org/fdir/-/fdir-6.4.3.tgz",
+ "integrity": "sha512-PMXmW2y1hDDfTSRc9gaXIuCCRpuoz3Kaz8cUelp3smouvfT632ozg2vrT6lJsHKKOF59YLbOGfAWGUcKEfRMQw==",
+ "dev": true,
+ "peerDependencies": {
+ "picomatch": "^3 || ^4"
+ },
+ "peerDependenciesMeta": {
+ "picomatch": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/tinyglobby/node_modules/picomatch": {
+ "version": "4.0.2",
+ "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.2.tgz",
+ "integrity": "sha512-M7BAV6Rlcy5u+m6oPhAPFgJTzAioX/6B0DxyvDlo9l8+T3nLKbrczg2WLUyzd45L8RqfUMyGPzekbMvX2Ldkwg==",
+ "dev": true,
+ "engines": {
+ "node": ">=12"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/jonschlinkert"
+ }
+ },
+ "node_modules/to-regex-range": {
+ "version": "5.0.1",
+ "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz",
+ "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==",
+ "dev": true,
+ "dependencies": {
+ "is-number": "^7.0.0"
+ },
+ "engines": {
+ "node": ">=8.0"
+ }
+ },
+ "node_modules/topojson-client": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/topojson-client/-/topojson-client-3.1.0.tgz",
+ "integrity": "sha512-605uxS6bcYxGXw9qi62XyrV6Q3xwbndjachmNxu8HWTtVPxZfEJN9fd/SZS1Q54Sn2y0TMyMxFj/cJINqGHrKw==",
+ "license": "ISC",
+ "dependencies": {
+ "commander": "2"
+ },
+ "bin": {
+ "topo2geo": "bin/topo2geo",
+ "topomerge": "bin/topomerge",
+ "topoquantize": "bin/topoquantize"
+ }
+ },
+ "node_modules/topojson-client/node_modules/commander": {
+ "version": "2.20.3",
+ "resolved": "https://registry.npmjs.org/commander/-/commander-2.20.3.tgz",
+ "integrity": "sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ==",
+ "license": "MIT"
+ },
+ "node_modules/tr46": {
+ "version": "0.0.3",
+ "resolved": "https://registry.npmjs.org/tr46/-/tr46-0.0.3.tgz",
+ "integrity": "sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw==",
+ "license": "MIT"
+ },
+ "node_modules/ts-interface-checker": {
+ "version": "0.1.13",
+ "resolved": "https://registry.npmjs.org/ts-interface-checker/-/ts-interface-checker-0.1.13.tgz",
+ "integrity": "sha512-Y/arvbn+rrz3JCKl9C4kVNfTfSm2/mEp5FSz5EsZSANGPSlQrpRI5M4PKF+mJnE52jOO90PnPSc3Ur3bTQw0gA==",
+ "dev": true
+ },
+ "node_modules/tslib": {
+ "version": "2.8.1",
+ "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.8.1.tgz",
+ "integrity": "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w==",
+ "license": "0BSD"
+ },
+ "node_modules/typescript": {
+ "version": "5.8.2",
+ "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.8.2.tgz",
+ "integrity": "sha512-aJn6wq13/afZp/jT9QZmwEjDqqvSGp1VT5GVg+f/t6/oVyrgXM6BY1h9BRh/O5p3PlUPAe+WuiEZOmb/49RqoQ==",
+ "dev": true,
+ "license": "Apache-2.0",
+ "bin": {
+ "tsc": "bin/tsc",
+ "tsserver": "bin/tsserver"
+ },
+ "engines": {
+ "node": ">=14.17"
+ }
+ },
+ "node_modules/undici-types": {
+ "version": "6.20.0",
+ "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-6.20.0.tgz",
+ "integrity": "sha512-Ny6QZ2Nju20vw1SRHe3d9jVu6gJ+4e3+MMpqu7pqE5HT6WsTSlce++GQmK5UXS8mzV8DSYHrQH+Xrf2jVcuKNg==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/universalify": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.1.tgz",
+ "integrity": "sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw==",
+ "dev": true,
+ "engines": {
+ "node": ">= 10.0.0"
+ }
+ },
+ "node_modules/update-browserslist-db": {
+ "version": "1.1.3",
+ "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.1.3.tgz",
+ "integrity": "sha512-UxhIZQ+QInVdunkDAaiazvvT/+fXL5Osr0JZlJulepYu6Jd7qJtDZjlur0emRlT71EN3ScPoE7gvsuIKKNavKw==",
+ "dev": true,
+ "funding": [
+ {
+ "type": "opencollective",
+ "url": "https://opencollective.com/browserslist"
+ },
+ {
+ "type": "tidelift",
+ "url": "https://tidelift.com/funding/github/npm/browserslist"
+ },
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/ai"
+ }
+ ],
+ "dependencies": {
+ "escalade": "^3.2.0",
+ "picocolors": "^1.1.1"
+ },
+ "bin": {
+ "update-browserslist-db": "cli.js"
+ },
+ "peerDependencies": {
+ "browserslist": ">= 4.21.0"
+ }
+ },
+ "node_modules/util-deprecate": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz",
+ "integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==",
+ "dev": true
+ },
+ "node_modules/vega": {
+ "version": "5.33.0",
+ "resolved": "https://registry.npmjs.org/vega/-/vega-5.33.0.tgz",
+ "integrity": "sha512-jNAGa7TxLojOpMMMrKMXXBos4K6AaLJbCgGDOw1YEkLRjUkh12pcf65J2lMSdEHjcEK47XXjKiOUVZ8L+MniBA==",
+ "license": "BSD-3-Clause",
+ "dependencies": {
+ "vega-crossfilter": "~4.1.3",
+ "vega-dataflow": "~5.7.7",
+ "vega-encode": "~4.10.2",
+ "vega-event-selector": "~3.0.1",
+ "vega-expression": "~5.2.0",
+ "vega-force": "~4.2.2",
+ "vega-format": "~1.1.3",
+ "vega-functions": "~5.18.0",
+ "vega-geo": "~4.4.3",
+ "vega-hierarchy": "~4.1.3",
+ "vega-label": "~1.3.1",
+ "vega-loader": "~4.5.3",
+ "vega-parser": "~6.6.0",
+ "vega-projection": "~1.6.2",
+ "vega-regression": "~1.3.1",
+ "vega-runtime": "~6.2.1",
+ "vega-scale": "~7.4.2",
+ "vega-scenegraph": "~4.13.1",
+ "vega-statistics": "~1.9.0",
+ "vega-time": "~2.1.3",
+ "vega-transforms": "~4.12.1",
+ "vega-typings": "~1.5.0",
+ "vega-util": "~1.17.2",
+ "vega-view": "~5.16.0",
+ "vega-view-transforms": "~4.6.1",
+ "vega-voronoi": "~4.2.4",
+ "vega-wordcloud": "~4.1.6"
+ }
+ },
+ "node_modules/vega-canvas": {
+ "version": "1.2.7",
+ "resolved": "https://registry.npmjs.org/vega-canvas/-/vega-canvas-1.2.7.tgz",
+ "integrity": "sha512-OkJ9CACVcN9R5Pi9uF6MZBF06pO6qFpDYHWSKBJsdHP5o724KrsgR6UvbnXFH82FdsiTOff/HqjuaG8C7FL+9Q==",
+ "license": "BSD-3-Clause"
+ },
+ "node_modules/vega-crossfilter": {
+ "version": "4.1.3",
+ "resolved": "https://registry.npmjs.org/vega-crossfilter/-/vega-crossfilter-4.1.3.tgz",
+ "integrity": "sha512-nyPJAXAUABc3EocUXvAL1J/IWotZVsApIcvOeZaUdEQEtZ7bt8VtP2nj3CLbHBA8FZZVV+K6SmdwvCOaAD4wFQ==",
+ "license": "BSD-3-Clause",
+ "dependencies": {
+ "d3-array": "^3.2.2",
+ "vega-dataflow": "^5.7.7",
+ "vega-util": "^1.17.3"
+ }
+ },
+ "node_modules/vega-dataflow": {
+ "version": "5.7.7",
+ "resolved": "https://registry.npmjs.org/vega-dataflow/-/vega-dataflow-5.7.7.tgz",
+ "integrity": "sha512-R2NX2HvgXL+u4E6u+L5lKvvRiCtnE6N6l+umgojfi53suhhkFP+zB+2UAQo4syxuZ4763H1csfkKc4xpqLzKnw==",
+ "license": "BSD-3-Clause",
+ "dependencies": {
+ "vega-format": "^1.1.3",
+ "vega-loader": "^4.5.3",
+ "vega-util": "^1.17.3"
+ }
+ },
+ "node_modules/vega-embed": {
+ "version": "6.29.0",
+ "resolved": "https://registry.npmjs.org/vega-embed/-/vega-embed-6.29.0.tgz",
+ "integrity": "sha512-PmlshTLtLFLgWtF/b23T1OwX53AugJ9RZ3qPE2c01VFAbgt3/GSNI/etzA/GzdrkceXFma+FDHNXUppKuM0U6Q==",
+ "license": "BSD-3-Clause",
+ "dependencies": {
+ "fast-json-patch": "^3.1.1",
+ "json-stringify-pretty-compact": "^4.0.0",
+ "semver": "^7.6.3",
+ "tslib": "^2.8.1",
+ "vega-interpreter": "^1.0.5",
+ "vega-schema-url-parser": "^2.2.0",
+ "vega-themes": "^2.15.0",
+ "vega-tooltip": "^0.35.2"
+ },
+ "peerDependencies": {
+ "vega": "^5.21.0",
+ "vega-lite": "*"
+ }
+ },
+ "node_modules/vega-encode": {
+ "version": "4.10.2",
+ "resolved": "https://registry.npmjs.org/vega-encode/-/vega-encode-4.10.2.tgz",
+ "integrity": "sha512-fsjEY1VaBAmqwt7Jlpz0dpPtfQFiBdP9igEefvumSpy7XUxOJmDQcRDnT3Qh9ctkv3itfPfI9g8FSnGcv2b4jQ==",
+ "license": "BSD-3-Clause",
+ "dependencies": {
+ "d3-array": "^3.2.2",
+ "d3-interpolate": "^3.0.1",
+ "vega-dataflow": "^5.7.7",
+ "vega-scale": "^7.4.2",
+ "vega-util": "^1.17.3"
+ }
+ },
+ "node_modules/vega-event-selector": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/vega-event-selector/-/vega-event-selector-3.0.1.tgz",
+ "integrity": "sha512-K5zd7s5tjr1LiOOkjGpcVls8GsH/f2CWCrWcpKy74gTCp+llCdwz0Enqo013ZlGaRNjfgD/o1caJRt3GSaec4A==",
+ "license": "BSD-3-Clause"
+ },
+ "node_modules/vega-expression": {
+ "version": "5.2.0",
+ "resolved": "https://registry.npmjs.org/vega-expression/-/vega-expression-5.2.0.tgz",
+ "integrity": "sha512-WRMa4ny3iZIVAzDlBh3ipY2QUuLk2hnJJbfbncPgvTF7BUgbIbKq947z+JicWksYbokl8n1JHXJoqi3XvpG0Zw==",
+ "license": "BSD-3-Clause",
+ "dependencies": {
+ "@types/estree": "^1.0.0",
+ "vega-util": "^1.17.3"
+ }
+ },
+ "node_modules/vega-force": {
+ "version": "4.2.2",
+ "resolved": "https://registry.npmjs.org/vega-force/-/vega-force-4.2.2.tgz",
+ "integrity": "sha512-cHZVaY2VNNIG2RyihhSiWniPd2W9R9kJq0znxzV602CgUVgxEfTKtx/lxnVCn8nNrdKAYrGiqIsBzIeKG1GWHw==",
+ "license": "BSD-3-Clause",
+ "dependencies": {
+ "d3-force": "^3.0.0",
+ "vega-dataflow": "^5.7.7",
+ "vega-util": "^1.17.3"
+ }
+ },
+ "node_modules/vega-format": {
+ "version": "1.1.3",
+ "resolved": "https://registry.npmjs.org/vega-format/-/vega-format-1.1.3.tgz",
+ "integrity": "sha512-wQhw7KR46wKJAip28FF/CicW+oiJaPAwMKdrxlnTA0Nv8Bf7bloRlc+O3kON4b4H1iALLr9KgRcYTOeXNs2MOA==",
+ "license": "BSD-3-Clause",
+ "dependencies": {
+ "d3-array": "^3.2.2",
+ "d3-format": "^3.1.0",
+ "d3-time-format": "^4.1.0",
+ "vega-time": "^2.1.3",
+ "vega-util": "^1.17.3"
+ }
+ },
+ "node_modules/vega-functions": {
+ "version": "5.18.0",
+ "resolved": "https://registry.npmjs.org/vega-functions/-/vega-functions-5.18.0.tgz",
+ "integrity": "sha512-+D+ey4bDAhZA2CChh7bRZrcqRUDevv05kd2z8xH+il7PbYQLrhi6g1zwvf8z3KpgGInFf5O13WuFK5DQGkz5lQ==",
+ "license": "BSD-3-Clause",
+ "dependencies": {
+ "d3-array": "^3.2.2",
+ "d3-color": "^3.1.0",
+ "d3-geo": "^3.1.0",
+ "vega-dataflow": "^5.7.7",
+ "vega-expression": "^5.2.0",
+ "vega-scale": "^7.4.2",
+ "vega-scenegraph": "^4.13.1",
+ "vega-selections": "^5.6.0",
+ "vega-statistics": "^1.9.0",
+ "vega-time": "^2.1.3",
+ "vega-util": "^1.17.3"
+ }
+ },
+ "node_modules/vega-geo": {
+ "version": "4.4.3",
+ "resolved": "https://registry.npmjs.org/vega-geo/-/vega-geo-4.4.3.tgz",
+ "integrity": "sha512-+WnnzEPKIU1/xTFUK3EMu2htN35gp9usNZcC0ZFg2up1/Vqu6JyZsX0PIO51oXSIeXn9bwk6VgzlOmJUcx92tA==",
+ "license": "BSD-3-Clause",
+ "dependencies": {
+ "d3-array": "^3.2.2",
+ "d3-color": "^3.1.0",
+ "d3-geo": "^3.1.0",
+ "vega-canvas": "^1.2.7",
+ "vega-dataflow": "^5.7.7",
+ "vega-projection": "^1.6.2",
+ "vega-statistics": "^1.9.0",
+ "vega-util": "^1.17.3"
+ }
+ },
+ "node_modules/vega-hierarchy": {
+ "version": "4.1.3",
+ "resolved": "https://registry.npmjs.org/vega-hierarchy/-/vega-hierarchy-4.1.3.tgz",
+ "integrity": "sha512-0Z+TYKRgOEo8XYXnJc2HWg1EGpcbNAhJ9Wpi9ubIbEyEHqIgjCIyFVN8d4nSfsJOcWDzsSmRqohBztxAhOCSaw==",
+ "license": "BSD-3-Clause",
+ "dependencies": {
+ "d3-hierarchy": "^3.1.2",
+ "vega-dataflow": "^5.7.7",
+ "vega-util": "^1.17.3"
+ }
+ },
+ "node_modules/vega-interpreter": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/vega-interpreter/-/vega-interpreter-1.2.0.tgz",
+ "integrity": "sha512-p408/0IPevyR/bIKdXGNzOixkTYCkH83zNhGypRqDxd/qVrdJVrh9RcECOYx1MwEc6JTB1BeK2lArHiGGuG7Hw==",
+ "license": "BSD-3-Clause",
+ "dependencies": {
+ "vega-util": "^1.17.3"
+ }
+ },
+ "node_modules/vega-label": {
+ "version": "1.3.1",
+ "resolved": "https://registry.npmjs.org/vega-label/-/vega-label-1.3.1.tgz",
+ "integrity": "sha512-Emx4b5s7pvuRj3fBkAJ/E2snCoZACfKAwxVId7f/4kYVlAYLb5Swq6W8KZHrH4M9Qds1XJRUYW9/Y3cceqzEFA==",
+ "license": "BSD-3-Clause",
+ "dependencies": {
+ "vega-canvas": "^1.2.7",
+ "vega-dataflow": "^5.7.7",
+ "vega-scenegraph": "^4.13.1",
+ "vega-util": "^1.17.3"
+ }
+ },
+ "node_modules/vega-lite": {
+ "version": "5.23.0",
+ "resolved": "https://registry.npmjs.org/vega-lite/-/vega-lite-5.23.0.tgz",
+ "integrity": "sha512-l4J6+AWE3DIjvovEoHl2LdtCUkfm4zs8Xxx7INwZEAv+XVb6kR6vIN1gt3t2gN2gs/y4DYTs/RPoTeYAuEg6mA==",
+ "license": "BSD-3-Clause",
+ "dependencies": {
+ "json-stringify-pretty-compact": "~4.0.0",
+ "tslib": "~2.8.1",
+ "vega-event-selector": "~3.0.1",
+ "vega-expression": "~5.1.1",
+ "vega-util": "~1.17.2",
+ "yargs": "~17.7.2"
+ },
+ "bin": {
+ "vl2pdf": "bin/vl2pdf",
+ "vl2png": "bin/vl2png",
+ "vl2svg": "bin/vl2svg",
+ "vl2vg": "bin/vl2vg"
+ },
+ "engines": {
+ "node": ">=18"
+ },
+ "peerDependencies": {
+ "vega": "^5.24.0"
+ }
+ },
+ "node_modules/vega-lite/node_modules/vega-expression": {
+ "version": "5.1.2",
+ "resolved": "https://registry.npmjs.org/vega-expression/-/vega-expression-5.1.2.tgz",
+ "integrity": "sha512-fFeDTh4UtOxlZWL54jf1ZqJHinyerWq/ROiqrQxqLkNJRJ86RmxYTgXwt65UoZ/l4VUv9eAd2qoJeDEf610Umw==",
+ "license": "BSD-3-Clause",
+ "dependencies": {
+ "@types/estree": "^1.0.0",
+ "vega-util": "^1.17.3"
+ }
+ },
+ "node_modules/vega-loader": {
+ "version": "4.5.3",
+ "resolved": "https://registry.npmjs.org/vega-loader/-/vega-loader-4.5.3.tgz",
+ "integrity": "sha512-dUfIpxTLF2magoMaur+jXGvwMxjtdlDZaIS8lFj6N7IhUST6nIvBzuUlRM+zLYepI5GHtCLOnqdKU4XV0NggCA==",
+ "license": "BSD-3-Clause",
+ "dependencies": {
+ "d3-dsv": "^3.0.1",
+ "node-fetch": "^2.6.7",
+ "topojson-client": "^3.1.0",
+ "vega-format": "^1.1.3",
+ "vega-util": "^1.17.3"
+ }
+ },
+ "node_modules/vega-parser": {
+ "version": "6.6.0",
+ "resolved": "https://registry.npmjs.org/vega-parser/-/vega-parser-6.6.0.tgz",
+ "integrity": "sha512-jltyrwCTtWeidi/6VotLCybhIl+ehwnzvFWYOdWNUP0z/EskdB64YmawNwjCjzTBMemeiQtY6sJPPbewYqe3Vg==",
+ "license": "BSD-3-Clause",
+ "dependencies": {
+ "vega-dataflow": "^5.7.7",
+ "vega-event-selector": "^3.0.1",
+ "vega-functions": "^5.18.0",
+ "vega-scale": "^7.4.2",
+ "vega-util": "^1.17.3"
+ }
+ },
+ "node_modules/vega-projection": {
+ "version": "1.6.2",
+ "resolved": "https://registry.npmjs.org/vega-projection/-/vega-projection-1.6.2.tgz",
+ "integrity": "sha512-3pcVaQL9R3Zfk6PzopLX6awzrQUeYOXJzlfLGP2Xd93mqUepBa6m/reVrTUoSFXA3v9lfK4W/PS2AcVzD/MIcQ==",
+ "license": "BSD-3-Clause",
+ "dependencies": {
+ "d3-geo": "^3.1.0",
+ "d3-geo-projection": "^4.0.0",
+ "vega-scale": "^7.4.2"
+ }
+ },
+ "node_modules/vega-regression": {
+ "version": "1.3.1",
+ "resolved": "https://registry.npmjs.org/vega-regression/-/vega-regression-1.3.1.tgz",
+ "integrity": "sha512-AmccF++Z9uw4HNZC/gmkQGe6JsRxTG/R4QpbcSepyMvQN1Rj5KtVqMcmVFP1r3ivM4dYGFuPlzMWvuqp0iKMkQ==",
+ "license": "BSD-3-Clause",
+ "dependencies": {
+ "d3-array": "^3.2.2",
+ "vega-dataflow": "^5.7.7",
+ "vega-statistics": "^1.9.0",
+ "vega-util": "^1.17.3"
+ }
+ },
+ "node_modules/vega-runtime": {
+ "version": "6.2.1",
+ "resolved": "https://registry.npmjs.org/vega-runtime/-/vega-runtime-6.2.1.tgz",
+ "integrity": "sha512-b4eot3tWKCk++INWqot+6sLn3wDTj/HE+tRSbiaf8aecuniPMlwJEK7wWuhVGeW2Ae5n8fI/8TeTViaC94bNHA==",
+ "license": "BSD-3-Clause",
+ "dependencies": {
+ "vega-dataflow": "^5.7.7",
+ "vega-util": "^1.17.3"
+ }
+ },
+ "node_modules/vega-scale": {
+ "version": "7.4.2",
+ "resolved": "https://registry.npmjs.org/vega-scale/-/vega-scale-7.4.2.tgz",
+ "integrity": "sha512-o6Hl76aU1jlCK7Q8DPYZ8OGsp4PtzLdzI6nGpLt8rxoE78QuB3GBGEwGAQJitp4IF7Lb2rL5oAXEl3ZP6xf9jg==",
+ "license": "BSD-3-Clause",
+ "dependencies": {
+ "d3-array": "^3.2.2",
+ "d3-interpolate": "^3.0.1",
+ "d3-scale": "^4.0.2",
+ "d3-scale-chromatic": "^3.1.0",
+ "vega-time": "^2.1.3",
+ "vega-util": "^1.17.3"
+ }
+ },
+ "node_modules/vega-scenegraph": {
+ "version": "4.13.1",
+ "resolved": "https://registry.npmjs.org/vega-scenegraph/-/vega-scenegraph-4.13.1.tgz",
+ "integrity": "sha512-LFY9+sLIxRfdDI9ZTKjLoijMkIAzPLBWHpPkwv4NPYgdyx+0qFmv+puBpAUGUY9VZqAZ736Uj5NJY9zw+/M3yQ==",
+ "license": "BSD-3-Clause",
+ "dependencies": {
+ "d3-path": "^3.1.0",
+ "d3-shape": "^3.2.0",
+ "vega-canvas": "^1.2.7",
+ "vega-loader": "^4.5.3",
+ "vega-scale": "^7.4.2",
+ "vega-util": "^1.17.3"
+ }
+ },
+ "node_modules/vega-schema-url-parser": {
+ "version": "2.2.0",
+ "resolved": "https://registry.npmjs.org/vega-schema-url-parser/-/vega-schema-url-parser-2.2.0.tgz",
+ "integrity": "sha512-yAtdBnfYOhECv9YC70H2gEiqfIbVkq09aaE4y/9V/ovEFmH9gPKaEgzIZqgT7PSPQjKhsNkb6jk6XvSoboxOBw==",
+ "license": "BSD-3-Clause"
+ },
+ "node_modules/vega-selections": {
+ "version": "5.6.0",
+ "resolved": "https://registry.npmjs.org/vega-selections/-/vega-selections-5.6.0.tgz",
+ "integrity": "sha512-UE2w78rUUbaV3Ph+vQbQDwh8eywIJYRxBiZdxEG/Tr/KtFMLdy2BDgNZuuDO1Nv8jImPJwONmqjNhNDYwM0VJQ==",
+ "license": "BSD-3-Clause",
+ "dependencies": {
+ "d3-array": "3.2.4",
+ "vega-expression": "^5.2.0",
+ "vega-util": "^1.17.3"
+ }
+ },
+ "node_modules/vega-statistics": {
+ "version": "1.9.0",
+ "resolved": "https://registry.npmjs.org/vega-statistics/-/vega-statistics-1.9.0.tgz",
+ "integrity": "sha512-GAqS7mkatpXcMCQKWtFu1eMUKLUymjInU0O8kXshWaQrVWjPIO2lllZ1VNhdgE0qGj4oOIRRS11kzuijLshGXQ==",
+ "license": "BSD-3-Clause",
+ "dependencies": {
+ "d3-array": "^3.2.2"
+ }
+ },
+ "node_modules/vega-themes": {
+ "version": "2.15.0",
+ "resolved": "https://registry.npmjs.org/vega-themes/-/vega-themes-2.15.0.tgz",
+ "integrity": "sha512-DicRAKG9z+23A+rH/3w3QjJvKnlGhSbbUXGjBvYGseZ1lvj9KQ0BXZ2NS/+MKns59LNpFNHGi9us/wMlci4TOA==",
+ "license": "BSD-3-Clause",
+ "peerDependencies": {
+ "vega": "*",
+ "vega-lite": "*"
+ }
+ },
+ "node_modules/vega-time": {
+ "version": "2.1.3",
+ "resolved": "https://registry.npmjs.org/vega-time/-/vega-time-2.1.3.tgz",
+ "integrity": "sha512-hFcWPdTV844IiY0m97+WUoMLADCp+8yUQR1NStWhzBzwDDA7QEGGwYGxALhdMOaDTwkyoNj3V/nox2rQAJD/vQ==",
+ "license": "BSD-3-Clause",
+ "dependencies": {
+ "d3-array": "^3.2.2",
+ "d3-time": "^3.1.0",
+ "vega-util": "^1.17.3"
+ }
+ },
+ "node_modules/vega-tooltip": {
+ "version": "0.35.2",
+ "resolved": "https://registry.npmjs.org/vega-tooltip/-/vega-tooltip-0.35.2.tgz",
+ "integrity": "sha512-kuYcsAAKYn39ye5wKf2fq1BAxVcjoz0alvKp/G+7BWfIb94J0PHmwrJ5+okGefeStZnbXxINZEOKo7INHaj9GA==",
+ "license": "BSD-3-Clause",
+ "dependencies": {
+ "vega-util": "^1.17.2"
+ },
+ "optionalDependencies": {
+ "@rollup/rollup-linux-x64-gnu": "^4.24.4"
+ }
+ },
+ "node_modules/vega-transforms": {
+ "version": "4.12.1",
+ "resolved": "https://registry.npmjs.org/vega-transforms/-/vega-transforms-4.12.1.tgz",
+ "integrity": "sha512-Qxo+xeEEftY1jYyKgzOGc9NuW4/MqGm1YPZ5WrL9eXg2G0410Ne+xL/MFIjHF4hRX+3mgFF4Io2hPpfy/thjLg==",
+ "license": "BSD-3-Clause",
+ "dependencies": {
+ "d3-array": "^3.2.2",
+ "vega-dataflow": "^5.7.7",
+ "vega-statistics": "^1.9.0",
+ "vega-time": "^2.1.3",
+ "vega-util": "^1.17.3"
+ }
+ },
+ "node_modules/vega-typings": {
+ "version": "1.5.0",
+ "resolved": "https://registry.npmjs.org/vega-typings/-/vega-typings-1.5.0.tgz",
+ "integrity": "sha512-tcZ2HwmiQEOXIGyBMP8sdCnoFoVqHn4KQ4H0MQiHwzFU1hb1EXURhfc+Uamthewk4h/9BICtAM3AFQMjBGpjQA==",
+ "license": "BSD-3-Clause",
+ "dependencies": {
+ "@types/geojson": "7946.0.4",
+ "vega-event-selector": "^3.0.1",
+ "vega-expression": "^5.2.0",
+ "vega-util": "^1.17.3"
+ }
+ },
+ "node_modules/vega-util": {
+ "version": "1.17.3",
+ "resolved": "https://registry.npmjs.org/vega-util/-/vega-util-1.17.3.tgz",
+ "integrity": "sha512-nSNpZLUrRvFo46M5OK4O6x6f08WD1yOcEzHNlqivF+sDLSsVpstaF6fdJYwrbf/debFi2L9Tkp4gZQtssup9iQ==",
+ "license": "BSD-3-Clause"
+ },
+ "node_modules/vega-view": {
+ "version": "5.16.0",
+ "resolved": "https://registry.npmjs.org/vega-view/-/vega-view-5.16.0.tgz",
+ "integrity": "sha512-Nxp1MEAY+8bphIm+7BeGFzWPoJnX9+hgvze6wqCAPoM69YiyVR0o0VK8M2EESIL+22+Owr0Fdy94hWHnmon5tQ==",
+ "license": "BSD-3-Clause",
+ "dependencies": {
+ "d3-array": "^3.2.2",
+ "d3-timer": "^3.0.1",
+ "vega-dataflow": "^5.7.7",
+ "vega-format": "^1.1.3",
+ "vega-functions": "^5.18.0",
+ "vega-runtime": "^6.2.1",
+ "vega-scenegraph": "^4.13.1",
+ "vega-util": "^1.17.3"
+ }
+ },
+ "node_modules/vega-view-transforms": {
+ "version": "4.6.1",
+ "resolved": "https://registry.npmjs.org/vega-view-transforms/-/vega-view-transforms-4.6.1.tgz",
+ "integrity": "sha512-RYlyMJu5kZV4XXjmyTQKADJWDB25SMHsiF+B1rbE1p+pmdQPlp5tGdPl9r5dUJOp3p8mSt/NGI8GPGucmPMxtw==",
+ "license": "BSD-3-Clause",
+ "dependencies": {
+ "vega-dataflow": "^5.7.7",
+ "vega-scenegraph": "^4.13.1",
+ "vega-util": "^1.17.3"
+ }
+ },
+ "node_modules/vega-voronoi": {
+ "version": "4.2.4",
+ "resolved": "https://registry.npmjs.org/vega-voronoi/-/vega-voronoi-4.2.4.tgz",
+ "integrity": "sha512-lWNimgJAXGeRFu2Pz8axOUqVf1moYhD+5yhBzDSmckE9I5jLOyZc/XvgFTXwFnsVkMd1QW1vxJa+y9yfUblzYw==",
+ "license": "BSD-3-Clause",
+ "dependencies": {
+ "d3-delaunay": "^6.0.2",
+ "vega-dataflow": "^5.7.7",
+ "vega-util": "^1.17.3"
+ }
+ },
+ "node_modules/vega-wordcloud": {
+ "version": "4.1.6",
+ "resolved": "https://registry.npmjs.org/vega-wordcloud/-/vega-wordcloud-4.1.6.tgz",
+ "integrity": "sha512-lFmF3u9/ozU0P+WqPjeThQfZm0PigdbXDwpIUCxczrCXKYJLYFmZuZLZR7cxtmpZ0/yuvRvAJ4g123LXbSZF8A==",
+ "license": "BSD-3-Clause",
+ "dependencies": {
+ "vega-canvas": "^1.2.7",
+ "vega-dataflow": "^5.7.7",
+ "vega-scale": "^7.4.2",
+ "vega-statistics": "^1.9.0",
+ "vega-util": "^1.17.3"
+ }
+ },
+ "node_modules/webidl-conversions": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-3.0.1.tgz",
+ "integrity": "sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ==",
+ "license": "BSD-2-Clause"
+ },
+ "node_modules/whatwg-url": {
+ "version": "5.0.0",
+ "resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-5.0.0.tgz",
+ "integrity": "sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw==",
+ "license": "MIT",
+ "dependencies": {
+ "tr46": "~0.0.3",
+ "webidl-conversions": "^3.0.0"
+ }
+ },
+ "node_modules/which": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz",
+ "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==",
+ "dev": true,
+ "dependencies": {
+ "isexe": "^2.0.0"
+ },
+ "bin": {
+ "node-which": "bin/node-which"
+ },
+ "engines": {
+ "node": ">= 8"
+ }
+ },
+ "node_modules/wrap-ansi": {
+ "version": "7.0.0",
+ "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz",
+ "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==",
+ "license": "MIT",
+ "dependencies": {
+ "ansi-styles": "^4.0.0",
+ "string-width": "^4.1.0",
+ "strip-ansi": "^6.0.0"
+ },
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/chalk/wrap-ansi?sponsor=1"
+ }
+ },
+ "node_modules/wrap-ansi-cjs": {
+ "name": "wrap-ansi",
+ "version": "7.0.0",
+ "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz",
+ "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==",
+ "dev": true,
+ "dependencies": {
+ "ansi-styles": "^4.0.0",
+ "string-width": "^4.1.0",
+ "strip-ansi": "^6.0.0"
+ },
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/chalk/wrap-ansi?sponsor=1"
+ }
+ },
+ "node_modules/y18n": {
+ "version": "5.0.8",
+ "resolved": "https://registry.npmjs.org/y18n/-/y18n-5.0.8.tgz",
+ "integrity": "sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==",
+ "license": "ISC",
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/yaml": {
+ "version": "2.7.1",
+ "resolved": "https://registry.npmjs.org/yaml/-/yaml-2.7.1.tgz",
+ "integrity": "sha512-10ULxpnOCQXxJvBgxsn9ptjq6uviG/htZKk9veJGhlqn3w/DxQ631zFF+nlQXLwmImeS5amR2dl2U8sg6U9jsQ==",
+ "dev": true,
+ "bin": {
+ "yaml": "bin.mjs"
+ },
+ "engines": {
+ "node": ">= 14"
+ }
+ },
+ "node_modules/yargs": {
+ "version": "17.7.2",
+ "resolved": "https://registry.npmjs.org/yargs/-/yargs-17.7.2.tgz",
+ "integrity": "sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w==",
+ "license": "MIT",
+ "dependencies": {
+ "cliui": "^8.0.1",
+ "escalade": "^3.1.1",
+ "get-caller-file": "^2.0.5",
+ "require-directory": "^2.1.1",
+ "string-width": "^4.2.3",
+ "y18n": "^5.0.5",
+ "yargs-parser": "^21.1.1"
+ },
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/yargs-parser": {
+ "version": "21.1.1",
+ "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-21.1.1.tgz",
+ "integrity": "sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw==",
+ "license": "ISC",
+ "engines": {
+ "node": ">=12"
+ }
+ }
+ }
+}
diff --git a/loop/webui/package.json b/loop/webui/package.json
new file mode 100644
index 0000000..06cc3c0
--- /dev/null
+++ b/loop/webui/package.json
@@ -0,0 +1,35 @@
+{
+ "name": "webui",
+ "version": "1.0.0",
+ "description": "Web UI for CodingAgent.",
+ "main": "dist/index.js",
+ "scripts": {
+ "check": "tsc --noEmit",
+ "test": "echo \"Error: no test specified\" && exit 1",
+ "build:tailwind": "npx postcss ./src/input.css -o ./src/tailwind.css",
+ "build:tailwind:watch": "npx postcss ./src/input.css -o ./src/tailwind.css --watch"
+ },
+ "keywords": [],
+ "author": "",
+ "license": "ISC",
+ "devDependencies": {
+ "@types/marked": "^5.0.2",
+ "@types/node": "^22.13.14",
+ "autoprefixer": "^10.4.21",
+ "esbuild": "^0.25.1",
+ "postcss": "^8.5.3",
+ "postcss-cli": "^11.0.1",
+ "tailwindcss": "^3.4.1",
+ "typescript": "^5.8.2"
+ },
+ "dependencies": {
+ "@xterm/addon-fit": "^0.10.0",
+ "@xterm/xterm": "^5.5.0",
+ "diff2html": "3.4.51",
+ "lit-html": "^3.2.1",
+ "marked": "^15.0.7",
+ "vega": "^5.33.0",
+ "vega-embed": "^6.29.0",
+ "vega-lite": "^5.23.0"
+ }
+}
diff --git a/loop/webui/postcss.config.js b/loop/webui/postcss.config.js
new file mode 100644
index 0000000..12a703d
--- /dev/null
+++ b/loop/webui/postcss.config.js
@@ -0,0 +1,6 @@
+module.exports = {
+ plugins: {
+ tailwindcss: {},
+ autoprefixer: {},
+ },
+};
diff --git a/loop/webui/readme.md b/loop/webui/readme.md
new file mode 100644
index 0000000..861a8cd
--- /dev/null
+++ b/loop/webui/readme.md
@@ -0,0 +1,51 @@
+# Loop WebUI
+
+A modern web interface for the CodingAgent loop.
+
+The server in the sibling directory (../server) exposes an HTTP API for
+the CodingAgent.
+
+## Development
+
+This module contains a TypeScript-based web UI for the Loop service. The TypeScript code is compiled into JavaScript using esbuild, and the resulting bundle is served by the Go server.
+
+### Prerequisites
+
+- Node.js and npm
+- Go 1.24 or later (CI builds against the version pinned in `go.mod`)
+
+### Setup
+
+```bash
+# Install dependencies
+make install
+
+# Build the TypeScript code
+make build
+
+# Type checking only
+make check
+```
+
+### Development Mode
+
+For development, you can use watch mode:
+
+```bash
+make dev
+```
+
+This will rebuild the TypeScript files whenever they change.
+
+## Integration with Go Server
+
+The TypeScript code is bundled into JavaScript using esbuild and then served by the Go HTTP server. The integration happens through the `webui` package, which provides a function to retrieve the built bundle.
+
+The server code accesses the built web UI through the `webui.GetBundle()` function, which returns a filesystem that can be used to serve the files.
+
+## File Structure
+
+- `src/`: TypeScript source files
+- `dist/`: Generated JavaScript bundle
+- `esbuild.go`: Go code for bundling TypeScript files
+- `Makefile`: Build tasks
diff --git a/loop/webui/src/diff2.css b/loop/webui/src/diff2.css
new file mode 100644
index 0000000..5a7ad71
--- /dev/null
+++ b/loop/webui/src/diff2.css
@@ -0,0 +1,142 @@
+/* Custom styles for diff2 view */
+
+/* Override container max-width for diff2 view */
+#diff2View .diff-container {
+ max-width: 100%;
+ width: 100%;
+}
+
+/* When diff2 view is active, allow container to expand to full width */
+.container.diff2-active,
+.timeline-container.diff-active {
+ max-width: 100%;
+ padding-left: 20px;
+ padding-right: 20px;
+}
+
+/* Fix line-height inheritance issue */
+.d2h-code-line,
+.d2h-code-line-ctn,
+.d2h-code-linenumber {
+ line-height: 1.4 !important;
+}
+
+/* Make diff2 file container use the full width */
+.d2h-file-wrapper {
+ width: 100%;
+ margin-bottom: 20px;
+}
+
+/* Make side-by-side view use the full width */
+.d2h-file-side-diff {
+ width: 50% !important;
+}
+
+/* Style for diff lines - for both side-by-side and unified views */
+.d2h-code-line,
+.d2h-code-side-line {
+ transition: background-color 0.2s;
+ position: relative;
+}
+
+.d2h-code-line:hover,
+.d2h-code-side-line:hover {
+ background-color: #e6f7ff !important;
+}
+
+/* Plus button styles for commenting */
+.d2h-gutter-comment-button {
+ display: none;
+ position: absolute;
+ right: 0; /* Adjusted from -11px to prevent layout shifts */
+ top: 50%;
+ transform: translateY(-50%);
+ width: 22px;
+ height: 22px;
+ background-color: #0366d6;
+ color: white;
+ border-radius: 50%;
+ text-align: center;
+ line-height: 20px;
+ font-size: 16px;
+ font-weight: bold;
+ cursor: pointer;
+ box-shadow: 0 1px 3px rgba(0,0,0,0.2);
+ opacity: 0.9;
+ z-index: 100;
+ user-select: none;
+}
+
+.d2h-gutter-comment-button:hover {
+ background-color: #0256bd;
+ opacity: 1;
+}
+
+/* Show the plus button on row hover (including line number and code) and when hovering over the button itself */
+tr:hover .d2h-gutter-comment-button,
+.d2h-gutter-comment-button:hover {
+ display: block;
+}
+
+/* Ensure diff2html content uses all available space */
+.diff2html-content {
+ width: 100%;
+ overflow-x: auto;
+}
+
+/* Diff view controls */
+#diff-view-controls {
+ display: flex;
+ justify-content: flex-end;
+ padding: 10px;
+ background-color: #f5f5f5;
+ border-bottom: 1px solid #ddd;
+}
+
+.diff-view-format {
+ display: flex;
+ gap: 15px;
+}
+
+.diff-view-format label {
+ display: flex;
+ align-items: center;
+ gap: 5px;
+ cursor: pointer;
+ font-size: 14px;
+ user-select: none;
+}
+
+.diff-view-format input[type="radio"] {
+ margin: 0;
+ cursor: pointer;
+}
+
+/* Adjust code line padding to make room for the gutter button */
+.d2h-code-line-ctn {
+ position: relative;
+ padding-left: 14px !important;
+}
+
+/* Ensure gutter is wide enough for the plus button */
+.d2h-code-linenumber,
+.d2h-code-side-linenumber {
+ position: relative;
+ min-width: 60px !important; /* Increased from 45px to accommodate 3-digit line numbers plus button */
+ padding-right: 15px !important; /* Ensure space for the button */
+ overflow: visible !important; /* Prevent button from being clipped */
+ text-align: right; /* Ensure consistent text alignment */
+ box-sizing: border-box; /* Ensure padding is included in width calculation */
+}
+
+/* Ensure table rows and cells don't clip the button */
+.d2h-diff-table tr,
+.d2h-diff-table td {
+ overflow: visible !important;
+}
+
+/* Add a bit of padding between line number and code content for visual separation */
+.d2h-code-line-ctn,
+.d2h-code-side-line-ctn {
+ padding-left: 8px !important;
+}
diff --git a/loop/webui/src/diff2html.min.css b/loop/webui/src/diff2html.min.css
new file mode 100644
index 0000000..8014a13
--- /dev/null
+++ b/loop/webui/src/diff2html.min.css
@@ -0,0 +1 @@
+:host,:root{--d2h-bg-color:#fff;--d2h-border-color:#ddd;--d2h-dim-color:rgba(0,0,0,.3);--d2h-line-border-color:#eee;--d2h-file-header-bg-color:#f7f7f7;--d2h-file-header-border-color:#d8d8d8;--d2h-empty-placeholder-bg-color:#f1f1f1;--d2h-empty-placeholder-border-color:#e1e1e1;--d2h-selected-color:#c8e1ff;--d2h-ins-bg-color:#dfd;--d2h-ins-border-color:#b4e2b4;--d2h-ins-highlight-bg-color:#97f295;--d2h-ins-label-color:#399839;--d2h-del-bg-color:#fee8e9;--d2h-del-border-color:#e9aeae;--d2h-del-highlight-bg-color:#ffb6ba;--d2h-del-label-color:#c33;--d2h-change-del-color:#fdf2d0;--d2h-change-ins-color:#ded;--d2h-info-bg-color:#f8fafd;--d2h-info-border-color:#d5e4f2;--d2h-change-label-color:#d0b44c;--d2h-moved-label-color:#3572b0;--d2h-dark-color:#e6edf3;--d2h-dark-bg-color:#0d1117;--d2h-dark-border-color:#30363d;--d2h-dark-dim-color:#6e7681;--d2h-dark-line-border-color:#21262d;--d2h-dark-file-header-bg-color:#161b22;--d2h-dark-file-header-border-color:#30363d;--d2h-dark-empty-placeholder-bg-color:hsla(215,8%,47%,.1);--d2h-dark-empty-placeholder-border-color:#30363d;--d2h-dark-selected-color:rgba(56,139,253,.1);--d2h-dark-ins-bg-color:rgba(46,160,67,.15);--d2h-dark-ins-border-color:rgba(46,160,67,.4);--d2h-dark-ins-highlight-bg-color:rgba(46,160,67,.4);--d2h-dark-ins-label-color:#3fb950;--d2h-dark-del-bg-color:rgba(248,81,73,.1);--d2h-dark-del-border-color:rgba(248,81,73,.4);--d2h-dark-del-highlight-bg-color:rgba(248,81,73,.4);--d2h-dark-del-label-color:#f85149;--d2h-dark-change-del-color:rgba(210,153,34,.2);--d2h-dark-change-ins-color:rgba(46,160,67,.25);--d2h-dark-info-bg-color:rgba(56,139,253,.1);--d2h-dark-info-border-color:rgba(56,139,253,.4);--d2h-dark-change-label-color:#d29922;--d2h-dark-moved-label-color:#3572b0}.d2h-wrapper{text-align:left}.d2h-file-header{background-color:#f7f7f7;background-color:var(--d2h-file-header-bg-color);border-bottom:1px solid #d8d8d8;border-bottom:1px solid 
var(--d2h-file-header-border-color);display:-webkit-box;display:-ms-flexbox;display:flex;font-family:Source Sans Pro,Helvetica Neue,Helvetica,Arial,sans-serif;height:35px;padding:5px 10px}.d2h-file-header.d2h-sticky-header{position:sticky;top:0;z-index:1}.d2h-file-stats{display:-webkit-box;display:-ms-flexbox;display:flex;font-size:14px;margin-left:auto}.d2h-lines-added{border:1px solid #b4e2b4;border:1px solid var(--d2h-ins-border-color);border-radius:5px 0 0 5px;color:#399839;color:var(--d2h-ins-label-color);padding:2px;text-align:right;vertical-align:middle}.d2h-lines-deleted{border:1px solid #e9aeae;border:1px solid var(--d2h-del-border-color);border-radius:0 5px 5px 0;color:#c33;color:var(--d2h-del-label-color);margin-left:1px;padding:2px;text-align:left;vertical-align:middle}.d2h-file-name-wrapper{display:-webkit-box;display:-ms-flexbox;display:flex;-webkit-box-align:center;-ms-flex-align:center;align-items:center;font-size:15px;width:100%}.d2h-file-name{overflow-x:hidden;text-overflow:ellipsis;white-space:nowrap}.d2h-file-wrapper{border:1px solid #ddd;border:1px solid var(--d2h-border-color);border-radius:3px;margin-bottom:1em}.d2h-file-collapse{-webkit-box-pack:end;-ms-flex-pack:end;cursor:pointer;display:none;font-size:12px;justify-content:flex-end;-webkit-box-align:center;-ms-flex-align:center;align-items:center;border:1px solid #ddd;border:1px solid var(--d2h-border-color);border-radius:3px;padding:4px 8px}.d2h-file-collapse.d2h-selected{background-color:#c8e1ff;background-color:var(--d2h-selected-color)}.d2h-file-collapse-input{margin:0 4px 0 0}.d2h-diff-table{border-collapse:collapse;font-family:Menlo,Consolas,monospace;font-size:13px;width:100%}.d2h-files-diff{display:-webkit-box;display:-ms-flexbox;display:flex;width:100%}.d2h-file-diff{overflow-y:hidden}.d2h-file-diff.d2h-d-none,.d2h-files-diff.d2h-d-none{display:none}.d2h-file-side-diff{display:inline-block;overflow-x:scroll;overflow-y:hidden;width:50%}.d2h-code-line{padding:0 8em;width:calc(100% - 
16em)}.d2h-code-line,.d2h-code-side-line{display:inline-block;-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none;white-space:nowrap}.d2h-code-side-line{padding:0 4.5em;width:calc(100% - 9em)}.d2h-code-line-ctn{background:none;display:inline-block;padding:0;word-wrap:normal;-webkit-user-select:text;-moz-user-select:text;-ms-user-select:text;user-select:text;vertical-align:middle;white-space:pre;width:100%}.d2h-code-line del,.d2h-code-side-line del{background-color:#ffb6ba;background-color:var(--d2h-del-highlight-bg-color)}.d2h-code-line del,.d2h-code-line ins,.d2h-code-side-line del,.d2h-code-side-line ins{border-radius:.2em;display:inline-block;margin-top:-1px;-webkit-text-decoration:none;text-decoration:none}.d2h-code-line ins,.d2h-code-side-line ins{background-color:#97f295;background-color:var(--d2h-ins-highlight-bg-color);text-align:left}.d2h-code-line-prefix{background:none;display:inline;padding:0;word-wrap:normal;white-space:pre}.line-num1{float:left}.line-num1,.line-num2{-webkit-box-sizing:border-box;box-sizing:border-box;overflow:hidden;padding:0 .5em;text-overflow:ellipsis;width:3.5em}.line-num2{float:right}.d2h-code-linenumber{background-color:#fff;background-color:var(--d2h-bg-color);border:solid #eee;border:solid var(--d2h-line-border-color);border-width:0 1px;-webkit-box-sizing:border-box;box-sizing:border-box;color:rgba(0,0,0,.3);color:var(--d2h-dim-color);cursor:pointer;display:inline-block;position:absolute;text-align:right;width:7.5em}.d2h-code-linenumber:after{content:"\200b"}.d2h-code-side-linenumber{background-color:#fff;background-color:var(--d2h-bg-color);border:solid #eee;border:solid var(--d2h-line-border-color);border-width:0 1px;-webkit-box-sizing:border-box;box-sizing:border-box;color:rgba(0,0,0,.3);color:var(--d2h-dim-color);cursor:pointer;display:inline-block;overflow:hidden;padding:0 
.5em;position:absolute;text-align:right;text-overflow:ellipsis;width:4em}.d2h-code-side-linenumber:after{content:"\200b"}.d2h-code-side-emptyplaceholder,.d2h-emptyplaceholder{background-color:#f1f1f1;background-color:var(--d2h-empty-placeholder-bg-color);border-color:#e1e1e1;border-color:var(--d2h-empty-placeholder-border-color)}.d2h-code-line-prefix,.d2h-code-linenumber,.d2h-code-side-linenumber,.d2h-emptyplaceholder{-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none}.d2h-code-linenumber,.d2h-code-side-linenumber{direction:rtl}.d2h-del{background-color:#fee8e9;background-color:var(--d2h-del-bg-color);border-color:#e9aeae;border-color:var(--d2h-del-border-color)}.d2h-ins{background-color:#dfd;background-color:var(--d2h-ins-bg-color);border-color:#b4e2b4;border-color:var(--d2h-ins-border-color)}.d2h-info{background-color:#f8fafd;background-color:var(--d2h-info-bg-color);border-color:#d5e4f2;border-color:var(--d2h-info-border-color);color:rgba(0,0,0,.3);color:var(--d2h-dim-color)}.d2h-file-diff .d2h-del.d2h-change{background-color:#fdf2d0;background-color:var(--d2h-change-del-color)}.d2h-file-diff .d2h-ins.d2h-change{background-color:#ded;background-color:var(--d2h-change-ins-color)}.d2h-file-list-wrapper{margin-bottom:10px}.d2h-file-list-wrapper a{-webkit-text-decoration:none;text-decoration:none}.d2h-file-list-wrapper a,.d2h-file-list-wrapper a:visited{color:#3572b0;color:var(--d2h-moved-label-color)}.d2h-file-list-header{text-align:left}.d2h-file-list-title{font-weight:700}.d2h-file-list-line{display:-webkit-box;display:-ms-flexbox;display:flex;text-align:left}.d2h-file-list{display:block;list-style:none;margin:0;padding:0}.d2h-file-list>li{border-bottom:1px solid #ddd;border-bottom:1px solid var(--d2h-border-color);margin:0;padding:5px 
10px}.d2h-file-list>li:last-child{border-bottom:none}.d2h-file-switch{cursor:pointer;display:none;font-size:10px}.d2h-icon{margin-right:10px;vertical-align:middle;fill:currentColor}.d2h-deleted{color:#c33;color:var(--d2h-del-label-color)}.d2h-added{color:#399839;color:var(--d2h-ins-label-color)}.d2h-changed{color:#d0b44c;color:var(--d2h-change-label-color)}.d2h-moved{color:#3572b0;color:var(--d2h-moved-label-color)}.d2h-tag{background-color:#fff;background-color:var(--d2h-bg-color);display:-webkit-box;display:-ms-flexbox;display:flex;font-size:10px;margin-left:5px;padding:0 2px}.d2h-deleted-tag{border:1px solid #c33;border:1px solid var(--d2h-del-label-color)}.d2h-added-tag{border:1px solid #399839;border:1px solid var(--d2h-ins-label-color)}.d2h-changed-tag{border:1px solid #d0b44c;border:1px solid var(--d2h-change-label-color)}.d2h-moved-tag{border:1px solid #3572b0;border:1px solid var(--d2h-moved-label-color)}.d2h-dark-color-scheme{background-color:#0d1117;background-color:var(--d2h-dark-bg-color);color:#e6edf3;color:var(--d2h-dark-color)}.d2h-dark-color-scheme .d2h-file-header{background-color:#161b22;background-color:var(--d2h-dark-file-header-bg-color);border-bottom:#30363d;border-bottom:var(--d2h-dark-file-header-border-color)}.d2h-dark-color-scheme .d2h-lines-added{border:1px solid rgba(46,160,67,.4);border:1px solid var(--d2h-dark-ins-border-color);color:#3fb950;color:var(--d2h-dark-ins-label-color)}.d2h-dark-color-scheme .d2h-lines-deleted{border:1px solid rgba(248,81,73,.4);border:1px solid var(--d2h-dark-del-border-color);color:#f85149;color:var(--d2h-dark-del-label-color)}.d2h-dark-color-scheme .d2h-code-line del,.d2h-dark-color-scheme .d2h-code-side-line del{background-color:rgba(248,81,73,.4);background-color:var(--d2h-dark-del-highlight-bg-color)}.d2h-dark-color-scheme .d2h-code-line ins,.d2h-dark-color-scheme .d2h-code-side-line ins{background-color:rgba(46,160,67,.4);background-color:var(--d2h-dark-ins-highlight-bg-color)}.d2h-dark-color-scheme 
.d2h-diff-tbody{border-color:#30363d;border-color:var(--d2h-dark-border-color)}.d2h-dark-color-scheme .d2h-code-side-linenumber{background-color:#0d1117;background-color:var(--d2h-dark-bg-color);border-color:#21262d;border-color:var(--d2h-dark-line-border-color);color:#6e7681;color:var(--d2h-dark-dim-color)}.d2h-dark-color-scheme .d2h-files-diff .d2h-code-side-emptyplaceholder,.d2h-dark-color-scheme .d2h-files-diff .d2h-emptyplaceholder{background-color:hsla(215,8%,47%,.1);background-color:var(--d2h-dark-empty-placeholder-bg-color);border-color:#30363d;border-color:var(--d2h-dark-empty-placeholder-border-color)}.d2h-dark-color-scheme .d2h-code-linenumber{background-color:#0d1117;background-color:var(--d2h-dark-bg-color);border-color:#21262d;border-color:var(--d2h-dark-line-border-color);color:#6e7681;color:var(--d2h-dark-dim-color)}.d2h-dark-color-scheme .d2h-del{background-color:rgba(248,81,73,.1);background-color:var(--d2h-dark-del-bg-color);border-color:rgba(248,81,73,.4);border-color:var(--d2h-dark-del-border-color)}.d2h-dark-color-scheme .d2h-ins{background-color:rgba(46,160,67,.15);background-color:var(--d2h-dark-ins-bg-color);border-color:rgba(46,160,67,.4);border-color:var(--d2h-dark-ins-border-color)}.d2h-dark-color-scheme .d2h-info{background-color:rgba(56,139,253,.1);background-color:var(--d2h-dark-info-bg-color);border-color:rgba(56,139,253,.4);border-color:var(--d2h-dark-info-border-color);color:#6e7681;color:var(--d2h-dark-dim-color)}.d2h-dark-color-scheme .d2h-file-diff .d2h-del.d2h-change{background-color:rgba(210,153,34,.2);background-color:var(--d2h-dark-change-del-color)}.d2h-dark-color-scheme .d2h-file-diff .d2h-ins.d2h-change{background-color:rgba(46,160,67,.25);background-color:var(--d2h-dark-change-ins-color)}.d2h-dark-color-scheme .d2h-file-wrapper{border:1px solid #30363d;border:1px solid var(--d2h-dark-border-color)}.d2h-dark-color-scheme .d2h-file-collapse{border:1px solid #0d1117;border:1px solid 
var(--d2h-dark-bg-color)}.d2h-dark-color-scheme .d2h-file-collapse.d2h-selected{background-color:rgba(56,139,253,.1);background-color:var(--d2h-dark-selected-color)}.d2h-dark-color-scheme .d2h-file-list-wrapper a,.d2h-dark-color-scheme .d2h-file-list-wrapper a:visited{color:#3572b0;color:var(--d2h-dark-moved-label-color)}.d2h-dark-color-scheme .d2h-file-list>li{border-bottom:1px solid #0d1117;border-bottom:1px solid var(--d2h-dark-bg-color)}.d2h-dark-color-scheme .d2h-deleted{color:#f85149;color:var(--d2h-dark-del-label-color)}.d2h-dark-color-scheme .d2h-added{color:#3fb950;color:var(--d2h-dark-ins-label-color)}.d2h-dark-color-scheme .d2h-changed{color:#d29922;color:var(--d2h-dark-change-label-color)}.d2h-dark-color-scheme .d2h-moved{color:#3572b0;color:var(--d2h-dark-moved-label-color)}.d2h-dark-color-scheme .d2h-tag{background-color:#0d1117;background-color:var(--d2h-dark-bg-color)}.d2h-dark-color-scheme .d2h-deleted-tag{border:1px solid #f85149;border:1px solid var(--d2h-dark-del-label-color)}.d2h-dark-color-scheme .d2h-added-tag{border:1px solid #3fb950;border:1px solid var(--d2h-dark-ins-label-color)}.d2h-dark-color-scheme .d2h-changed-tag{border:1px solid #d29922;border:1px solid var(--d2h-dark-change-label-color)}.d2h-dark-color-scheme .d2h-moved-tag{border:1px solid #3572b0;border:1px solid var(--d2h-dark-moved-label-color)}@media (prefers-color-scheme:dark){.d2h-auto-color-scheme{background-color:#0d1117;background-color:var(--d2h-dark-bg-color);color:#e6edf3;color:var(--d2h-dark-color)}.d2h-auto-color-scheme .d2h-file-header{background-color:#161b22;background-color:var(--d2h-dark-file-header-bg-color);border-bottom:#30363d;border-bottom:var(--d2h-dark-file-header-border-color)}.d2h-auto-color-scheme .d2h-lines-added{border:1px solid rgba(46,160,67,.4);border:1px solid var(--d2h-dark-ins-border-color);color:#3fb950;color:var(--d2h-dark-ins-label-color)}.d2h-auto-color-scheme .d2h-lines-deleted{border:1px solid rgba(248,81,73,.4);border:1px solid 
var(--d2h-dark-del-border-color);color:#f85149;color:var(--d2h-dark-del-label-color)}.d2h-auto-color-scheme .d2h-code-line del,.d2h-auto-color-scheme .d2h-code-side-line del{background-color:rgba(248,81,73,.4);background-color:var(--d2h-dark-del-highlight-bg-color)}.d2h-auto-color-scheme .d2h-code-line ins,.d2h-auto-color-scheme .d2h-code-side-line ins{background-color:rgba(46,160,67,.4);background-color:var(--d2h-dark-ins-highlight-bg-color)}.d2h-auto-color-scheme .d2h-diff-tbody{border-color:#30363d;border-color:var(--d2h-dark-border-color)}.d2h-auto-color-scheme .d2h-code-side-linenumber{background-color:#0d1117;background-color:var(--d2h-dark-bg-color);border-color:#21262d;border-color:var(--d2h-dark-line-border-color);color:#6e7681;color:var(--d2h-dark-dim-color)}.d2h-auto-color-scheme .d2h-files-diff .d2h-code-side-emptyplaceholder,.d2h-auto-color-scheme .d2h-files-diff .d2h-emptyplaceholder{background-color:hsla(215,8%,47%,.1);background-color:var(--d2h-dark-empty-placeholder-bg-color);border-color:#30363d;border-color:var(--d2h-dark-empty-placeholder-border-color)}.d2h-auto-color-scheme .d2h-code-linenumber{background-color:#0d1117;background-color:var(--d2h-dark-bg-color);border-color:#21262d;border-color:var(--d2h-dark-line-border-color);color:#6e7681;color:var(--d2h-dark-dim-color)}.d2h-auto-color-scheme .d2h-del{background-color:rgba(248,81,73,.1);background-color:var(--d2h-dark-del-bg-color);border-color:rgba(248,81,73,.4);border-color:var(--d2h-dark-del-border-color)}.d2h-auto-color-scheme .d2h-ins{background-color:rgba(46,160,67,.15);background-color:var(--d2h-dark-ins-bg-color);border-color:rgba(46,160,67,.4);border-color:var(--d2h-dark-ins-border-color)}.d2h-auto-color-scheme .d2h-info{background-color:rgba(56,139,253,.1);background-color:var(--d2h-dark-info-bg-color);border-color:rgba(56,139,253,.4);border-color:var(--d2h-dark-info-border-color);color:#6e7681;color:var(--d2h-dark-dim-color)}.d2h-auto-color-scheme .d2h-file-diff 
.d2h-del.d2h-change{background-color:rgba(210,153,34,.2);background-color:var(--d2h-dark-change-del-color)}.d2h-auto-color-scheme .d2h-file-diff .d2h-ins.d2h-change{background-color:rgba(46,160,67,.25);background-color:var(--d2h-dark-change-ins-color)}.d2h-auto-color-scheme .d2h-file-wrapper{border:1px solid #30363d;border:1px solid var(--d2h-dark-border-color)}.d2h-auto-color-scheme .d2h-file-collapse{border:1px solid #0d1117;border:1px solid var(--d2h-dark-bg-color)}.d2h-auto-color-scheme .d2h-file-collapse.d2h-selected{background-color:rgba(56,139,253,.1);background-color:var(--d2h-dark-selected-color)}.d2h-auto-color-scheme .d2h-file-list-wrapper a,.d2h-auto-color-scheme .d2h-file-list-wrapper a:visited{color:#3572b0;color:var(--d2h-dark-moved-label-color)}.d2h-auto-color-scheme .d2h-file-list>li{border-bottom:1px solid #0d1117;border-bottom:1px solid var(--d2h-dark-bg-color)}.d2h-dark-color-scheme .d2h-deleted{color:#f85149;color:var(--d2h-dark-del-label-color)}.d2h-auto-color-scheme .d2h-added{color:#3fb950;color:var(--d2h-dark-ins-label-color)}.d2h-auto-color-scheme .d2h-changed{color:#d29922;color:var(--d2h-dark-change-label-color)}.d2h-auto-color-scheme .d2h-moved{color:#3572b0;color:var(--d2h-dark-moved-label-color)}.d2h-auto-color-scheme .d2h-tag{background-color:#0d1117;background-color:var(--d2h-dark-bg-color)}.d2h-auto-color-scheme .d2h-deleted-tag{border:1px solid #f85149;border:1px solid var(--d2h-dark-del-label-color)}.d2h-auto-color-scheme .d2h-added-tag{border:1px solid #3fb950;border:1px solid var(--d2h-dark-ins-label-color)}.d2h-auto-color-scheme .d2h-changed-tag{border:1px solid #d29922;border:1px solid var(--d2h-dark-change-label-color)}.d2h-auto-color-scheme .d2h-moved-tag{border:1px solid #3572b0;border:1px solid var(--d2h-dark-moved-label-color)}}
\ No newline at end of file
diff --git a/loop/webui/src/index.html b/loop/webui/src/index.html
new file mode 100644
index 0000000..a1f62a0
--- /dev/null
+++ b/loop/webui/src/index.html
@@ -0,0 +1,40 @@
+<!DOCTYPE html>
+<html lang="en">
+ <head>
+ <meta charset="UTF-8" />
+ <meta name="viewport" content="width=device-width, initial-scale=1.0" />
+ <title>Loop WebUI</title>
+ <link rel="stylesheet" href="tailwind.css" />
+ <style>
+ body {
+ font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto,
+ Oxygen, Ubuntu, Cantarell, "Open Sans", "Helvetica Neue", sans-serif;
+ margin: 0;
+ padding: 20px;
+ background-color: #f5f5f5;
+ }
+ #app {
+ max-width: 800px;
+ margin: 0 auto;
+ background-color: white;
+ border-radius: 8px;
+ padding: 20px;
+ box-shadow: 0 2px 4px rgba(0, 0, 0, 0.1);
+ }
+ h1 {
+ color: #333;
+ }
+ #status {
+ margin-top: 20px;
+ padding: 10px;
+ background-color: #e8f5e9;
+ border-radius: 4px;
+ color: #2e7d32;
+ }
+ </style>
+ </head>
+ <body>
+ <div id="app">Loading...</div>
+ <script src="index.js"></script>
+ </body>
+</html>
diff --git a/loop/webui/src/input.css b/loop/webui/src/input.css
new file mode 100644
index 0000000..176b454
--- /dev/null
+++ b/loop/webui/src/input.css
@@ -0,0 +1,5 @@
+@tailwind base;
+@tailwind components;
+@tailwind utilities;
+
+/* Custom styles can be added below */
diff --git a/loop/webui/src/timeline.css b/loop/webui/src/timeline.css
new file mode 100644
index 0000000..2928c44
--- /dev/null
+++ b/loop/webui/src/timeline.css
@@ -0,0 +1,1306 @@
+body {
+ font-family:
+ system-ui,
+ -apple-system,
+ BlinkMacSystemFont,
+ "Segoe UI",
+ Roboto,
+ sans-serif;
+ margin: 0;
+ padding: 20px;
+ padding-top: 80px; /* Added padding to account for the fixed top banner */
+ padding-bottom: 100px; /* Adjusted padding for chat container */
+ color: #333;
+ line-height: 1.4; /* Reduced line height for more compact text */
+}
+
+.timeline-container {
+ max-width: 1200px;
+ margin: 0 auto;
+ position: relative;
+}
+
+/* When diff view is active, allow timeline container to expand to full width */
+.timeline-container.diff-active {
+ max-width: 100%;
+}
+
+/* Top banner with combined elements */
+.top-banner {
+ display: flex;
+ justify-content: space-between;
+ align-items: center;
+ padding: 5px 20px;
+ margin-bottom: 0;
+ border-bottom: 1px solid #eee;
+ flex-wrap: wrap;
+ gap: 10px;
+ position: fixed;
+ top: 0;
+ left: 0;
+ right: 0;
+ background: white;
+ z-index: 100;
+ box-shadow: 0 2px 4px rgba(0, 0, 0, 0.1);
+ max-width: 100%;
+}
+
+.banner-title {
+ font-size: 18px;
+ font-weight: 600;
+ margin: 0;
+ white-space: nowrap;
+ overflow: hidden;
+ text-overflow: ellipsis;
+}
+
+.chat-title {
+ margin: 0;
+ padding: 0;
+ color: rgba(82, 82, 82, 0.85);
+ font-size: 16px;
+ font-weight: normal;
+ font-style: italic;
+ white-space: nowrap;
+ overflow: hidden;
+ text-overflow: ellipsis;
+ max-width: 100%;
+}
+
+/* Original header styles kept for compatibility */
+header {
+ display: none; /* Hidden since we're using top-banner instead */
+}
+
+/* Ensure the container starts below the fixed top banner */
+.timeline-container {
+ padding-top: 10px;
+}
+
+h1 {
+ margin: 0;
+ font-size: 24px;
+ font-weight: 600;
+}
+
+.info-card {
+ background: #f9f9f9;
+ border-radius: 8px;
+ padding: 15px;
+ margin-bottom: 20px;
+ box-shadow: 0 2px 4px rgba(0, 0, 0, 0.05);
+ display: none; /* Hidden in the combined layout */
+}
+
+.info-grid {
+ display: flex;
+ flex-wrap: wrap;
+ gap: 8px;
+ background: #f9f9f9;
+ border-radius: 4px;
+ padding: 4px 10px;
+ box-shadow: 0 1px 3px rgba(0, 0, 0, 0.05);
+ flex: 1;
+}
+
+.info-item {
+ display: flex;
+ align-items: center;
+ white-space: nowrap;
+ margin-right: 10px;
+ font-size: 13px;
+}
+
+.info-label {
+ font-size: 11px;
+ color: #555;
+ margin-right: 3px;
+ font-weight: 500;
+}
+
+.info-value {
+ font-size: 11px;
+ font-weight: 600;
+}
+
+.cost {
+ color: #2e7d32;
+}
+
+.refresh-control {
+ display: flex;
+ align-items: center;
+ margin-bottom: 0;
+ flex-wrap: nowrap;
+ white-space: nowrap;
+ flex-shrink: 0;
+}
+
+.refresh-button {
+ background: #4caf50;
+ color: white;
+ border: none;
+ padding: 4px 10px;
+ border-radius: 4px;
+ cursor: pointer;
+ font-size: 12px;
+ margin: 5px;
+}
+
+.poll-updates {
+ display: flex;
+ align-items: center;
+ margin: 0 5px;
+ font-size: 12px;
+}
+
+.status-container {
+ display: flex;
+ align-items: center;
+}
+
+.polling-indicator {
+ display: inline-block;
+ width: 8px;
+ height: 8px;
+ border-radius: 50%;
+ margin-right: 4px;
+ background-color: #ccc;
+}
+
+.polling-indicator.active {
+ background-color: #4caf50;
+ animation: pulse 1.5s infinite;
+}
+
+.polling-indicator.error {
+ background-color: #f44336;
+ animation: pulse 1.5s infinite;
+}
+
+@keyframes pulse {
+ 0% {
+ opacity: 1;
+ }
+ 50% {
+ opacity: 0.5;
+ }
+ 100% {
+ opacity: 1;
+ }
+}
+
+.status-text {
+ font-size: 11px;
+ color: #666;
+}
+
+/* Core timeline layout: vertical guide line, message rows, and type icons */
+.timeline {
+ position: relative;
+ margin: 10px 0;
+ scroll-behavior: smooth;
+}
+
+.timeline::before {
+ content: "";
+ position: absolute;
+ top: 0;
+ bottom: 0;
+ left: 15px;
+ width: 2px;
+ background: #e0e0e0;
+ border-radius: 1px;
+}
+
+/* Hide the timeline vertical line when there are no messages */
+.timeline.empty::before {
+ display: none;
+}
+
+.message {
+ position: relative;
+ margin-bottom: 5px;
+ padding-left: 30px;
+}
+
+.message-icon {
+ position: absolute;
+ left: 10px;
+ top: 0;
+ transform: translateX(-50%);
+ width: 16px;
+ height: 16px;
+ border-radius: 3px;
+ text-align: center;
+ line-height: 16px;
+ color: #fff;
+ font-size: 10px;
+}
+
+.message-content {
+ position: relative;
+ padding: 5px 10px;
+ background: #fff;
+ border-radius: 3px;
+ box-shadow: 0 1px 2px rgba(0, 0, 0, 0.05);
+ border-left: 3px solid transparent;
+}
+
+/* Removed arrow decoration for a more compact look */
+
+.message-header {
+ display: flex;
+ flex-wrap: wrap;
+ gap: 5px;
+ margin-bottom: 3px;
+ font-size: 12px;
+}
+
+.message-timestamp {
+ font-size: 10px;
+ color: #888;
+ font-style: italic;
+ margin-left: 3px;
+}
+
+.conversation-id {
+ font-family: monospace;
+ font-size: 12px;
+ padding: 2px 4px;
+ background-color: #f0f0f0;
+ border-radius: 3px;
+ margin-left: auto;
+}
+
+.parent-info {
+ font-size: 11px;
+ opacity: 0.8;
+}
+
+.subconversation {
+ border-left: 2px solid transparent;
+ padding-left: 5px;
+ margin-left: 20px;
+ transition: margin-left 0.3s ease;
+}
+
+.message-text {
+ overflow-x: auto;
+ margin-bottom: 3px;
+ font-family: monospace;
+ padding: 3px 5px;
+ background: #f7f7f7;
+ border-radius: 2px;
+ user-select: text;
+ cursor: text;
+ -webkit-user-select: text;
+ -moz-user-select: text;
+ -ms-user-select: text;
+ font-size: 13px;
+ line-height: 1.3;
+}
+
+.tool-details {
+ margin-top: 3px;
+ padding-top: 3px;
+ border-top: 1px dashed #e0e0e0;
+ font-size: 12px;
+}
+
+.tool-name {
+ font-size: 12px;
+ font-weight: bold;
+ margin-bottom: 2px;
+ background: #f0f0f0;
+ padding: 2px 4px;
+ border-radius: 2px;
+ display: flex;
+ align-items: center;
+ gap: 3px;
+}
+
+.tool-input,
+.tool-result {
+ margin-top: 2px;
+ padding: 3px 5px;
+ background: #f7f7f7;
+ border-radius: 2px;
+ font-family: monospace;
+ font-size: 12px;
+ overflow-x: auto;
+ white-space: pre;
+ line-height: 1.3;
+ user-select: text;
+ cursor: text;
+ -webkit-user-select: text;
+ -moz-user-select: text;
+ -ms-user-select: text;
+}
+
+.tool-result {
+ max-height: 300px;
+ overflow-y: auto;
+}
+
+.usage-info {
+ margin-top: 10px;
+ padding-top: 10px;
+ border-top: 1px dashed #e0e0e0;
+ font-size: 12px;
+ color: #666;
+}
+
+/* Message type styles */
+.user .message-icon {
+ background-color: #2196f3;
+}
+
+.agent .message-icon {
+ background-color: #4caf50;
+}
+
+.tool .message-icon {
+ background-color: #ff9800;
+}
+
+.error .message-icon {
+ background-color: #f44336;
+}
+
+.end-of-turn {
+ margin-bottom: 15px;
+}
+
+.end-of-turn::after {
+ content: "End of Turn";
+ position: absolute;
+ left: 15px;
+ bottom: -10px;
+ transform: translateX(-50%);
+ font-size: 10px;
+ color: #666;
+ background: #f0f0f0;
+ padding: 1px 4px;
+ border-radius: 3px;
+}
+
+.collapsible {
+ cursor: pointer;
+ background-color: #f0f0f0;
+ padding: 5px 10px;
+ border: none;
+ border-radius: 4px;
+ text-align: left;
+ font-size: 12px;
+ margin-top: 5px;
+}
+
+.collapsed {
+ max-height: 50px;
+ overflow-y: hidden;
+ position: relative;
+ text-overflow: ellipsis;
+}
+
+/* Fade-out gradient on collapsed blocks is intentionally omitted */
+
+.loader {
+ display: flex;
+ justify-content: center;
+ padding: 20px;
+}
+
+.loader::after {
+ content: "";
+ width: 30px;
+ height: 30px;
+ border: 3px solid #f3f3f3;
+ border-top: 3px solid #3498db;
+ border-radius: 50%;
+ animation: spin 1s linear infinite;
+}
+
+@keyframes spin {
+ 0% {
+ transform: rotate(0deg);
+ }
+ 100% {
+ transform: rotate(360deg);
+ }
+}
+
+/* Chat styles */
+.chat-container {
+ position: fixed;
+ bottom: 0;
+ left: 0;
+ width: 100%;
+ background: #f0f0f0;
+ padding: 15px;
+ box-shadow: 0 -2px 10px rgba(0, 0, 0, 0.1);
+ z-index: 1000;
+}
+
+.chat-input-wrapper {
+ display: flex;
+ max-width: 1200px;
+ margin: 0 auto;
+ gap: 10px;
+}
+
+#chatInput {
+ flex: 1;
+ padding: 12px;
+ border: 1px solid #ddd;
+ border-radius: 4px;
+ resize: none;
+ font-family: monospace;
+ font-size: 12px;
+ min-height: 40px;
+ max-height: 120px;
+ background: #f7f7f7;
+}
+
+#sendChatButton {
+ background-color: #2196f3;
+ color: white;
+ border: none;
+ border-radius: 4px;
+ padding: 0 20px;
+ cursor: pointer;
+ font-weight: 600;
+}
+
+#sendChatButton:hover {
+ background-color: #0d8bf2;
+}
+
+/* Copy button styles */
+.message-text-container,
+.tool-result-container {
+ position: relative;
+}
+
+.message-actions {
+ position: absolute;
+ top: 5px;
+ right: 5px;
+ z-index: 10;
+ opacity: 0;
+ transition: opacity 0.2s ease;
+}
+
+.message-text-container:hover .message-actions,
+.tool-result-container:hover .message-actions {
+ opacity: 1;
+}
+
+.copy-button {
+ background-color: rgba(255, 255, 255, 0.9);
+ border: 1px solid #ddd;
+ border-radius: 4px;
+ color: #555;
+ cursor: pointer;
+ font-size: 12px;
+ padding: 2px 8px;
+ transition: all 0.2s ease;
+}
+
+.copy-button:hover {
+ background-color: #f0f0f0;
+ color: #333;
+}
+
+/* Diff View Styles */
+.diff-view {
+ width: 100%;
+ background-color: #f5f5f5;
+ border-radius: 8px;
+ overflow: hidden;
+ margin-bottom: 20px;
+ box-shadow: 0 2px 4px rgba(0, 0, 0, 0.1);
+ display: flex;
+ flex-direction: column;
+}
+
+.diff-tabs {
+ display: flex;
+ background-color: #e0e0e0;
+ border-bottom: 1px solid #ccc;
+}
+
+.diff-tab-button {
+ padding: 8px 16px;
+ border: none;
+ background: none;
+ font-size: 14px;
+ cursor: pointer;
+ outline: none;
+ transition: background-color 0.2s;
+}
+
+.diff-tab-button:hover {
+ background-color: #d0d0d0;
+}
+
+.diff-tab-button.active {
+ background-color: #fff;
+ border-bottom: 2px solid #3498db;
+}
+
+.diff-container {
+ flex: 1;
+ overflow: hidden;
+}
+
+/* No diff-header element: omitted on purpose to maximize vertical space */
+
+.diff-content {
+ padding: 15px;
+ margin: 0;
+ max-height: 70vh;
+ overflow-y: auto;
+ font-family: Consolas, Monaco, "Andale Mono", monospace;
+ font-size: 14px;
+ line-height: 1.5;
+ white-space: pre;
+ tab-size: 4;
+ background-color: #fff;
+}
+
+.diff-content .diff-line {
+ padding: 0 5px;
+ white-space: pre;
+ cursor: pointer;
+ transition: background-color 0.2s;
+}
+
+.diff-content .diff-line:hover {
+ background-color: #e6f7ff;
+}
+
+.diff-content .diff-add {
+ background-color: #e6ffed;
+ color: #22863a;
+}
+
+.diff-content .diff-remove {
+ background-color: #ffeef0;
+ color: #cb2431;
+}
+
+.diff-content .diff-info {
+ color: #6a737d;
+ background-color: #f0f0f0;
+}
+
+.diff-comment-box {
+ position: fixed;
+ left: 50%;
+ top: 50%;
+ transform: translate(-50%, -50%);
+ width: 80%;
+ max-width: 600px;
+ background-color: #fff;
+ padding: 20px;
+ border-radius: 8px;
+ box-shadow: 0 4px 20px rgba(0, 0, 0, 0.2);
+ z-index: 1000;
+}
+
+.diff-comment-box h3 {
+ margin-top: 0;
+ margin-bottom: 15px;
+ font-size: 18px;
+}
+
+.selected-line {
+ background-color: #f5f5f5;
+ padding: 10px;
+ margin-bottom: 15px;
+ border-radius: 4px;
+ border-left: 3px solid #0366d6;
+}
+
+.selected-line pre {
+ margin: 5px 0 0 0;
+ white-space: pre-wrap;
+ word-wrap: break-word;
+ font-family: Consolas, Monaco, "Andale Mono", monospace;
+ font-size: 14px;
+}
+
+#diffCommentInput {
+ width: 100%;
+ min-height: 100px;
+ padding: 10px;
+ margin-bottom: 15px;
+ border: 1px solid #ccc;
+ border-radius: 4px;
+ resize: vertical;
+ font-family: Arial, sans-serif;
+}
+
+.diff-comment-buttons {
+ display: flex;
+ justify-content: flex-end;
+ gap: 10px;
+}
+
+.diff-comment-buttons button {
+ padding: 8px 15px;
+ border: none;
+ border-radius: 4px;
+ cursor: pointer;
+ font-weight: 500;
+}
+
+#submitDiffComment {
+ background-color: #0366d6;
+ color: white;
+}
+
+#submitDiffComment:hover {
+ background-color: #0256bd;
+}
+
+#cancelDiffComment {
+ background-color: #e1e4e8;
+ color: #24292e;
+}
+
+#cancelDiffComment:hover {
+ background-color: #d1d5da;
+}
+
+/* View Mode Button Styles */
+.view-mode-buttons {
+ display: flex;
+ gap: 8px;
+ margin-right: 10px;
+}
+
+.emoji-button {
+ font-size: 18px;
+ width: 32px;
+ height: 32px;
+ display: flex;
+ align-items: center;
+ justify-content: center;
+ background: white;
+ border: 1px solid #ddd;
+ border-radius: 4px;
+ cursor: pointer;
+ transition: all 0.2s ease;
+ padding: 0;
+ line-height: 1;
+}
+
+.emoji-button:hover {
+ background-color: #f0f0f0;
+ transform: translateY(-2px);
+ box-shadow: 0 2px 4px rgba(0, 0, 0, 0.1);
+}
+
+.emoji-button.active {
+ background-color: #e6f7ff;
+ border-color: #1890ff;
+ color: #1890ff;
+}
+
+#showConversationButton.active {
+ background-color: #e6f7ff;
+ border-color: #1890ff;
+}
+
+#showDiffButton.active {
+ background-color: #f6ffed;
+ border-color: #52c41a;
+}
+
+#showChartsButton.active {
+ background-color: #fff2e8;
+ border-color: #fa8c16;
+}
+
+.stop-button:hover {
+ background-color: #c82333 !important;
+}
+
+/* Chart View Styles */
+.chart-view {
+ width: 100%;
+ background-color: #ffffff;
+ border-radius: 8px;
+ overflow: hidden;
+ margin-bottom: 20px;
+ box-shadow: 0 2px 4px rgba(0, 0, 0, 0.1);
+ padding: 15px;
+}
+
+.chart-container {
+ width: 100%;
+ height: auto;
+ overflow: auto;
+}
+
+.chart-section {
+ margin-bottom: 30px;
+ border-bottom: 1px solid #eee;
+ padding-bottom: 20px;
+}
+
+/* Terminal View Styles */
+.terminal-view {
+ width: 100%;
+ background-color: #f5f5f5;
+ border-radius: 8px;
+ overflow: hidden;
+ margin-bottom: 20px;
+ box-shadow: 0 2px 4px rgba(0, 0, 0, 0.1);
+ padding: 15px;
+ height: 70vh;
+}
+
+.terminal-container {
+ width: 100%;
+ height: 100%;
+ overflow: hidden;
+}
+
+#showTerminalButton.active {
+ background-color: #fef0f0;
+ border-color: #ff4d4f;
+}
+
+.chart-section:last-child {
+ border-bottom: none;
+ margin-bottom: 0;
+}
+
+.chart-section h3 {
+ margin-top: 0;
+ margin-bottom: 15px;
+ font-size: 18px;
+ color: #333;
+}
+
+#costChart,
+#messagesChart {
+ width: 100%;
+ min-height: 300px;
+ margin-bottom: 10px;
+}
+
+/* Tool calls container styles */
+.tool-calls-container {
+ /* Removed dotted border */
+}
+
+.tool-calls-toggle {
+ cursor: pointer;
+ background-color: #f0f0f0;
+ padding: 5px 10px;
+ border: none;
+ border-radius: 4px;
+ text-align: left;
+ font-size: 12px;
+ margin-top: 5px;
+ color: #555;
+ font-weight: 500;
+}
+
+.tool-calls-toggle:hover {
+ background-color: #e0e0e0;
+}
+
+.tool-calls-details {
+ margin-top: 10px;
+ transition: max-height 0.3s ease;
+}
+
+.tool-calls-details.collapsed {
+ max-height: 0;
+ overflow: hidden;
+ margin-top: 0;
+}
+
+.tool-call {
+ background: #f9f9f9;
+ border-radius: 4px;
+ padding: 10px;
+ margin-bottom: 10px;
+ border-left: 3px solid #4caf50;
+}
+
+.tool-call-header {
+ margin-bottom: 8px;
+ font-size: 14px;
+ padding: 2px 0;
+}
+
+/* Compact tool display styles */
+.tool-compact-line {
+ font-family: monospace;
+ font-size: 12px;
+ line-height: 1.4;
+ padding: 4px 6px;
+ background: #f8f8f8;
+ border-radius: 3px;
+ position: relative;
+ white-space: nowrap;
+ overflow: hidden;
+ text-overflow: ellipsis;
+ max-width: 100%;
+ display: flex;
+ align-items: center;
+}
+
+.tool-result-inline {
+ font-family: monospace;
+ color: #0066bb;
+ white-space: nowrap;
+ overflow: hidden;
+ text-overflow: ellipsis;
+ max-width: 400px;
+ display: inline-block;
+ vertical-align: middle;
+}
+
+.copy-inline-button {
+ font-size: 10px;
+ padding: 2px 4px;
+ margin-left: 8px;
+ background: #eee;
+ border: none;
+ border-radius: 3px;
+ cursor: pointer;
+ opacity: 0.7;
+}
+
+.copy-inline-button:hover {
+ opacity: 1;
+ background: #ddd;
+}
+
+.tool-input.compact,
+.tool-result.compact {
+ margin: 2px 0;
+ padding: 4px;
+ font-size: 12px;
+}
+
+/* Removed old compact container CSS */
+
+/* Ultra-compact tool call box styles */
+.tool-calls-header {
+ /* Empty header - just small spacing */
+}
+
+.tool-call-boxes-row {
+ display: flex;
+ flex-wrap: wrap;
+ gap: 8px;
+ margin-bottom: 8px;
+}
+
+.tool-call-wrapper {
+ display: flex;
+ flex-direction: column;
+ margin-bottom: 4px;
+}
+
+.tool-call-box {
+ display: inline-flex;
+ align-items: center;
+ background: #f0f0f0;
+ border-radius: 4px;
+ padding: 3px 8px;
+ font-size: 12px;
+ cursor: pointer;
+ max-width: 320px;
+ position: relative;
+ border: 1px solid #ddd;
+ transition: background-color 0.2s;
+}
+
+.tool-call-box:hover {
+ background-color: #e8e8e8;
+}
+
+.tool-call-box.expanded {
+ background-color: #e0e0e0;
+ border-bottom-left-radius: 0;
+ border-bottom-right-radius: 0;
+ border-bottom: 1px solid #ccc;
+}
+
+.tool-call-name {
+ font-weight: bold;
+ margin-right: 6px;
+ color: #444;
+}
+
+.tool-call-input {
+ color: #666;
+ white-space: nowrap;
+ overflow: hidden;
+ text-overflow: ellipsis;
+ font-family: monospace;
+ font-size: 11px;
+}
+
+/* Removed old expanded view CSS */
+
+/* Custom styles for IRC-like experience */
+.user .message-content {
+ border-left-color: #2196f3;
+}
+
+.agent .message-content {
+ border-left-color: #4caf50;
+}
+
+.tool .message-content {
+ border-left-color: #ff9800;
+}
+
+.error .message-content {
+ border-left-color: #f44336;
+}
+
+/* Make message type display bold but without the IRC-style markers */
+.message-type {
+ font-weight: bold;
+}
+
+/* Tool call cards */
+.tool-call-cards-container {
+ display: flex;
+ flex-direction: column;
+ gap: 8px;
+ margin-top: 8px;
+}
+
+/* Commit message styling */
+.message.commit {
+ background-color: #f0f7ff;
+ border-left: 4px solid #0366d6;
+}
+
+.commits-container {
+ margin-top: 10px;
+ padding: 5px;
+}
+
+.commits-header {
+ font-weight: bold;
+ margin-bottom: 5px;
+ color: #24292e;
+}
+
+.commit-boxes-row {
+ display: flex;
+ flex-wrap: wrap;
+ gap: 8px;
+ margin-top: 8px;
+}
+
+.tool-call-card {
+ display: flex;
+ flex-direction: column;
+ border: 1px solid #ddd;
+ border-radius: 6px;
+ background-color: #f9f9f9;
+ overflow: hidden;
+ cursor: pointer;
+}
+
+/* Compact view (default) */
+.tool-call-compact-view {
+ display: flex;
+ align-items: center;
+ padding: 0px 6px;
+ gap: 8px;
+ background-color: #f9f9f9;
+ font-size: 0.9em;
+ white-space: nowrap;
+ overflow: visible; /* Don't hide overflow, we'll handle text truncation per element */
+ position: relative; /* For positioning the expand icon */
+}
+
+/* Expanded view (hidden by default) */
+.tool-call-card.collapsed .tool-call-expanded-view {
+ display: none;
+}
+
+.tool-call-expanded-view {
+ display: flex;
+ flex-direction: column;
+ border-top: 1px solid #eee;
+}
+
+.tool-call-header {
+ display: flex;
+ align-items: center;
+ justify-content: space-between;
+ padding: 6px 10px;
+ background-color: #f0f0f0;
+ border-bottom: 1px solid #ddd;
+ font-weight: bold;
+}
+
+.tool-call-name {
+ font-family: var(--monospace-font);
+ color: #0066cc;
+ font-weight: bold;
+}
+
+.tool-call-status {
+ margin-right: 4px;
+ min-width: 1em;
+ text-align: center;
+}
+
+.tool-call-status.spinner {
+ animation: spin 1s infinite linear;
+ display: inline-block;
+ width: 1em;
+}
+
+.tool-call-time {
+ margin-left: 8px;
+ font-size: 0.85em;
+ color: #666;
+ font-weight: normal;
+}
+
+.tool-call-input-preview {
+ color: #555;
+ font-family: var(--monospace-font);
+ overflow: hidden;
+ text-overflow: ellipsis;
+ white-space: nowrap;
+ max-width: 30%;
+ background-color: rgba(240, 240, 240, 0.5);
+ padding: 2px 5px;
+ border-radius: 3px;
+ font-size: 0.9em;
+}
+
+.tool-call-result-preview {
+ color: #28a745;
+ font-family: var(--monospace-font);
+ overflow: hidden;
+ text-overflow: ellipsis;
+ white-space: nowrap;
+ max-width: 40%;
+ background-color: rgba(240, 248, 240, 0.5);
+ padding: 2px 5px;
+ border-radius: 3px;
+ font-size: 0.9em;
+}
+
+.tool-call-expand-icon {
+ position: absolute;
+ right: 10px;
+ font-size: 0.8em;
+ color: #888;
+}
+
+.tool-call-input {
+ padding: 6px 10px;
+ border-bottom: 1px solid #eee;
+ font-family: var(--monospace-font);
+ font-size: 0.9em;
+ white-space: pre-wrap;
+ word-break: break-all;
+ background-color: #f5f5f5;
+}
+
+.tool-call-result {
+ padding: 6px 10px;
+ font-family: var(--monospace-font);
+ font-size: 0.9em;
+ white-space: pre-wrap;
+ max-height: 300px;
+ overflow-y: auto;
+}
+
+.tool-call-result pre {
+ margin: 0;
+ white-space: pre-wrap;
+}
+
+@keyframes spin {
+ 0% {
+ transform: rotate(0deg);
+ }
+ 100% {
+ transform: rotate(360deg);
+ }
+}
+
+/* Standalone tool messages (legacy/disconnected) */
+.tool-details.standalone .tool-header {
+ border-radius: 4px;
+ background-color: #fff3cd;
+ border-color: #ffeeba;
+}
+
+.tool-details.standalone .tool-warning {
+ margin-left: 10px;
+ font-size: 0.85em;
+ color: #856404;
+ font-style: italic;
+}
+
+/* Tool call expanded view with sections */
+.tool-call-section {
+ border-bottom: 1px solid #eee;
+}
+
+.tool-call-section:last-child {
+ border-bottom: none;
+}
+
+.tool-call-section-label {
+ display: flex;
+ justify-content: space-between;
+ align-items: center;
+ padding: 8px 10px;
+ background-color: #f5f5f5;
+ font-weight: bold;
+ font-size: 0.9em;
+}
+
+.tool-call-section-content {
+ padding: 0;
+}
+
+.tool-call-copy-btn {
+ background-color: #f0f0f0;
+ border: 1px solid #ddd;
+ border-radius: 4px;
+ padding: 2px 8px;
+ font-size: 0.8em;
+ cursor: pointer;
+ transition: background-color 0.2s;
+}
+
+.tool-call-copy-btn:hover {
+ background-color: #e0e0e0;
+}
+
+/* Override for tool call input in expanded view */
+.tool-call-section-content .tool-call-input {
+ margin: 0;
+ padding: 8px 10px;
+ border: none;
+ background-color: #fff;
+ max-height: 300px;
+ overflow-y: auto;
+}
+
+.title-container {
+ display: flex;
+ flex-direction: column;
+ max-width: 33%;
+ overflow: hidden;
+}
+
+.commit-box {
+ border: 1px solid #d1d5da;
+ border-radius: 4px;
+ overflow: hidden;
+ background-color: #ffffff;
+ box-shadow: 0 1px 3px rgba(0, 0, 0, 0.1);
+ max-width: 100%;
+ display: flex;
+ flex-direction: column;
+}
+
+.commit-preview {
+ padding: 8px 12px;
+ cursor: pointer;
+ font-family: monospace;
+ background-color: #f6f8fa;
+ border-bottom: 1px dashed #d1d5da;
+}
+
+.commit-preview:hover {
+ background-color: #eef2f6;
+}
+
+.commit-hash {
+ color: #0366d6;
+ font-weight: bold;
+}
+
+.commit-details {
+ padding: 8px 12px;
+ max-height: 200px;
+ overflow-y: auto;
+}
+
+.commit-details pre {
+ margin: 0;
+ white-space: pre-wrap;
+ word-break: break-word;
+}
+
+.commit-details.is-hidden {
+ display: none;
+}
+
+.pushed-branch {
+ color: #28a745;
+ font-weight: 500;
+ margin-left: 6px;
+}
+
+.commit-diff-button {
+ padding: 6px 12px;
+ border: 1px solid #ccc;
+ border-radius: 3px;
+ background-color: #f7f7f7;
+ color: #24292e;
+ font-size: 12px;
+ cursor: pointer;
+ transition: all 0.2s ease;
+ margin: 8px 12px;
+ display: block;
+}
+
+.commit-diff-button:hover {
+ background-color: #e7e7e7;
+ border-color: #aaa;
+}
+
+/* Hide views initially to prevent flash of content */
+.timeline-container .timeline,
+.timeline-container .diff-view,
+.timeline-container .chart-view,
+.timeline-container .terminal-view {
+ visibility: hidden;
+}
+
+/* Will be set by JavaScript once we know which view to display */
+.timeline-container.view-initialized .timeline,
+.timeline-container.view-initialized .diff-view,
+.timeline-container.view-initialized .chart-view,
+.timeline-container.view-initialized .terminal-view {
+ visibility: visible;
+}
+
+/* Markdown-rendered message bodies: centered block with tightened
+   paragraph spacing. */
+.markdown-content {
+  box-sizing: border-box;
+  min-width: 200px;
+  margin: 0 auto;
+}
+
+.markdown-content p {
+  margin-block-start: 0.5em;
+  margin-block-end: 0.5em;
+}
\ No newline at end of file
diff --git a/loop/webui/src/timeline.html b/loop/webui/src/timeline.html
new file mode 100644
index 0000000..46144c1
--- /dev/null
+++ b/loop/webui/src/timeline.html
@@ -0,0 +1,158 @@
+<!DOCTYPE html>
+<html lang="en">
+ <head>
+ <meta charset="UTF-8" />
+ <meta name="viewport" content="width=device-width, initial-scale=1.0" />
+ <title>sketch coding assistant</title>
+ <!-- Import the diff2html CSS -->
+ <link rel="stylesheet" href="static/diff2html.min.css" />
+ <link rel="stylesheet" href="static/timeline.css" />
+ <link rel="stylesheet" href="static/diff2.css" />
+ <link rel="stylesheet" href="static/xterm.css" />
+ <link rel="stylesheet" href="static/tailwind.css" />
+ </head>
+ <body>
+ <div class="top-banner">
+ <div class="title-container">
+ <h1 class="banner-title">sketch coding assistant</h1>
+ <h2 id="chatTitle" class="chat-title"></h2>
+ </div>
+ <div class="info-grid">
+ <div class="info-item">
+ <a href="logs" class="text-blue-600 font-medium hover:text-blue-800 hover:underline">Logs</a>
+ </div>
+ <div class="info-item">
+ <a href="download" class="text-blue-600 font-medium hover:text-blue-800 hover:underline">Download</a>
+ </div>
+ <div class="info-item">
+ <span id="hostname" class="info-value">Loading...</span>
+ </div>
+ <div class="info-item">
+ <span id="workingDir" class="info-value">Loading...</span>
+ </div>
+ <div class="info-item">
+ <span class="info-label">Commit:</span>
+ <span id="initialCommit" class="info-value">Loading...</span>
+ </div>
+ <div class="info-item">
+ <span class="info-label">Msgs:</span>
+ <span id="messageCount" class="info-value">0</span>
+ </div>
+ <div class="info-item">
+ <span class="info-label">In:</span>
+ <span id="inputTokens" class="info-value">0</span>
+ </div>
+ <div class="info-item">
+ <span class="info-label">Cache Read:</span>
+ <span id="cacheReadInputTokens" class="info-value">0</span>
+ </div>
+ <div class="info-item">
+ <span class="info-label">Cache Create:</span>
+ <span id="cacheCreationInputTokens" class="info-value">0</span>
+ </div>
+ <div class="info-item">
+ <span class="info-label">Out:</span>
+ <span id="outputTokens" class="info-value">0</span>
+ </div>
+ <div class="info-item">
+ <span class="info-label">Cost:</span>
+ <span id="totalCost" class="info-value cost">$0.00</span>
+ </div>
+ </div>
+ <div class="refresh-control">
+ <div class="view-mode-buttons">
+ <button
+ id="showConversationButton"
+ class="emoji-button"
+ title="Conversation View"
+ >
+            💬
+ </button>
+ <button
+ id="showDiff2Button"
+ class="emoji-button"
+ title="Diff View"
+ >
+            ±
+ </button>
+ <button
+ id="showChartsButton"
+ class="emoji-button"
+ title="Charts View"
+ >
+            📊
+ </button>
+ <button
+ id="showTerminalButton"
+ class="emoji-button"
+ title="Terminal View"
+ >
+            💻
+ </button>
+ </div>
+ <button id="stopButton" class="refresh-button stop-button">Stop</button>
+ <div class="poll-updates">
+ <input type="checkbox" id="pollToggle" checked />
+ <label for="pollToggle">Poll</label>
+ </div>
+ <div class="status-container">
+ <span id="pollingIndicator" class="polling-indicator"></span>
+ <span id="statusText" class="status-text"></span>
+ </div>
+ </div>
+ </div>
+
+ <div class="timeline-container">
+ <div id="timeline" class="timeline empty"></div>
+ <div id="diff2View" class="diff-view" style="display: none">
+ <div id="diff2Container" class="diff-container">
+ <div id="diff-view-controls">
+ <div class="diff-view-format">
+ <label>
+ <input type="radio" name="diffViewFormat" value="side-by-side" checked> Side-by-side
+ </label>
+ <label>
+ <input type="radio" name="diffViewFormat" value="line-by-line"> Line-by-line
+ </label>
+ </div>
+ </div>
+ <div id="diff2htmlContent" class="diff2html-content"></div>
+ </div>
+ </div>
+ <div id="chartView" class="chart-view" style="display: none">
+ <div id="chartContainer" class="chart-container"></div>
+ </div>
+ <div id="terminalView" class="terminal-view" style="display: none">
+ <div id="terminalContainer" class="terminal-container"></div>
+ </div>
+ <div id="diffCommentBox" class="diff-comment-box" style="display: none">
+ <h3>Add a comment</h3>
+ <div class="selected-line">
+ Line:
+ <pre id="selectedLine"></pre>
+ </div>
+ <textarea
+ id="diffCommentInput"
+ placeholder="Enter your comment about this line..."
+ ></textarea>
+ <div class="diff-comment-buttons">
+ <button id="submitDiffComment">Add Comment</button>
+ <button id="cancelDiffComment">Cancel</button>
+ </div>
+ </div>
+ </div>
+
+ <div class="chat-container">
+ <div class="chat-input-wrapper">
+ <textarea
+ id="chatInput"
+ placeholder="Type your message here and press Enter to send..."
+ autofocus
+ ></textarea>
+ <button id="sendChatButton">Send</button>
+ </div>
+ </div>
+
+ <script src="static/timeline.js"></script>
+ </body>
+</html>
diff --git a/loop/webui/src/timeline.ts b/loop/webui/src/timeline.ts
new file mode 100644
index 0000000..eef2726
--- /dev/null
+++ b/loop/webui/src/timeline.ts
@@ -0,0 +1,641 @@
+import { TimelineMessage } from "./timeline/types";
+import { formatNumber } from "./timeline/utils";
+import { checkShouldScroll } from "./timeline/scroll";
+import { ChartManager } from "./timeline/charts";
+import { ConnectionStatus, DataManager } from "./timeline/data";
+import { DiffViewer } from "./timeline/diffviewer";
+import { MessageRenderer } from "./timeline/renderer";
+import { TerminalHandler } from "./timeline/terminal";
+
+/**
+ * TimelineManager - Class to manage the timeline UI and functionality
+ */
+class TimelineManager {
+ private diffViewer = new DiffViewer();
+ private terminalHandler = new TerminalHandler();
+ private chartManager = new ChartManager();
+ private messageRenderer = new MessageRenderer();
+ private dataManager = new DataManager();
+
+ private viewMode: "chat" | "diff2" | "charts" | "terminal" = "chat";
+ shouldScrollToBottom: boolean;
+
+ constructor() {
+ // Initialize when DOM is ready
+ document.addEventListener("DOMContentLoaded", () => {
+ // First initialize from URL params to prevent flash of incorrect view
+ // This must happen before setting up other event handlers
+ void this.initializeViewFromUrl()
+ .then(() => {
+ // Continue with the rest of initialization
+ return this.initialize();
+ })
+ .catch((err) => {
+ console.error("Failed to initialize timeline:", err);
+ });
+ });
+
+ // Add popstate event listener to handle browser back/forward navigation
+ window.addEventListener("popstate", (event) => {
+ if (event.state && event.state.mode) {
+ // Using void to handle the promise returned by toggleViewMode
+ void this.toggleViewMode(event.state.mode);
+ } else {
+ // If no state or no mode in state, default to chat view
+ void this.toggleViewMode("chat");
+ }
+ });
+
+ // Listen for commit diff event from MessageRenderer
+ document.addEventListener("showCommitDiff", ((e: CustomEvent) => {
+ const { commitHash } = e.detail;
+ this.diffViewer.showCommitDiff(
+ commitHash,
+ (mode: "chat" | "diff2" | "terminal" | "charts") =>
+ this.toggleViewMode(mode)
+ );
+ }) as EventListener);
+ }
+
+  /**
+   * Initialize the timeline manager: wire up data-manager events, the
+   * view-mode buttons, the stop button, the poll toggle, the chat box,
+   * keyboard shortcuts, and dynamic layout spacing.
+   */
+  private async initialize(): Promise<void> {
+    // Set up data manager event listeners
+    this.dataManager.addEventListener(
+      "dataChanged",
+      this.handleDataChanged.bind(this)
+    );
+    this.dataManager.addEventListener(
+      "connectionStatusChanged",
+      this.handleConnectionStatusChanged.bind(this)
+    );
+
+    // Initialize the data manager
+    await this.dataManager.initialize();
+
+    // URL parameters have already been read in constructor
+    // to prevent flash of incorrect content
+
+    // Set up view-mode button handlers. toggleViewMode is async, so
+    // await it inside these async handlers rather than leaving a
+    // floating promise whose rejection would go unobserved.
+    document
+      .getElementById("showConversationButton")
+      ?.addEventListener("click", async () => {
+        await this.toggleViewMode("chat");
+      });
+
+    // Set up diff2 button handler
+    document
+      .getElementById("showDiff2Button")
+      ?.addEventListener("click", async () => {
+        await this.toggleViewMode("diff2");
+      });
+
+    // Set up charts button handler
+    document
+      .getElementById("showChartsButton")
+      ?.addEventListener("click", async () => {
+        await this.toggleViewMode("charts");
+      });
+
+    // Set up terminal button handler
+    document
+      .getElementById("showTerminalButton")
+      ?.addEventListener("click", async () => {
+        await this.toggleViewMode("terminal");
+      });
+
+    // The active button will be set by toggleViewMode
+    // We'll initialize view based on URL params or default to chat view if no params
+    // We defer button activation to the toggleViewMode function
+
+    // Set up stop button handler (await for the same reason as above)
+    document
+      .getElementById("stopButton")
+      ?.addEventListener("click", async () => {
+        await this.stopInnerLoop();
+      });
+
+    const pollToggleCheckbox = document.getElementById(
+      "pollToggle"
+    ) as HTMLInputElement;
+    pollToggleCheckbox?.addEventListener("change", () => {
+      this.dataManager.setPollingEnabled(pollToggleCheckbox.checked);
+      const statusText = document.getElementById("statusText");
+      if (statusText) {
+        if (pollToggleCheckbox.checked) {
+          statusText.textContent = "Polling for updates...";
+        } else {
+          statusText.textContent = "Polling stopped";
+        }
+      }
+    });
+
+    // Initial data fetch and polling is now handled by the DataManager
+
+    // Set up chat functionality
+    this.setupChatBox();
+
+    // Set up keyboard shortcuts
+    this.setupKeyboardShortcuts();
+
+    // Set up spacing adjustments
+    this.adjustChatSpacing();
+    window.addEventListener("resize", () => this.adjustChatSpacing());
+  }
+
+ /**
+ * Set up chat box event listeners
+ */
+ private setupChatBox(): void {
+ const chatInput = document.getElementById(
+ "chatInput"
+ ) as HTMLTextAreaElement;
+ const sendButton = document.getElementById("sendChatButton");
+
+ // Handle pressing Enter in the text area
+ chatInput?.addEventListener("keydown", (event: KeyboardEvent) => {
+ // Send message if Enter is pressed without Shift key
+ if (event.key === "Enter" && !event.shiftKey) {
+ event.preventDefault(); // Prevent default newline
+ this.sendChatMessage();
+ }
+ });
+
+ // Handle send button click
+ sendButton?.addEventListener("click", () => this.sendChatMessage());
+
+ // Set up mutation observer for the chat container
+ if (chatInput) {
+ chatInput.addEventListener("input", () => {
+ // When content changes, adjust the spacing
+ requestAnimationFrame(() => this.adjustChatSpacing());
+ });
+ }
+ }
+
+ /**
+ * Send the chat message to the server
+ */
+ private async sendChatMessage(): Promise<void> {
+ const chatInput = document.getElementById(
+ "chatInput"
+ ) as HTMLTextAreaElement;
+ if (!chatInput) return;
+
+ const message = chatInput.value.trim();
+
+ // Don't send empty messages
+ if (!message) return;
+
+ try {
+ // Send the message to the server
+ const response = await fetch("chat", {
+ method: "POST",
+ headers: {
+ "Content-Type": "application/json",
+ },
+ body: JSON.stringify({ message }),
+ });
+
+ if (!response.ok) {
+ const errorData = await response.text();
+ throw new Error(`Server error: ${response.status} - ${errorData}`);
+ }
+
+ // Clear the input after sending
+ chatInput.value = "";
+
+ // Reset data manager state to force a full refresh after sending a message
+ // This ensures we get all messages in the correct order
+ // Use private API for now - TODO: add a resetState() method to DataManager
+ (this.dataManager as any).nextFetchIndex = 0;
+ (this.dataManager as any).currentFetchStartIndex = 0;
+
+ // If in diff view, switch to conversation view
+ if (this.viewMode === "diff2") {
+ await this.toggleViewMode("chat");
+ }
+
+ // Refresh the timeline data to show the new message
+ await this.dataManager.fetchData();
+ } catch (error) {
+ console.error("Error sending chat message:", error);
+ const statusText = document.getElementById("statusText");
+ if (statusText) {
+ statusText.textContent = "Error sending message";
+ }
+ }
+ }
+
+ /**
+ * Handle data changed event from the data manager
+ */
+ private handleDataChanged(eventData: {
+ state: any;
+ newMessages: TimelineMessage[];
+ isFirstFetch?: boolean;
+ }): void {
+ const { state, newMessages, isFirstFetch } = eventData;
+
+ // Check if we should scroll to bottom BEFORE handling new data
+ this.shouldScrollToBottom = this.checkShouldScroll();
+
+ // Update state info in the UI
+ this.updateUIWithState(state);
+
+ // Update the timeline if there are new messages
+ if (newMessages.length > 0) {
+ // Initialize the message renderer with current state
+ this.messageRenderer.initialize(
+ this.dataManager.getIsFirstLoad(),
+ this.dataManager.getCurrentFetchStartIndex()
+ );
+
+ this.messageRenderer.renderTimeline(newMessages, isFirstFetch || false);
+
+ // Update chart data using our full messages array
+ this.chartManager.setChartData(
+ this.chartManager.calculateCumulativeCostData(
+ this.dataManager.getMessages()
+ )
+ );
+
+ // If in charts view, update the charts
+ if (this.viewMode === "charts") {
+ this.chartManager.renderCharts();
+ }
+
+ const statusTextEl = document.getElementById("statusText");
+ if (statusTextEl) {
+ statusTextEl.textContent = "Updated just now";
+ }
+ } else {
+ const statusTextEl = document.getElementById("statusText");
+ if (statusTextEl) {
+ statusTextEl.textContent = "No new messages";
+ }
+ }
+ }
+
+ /**
+ * Handle connection status changed event from the data manager
+ */
+ private handleConnectionStatusChanged(
+ status: ConnectionStatus,
+ errorMessage?: string
+ ): void {
+ const pollingIndicator = document.getElementById("pollingIndicator");
+ if (!pollingIndicator) return;
+
+ // Remove all status classes
+ pollingIndicator.classList.remove("active", "error");
+
+ // Add appropriate class based on status
+ if (status === "connected") {
+ pollingIndicator.classList.add("active");
+ } else if (status === "disconnected") {
+ pollingIndicator.classList.add("error");
+ }
+
+ // Update status text if error message is provided
+ if (errorMessage) {
+ const statusTextEl = document.getElementById("statusText");
+ if (statusTextEl) {
+ statusTextEl.textContent = errorMessage;
+ }
+ }
+ }
+
+  /**
+   * Update the banner/info-grid UI elements with state data.
+   * All reads use optional chaining so a partial or missing state
+   * object falls back to "Unknown"/zero placeholders.
+   */
+  private updateUIWithState(state: any): void {
+    // Update state info in the UI with safe getters
+    const hostnameEl = document.getElementById("hostname");
+    if (hostnameEl) {
+      hostnameEl.textContent = state?.hostname ?? "Unknown";
+    }
+
+    const workingDirEl = document.getElementById("workingDir");
+    if (workingDirEl) {
+      workingDirEl.textContent = state?.working_dir ?? "Unknown";
+    }
+
+    const initialCommitEl = document.getElementById("initialCommit");
+    if (initialCommitEl) {
+      // Show an abbreviated (8-char) commit hash
+      initialCommitEl.textContent = state?.initial_commit
+        ? state.initial_commit.substring(0, 8)
+        : "Unknown";
+    }
+
+    const messageCountEl = document.getElementById("messageCount");
+    if (messageCountEl) {
+      // Convert explicitly: message_count appears to be numeric (TODO:
+      // confirm against the server state schema); previously a number or
+      // the string "0" was assigned via implicit coercion.
+      messageCountEl.textContent = String(state?.message_count ?? 0);
+    }
+
+    const chatTitleEl = document.getElementById("chatTitle");
+    const bannerTitleEl = document.querySelector(".banner-title");
+
+    if (chatTitleEl && bannerTitleEl) {
+      if (state?.title) {
+        chatTitleEl.textContent = state.title;
+        chatTitleEl.style.display = "block";
+        bannerTitleEl.textContent = "sketch"; // Shorten title when chat title exists
+      } else {
+        chatTitleEl.style.display = "none";
+        bannerTitleEl.textContent = "sketch coding assistant"; // Full title when no chat title
+      }
+    }
+
+    // Get token and cost info safely
+    const inputTokens = state?.total_usage?.input_tokens ?? 0;
+    const outputTokens = state?.total_usage?.output_tokens ?? 0;
+    const cacheReadInputTokens =
+      state?.total_usage?.cache_read_input_tokens ?? 0;
+    const cacheCreationInputTokens =
+      state?.total_usage?.cache_creation_input_tokens ?? 0;
+    const totalCost = state?.total_usage?.total_cost_usd ?? 0;
+
+    const inputTokensEl = document.getElementById("inputTokens");
+    if (inputTokensEl) {
+      inputTokensEl.textContent = formatNumber(inputTokens, "0");
+    }
+
+    const outputTokensEl = document.getElementById("outputTokens");
+    if (outputTokensEl) {
+      outputTokensEl.textContent = formatNumber(outputTokens, "0");
+    }
+
+    const cacheReadInputTokensEl = document.getElementById(
+      "cacheReadInputTokens"
+    );
+    if (cacheReadInputTokensEl) {
+      cacheReadInputTokensEl.textContent = formatNumber(
+        cacheReadInputTokens,
+        "0"
+      );
+    }
+
+    const cacheCreationInputTokensEl = document.getElementById(
+      "cacheCreationInputTokens"
+    );
+    if (cacheCreationInputTokensEl) {
+      cacheCreationInputTokensEl.textContent = formatNumber(
+        cacheCreationInputTokens,
+        "0"
+      );
+    }
+
+    const totalCostEl = document.getElementById("totalCost");
+    if (totalCostEl) {
+      totalCostEl.textContent = `$${totalCost.toFixed(2)}`;
+    }
+  }
+
+ /**
+ * Check if we should scroll to the bottom
+ */
+ private checkShouldScroll(): boolean {
+ return checkShouldScroll(this.dataManager.getIsFirstLoad());
+ }
+
+ /**
+ * Dynamically adjust body padding based on the chat container height and top banner
+ */
+ private adjustChatSpacing(): void {
+ const chatContainer = document.querySelector(".chat-container");
+ const topBanner = document.querySelector(".top-banner");
+
+ if (chatContainer) {
+ const chatHeight = (chatContainer as HTMLElement).offsetHeight;
+ document.body.style.paddingBottom = `${chatHeight + 20}px`; // 20px extra for spacing
+ }
+
+ if (topBanner) {
+ const topHeight = (topBanner as HTMLElement).offsetHeight;
+ document.body.style.paddingTop = `${topHeight + 20}px`; // 20px extra for spacing
+ }
+ }
+
+ /**
+ * Set up keyboard shortcuts
+ */
+ private setupKeyboardShortcuts(): void {
+ // Add keyboard shortcut to automatically copy selected text with Ctrl+C (or Command+C on Mac)
+ document.addEventListener("keydown", (e: KeyboardEvent) => {
+ // We only want to handle Ctrl+C or Command+C
+ if ((e.ctrlKey || e.metaKey) && e.key === "c") {
+ // If text is already selected, we don't need to do anything special
+ // as the browser's default behavior will handle copying
+ // But we could add additional behavior here if needed
+ }
+ });
+ }
+
+ /**
+ * Toggle between different view modes: chat, diff2, charts
+ */
+ public async toggleViewMode(
+ mode: "chat" | "diff2" | "charts" | "terminal"
+ ): Promise<void> {
+ // Set the new view mode
+ this.viewMode = mode;
+
+ // Update URL with the current view mode
+ this.updateUrlForViewMode(mode);
+
+ // Get DOM elements
+ const timeline = document.getElementById("timeline");
+ const diff2View = document.getElementById("diff2View");
+ const chartView = document.getElementById("chartView");
+ const container = document.querySelector(".timeline-container");
+ const terminalView = document.getElementById("terminalView");
+ const conversationButton = document.getElementById(
+ "showConversationButton"
+ );
+ const diff2Button = document.getElementById("showDiff2Button");
+ const chartsButton = document.getElementById("showChartsButton");
+ const terminalButton = document.getElementById("showTerminalButton");
+
+ if (
+ !timeline ||
+ !diff2View ||
+ !chartView ||
+ !container ||
+ !conversationButton ||
+ !diff2Button ||
+ !chartsButton ||
+ !terminalView ||
+ !terminalButton
+ ) {
+ console.error("Required DOM elements not found");
+ return;
+ }
+
+ // Hide all views first
+ timeline.style.display = "none";
+ diff2View.style.display = "none";
+ chartView.style.display = "none";
+ terminalView.style.display = "none";
+
+ // Reset all button states
+ conversationButton.classList.remove("active");
+ diff2Button.classList.remove("active");
+ chartsButton.classList.remove("active");
+ terminalButton.classList.remove("active");
+
+ // Remove diff2-active and diff-active classes from container
+ container.classList.remove("diff2-active");
+ container.classList.remove("diff-active");
+
+ // If switching to chat view, clear the current commit hash
+ if (mode === "chat") {
+ this.diffViewer.clearCurrentCommitHash();
+ }
+
+ // Add class to indicate views are initialized (prevents flash of content)
+ container.classList.add("view-initialized");
+
+ // Show the selected view based on mode
+ switch (mode) {
+ case "chat":
+ timeline.style.display = "block";
+ conversationButton.classList.add("active");
+ break;
+ case "diff2":
+ diff2View.style.display = "block";
+ diff2Button.classList.add("active");
+ this.diffViewer.setViewMode(mode); // Update view mode in diff viewer
+ await this.diffViewer.loadDiff2HtmlContent();
+ break;
+ case "charts":
+ chartView.style.display = "block";
+ chartsButton.classList.add("active");
+ await this.chartManager.renderCharts();
+ break;
+ case "terminal":
+ terminalView.style.display = "block";
+ terminalButton.classList.add("active");
+ this.terminalHandler.setViewMode(mode); // Update view mode in terminal handler
+ this.diffViewer.setViewMode(mode); // Update view mode in diff viewer
+ await this.initializeTerminal();
+ break;
+ }
+ }
+
+ /**
+ * Initialize the terminal view
+ */
+ private async initializeTerminal(): Promise<void> {
+ // Use the TerminalHandler to initialize the terminal
+ await this.terminalHandler.initializeTerminal();
+ }
+
+ /**
+ * Initialize the view based on URL parameters
+ * This allows bookmarking and sharing of specific views
+ */
+ private async initializeViewFromUrl(): Promise<void> {
+ // Parse the URL parameters
+ const urlParams = new URLSearchParams(window.location.search);
+ const viewParam = urlParams.get("view");
+ const commitParam = urlParams.get("commit");
+
+ // Default to chat view if no valid view parameter is provided
+ if (!viewParam) {
+ // Explicitly set chat view to ensure button state is correct
+ await this.toggleViewMode("chat");
+ return;
+ }
+
+ // Check if the view parameter is valid
+ if (
+ viewParam === "chat" ||
+ viewParam === "diff2" ||
+ viewParam === "charts" ||
+ viewParam === "terminal"
+ ) {
+ // If it's a diff view with a commit hash, set the commit hash
+ if (viewParam === "diff2" && commitParam) {
+ this.diffViewer.setCurrentCommitHash(commitParam);
+ }
+
+ // Set the view mode
+ await this.toggleViewMode(
+ viewParam as "chat" | "diff2" | "charts" | "terminal"
+ );
+ }
+ }
+
+ /**
+ * Update URL to reflect current view mode for bookmarking and sharing
+ * @param mode The current view mode
+ */
+ private updateUrlForViewMode(
+ mode: "chat" | "diff2" | "charts" | "terminal"
+ ): void {
+ // Get the current URL without search parameters
+ const url = new URL(window.location.href);
+
+ // Clear existing parameters
+ url.search = "";
+
+ // Only add view parameter if not in default chat view
+ if (mode !== "chat") {
+ url.searchParams.set("view", mode);
+
+ // If in diff view and there's a commit hash, include that too
+ if (mode === "diff2" && this.diffViewer.getCurrentCommitHash()) {
+ url.searchParams.set("commit", this.diffViewer.getCurrentCommitHash());
+ }
+ }
+
+ // Update the browser history without reloading the page
+ window.history.pushState({ mode }, "", url.toString());
+ }
+
+ /**
+ * Stop the inner loop by calling the /cancel endpoint
+ */
+ private async stopInnerLoop(): Promise<void> {
+ if (!confirm("Are you sure you want to stop the current operation?")) {
+ return;
+ }
+
+ try {
+ const statusText = document.getElementById("statusText");
+ if (statusText) {
+ statusText.textContent = "Cancelling...";
+ }
+
+ const response = await fetch("cancel", {
+ method: "POST",
+ headers: {
+ "Content-Type": "application/json",
+ },
+ body: JSON.stringify({ reason: "User requested cancellation via UI" }),
+ });
+
+ if (!response.ok) {
+ const errorData = await response.text();
+ throw new Error(`Server error: ${response.status} - ${errorData}`);
+ }
+
+ // Parse the response
+ const _result = await response.json();
+ if (statusText) {
+ statusText.textContent = "Operation cancelled";
+ }
+ } catch (error) {
+ console.error("Error cancelling operation:", error);
+ const statusText = document.getElementById("statusText");
+ if (statusText) {
+ statusText.textContent = "Error cancelling operation";
+ }
+ }
+ }
+}
+
+// Create and initialize the timeline manager when the page loads
+const _timelineManager = new TimelineManager();
diff --git a/loop/webui/src/timeline/charts.ts b/loop/webui/src/timeline/charts.ts
new file mode 100644
index 0000000..0ed56e8
--- /dev/null
+++ b/loop/webui/src/timeline/charts.ts
@@ -0,0 +1,468 @@
+import type { TimelineMessage } from "./types";
+import vegaEmbed from "vega-embed";
+import { TopLevelSpec } from "vega-lite";
+
+/**
+ * ChartManager handles all chart-related functionality for the timeline.
+ * This includes rendering charts, calculating data, and managing chart state.
+ */
+export class ChartManager {
+ private chartData: { timestamp: Date; cost: number }[] = [];
+
+ /**
+ * Create a new ChartManager instance
+ */
+ constructor() {
+ // Redundant with the field initializer above, but kept for explicitness.
+ this.chartData = [];
+ }
+
+ /**
+ * Calculate cumulative cost data from messages
+ */
+ public calculateCumulativeCostData(
+ messages: TimelineMessage[],
+ ): { timestamp: Date; cost: number }[] {
+ // One output point per costed message, carrying the running total so far.
+ const points: { timestamp: Date; cost: number }[] = [];
+ let runningTotal = 0;
+
+ for (const msg of messages ?? []) {
+ // Skip entries that lack a timestamp or a non-zero recorded cost.
+ if (!msg.timestamp || !msg.usage || !msg.usage.cost_usd) {
+ continue;
+ }
+ runningTotal += msg.usage.cost_usd;
+ points.push({ timestamp: new Date(msg.timestamp), cost: runningTotal });
+ }
+
+ return points;
+ }
+
+ /**
+ * Get the current chart data
+ */
+ public getChartData(): { timestamp: Date; cost: number }[] {
+ // Returns the internal array by reference; callers should treat it as read-only.
+ return this.chartData;
+ }
+
+ /**
+ * Set chart data
+ */
+ public setChartData(data: { timestamp: Date; cost: number }[]): void {
+ // Replaces the cached chart data wholesale; no defensive copy is made.
+ this.chartData = data;
+ }
+
+ /**
+ * Fetch all messages to generate chart data
+ */
+ public async fetchAllMessages(): Promise<void> {
+ try {
+ // Fetch all messages in a single request to the relative "messages" endpoint
+ const response = await fetch("messages");
+ if (!response.ok) {
+ throw new Error(`Failed to fetch messages: ${response.status}`);
+ }
+
+ const allMessages = await response.json();
+ if (Array.isArray(allMessages)) {
+ // Sort messages chronologically; entries without a timestamp sort first (epoch 0)
+ allMessages.sort((a, b) => {
+ const dateA = a.timestamp ? new Date(a.timestamp).getTime() : 0;
+ const dateB = b.timestamp ? new Date(b.timestamp).getTime() : 0;
+ return dateA - dateB;
+ });
+
+ // Calculate cumulative cost data from the sorted messages
+ this.chartData = this.calculateCumulativeCostData(allMessages);
+ }
+ } catch (error) {
+ // On any failure, fall back to an empty dataset so charts render a
+ // "no data" message instead of stale or partial data.
+ console.error("Error fetching messages for chart:", error);
+ this.chartData = [];
+ }
+ }
+
+ /**
+ * Render all charts in the chart view
+ */
+ public async renderCharts(): Promise<void> {
+ const chartContainer = document.getElementById("chartContainer");
+ if (!chartContainer) return;
+
+ try {
+ // Show loading state while data is fetched
+ chartContainer.innerHTML = "<div class='loader'></div>";
+
+ // Fetch messages only if the cached chart data is empty
+ if (this.chartData.length === 0) {
+ await this.fetchAllMessages();
+ }
+
+ // Clear the container for multiple charts
+ chartContainer.innerHTML = "";
+
+ // Create cost chart container (renderDollarUsageChart targets #costChart)
+ const costChartDiv = document.createElement("div");
+ costChartDiv.className = "chart-section";
+ costChartDiv.innerHTML =
+ "<h3>Dollar Usage Over Time</h3><div id='costChart'></div>";
+ chartContainer.appendChild(costChartDiv);
+
+ // Create messages chart container (renderMessagesChart targets #messagesChart)
+ const messagesChartDiv = document.createElement("div");
+ messagesChartDiv.className = "chart-section";
+ messagesChartDiv.innerHTML =
+ "<h3>Message Timeline</h3><div id='messagesChart'></div>";
+ chartContainer.appendChild(messagesChartDiv);
+
+ // Render both charts sequentially; each handles its own errors too
+ await this.renderDollarUsageChart();
+ await this.renderMessagesChart();
+ } catch (error) {
+ console.error("Error rendering charts:", error);
+ chartContainer.innerHTML = `<p>Error rendering charts: ${error instanceof Error ? error.message : "Unknown error"}</p>`;
+ }
+ }
+
+ /**
+ * Render the dollar usage chart using Vega-Lite
+ */
+ private async renderDollarUsageChart(): Promise<void> {
+ const costChartContainer = document.getElementById("costChart");
+ if (!costChartContainer) return;
+
+ try {
+ // Nothing to plot — show a friendly message instead of an empty chart
+ if (this.chartData.length === 0) {
+ costChartContainer.innerHTML =
+ "<p>No cost data available to display.</p>";
+ return;
+ }
+
+ // Create a Vega-Lite spec for the line chart. Typed as TopLevelSpec
+ // (already imported for renderMessagesChart) instead of `any`, so the
+ // spec is checked by the compiler and no eslint suppression is needed.
+ const costSpec: TopLevelSpec = {
+ $schema: "https://vega.github.io/schema/vega-lite/v5.json",
+ description: "Cumulative cost over time",
+ width: "container",
+ height: 300,
+ data: {
+ // Serialize Date objects to ISO strings for the temporal encoding
+ values: this.chartData.map((d) => ({
+ timestamp: d.timestamp.toISOString(),
+ cost: d.cost,
+ })),
+ },
+ mark: {
+ type: "line",
+ point: true,
+ },
+ encoding: {
+ x: {
+ field: "timestamp",
+ type: "temporal",
+ title: "Time",
+ axis: {
+ format: "%H:%M:%S",
+ title: "Time",
+ labelAngle: -45,
+ },
+ },
+ y: {
+ field: "cost",
+ type: "quantitative",
+ title: "Cumulative Cost (USD)",
+ axis: {
+ format: "$,.4f",
+ },
+ },
+ tooltip: [
+ {
+ field: "timestamp",
+ type: "temporal",
+ title: "Time",
+ format: "%Y-%m-%d %H:%M:%S",
+ },
+ {
+ field: "cost",
+ type: "quantitative",
+ title: "Cumulative Cost",
+ format: "$,.4f",
+ },
+ ],
+ },
+ };
+
+ // Render the cost chart as SVG with the Vega action menu enabled
+ await vegaEmbed(costChartContainer, costSpec, {
+ actions: true,
+ renderer: "svg",
+ });
+ } catch (error) {
+ console.error("Error rendering dollar usage chart:", error);
+ costChartContainer.innerHTML = `<p>Error rendering dollar usage chart: ${error instanceof Error ? error.message : "Unknown error"}</p>`;
+ }
+ }
+
+ /**
+ * Render the messages timeline chart using Vega-Lite
+ */
+ private async renderMessagesChart(): Promise<void> {
+ const messagesChartContainer = document.getElementById("messagesChart");
+ if (!messagesChartContainer) return;
+
+ try {
+ // Get all messages
+ const response = await fetch("messages");
+ if (!response.ok) {
+ throw new Error(`Failed to fetch messages: ${response.status}`);
+ }
+
+ const allMessages = await response.json();
+ if (!Array.isArray(allMessages) || allMessages.length === 0) {
+ messagesChartContainer.innerHTML =
+ "<p>No messages available to display.</p>";
+ return;
+ }
+
+ // Sort messages chronologically (missing timestamps sort first, epoch 0)
+ allMessages.sort((a, b) => {
+ const dateA = a.timestamp ? new Date(a.timestamp).getTime() : 0;
+ const dateB = b.timestamp ? new Date(b.timestamp).getTime() : 0;
+ return dateA - dateB;
+ });
+
+ // Map each message's timestamp to its chronological index so both
+ // layers share a stable y-axis position.
+ // NOTE(review): messages sharing an identical timestamp collide on the
+ // same key and thus the same index — confirm timestamps are unique.
+ const messageIndexMap = new Map<string, number>();
+ allMessages.forEach((msg, index) => {
+ const msgId = msg.timestamp ? msg.timestamp.toString() : `msg-${index}`;
+ messageIndexMap.set(msgId, index);
+ });
+
+ // Bar marks: messages with explicit start and end times. The shared
+ // tooltip fields come from buildTooltipRecord (previously duplicated
+ // inline for both pipelines).
+ const barData = allMessages
+ .filter((msg) => msg.start_time && msg.end_time)
+ .map((msg) => ({
+ ...this.buildTooltipRecord(msg, messageIndexMap),
+ start_time: new Date(msg.start_time!).toISOString(),
+ end_time: new Date(msg.end_time!).toISOString(),
+ }));
+
+ // Point marks: messages with a timestamp but no start/end pair.
+ const pointData = allMessages
+ .filter((msg) => msg.timestamp && !(msg.start_time && msg.end_time))
+ .map((msg) => ({
+ ...this.buildTooltipRecord(msg, messageIndexMap),
+ time: new Date(msg.timestamp!).toISOString(),
+ }));
+
+ // Check if we have any data to display
+ if (barData.length === 0 && pointData.length === 0) {
+ messagesChartContainer.innerHTML =
+ "<p>No message timing data available to display.</p>";
+ return;
+ }
+
+ // Calculate height based on number of unique messages
+ const chartHeight = 20 * Math.min(allMessages.length, 25); // Max 25 visible at once
+
+ // Create a layered Vega-Lite spec combining bars and points
+ const messagesSpec: TopLevelSpec = {
+ $schema: "https://vega.github.io/schema/vega-lite/v5.json",
+ description: "Message Timeline",
+ width: "container",
+ height: chartHeight,
+ layer: [],
+ };
+
+ // Add bar layer if we have bar data
+ if (barData.length > 0) {
+ messagesSpec.layer.push({
+ data: { values: barData },
+ mark: {
+ type: "bar",
+ height: 16,
+ },
+ encoding: {
+ x: {
+ field: "start_time",
+ type: "temporal",
+ title: "Time",
+ axis: {
+ format: "%H:%M:%S",
+ title: "Time",
+ labelAngle: -45,
+ },
+ },
+ x2: { field: "end_time" },
+ y: {
+ field: "index",
+ type: "ordinal",
+ title: "Message Index",
+ axis: {
+ grid: true,
+ },
+ },
+ color: {
+ field: "message_type",
+ type: "nominal",
+ title: "Message Type",
+ legend: {},
+ },
+ tooltip: [
+ { field: "message_type", type: "nominal", title: "Type" },
+ { field: "tool_name", type: "nominal", title: "Tool" },
+ {
+ field: "start_time",
+ type: "temporal",
+ title: "Start Time",
+ format: "%H:%M:%S.%L",
+ },
+ {
+ field: "end_time",
+ type: "temporal",
+ title: "End Time",
+ format: "%H:%M:%S.%L",
+ },
+ { field: "content", type: "nominal", title: "Content" },
+ { field: "tool_input", type: "nominal", title: "Tool Input" },
+ { field: "tool_result", type: "nominal", title: "Tool Result" },
+ ],
+ },
+ });
+ }
+
+ // Add point layer if we have point data
+ if (pointData.length > 0) {
+ messagesSpec.layer.push({
+ data: { values: pointData },
+ mark: {
+ type: "point",
+ size: 100,
+ filled: true,
+ },
+ encoding: {
+ x: {
+ field: "time",
+ type: "temporal",
+ title: "Time",
+ axis: {
+ format: "%H:%M:%S",
+ title: "Time",
+ labelAngle: -45,
+ },
+ },
+ y: {
+ field: "index",
+ type: "ordinal",
+ title: "Message Index",
+ },
+ color: {
+ field: "message_type",
+ type: "nominal",
+ title: "Message Type",
+ },
+ tooltip: [
+ { field: "message_type", type: "nominal", title: "Type" },
+ { field: "tool_name", type: "nominal", title: "Tool" },
+ {
+ field: "time",
+ type: "temporal",
+ title: "Timestamp",
+ format: "%H:%M:%S.%L",
+ },
+ { field: "content", type: "nominal", title: "Content" },
+ { field: "tool_input", type: "nominal", title: "Tool Input" },
+ { field: "tool_result", type: "nominal", title: "Tool Result" },
+ ],
+ },
+ });
+ }
+
+ // Render the messages timeline chart
+ await vegaEmbed(messagesChartContainer, messagesSpec, {
+ actions: true,
+ renderer: "svg",
+ });
+ } catch (error) {
+ console.error("Error rendering messages chart:", error);
+ messagesChartContainer.innerHTML = `<p>Error rendering messages chart: ${error instanceof Error ? error.message : "Unknown error"}</p>`;
+ }
+ }
+
+ /**
+  * Truncate long text to 100 characters for tooltip readability.
+  * Returns `fallback` when the text is missing or empty.
+  */
+ private truncateForTooltip(text: string | undefined, fallback: string = ""): string {
+ if (!text) return fallback;
+ return text.length > 100 ? text.substring(0, 100) + "..." : text;
+ }
+
+ /**
+  * Build the tooltip/identity fields shared by the bar and point layers for
+  * a single message. Extracted to remove the duplicated mapping logic that
+  * previously existed in both data pipelines of renderMessagesChart.
+  */
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
+ private buildTooltipRecord(msg: any, indexMap: Map<string, number>) {
+ const msgId = msg.timestamp ? msg.timestamp.toString() : "";
+ return {
+ index: indexMap.get(msgId) || 0,
+ message_type: msg.type,
+ content: this.truncateForTooltip(msg.content, "No content"),
+ tool_name: msg.tool_name || "",
+ tool_input: this.truncateForTooltip(msg.input),
+ tool_result: this.truncateForTooltip(msg.tool_result),
+ message: JSON.stringify(msg, null, 2), // Full message for detailed inspection
+ };
+ }
+}
diff --git a/loop/webui/src/timeline/commits.ts b/loop/webui/src/timeline/commits.ts
new file mode 100644
index 0000000..f4303f2
--- /dev/null
+++ b/loop/webui/src/timeline/commits.ts
@@ -0,0 +1,90 @@
+/**
+ * Utility functions for rendering commit messages in the timeline
+ */
+
+import { escapeHTML } from "./utils";
+
+interface Commit {
+ hash: string;
+ subject: string;
+ body: string;
+ pushed_branch?: string;
+}
+
+/**
+ * Create HTML elements to display commits in the timeline
+ * @param commits List of commit information to display
+ * @param diffViewerCallback Callback function to show commit diff when requested
+ * @returns The created HTML container element with commit information
+ */
+export function createCommitsContainer(
+ commits: Commit[],
+ diffViewerCallback: (commitHash: string) => void
+): HTMLElement {
+ const commitsContainer = document.createElement("div");
+ commitsContainer.className = "commits-container";
+
+ // Create a header for commits (e.g. "2 new commits detected")
+ const commitsHeaderRow = document.createElement("div");
+ commitsHeaderRow.className = "commits-header";
+ commitsHeaderRow.textContent = `${commits.length} new commit${commits.length > 1 ? "s" : ""} detected`;
+ commitsContainer.appendChild(commitsHeaderRow);
+
+ // Create a row for commit boxes
+ const commitBoxesRow = document.createElement("div");
+ commitBoxesRow.className = "commit-boxes-row";
+
+ // Add each commit as a box
+ commits.forEach((commit) => {
+ // Create the commit box
+ const commitBox = document.createElement("div");
+ commitBox.className = "commit-box";
+
+ // Show the abbreviated (8-char) commit hash and subject line as the preview
+ const commitPreview = document.createElement("div");
+ commitPreview.className = "commit-preview";
+
+ // Include pushed branch information if available.
+ // NOTE(review): subject/body/branch are escaped, but the hash is
+ // interpolated raw into innerHTML — assumed to be a hex git hash from
+ // a trusted backend; confirm it is never user-controlled.
+ let previewHTML = `<span class="commit-hash">${commit.hash.substring(0, 8)}</span> ${escapeHTML(commit.subject)}`;
+ if (commit.pushed_branch) {
+ previewHTML += ` <span class="pushed-branch">→ pushed to ${escapeHTML(commit.pushed_branch)}</span>`;
+ }
+
+ commitPreview.innerHTML = previewHTML;
+ commitBox.appendChild(commitPreview);
+
+ // Create expandable view for the full commit body, hidden by default
+ const expandedView = document.createElement("div");
+ expandedView.className = "commit-details is-hidden";
+ expandedView.innerHTML = `<pre>${escapeHTML(commit.body)}</pre>`;
+ commitBox.appendChild(expandedView);
+
+ // Toggle visibility of expanded view when clicking the preview
+ commitPreview.addEventListener("click", (event) => {
+ // If holding Ctrl/Cmd key, show diff for this commit instead
+ if (event.ctrlKey || event.metaKey) {
+ // Call the diff viewer callback with the commit hash
+ diffViewerCallback(commit.hash);
+ } else {
+ // Normal behavior - toggle expanded view
+ expandedView.classList.toggle("is-hidden");
+ }
+ });
+
+ // Add a diff button to view commit changes
+ const diffButton = document.createElement("button");
+ diffButton.className = "commit-diff-button";
+ diffButton.textContent = "View Changes";
+ diffButton.addEventListener("click", (event) => {
+ event.stopPropagation(); // Prevent triggering the parent click event
+ diffViewerCallback(commit.hash);
+ });
+ // Add the button directly to the commit box
+ commitBox.appendChild(diffButton);
+
+ commitBoxesRow.appendChild(commitBox);
+ });
+
+ commitsContainer.appendChild(commitBoxesRow);
+ return commitsContainer;
+}
diff --git a/loop/webui/src/timeline/components/collapsible.ts b/loop/webui/src/timeline/components/collapsible.ts
new file mode 100644
index 0000000..12f90ec
--- /dev/null
+++ b/loop/webui/src/timeline/components/collapsible.ts
@@ -0,0 +1,37 @@
+import { TimelineMessage } from "../types";
+
+/**
+ * Adds collapsible functionality to long content elements.
+ * This creates a toggle button that allows users to expand/collapse long text content.
+ *
+ * @param message - The timeline message containing the content
+ * @param textEl - The DOM element containing the text content
+ * @param containerEl - The container element for the text and copy button
+ * @param contentEl - The outer content element that will contain everything
+ */
+export function addCollapsibleFunctionality(
+ message: TimelineMessage,
+ textEl: HTMLElement,
+ containerEl: HTMLElement,
+ contentEl: HTMLElement
+): void {
+ // The text container is always attached; only long, non-final content
+ // additionally gets a collapse toggle.
+ contentEl.appendChild(containerEl);
+
+ // end_of_turn messages are the final output and stay fully expanded.
+ const needsToggle = message.content.length > 1000 && !message.end_of_turn;
+ if (!needsToggle) {
+ return;
+ }
+
+ // Start collapsed and let the button flip the state on each click.
+ textEl.classList.add("collapsed");
+
+ const toggle = document.createElement("button");
+ toggle.className = "collapsible";
+ toggle.textContent = "Show more...";
+ toggle.addEventListener("click", () => {
+ textEl.classList.toggle("collapsed");
+ const collapsed = textEl.classList.contains("collapsed");
+ toggle.textContent = collapsed ? "Show more..." : "Show less";
+ });
+ contentEl.appendChild(toggle);
+}
diff --git a/loop/webui/src/timeline/copybutton.ts b/loop/webui/src/timeline/copybutton.ts
new file mode 100644
index 0000000..d9b994b
--- /dev/null
+++ b/loop/webui/src/timeline/copybutton.ts
@@ -0,0 +1,44 @@
+/**
+ * Creates a copy button container with a functioning copy button
+ */
+export function createCopyButton(textToCopy: string): {
+ container: HTMLDivElement;
+ button: HTMLButtonElement;
+} {
+ // Wrapper that positions the button within a message.
+ const container = document.createElement("div");
+ container.className = "message-actions";
+
+ const button = document.createElement("button");
+ button.className = "copy-button";
+ button.textContent = "Copy";
+ button.title = "Copy text to clipboard";
+
+ // On click: copy to the clipboard, flash a success/failure label for two
+ // seconds, then restore the default "Copy" label.
+ button.addEventListener("click", async (e) => {
+ e.stopPropagation();
+ try {
+ await navigator.clipboard.writeText(textToCopy);
+ button.textContent = "Copied!";
+ } catch (err) {
+ console.error("Failed to copy text: ", err);
+ button.textContent = "Failed";
+ }
+ setTimeout(() => {
+ button.textContent = "Copy";
+ }, 2000);
+ });
+
+ container.appendChild(button);
+
+ return { container, button };
+}
diff --git a/loop/webui/src/timeline/data.ts b/loop/webui/src/timeline/data.ts
new file mode 100644
index 0000000..2130c21
--- /dev/null
+++ b/loop/webui/src/timeline/data.ts
@@ -0,0 +1,379 @@
+import { TimelineMessage } from "./types";
+import { formatNumber } from "./utils";
+
+/**
+ * Event types for data manager
+ */
+export type DataManagerEventType = 'dataChanged' | 'connectionStatusChanged';
+
+/**
+ * Connection status types
+ */
+export type ConnectionStatus = 'connected' | 'disconnected' | 'disabled';
+
+/**
+ * State interface
+ */
+export interface TimelineState {
+ hostname?: string;
+ working_dir?: string;
+ initial_commit?: string;
+ message_count?: number;
+ title?: string;
+ total_usage?: {
+ input_tokens: number;
+ output_tokens: number;
+ cache_read_input_tokens: number;
+ cache_creation_input_tokens: number;
+ total_cost_usd: number;
+ };
+}
+
+/**
+ * DataManager - Class to manage timeline data, fetching, and polling
+ */
+export class DataManager {
+ // State variables
+ private lastMessageCount: number = 0;
+ private nextFetchIndex: number = 0;
+ private currentFetchStartIndex: number = 0;
+ private currentPollController: AbortController | null = null;
+ private isFetchingMessages: boolean = false;
+ private isPollingEnabled: boolean = true;
+ private isFirstLoad: boolean = true;
+ private connectionStatus: ConnectionStatus = "disabled";
+ private messages: TimelineMessage[] = [];
+ private timelineState: TimelineState | null = null;
+
+ // Event listeners
+ private eventListeners: Map<DataManagerEventType, Array<(...args: any[]) => void>> = new Map();
+
+ constructor() {
+ // Pre-register an empty listener list for each supported event type so
+ // add/remove/emit never see a missing key.
+ this.eventListeners.set('dataChanged', []);
+ this.eventListeners.set('connectionStatusChanged', []);
+ }
+
+ /**
+ * Initialize the data manager and fetch initial data
+ */
+ public async initialize(): Promise<void> {
+ // Perform the initial fetch, then start long polling. Polling starts
+ // whether or not the first fetch succeeded, so a transient failure can
+ // recover on a later poll cycle.
+ try {
+ await this.fetchData();
+ } catch (error) {
+ console.error("Initial data fetch failed, will retry via polling", error);
+ }
+ this.startPolling();
+ }
+
+ /**
+ * Get all messages
+ */
+ public getMessages(): TimelineMessage[] {
+ return this.messages;
+ }
+
+ /**
+ * Get the current state
+ */
+ public getState(): TimelineState | null {
+ return this.timelineState;
+ }
+
+ /**
+ * Get the connection status
+ */
+ public getConnectionStatus(): ConnectionStatus {
+ return this.connectionStatus;
+ }
+
+ /**
+ * Get the isFirstLoad flag
+ */
+ public getIsFirstLoad(): boolean {
+ return this.isFirstLoad;
+ }
+
+ /**
+ * Get the currentFetchStartIndex
+ */
+ public getCurrentFetchStartIndex(): number {
+ return this.currentFetchStartIndex;
+ }
+
+ /**
+ * Add an event listener
+ */
+ public addEventListener(event: DataManagerEventType, callback: (...args: any[]) => void): void {
+ const listeners = this.eventListeners.get(event) || [];
+ listeners.push(callback);
+ this.eventListeners.set(event, listeners);
+ }
+
+ /**
+ * Remove an event listener
+ */
+ public removeEventListener(event: DataManagerEventType, callback: (...args: any[]) => void): void {
+ const listeners = this.eventListeners.get(event) || [];
+ const index = listeners.indexOf(callback);
+ if (index !== -1) {
+ listeners.splice(index, 1);
+ this.eventListeners.set(event, listeners);
+ }
+ }
+
+ /**
+ * Emit an event
+ */
+ private emitEvent(event: DataManagerEventType, ...args: any[]): void {
+ const listeners = this.eventListeners.get(event) || [];
+ listeners.forEach(callback => callback(...args));
+ }
+
+ /**
+ * Set polling enabled/disabled state
+ */
+ public setPollingEnabled(enabled: boolean): void {
+ this.isPollingEnabled = enabled;
+
+ if (enabled) {
+ this.startPolling();
+ } else {
+ this.stopPolling();
+ }
+ }
+
+ /**
+ * Start polling for updates
+ */
+ public startPolling(): void {
+ this.stopPolling(); // Stop any existing polling
+
+ // Start long polling
+ this.longPoll();
+ }
+
+ /**
+ * Stop polling for updates
+ */
+ public stopPolling(): void {
+ // Abort any ongoing long poll request
+ if (this.currentPollController) {
+ this.currentPollController.abort();
+ this.currentPollController = null;
+ }
+
+ // If polling is disabled by user, set connection status to disabled
+ if (!this.isPollingEnabled) {
+ this.updateConnectionStatus("disabled");
+ }
+ }
+
+ /**
+ * Update the connection status
+ */
+ private updateConnectionStatus(status: ConnectionStatus): void {
+ if (this.connectionStatus !== status) {
+ this.connectionStatus = status;
+ this.emitEvent('connectionStatusChanged', status);
+ }
+ }
+
+ /**
+ * Long poll for updates
+ */
+ private async longPoll(): Promise<void> {
+ // Abort any existing poll request
+ if (this.currentPollController) {
+ this.currentPollController.abort();
+ this.currentPollController = null;
+ }
+
+ // If polling is disabled, don't start a new poll
+ if (!this.isPollingEnabled) {
+ return;
+ }
+
+ let timeoutId: number | undefined;
+
+ try {
+ // Create a new abort controller for this request so stopPolling()
+ // can cancel the poll from outside.
+ this.currentPollController = new AbortController();
+ const signal = this.currentPollController.signal;
+
+ // Get the URL with the current message count
+ const pollUrl = `state?poll=true&seen=${this.lastMessageCount}`;
+
+ // A separate controller enforces an explicit 120s timeout for
+ // stalled connections.
+ const controller = new AbortController();
+ timeoutId = window.setTimeout(() => controller.abort(), 120000);
+
+ // Forward manual aborts (stopPolling) to the fetch controller.
+ // This replaces the previous Symbol.toStringTag getter hack, which
+ // only ran if something happened to read that property — so manual
+ // cancellation never actually aborted the in-flight fetch.
+ const forwardAbort = () => controller.abort();
+ if (signal.aborted) {
+ controller.abort();
+ } else {
+ signal.addEventListener("abort", forwardAbort, { once: true });
+ }
+
+ try {
+ const response = await fetch(pollUrl, { signal: controller.signal });
+ // Clear the timeout since we got a response
+ clearTimeout(timeoutId);
+
+ // Parse the JSON response
+ const _data = await response.json();
+
+ // If we got here, data has changed, so fetch the latest data
+ await this.fetchData();
+
+ // Start a new long poll (if polling is still enabled)
+ if (this.isPollingEnabled) {
+ this.longPoll();
+ }
+ } catch (error) {
+ // Handle fetch errors inside the inner try block
+ clearTimeout(timeoutId);
+ throw error; // Re-throw to be caught by the outer catch block
+ } finally {
+ // Avoid leaking the abort listener across poll cycles
+ signal.removeEventListener("abort", forwardAbort);
+ }
+ } catch (error: unknown) {
+ // Clean up timeout if we're handling an error
+ if (timeoutId) clearTimeout(timeoutId);
+
+ // Don't log or treat manual cancellations as errors
+ const isErrorWithName = (
+ err: unknown,
+ ): err is { name: string; message?: string } =>
+ typeof err === "object" && err !== null && "name" in err;
+
+ if (
+ isErrorWithName(error) &&
+ error.name === "AbortError" &&
+ this.currentPollController?.signal.aborted
+ ) {
+ console.log("Polling cancelled by user");
+ return;
+ }
+
+ // Handle different types of errors with specific messages
+ let errorMessage = "Not connected";
+
+ if (isErrorWithName(error)) {
+ if (error.name === "AbortError") {
+ // This was our timeout abort
+ errorMessage = "Connection timeout - not connected";
+ console.error("Long polling timeout");
+ } else if (error.name === "SyntaxError") {
+ // JSON parsing error
+ errorMessage = "Invalid response from server - not connected";
+ console.error("JSON parsing error:", error);
+ } else if (
+ error.name === "TypeError" &&
+ error.message?.includes("NetworkError")
+ ) {
+ // Network connectivity issues
+ errorMessage = "Network connection lost - not connected";
+ console.error("Network error during polling:", error);
+ } else {
+ // Generic error
+ console.error("Long polling error:", error);
+ }
+ }
+
+ // Disable polling on error
+ this.isPollingEnabled = false;
+
+ // Update connection status to disconnected
+ this.updateConnectionStatus("disconnected");
+
+ // Emit an event that we're disconnected with the error message
+ this.emitEvent('connectionStatusChanged', this.connectionStatus, errorMessage);
+ }
+ }
+
+ /**
+ * Fetch timeline data
+ */
+ public async fetchData(): Promise<void> {
+ // If we're already fetching messages, don't start another fetch
+ if (this.isFetchingMessages) {
+ console.log("Already fetching messages, skipping request");
+ return;
+ }
+
+ this.isFetchingMessages = true;
+
+ try {
+ // Fetch state first; fail fast on HTTP errors instead of letting
+ // .json() raise an opaque SyntaxError on an error page.
+ const stateResponse = await fetch("state");
+ if (!stateResponse.ok) {
+ throw new Error(`Failed to fetch state: ${stateResponse.status}`);
+ }
+ const state = await stateResponse.json();
+ this.timelineState = state;
+
+ // Check if new messages are available
+ if (
+ state.message_count === this.lastMessageCount &&
+ this.lastMessageCount > 0
+ ) {
+ // No new messages, early return (flag cleared before listeners run)
+ this.isFetchingMessages = false;
+ this.emitEvent('dataChanged', { state, newMessages: [] });
+ return;
+ }
+
+ // Fetch only the messages we have not seen yet
+ this.currentFetchStartIndex = this.nextFetchIndex;
+ const messagesResponse = await fetch(
+ `messages?start=${this.nextFetchIndex}`,
+ );
+ if (!messagesResponse.ok) {
+ throw new Error(`Failed to fetch messages: ${messagesResponse.status}`);
+ }
+ const newMessages = await messagesResponse.json() || [];
+
+ // Store messages in our array
+ if (this.nextFetchIndex === 0) {
+ // If this is the first fetch, replace the entire array
+ this.messages = [...newMessages];
+ } else {
+ // Otherwise append the new messages
+ this.messages = [...this.messages, ...newMessages];
+ }
+
+ // Update connection status to connected
+ this.updateConnectionStatus("connected");
+
+ // Update the last message index for next fetch
+ if (newMessages && newMessages.length > 0) {
+ this.nextFetchIndex += newMessages.length;
+ }
+
+ // Update the message count
+ this.lastMessageCount = state?.message_count ?? 0;
+
+ // Mark that we've completed first load
+ if (this.isFirstLoad) {
+ this.isFirstLoad = false;
+ }
+
+ // nextFetchIndex has already been advanced above, so equality with
+ // newMessages.length means this fetch started at index 0.
+ this.emitEvent('dataChanged', { state, newMessages, isFirstFetch: this.nextFetchIndex === newMessages.length });
+ } catch (error) {
+ console.error("Error fetching data:", error);
+
+ // Update connection status to disconnected
+ this.updateConnectionStatus("disconnected");
+
+ // Emit an event that we're disconnected
+ this.emitEvent('connectionStatusChanged', this.connectionStatus, "Not connected");
+ } finally {
+ this.isFetchingMessages = false;
+ }
+ }
+}
diff --git a/loop/webui/src/timeline/diffviewer.ts b/loop/webui/src/timeline/diffviewer.ts
new file mode 100644
index 0000000..1460dc3
--- /dev/null
+++ b/loop/webui/src/timeline/diffviewer.ts
@@ -0,0 +1,384 @@
+import * as Diff2Html from "diff2html";
+
+/**
+ * Class to handle diff and commit viewing functionality in the timeline UI.
+ */
+export class DiffViewer {
+  // Commit hash whose diff is currently displayed ("" = no specific commit;
+  // the plain "diff" endpoint is used on the next load).
+  private currentCommitHash: string = "";
+  // Diff line the user selected for commenting; null while the comment box is closed.
+  private selectedDiffLine: string | null = null;
+  // Current view mode (needed for integration with TimelineManager).
+  // NOTE(review): written via setViewMode() but never read inside this class —
+  // confirm it is still required by TimelineManager integration.
+  private viewMode: string = "chat";
+
+  /**
+   * Constructor for DiffViewer. No setup is required beyond field defaults.
+   */
+  constructor() {}
+
+  /**
+   * Sets the current view mode
+   * @param mode The current view mode
+   */
+  public setViewMode(mode: string): void {
+    this.viewMode = mode;
+  }
+
+  /**
+   * Gets the current commit hash
+   * @returns The current commit hash, or "" when no commit is selected
+   */
+  public getCurrentCommitHash(): string {
+    return this.currentCommitHash;
+  }
+
+  /**
+   * Sets the current commit hash
+   * @param hash The commit hash to set
+   */
+  public setCurrentCommitHash(hash: string): void {
+    this.currentCommitHash = hash;
+  }
+
+  /**
+   * Clears the current commit hash, so the next load requests the plain
+   * "diff" endpoint instead of a commit-scoped one.
+   */
+  public clearCurrentCommitHash(): void {
+    this.currentCommitHash = "";
+  }
+
+  /**
+   * Loads diff content and renders it using diff2html.
+   *
+   * Fetches a unified diff from the server ("diff", optionally scoped via
+   * "diff?commit=<hash>"), renders it with diff2html in the format chosen
+   * by the "diffViewFormat" radio group, flattens nested scroll containers,
+   * and wires up per-line comment buttons and format-change listeners.
+   *
+   * @param commitHash Optional commit hash to load diff for; falls back to
+   *        the stored currentCommitHash, and to the full diff when neither
+   *        is set.
+   */
+  public async loadDiff2HtmlContent(commitHash?: string): Promise<void> {
+    const diff2htmlContent = document.getElementById("diff2htmlContent");
+    const container = document.querySelector(".timeline-container");
+    if (!diff2htmlContent || !container) return;
+
+    try {
+      // Show loading state
+      diff2htmlContent.innerHTML = "Loading enhanced diff...";
+
+      // Add classes to container to allow full-width rendering
+      container.classList.add("diff2-active");
+      container.classList.add("diff-active");
+
+      // Use currentCommitHash if provided or passed from parameter
+      const hash = commitHash || this.currentCommitHash;
+
+      // Build the diff URL - include commit hash if specified
+      const diffUrl = hash ? `diff?commit=${hash}` : "diff";
+
+      // Fetch the diff from the server
+      const response = await fetch(diffUrl);
+
+      if (!response.ok) {
+        throw new Error(
+          `Server returned ${response.status}: ${response.statusText}`,
+        );
+      }
+
+      const diffText = await response.text();
+
+      // NOTE(review): this early return leaves the "diff2-active"/"diff-active"
+      // classes applied even though nothing is rendered — confirm intended.
+      if (!diffText || diffText.trim() === "") {
+        diff2htmlContent.innerHTML =
+          "<span style='color: #666; font-style: italic;'>No changes detected since conversation started.</span>";
+        return;
+      }
+
+      // Get the selected view format
+      const formatRadios = document.getElementsByName("diffViewFormat") as NodeListOf<HTMLInputElement>;
+      let outputFormat = "side-by-side"; // default
+
+      // Convert NodeListOf to Array to ensure [Symbol.iterator]() is available
+      Array.from(formatRadios).forEach(radio => {
+        if (radio.checked) {
+          outputFormat = radio.value as "side-by-side" | "line-by-line";
+        }
+      })
+
+      // Render the diff using diff2html
+      const diffHtml = Diff2Html.html(diffText, {
+        outputFormat: outputFormat as "side-by-side" | "line-by-line",
+        drawFileList: true,
+        matching: "lines",
+        // Make sure no unnecessary scrollbars in the nested containers
+        renderNothingWhenEmpty: false,
+        colorScheme: "light" as any, // Force light mode to match the rest of the UI
+      });
+
+      // Insert the generated HTML
+      diff2htmlContent.innerHTML = diffHtml;
+
+      // Add CSS styles to ensure we don't have double scrollbars
+      const d2hFiles = diff2htmlContent.querySelectorAll(".d2h-file-wrapper");
+      d2hFiles.forEach((file) => {
+        const contentElem = file.querySelector(".d2h-files-diff");
+        if (contentElem) {
+          // Remove internal scrollbar - the outer container will handle scrolling
+          (contentElem as HTMLElement).style.overflow = "visible";
+          (contentElem as HTMLElement).style.maxHeight = "none";
+        }
+      });
+
+      // Add click event handlers to each code line for commenting
+      this.setupDiff2LineComments();
+
+      // Setup event listeners for diff view format radio buttons
+      this.setupDiffViewFormatListeners();
+    } catch (error) {
+      console.error("Error loading diff2html content:", error);
+      const errorMessage =
+        error instanceof Error ? error.message : "Unknown error";
+      diff2htmlContent.innerHTML = `<span style='color: #dc3545;'>Error loading enhanced diff: ${errorMessage}</span>`;
+    }
+  }
+
+  /**
+   * Setup event listeners for diff view format radio buttons.
+   *
+   * Called after every diff render. The radio inputs live outside the
+   * re-rendered diff container, so listeners attached on earlier renders
+   * survive; a data-attribute guard prevents stacking duplicate handlers
+   * (which would otherwise trigger several redundant reloads per format
+   * change).
+   */
+  private setupDiffViewFormatListeners(): void {
+    const formatRadios = document.getElementsByName("diffViewFormat") as NodeListOf<HTMLInputElement>;
+
+    // Convert NodeListOf to Array to ensure [Symbol.iterator]() is available
+    Array.from(formatRadios).forEach(radio => {
+      // Skip radios that were already wired up on a previous render.
+      if (radio.dataset.diffFormatListenerAttached === "true") return;
+      radio.dataset.diffFormatListenerAttached = "true";
+      radio.addEventListener("change", () => {
+        // Reload the diff with the new format when radio selection changes
+        this.loadDiff2HtmlContent(this.currentCommitHash);
+      });
+    })
+  }
+
+  /**
+   * Setup handlers for diff2 code lines to enable commenting.
+   *
+   * Re-adds the per-line "+" buttons (the previous buttons are destroyed
+   * whenever the diff container's innerHTML is replaced) and attaches a
+   * delegated click handler to the container. The container element itself
+   * survives re-renders, so the delegated handler is attached exactly once,
+   * guarded by a data attribute; without the guard every reload would stack
+   * another listener and open the comment box multiple times per click.
+   */
+  private setupDiff2LineComments(): void {
+    const diff2htmlContent = document.getElementById("diff2htmlContent");
+    if (!diff2htmlContent) return;
+
+    console.log("Setting up diff2 line comments");
+
+    // Add plus buttons to each code line (must run on every render).
+    this.addCommentButtonsToCodeLines();
+
+    // Attach the delegated click handler only once per container element.
+    if (diff2htmlContent.dataset.commentHandlersAttached === "true") {
+      return;
+    }
+    diff2htmlContent.dataset.commentHandlersAttached = "true";
+
+    // Use event delegation for handling clicks on plus buttons
+    diff2htmlContent.addEventListener("click", (event) => {
+      const target = event.target as HTMLElement;
+
+      // Only respond to clicks on the plus button
+      if (target.classList.contains("d2h-gutter-comment-button")) {
+        // Find the parent row first
+        const row = target.closest("tr");
+        if (!row) return;
+
+        // Then find the code line in that row
+        const codeLine = row.querySelector(".d2h-code-side-line") || row.querySelector(".d2h-code-line");
+        if (!codeLine) return;
+
+        // Get the line text content
+        const lineContent = codeLine.querySelector(".d2h-code-line-ctn");
+        if (!lineContent) return;
+
+        const lineText = lineContent.textContent?.trim() || "";
+
+        // Get file name to add context
+        const fileHeader = codeLine
+          .closest(".d2h-file-wrapper")
+          ?.querySelector(".d2h-file-name");
+        const fileName = fileHeader
+          ? fileHeader.textContent?.trim()
+          : "Unknown file";
+
+        // Get line number if available
+        const lineNumElem = codeLine
+          .closest("tr")
+          ?.querySelector(".d2h-code-side-linenumber");
+        const lineNum = lineNumElem ? lineNumElem.textContent?.trim() : "";
+        const lineInfo = lineNum ? `Line ${lineNum}: ` : "";
+
+        // Format the line for the comment box with file context and line number
+        const formattedLine = `${fileName} ${lineInfo}${lineText}`;
+
+        console.log("Comment button clicked for line: ", formattedLine);
+
+        // Open the comment box with this line
+        this.openDiffCommentBox(formattedLine, 0);
+
+        // Prevent event from bubbling up
+        event.stopPropagation();
+      }
+    });
+
+    // (Removed dead code: an `isSelecting` flag was set by mousedown/mousemove
+    // listeners here but never read anywhere.)
+  }
+
+  /**
+   * Add a "+" comment button to every diff table row that contains code.
+   *
+   * The button is appended to the row's line-number cell, which is given
+   * `position: relative` so the button can be positioned within it.
+   */
+  private addCommentButtonsToCodeLines(): void {
+    const container = document.getElementById("diff2htmlContent");
+    if (!container) return;
+
+    // Locate all rendered code lines, then collapse them to their unique
+    // parent <tr> elements (a side-by-side row can hold several lines).
+    const renderedLines = container.querySelectorAll(
+      ".d2h-code-side-line, .d2h-code-line"
+    );
+
+    const uniqueRows = new Set<HTMLElement>();
+    Array.from(renderedLines).forEach((line) => {
+      const tr = line.closest('tr');
+      if (tr) uniqueRows.add(tr as HTMLElement);
+    });
+
+    Array.from(uniqueRows).forEach((row) => {
+      // Info-only rows (e.g. "file added" banners) get no comment button.
+      if (row.querySelector(".d2h-info")) {
+        return;
+      }
+
+      // The gutter cell anchors the button.
+      const gutterCell = row.querySelector(
+        ".d2h-code-side-linenumber, .d2h-code-linenumber"
+      );
+      if (!gutterCell) return;
+
+      const button = document.createElement("span");
+      button.className = "d2h-gutter-comment-button";
+      button.innerHTML = "+";
+      button.title = "Add a comment on this line";
+
+      // Establish a positioning context, then attach the button.
+      (gutterCell as HTMLElement).style.position = "relative";
+      gutterCell.appendChild(button);
+    });
+  }
+
+  /**
+   * Open the comment box for a selected diff line.
+   *
+   * Shows the box, displays the selected line text, clears and focuses
+   * the comment input, and (re)binds the submit/cancel buttons.
+   */
+  private openDiffCommentBox(lineText: string, _lineNumber: number): void {
+    const box = document.getElementById("diffCommentBox");
+    const lineDisplay = document.getElementById("selectedLine");
+    const input = document.getElementById(
+      "diffCommentInput",
+    ) as HTMLTextAreaElement;
+    if (!box || !lineDisplay || !input) return;
+
+    // Remember the selection and mirror it into the UI.
+    this.selectedDiffLine = lineText;
+    lineDisplay.textContent = lineText;
+
+    // Fresh comment: empty input, visible box, focused field.
+    input.value = "";
+    box.style.display = "block";
+    input.focus();
+
+    // Rebind the action buttons (onclick assignment replaces any old handler).
+    const submitBtn = document.getElementById("submitDiffComment");
+    if (submitBtn) {
+      submitBtn.onclick = () => this.submitDiffComment();
+    }
+    const cancelBtn = document.getElementById("cancelDiffComment");
+    if (cancelBtn) {
+      cancelBtn.onclick = () => this.closeDiffCommentBox();
+    }
+  }
+
+  /**
+   * Close the diff comment box without submitting, discarding the
+   * remembered line selection.
+   */
+  private closeDiffCommentBox(): void {
+    const box = document.getElementById("diffCommentBox");
+    if (box) {
+      box.style.display = "none";
+    }
+    this.selectedDiffLine = null;
+  }
+
+  /**
+   * Submit a comment on a diff line.
+   *
+   * The comment is not sent to the server here; the selected line is
+   * wrapped in a fenced code block, followed by the comment text, and the
+   * result is appended to the main chat input for the user to send.
+   */
+  private submitDiffComment(): void {
+    const commentField = document.getElementById(
+      "diffCommentInput",
+    ) as HTMLTextAreaElement;
+    const chatField = document.getElementById(
+      "chatInput",
+    ) as HTMLTextAreaElement;
+    if (!commentField || !chatField) return;
+
+    const commentText = commentField.value.trim();
+
+    // Both a selected line and non-empty comment text are required.
+    if (!this.selectedDiffLine || !commentText) {
+      alert("Please select a line and enter a comment.");
+      return;
+    }
+
+    // Fenced code block with the quoted line, then the comment body.
+    const formattedComment = `\`\`\`\n${this.selectedDiffLine}\n\`\`\`\n\n${commentText}`;
+
+    // Separate from any existing draft with a blank line.
+    if (chatField.value.trim() !== "") {
+      chatField.value += "\n\n";
+    }
+    chatField.value += formattedComment;
+    chatField.focus();
+
+    // Close only the comment box but keep the diff view open
+    this.closeDiffCommentBox();
+  }
+
+  /**
+   * Show diff for a specific commit
+   * @param commitHash The commit hash to show diff for
+   * @param toggleViewModeCallback Callback to toggle view mode to diff
+   */
+  public showCommitDiff(commitHash: string, toggleViewModeCallback: (mode: string) => void): void {
+    // Store the commit hash so the subsequent diff load is scoped to it
+    this.currentCommitHash = commitHash;
+
+    // Switch to diff2 view (side-by-side); the callback owns the actual
+    // mode change and any rendering it triggers
+    toggleViewModeCallback("diff2");
+  }
+
+  /**
+   * Clean up resources when component is destroyed
+   */
+  public dispose(): void {
+    // Clean up any resources or event listeners here
+    // Currently there are no specific resources to clean up
+  }
+}
diff --git a/loop/webui/src/timeline/icons/index.ts b/loop/webui/src/timeline/icons/index.ts
new file mode 100644
index 0000000..d9480c5
--- /dev/null
+++ b/loop/webui/src/timeline/icons/index.ts
@@ -0,0 +1,19 @@
+/**
+ * Get the icon text to display for a message type
+ * @param type - The message type
+ * @returns The single character to represent this message type
+ */
+export function getIconText(type: string | null | undefined): string {
+ switch (type) {
+ case "user":
+ return "U";
+ case "agent":
+ return "A";
+ case "tool":
+ return "T";
+ case "error":
+ return "E";
+ default:
+ return "?";
+ }
+}
diff --git a/loop/webui/src/timeline/index.ts b/loop/webui/src/timeline/index.ts
new file mode 100644
index 0000000..a3d24b7
--- /dev/null
+++ b/loop/webui/src/timeline/index.ts
@@ -0,0 +1,24 @@
+// Export types
+export * from './types';
+
+// Export utility functions
+export * from './utils';
+
+// Export terminal handler
+export * from './terminal';
+
+// Export diff viewer
+export * from './diffviewer';
+
+// Export chart manager
+export * from './charts';
+
+// Export tool call utilities
+export * from './toolcalls';
+
+// Export copy button utilities
+export * from './copybutton';
+
+// Re-export the timeline manager (will be implemented later)
+// For now, we'll maintain backward compatibility by importing from the original file
+import '../timeline';
diff --git a/loop/webui/src/timeline/markdown/renderer.ts b/loop/webui/src/timeline/markdown/renderer.ts
new file mode 100644
index 0000000..8199b69
--- /dev/null
+++ b/loop/webui/src/timeline/markdown/renderer.ts
@@ -0,0 +1,40 @@
+import { marked } from "marked";
+
+/**
+ * Renders markdown content as HTML with proper security handling.
+ *
+ * @param markdownContent - The markdown string to render
+ * @returns The rendered HTML content as a string; on parse failure the
+ *          original markdown text is returned unmodified
+ */
+export async function renderMarkdown(markdownContent: string): Promise<string> {
+  // Parser options: GitHub Flavored Markdown, <br> for single newlines,
+  // and safety tweaks (no generated header IDs, no email mangling).
+  // DOMPurify is recommended for production, but not included in this implementation
+  const markedOptions = {
+    gfm: true,
+    breaks: true,
+    headerIds: false,
+    mangle: false,
+  };
+
+  try {
+    return await marked.parse(markdownContent, markedOptions);
+  } catch (err) {
+    console.error("Error rendering markdown:", err);
+    // Degrade gracefully: hand back the raw markdown as plain text.
+    return markdownContent;
+  }
+}
+
+/**
+ * Process rendered markdown HTML element, adding security attributes to links.
+ *
+ * Every anchor is made to open in a new tab with rel="noopener noreferrer"
+ * so the opened page cannot reach back to this window.
+ *
+ * @param element - The HTML element containing rendered markdown
+ */
+export function processRenderedMarkdown(element: HTMLElement): void {
+  const anchors = element.querySelectorAll("a");
+  Array.from(anchors).forEach((anchor) => {
+    anchor.setAttribute("target", "_blank");
+    anchor.setAttribute("rel", "noopener noreferrer");
+  });
+}
diff --git a/loop/webui/src/timeline/renderer.ts b/loop/webui/src/timeline/renderer.ts
new file mode 100644
index 0000000..f2770ee
--- /dev/null
+++ b/loop/webui/src/timeline/renderer.ts
@@ -0,0 +1,729 @@
+/**
+ * MessageRenderer - Class to handle rendering of timeline messages
+ */
+
+import { TimelineMessage, ToolCall } from "./types";
+import { escapeHTML, formatNumber, generateColorFromId } from "./utils";
+import { renderMarkdown, processRenderedMarkdown } from "./markdown/renderer";
+import { createToolCallCard, updateToolCallCard } from "./toolcalls";
+import { createCommitsContainer } from "./commits";
+import { createCopyButton } from "./copybutton";
+import { getIconText } from "./icons";
+import { addCollapsibleFunctionality } from "./components/collapsible";
+import { checkShouldScroll, scrollToBottom } from "./scroll";
+
+export class MessageRenderer {
+ // Map to store references to agent message DOM elements by tool call ID
+  // Map to store references to agent message DOM elements by tool call ID,
+  // so that tool responses arriving in a later batch can update the
+  // already-rendered agent message in place instead of rendering separately.
+  private toolCallIdToMessageElement: Map<
+    string,
+    {
+      messageEl: HTMLElement;
+      toolCallContainer: HTMLElement | null;
+      toolCardId: string;
+    }
+  > = new Map();
+
+  // State tracking variables
+  // True until the first render cycle completes (cleared in scrollToBottom).
+  private isFirstLoad: boolean = true;
+  // Whether rendering should end with a scroll to the newest message.
+  // NOTE(review): presumably consulted by scrollToBottom — confirm.
+  private shouldScrollToBottom: boolean = true;
+  // Global index of the first message in the batch currently being rendered.
+  private currentFetchStartIndex: number = 0;
+
+  constructor() {}
+
+  /**
+   * Initialize the renderer with state from the timeline manager.
+   *
+   * @param isFirstLoad Whether this is the first render since page load
+   * @param currentFetchStartIndex Global index of the first message in the
+   *        batch that is about to be rendered
+   */
+  public initialize(isFirstLoad: boolean, currentFetchStartIndex: number) {
+    this.isFirstLoad = isFirstLoad;
+    this.currentFetchStartIndex = currentFetchStartIndex;
+  }
+
+  /**
+   * Renders the timeline with messages.
+   *
+   * Pipeline: (1) tool-response messages are matched to the agent message
+   * that issued the corresponding tool call — either updating an
+   * already-rendered agent message in place, or being inlined into a new
+   * agent message rendered in this batch; (2) remaining messages are
+   * rendered one DOM node each into #timeline, with subconversations
+   * indented and color-coded, compact/expanded tool displays, usage info,
+   * and commit listings; (3) the view is scrolled to the bottom if needed.
+   *
+   * @param messages The messages to render (the current fetch batch)
+   * @param clearExisting Whether to clear existing content before rendering
+   */
+  public renderTimeline(
+    messages: TimelineMessage[],
+    clearExisting: boolean = false,
+  ): void {
+    const timeline = document.getElementById("timeline");
+    if (!timeline) return;
+
+    // We'll keep the isFirstLoad value for this render cycle,
+    // but will set it to false afterwards in scrollToBottom
+
+    if (clearExisting) {
+      timeline.innerHTML = ""; // Clear existing content only if this is the first load
+      // Clear our map of tool call references
+      this.toolCallIdToMessageElement.clear();
+    }
+
+    if (!messages || messages.length === 0) {
+      if (clearExisting) {
+        timeline.innerHTML = "<p>No messages available.</p>";
+        timeline.classList.add("empty");
+      }
+      return;
+    }
+
+    // Remove empty class when there are messages
+    timeline.classList.remove("empty");
+
+    // Keep track of conversation groups to properly indent
+    interface ConversationGroup {
+      color: string;
+      level: number;
+    }
+
+    const conversationGroups: Record<string, ConversationGroup> = {};
+
+    // Use the currentFetchStartIndex as the base index for these messages
+    const startIndex = this.currentFetchStartIndex;
+    // Group tool messages with their parent agent messages
+    const organizedMessages: (TimelineMessage & {
+      toolResponses?: TimelineMessage[];
+    })[] = [];
+    const toolMessagesByCallId: Record<string, TimelineMessage> = {};
+
+    // First, process tool messages - check if any can update existing UI elements
+    const processedToolMessages = new Set<string>();
+
+    messages.forEach((message) => {
+      // If this is a tool message with a tool_call_id
+      if (message.type === "tool" && message.tool_call_id) {
+        // Try to find an existing agent message that's waiting for this tool response
+        const toolCallRef = this.toolCallIdToMessageElement.get(
+          message.tool_call_id,
+        );
+
+        if (toolCallRef) {
+          // Found an existing agent message that needs updating
+          this.updateToolCallInAgentMessage(message, toolCallRef);
+          processedToolMessages.add(message.tool_call_id);
+        } else {
+          // No existing agent message found, we'll include this in normal rendering
+          toolMessagesByCallId[message.tool_call_id] = message;
+        }
+      }
+    });
+
+    // Then, process messages and organize them
+    messages.forEach((message, localIndex) => {
+      const _index = startIndex + localIndex;
+      if (!message) return; // Skip if message is null/undefined
+
+      // If it's a tool message and we're going to inline it with its parent agent message,
+      // we'll skip rendering it here - it will be included with the agent message
+      if (message.type === "tool" && message.tool_call_id) {
+        // Skip if we've already processed this tool message (updated an existing agent message)
+        if (processedToolMessages.has(message.tool_call_id)) {
+          return;
+        }
+
+        // Skip if this tool message will be included with a new agent message
+        if (toolMessagesByCallId[message.tool_call_id]) {
+          return;
+        }
+      }
+
+      // For agent messages with tool calls, attach their tool responses
+      if (
+        message.type === "agent" &&
+        message.tool_calls &&
+        message.tool_calls.length > 0
+      ) {
+        const toolResponses: TimelineMessage[] = [];
+
+        // Look up tool responses for each tool call
+        message.tool_calls.forEach((toolCall) => {
+          if (
+            toolCall.tool_call_id &&
+            toolMessagesByCallId[toolCall.tool_call_id]
+          ) {
+            toolResponses.push(toolMessagesByCallId[toolCall.tool_call_id]);
+          }
+        });
+
+        if (toolResponses.length > 0) {
+          message = { ...message, toolResponses };
+        }
+      }
+
+      organizedMessages.push(message);
+    });
+
+    // NOTE(review): `messages` holds only the current batch, but `startIndex`
+    // is a global index — for appended batches (startIndex > 0) this likely
+    // reads out of range and leaves lastMessage undefined rather than the
+    // previous batch's final message. Confirm whether this should consult the
+    // full message list held by the timeline manager instead.
+    let lastMessage:TimelineMessage|undefined;
+    if (messages && messages.length > 0 && startIndex > 0) {
+      lastMessage = messages[startIndex-1];
+    }
+
+    // Loop through organized messages and create timeline items
+    organizedMessages.forEach((message, localIndex) => {
+      const _index = startIndex + localIndex;
+      if (!message) return; // Skip if message is null/undefined
+
+      if (localIndex > 0) {
+        lastMessage = organizedMessages.at(localIndex-1);
+      }
+      // Determine if this is a subconversation
+      const hasParent = !!message.parent_conversation_id;
+      const conversationId = message.conversation_id || "";
+      const _parentId = message.parent_conversation_id || "";
+
+      // Track the conversation group
+      if (conversationId && !conversationGroups[conversationId]) {
+        conversationGroups[conversationId] = {
+          color: generateColorFromId(conversationId),
+          level: hasParent ? 1 : 0, // Level 0 for main conversation, 1+ for nested
+        };
+      }
+
+      // Get the level and color for this message
+      const group = conversationGroups[conversationId] || {
+        level: 0,
+        color: "#888888",
+      };
+
+      const messageEl = document.createElement("div");
+      messageEl.className = `message ${message.type || "unknown"} ${message.end_of_turn ? "end-of-turn" : ""}`;
+
+      // Add indentation class for subconversations
+      if (hasParent) {
+        messageEl.classList.add("subconversation");
+        messageEl.style.marginLeft = `${group.level * 40}px`;
+
+        // Add a colored left border to indicate the subconversation
+        messageEl.style.borderLeft = `4px solid ${group.color}`;
+      }
+
+      // newMsgType indicates when to create a new icon and message
+      // type header. This is a primitive form of message coalescing,
+      // but it does reduce the amount of redundant information in
+      // the UI.
+      const newMsgType = !lastMessage ||
+        (message.type == 'user' && lastMessage.type != 'user') ||
+        (message.type != 'user' && lastMessage.type == 'user');
+
+      if (newMsgType) {
+        // Create message icon
+        const iconEl = document.createElement("div");
+        iconEl.className = "message-icon";
+        iconEl.textContent = getIconText(message.type);
+        messageEl.appendChild(iconEl);
+      }
+
+      // Create message content container
+      const contentEl = document.createElement("div");
+      contentEl.className = "message-content";
+
+      // Create message header
+      const headerEl = document.createElement("div");
+      headerEl.className = "message-header";
+
+      if (newMsgType) {
+        const typeEl = document.createElement("span");
+        typeEl.className = "message-type";
+        typeEl.textContent = this.getTypeName(message.type);
+        headerEl.appendChild(typeEl);
+      }
+
+      // Add timestamp and usage info combined for agent messages at the top
+      if (message.timestamp) {
+        const timestampEl = document.createElement("span");
+        timestampEl.className = "message-timestamp";
+        timestampEl.textContent = this.formatTimestamp(message.timestamp);
+
+        // Add elapsed time if available (elapsed is in nanoseconds)
+        if (message.elapsed) {
+          timestampEl.textContent += ` (${(message.elapsed / 1e9).toFixed(2)}s)`;
+        }
+
+        // Add turn duration for end-of-turn messages
+        if (message.turnDuration && message.end_of_turn) {
+          timestampEl.textContent += ` [Turn: ${(message.turnDuration / 1e9).toFixed(2)}s]`;
+        }
+
+        // Add usage info inline for agent messages
+        if (
+          message.type === "agent" &&
+          message.usage &&
+          (message.usage.input_tokens > 0 ||
+            message.usage.output_tokens > 0 ||
+            message.usage.cost_usd > 0)
+        ) {
+          try {
+            // Safe get all values
+            const inputTokens = formatNumber(
+              message.usage.input_tokens ?? 0,
+            );
+            const cacheInput = message.usage.cache_read_input_tokens ?? 0;
+            const outputTokens = formatNumber(
+              message.usage.output_tokens ?? 0,
+            );
+            const messageCost = this.formatCurrency(
+              message.usage.cost_usd ?? 0,
+              "$0.0000", // Default format for message costs
+              true, // Use 4 decimal places for message-level costs
+            );
+
+            timestampEl.textContent += ` | In: ${inputTokens}`;
+            if (cacheInput > 0) {
+              timestampEl.textContent += ` [Cache: ${formatNumber(cacheInput)}]`;
+            }
+            timestampEl.textContent += ` Out: ${outputTokens} (${messageCost})`;
+          } catch (e) {
+            console.error("Error adding usage info to timestamp:", e);
+          }
+        }
+
+        headerEl.appendChild(timestampEl);
+      }
+
+      contentEl.appendChild(headerEl);
+
+      // Add message content
+      if (message.content) {
+        const containerEl = document.createElement("div");
+        containerEl.className = "message-text-container";
+
+        const textEl = document.createElement("div");
+        textEl.className = "message-text markdown-content";
+
+        // Render markdown content
+        // Handle the Promise returned by renderMarkdown
+        renderMarkdown(message.content).then(html => {
+          textEl.innerHTML = html;
+          processRenderedMarkdown(textEl);
+        });
+
+        // Add copy button
+        const { container: copyButtonContainer, button: copyButton } = createCopyButton(message.content);
+        containerEl.appendChild(copyButtonContainer);
+        containerEl.appendChild(textEl);
+
+        // Add collapse/expand for long content
+        addCollapsibleFunctionality(message, textEl, containerEl, contentEl);
+      }
+
+      // If the message has tool calls, show them in an ultra-compact row of boxes
+      if (message.tool_calls && message.tool_calls.length > 0) {
+        const toolCallsContainer = document.createElement("div");
+        toolCallsContainer.className = "tool-calls-container";
+
+        // Create a header row with tool count
+        const toolCallsHeaderRow = document.createElement("div");
+        toolCallsHeaderRow.className = "tool-calls-header";
+        // No header text - empty header
+        toolCallsContainer.appendChild(toolCallsHeaderRow);
+
+        // Create a container for the tool call cards
+        const toolCallsCardContainer = document.createElement("div");
+        toolCallsCardContainer.className = "tool-call-cards-container";
+
+        // Add each tool call as a card with response or spinner
+        message.tool_calls.forEach((toolCall: ToolCall, _index: number) => {
+          // Create a unique ID for this tool card
+          const toolCardId = `tool-card-${toolCall.tool_call_id || Math.random().toString(36).substring(2, 11)}`;
+
+          // Find the matching tool response if it exists
+          const toolResponse = message.toolResponses?.find(
+            (resp) => resp.tool_call_id === toolCall.tool_call_id,
+          );
+
+          // Use the extracted utility function to create the tool card
+          const toolCard = createToolCallCard(toolCall, toolResponse, toolCardId);
+
+          // Store reference to this element if it has a tool_call_id
+          if (toolCall.tool_call_id) {
+            this.toolCallIdToMessageElement.set(toolCall.tool_call_id, {
+              messageEl,
+              toolCallContainer: toolCallsCardContainer,
+              toolCardId,
+            });
+          }
+
+          // Add the card to the container
+          toolCallsCardContainer.appendChild(toolCard);
+        });
+
+        toolCallsContainer.appendChild(toolCallsCardContainer);
+        contentEl.appendChild(toolCallsContainer);
+      }
+      // If message is a commit message, display commits
+      if (
+        message.type === "commit" &&
+        message.commits &&
+        message.commits.length > 0
+      ) {
+        // Use the extracted utility function to create the commits container
+        const commitsContainer = createCommitsContainer(
+          message.commits,
+          (commitHash) => {
+            // This will need to be handled by the TimelineManager
+            const event = new CustomEvent('showCommitDiff', {
+              detail: { commitHash }
+            });
+            document.dispatchEvent(event);
+          }
+        );
+        contentEl.appendChild(commitsContainer);
+      }
+
+      // Tool messages are now handled inline with agent messages
+      // If we still see a tool message here, it means it's not associated with an agent message
+      // (this could be legacy data or a special case)
+      if (message.type === "tool") {
+        const toolDetailsEl = document.createElement("div");
+        toolDetailsEl.className = "tool-details standalone";
+
+        // Get tool input and result for display
+        let inputText = "";
+        try {
+          if (message.input) {
+            const parsedInput = JSON.parse(message.input);
+            // Format input compactly for simple inputs
+            inputText = JSON.stringify(parsedInput);
+          }
+        } catch (e) {
+          // Not valid JSON, use as-is
+          inputText = message.input || "";
+        }
+
+        const resultText = message.tool_result || "";
+        const statusEmoji = message.tool_error ? "ā" : "ā
";
+        const toolName = message.tool_name || "Unknown";
+
+        // Determine if we can use super compact display (e.g., for bash command results)
+        // Use compact display for short inputs/outputs without newlines
+        const isSimpleCommand =
+          toolName === "bash" &&
+          inputText.length < 50 &&
+          resultText.length < 200 &&
+          !resultText.includes("\n");
+        const isCompact =
+          inputText.length < 50 &&
+          resultText.length < 100 &&
+          !resultText.includes("\n");
+
+        if (isSimpleCommand) {
+          // SUPER COMPACT VIEW FOR BASH: Display everything on a single line
+          const toolLineEl = document.createElement("div");
+          toolLineEl.className = "tool-compact-line";
+
+          // Create the compact bash display in format: "ā
bash({command}) → result"
+          try {
+            const parsed = JSON.parse(inputText);
+            const cmd = parsed.command || "";
+            toolLineEl.innerHTML = `${statusEmoji} <strong>${toolName}</strong>({"command":"${cmd}"}) → <span class="tool-result-inline">${resultText}</span>`;
+          } catch {
+            toolLineEl.innerHTML = `${statusEmoji} <strong>${toolName}</strong>(${inputText}) → <span class="tool-result-inline">${resultText}</span>`;
+          }
+
+          // Add copy button for result
+          const copyBtn = document.createElement("button");
+          copyBtn.className = "copy-inline-button";
+          copyBtn.textContent = "Copy";
+          copyBtn.title = "Copy result to clipboard";
+
+          copyBtn.addEventListener("click", (e) => {
+            e.stopPropagation();
+            navigator.clipboard
+              .writeText(resultText)
+              .then(() => {
+                copyBtn.textContent = "Copied!";
+                setTimeout(() => {
+                  copyBtn.textContent = "Copy";
+                }, 2000);
+              })
+              .catch((_err) => {
+                copyBtn.textContent = "Failed";
+                setTimeout(() => {
+                  copyBtn.textContent = "Copy";
+                }, 2000);
+              });
+          });
+
+          toolLineEl.appendChild(copyBtn);
+          toolDetailsEl.appendChild(toolLineEl);
+        } else if (isCompact && !isSimpleCommand) {
+          // COMPACT VIEW: Display everything on one or two lines for other tool types
+          const toolLineEl = document.createElement("div");
+          toolLineEl.className = "tool-compact-line";
+
+          // Create the compact display in format: "ā
tool_name(input) → result"
+          let compactDisplay = `${statusEmoji} <strong>${toolName}</strong>(${inputText})`;
+
+          if (resultText) {
+            compactDisplay += ` → <span class="tool-result-inline">${resultText}</span>`;
+          }
+
+          toolLineEl.innerHTML = compactDisplay;
+
+          // Add copy button for result
+          const copyBtn = document.createElement("button");
+          copyBtn.className = "copy-inline-button";
+          copyBtn.textContent = "Copy";
+          copyBtn.title = "Copy result to clipboard";
+
+          copyBtn.addEventListener("click", (e) => {
+            e.stopPropagation();
+            navigator.clipboard
+              .writeText(resultText)
+              .then(() => {
+                copyBtn.textContent = "Copied!";
+                setTimeout(() => {
+                  copyBtn.textContent = "Copy";
+                }, 2000);
+              })
+              .catch((_err) => {
+                copyBtn.textContent = "Failed";
+                setTimeout(() => {
+                  copyBtn.textContent = "Copy";
+                }, 2000);
+              });
+          });
+
+          toolLineEl.appendChild(copyBtn);
+          toolDetailsEl.appendChild(toolLineEl);
+        } else {
+          // EXPANDED VIEW: For longer inputs/results that need more space
+          // Tool name header
+          const toolNameEl = document.createElement("div");
+          toolNameEl.className = "tool-name";
+          toolNameEl.innerHTML = `${statusEmoji} <strong>${toolName}</strong>`;
+          toolDetailsEl.appendChild(toolNameEl);
+
+          // Show input (simplified)
+          if (message.input) {
+            const inputContainer = document.createElement("div");
+            inputContainer.className = "tool-input-container compact";
+
+            const inputEl = document.createElement("pre");
+            inputEl.className = "tool-input compact";
+            inputEl.textContent = inputText;
+            inputContainer.appendChild(inputEl);
+            toolDetailsEl.appendChild(inputContainer);
+          }
+
+          // Show result (simplified)
+          if (resultText) {
+            const resultContainer = document.createElement("div");
+            resultContainer.className = "tool-result-container compact";
+
+            const resultEl = document.createElement("pre");
+            resultEl.className = "tool-result compact";
+            resultEl.textContent = resultText;
+            resultContainer.appendChild(resultEl);
+
+            // Add collapse/expand for longer results
+            if (resultText.length > 100) {
+              resultEl.classList.add("collapsed");
+
+              const toggleButton = document.createElement("button");
+              toggleButton.className = "collapsible";
+              toggleButton.textContent = "Show more...";
+              toggleButton.addEventListener("click", () => {
+                resultEl.classList.toggle("collapsed");
+                toggleButton.textContent = resultEl.classList.contains(
+                  "collapsed",
+                )
+                  ? "Show more..."
+                  : "Show less";
+              });
+
+              toolDetailsEl.appendChild(resultContainer);
+              toolDetailsEl.appendChild(toggleButton);
+            } else {
+              toolDetailsEl.appendChild(resultContainer);
+            }
+          }
+        }
+
+        contentEl.appendChild(toolDetailsEl);
+      }
+
+      // Add usage info if available with robust null handling - only for non-agent messages
+      if (
+        message.type !== "agent" && // Skip for agent messages as we've already added usage info at the top
+        message.usage &&
+        (message.usage.input_tokens > 0 ||
+          message.usage.output_tokens > 0 ||
+          message.usage.cost_usd > 0)
+      ) {
+        try {
+          const usageEl = document.createElement("div");
+          usageEl.className = "usage-info";
+
+          // Safe get all values
+          const inputTokens = formatNumber(
+            message.usage.input_tokens ?? 0,
+          );
+          const cacheInput = message.usage.cache_read_input_tokens ?? 0;
+          const outputTokens = formatNumber(
+            message.usage.output_tokens ?? 0,
+          );
+          const messageCost = this.formatCurrency(
+            message.usage.cost_usd ?? 0,
+            "$0.0000", // Default format for message costs
+            true, // Use 4 decimal places for message-level costs
+          );
+
+          // Create usage info display
+          usageEl.innerHTML = `
+            <span title="Input tokens">In: ${inputTokens}</span>
+            ${cacheInput > 0 ? `<span title="Cache tokens">[Cache: ${formatNumber(cacheInput)}]</span>` : ""}
+            <span title="Output tokens">Out: ${outputTokens}</span>
+            <span title="Message cost">(${messageCost})</span>
+          `;
+
+          contentEl.appendChild(usageEl);
+        } catch (e) {
+          console.error("Error rendering usage info:", e);
+        }
+      }
+
+      messageEl.appendChild(contentEl);
+      timeline.appendChild(messageEl);
+    });
+
+    // Scroll to bottom of the timeline if needed
+    this.scrollToBottom();
+  }
+
  /**
   * Check if we should scroll to the bottom.
   * Delegates to the shared checkShouldScroll helper: always true on
   * first load, otherwise only when the viewport is already near the
   * bottom of the page.
   */
  private checkShouldScroll(): boolean {
    return checkShouldScroll(this.isFirstLoad);
  }
+
  /**
   * Scroll to the bottom of the timeline (if this.shouldScrollToBottom
   * was set when the render began), then mark the first load complete
   * so later renders only auto-scroll when the user is near the bottom.
   */
  private scrollToBottom(): void {
    scrollToBottom(this.shouldScrollToBottom);

    // After first load, we'll only auto-scroll if user is already near the bottom
    this.isFirstLoad = false;
  }
+
+ /**
+ * Get readable name for message type
+ */
+ private getTypeName(type: string | null | undefined): string {
+ switch (type) {
+ case "user":
+ return "User";
+ case "agent":
+ return "Agent";
+ case "tool":
+ return "Tool Use";
+ case "error":
+ return "Error";
+ default:
+ return (
+ (type || "Unknown").charAt(0).toUpperCase() +
+ (type || "unknown").slice(1)
+ );
+ }
+ }
+
+ /**
+ * Format timestamp for display
+ */
+ private formatTimestamp(
+ timestamp: string | number | Date | null | undefined,
+ defaultValue: string = "",
+ ): string {
+ if (!timestamp) return defaultValue;
+ try {
+ const date = new Date(timestamp);
+ if (isNaN(date.getTime())) return defaultValue;
+
+ // Format: Mar 13, 2025 09:53:25 AM
+ return date.toLocaleString("en-US", {
+ month: "short",
+ day: "numeric",
+ year: "numeric",
+ hour: "numeric",
+ minute: "2-digit",
+ second: "2-digit",
+ hour12: true,
+ });
+ } catch (e) {
+ return defaultValue;
+ }
+ }
+
+ /**
+ * Format currency values
+ */
+ private formatCurrency(
+ num: number | string | null | undefined,
+ defaultValue: string = "$0.00",
+ isMessageLevel: boolean = false,
+ ): string {
+ if (num === undefined || num === null) return defaultValue;
+ try {
+ // Use 4 decimal places for message-level costs, 2 for totals
+ const decimalPlaces = isMessageLevel ? 4 : 2;
+ return `$${parseFloat(String(num)).toFixed(decimalPlaces)}`;
+ } catch (e) {
+ return defaultValue;
+ }
+ }
+
  /**
   * Update a tool call in an agent message with the response.
   * Locates the tool card inside the agent's message element by its id
   * and re-renders it with the response data. Silently does nothing if
   * the card is no longer in the DOM.
   */
  private updateToolCallInAgentMessage(
    toolMessage: TimelineMessage,
    toolCallRef: {
      messageEl: HTMLElement;
      toolCallContainer: HTMLElement | null;
      toolCardId: string;
    },
  ): void {
    const { messageEl, toolCardId } = toolCallRef;

    // Find the tool card element.
    // NOTE(review): assumes toolCardId is a valid CSS id selector; ids
    // are generated as "tool-card-..." today, so this holds — confirm if
    // the id scheme changes.
    const toolCard = messageEl.querySelector(`#${toolCardId}`) as HTMLElement;
    if (!toolCard) return;

    // Use the extracted utility function to update the tool card
    updateToolCallCard(toolCard, toolMessage);
  }
+
  /**
   * Get the tool call id to message element map.
   * Used by the TimelineManager to look up where a tool response should
   * be rendered. The returned map is the live internal map, not a copy.
   */
  public getToolCallIdToMessageElement(): Map<
    string,
    {
      messageEl: HTMLElement;
      toolCallContainer: HTMLElement | null;
      toolCardId: string;
    }
  > {
    return this.toolCallIdToMessageElement;
  }
+
  /**
   * Set the tool call id to message element map.
   * Used by the TimelineManager to replace the map wholesale (e.g. when
   * the timeline is re-rendered). The map is stored by reference.
   */
  public setToolCallIdToMessageElement(
    map: Map<
      string,
      {
        messageEl: HTMLElement;
        toolCallContainer: HTMLElement | null;
        toolCardId: string;
      }
    >
  ): void {
    this.toolCallIdToMessageElement = map;
  }
+}
diff --git a/loop/webui/src/timeline/scroll.ts b/loop/webui/src/timeline/scroll.ts
new file mode 100644
index 0000000..df3b8f9
--- /dev/null
+++ b/loop/webui/src/timeline/scroll.ts
@@ -0,0 +1,40 @@
+/**
+ * Check if the page should scroll to the bottom based on current view position
+ * @param isFirstLoad If this is the first load of the timeline
+ * @returns Boolean indicating if we should scroll to the bottom
+ */
+export function checkShouldScroll(isFirstLoad: boolean): boolean {
+ // Always scroll on first load
+ if (isFirstLoad) {
+ return true;
+ }
+
+ // Check if user is already near the bottom of the page
+ // Account for the fixed top bar and chat bar
+ return (
+ window.innerHeight + window.scrollY >= document.body.offsetHeight - 200
+ );
+}
+
+/**
+ * Scroll to the bottom of the timeline if shouldScrollToBottom is true
+ * @param shouldScrollToBottom Flag indicating if we should scroll
+ */
+export function scrollToBottom(shouldScrollToBottom: boolean): void {
+ // Find the timeline container
+ const timeline = document.getElementById("timeline");
+
+ // Scroll the window to the bottom based on our pre-determined value
+ if (timeline && shouldScrollToBottom) {
+ // Get the last message or element in the timeline
+ const lastElement = timeline.lastElementChild;
+
+ if (lastElement) {
+ // Scroll to the bottom of the page
+ window.scrollTo({
+ top: document.body.scrollHeight,
+ behavior: "smooth",
+ });
+ }
+ }
+}
diff --git a/loop/webui/src/timeline/terminal.ts b/loop/webui/src/timeline/terminal.ts
new file mode 100644
index 0000000..fbe9a7d
--- /dev/null
+++ b/loop/webui/src/timeline/terminal.ts
@@ -0,0 +1,269 @@
+import { Terminal } from "@xterm/xterm";
+import { FitAddon } from "@xterm/addon-fit";
+
+/**
+ * Class to handle terminal functionality in the timeline UI.
+ */
+export class TerminalHandler {
+ // Terminal instance
+ private terminal: Terminal | null = null;
+ // Terminal fit addon for handling resize
+ private fitAddon: FitAddon | null = null;
+ // Terminal EventSource for SSE
+ private terminalEventSource: EventSource | null = null;
+ // Terminal ID (always 1 for now, will support 1-9 later)
+ private terminalId: string = "1";
+ // Queue for serializing terminal inputs
+ private terminalInputQueue: string[] = [];
+ // Flag to track if we're currently processing a terminal input
+ private processingTerminalInput: boolean = false;
+ // Current view mode (needed for resize handling)
+ private viewMode: string = "chat";
+
+ /**
+ * Constructor for TerminalHandler
+ */
+ constructor() {}
+
+ /**
+ * Sets the current view mode
+ * @param mode The current view mode
+ */
+ public setViewMode(mode: string): void {
+ this.viewMode = mode;
+ }
+
+ /**
+ * Initialize the terminal component
+ * @param terminalContainer The DOM element to contain the terminal
+ */
+ public async initializeTerminal(): Promise<void> {
+ const terminalContainer = document.getElementById("terminalContainer");
+
+ if (!terminalContainer) {
+ console.error("Terminal container not found");
+ return;
+ }
+
+ // If terminal is already initialized, just focus it
+ if (this.terminal) {
+ this.terminal.focus();
+ if (this.fitAddon) {
+ this.fitAddon.fit();
+ }
+ return;
+ }
+
+ // Clear the terminal container
+ terminalContainer.innerHTML = "";
+
+ // Create new terminal instance
+ this.terminal = new Terminal({
+ cursorBlink: true,
+ theme: {
+ background: "#f5f5f5",
+ foreground: "#333333",
+ cursor: "#0078d7",
+ selectionBackground: "rgba(0, 120, 215, 0.4)",
+ },
+ });
+
+ // Add fit addon to handle terminal resizing
+ this.fitAddon = new FitAddon();
+ this.terminal.loadAddon(this.fitAddon);
+
+ // Open the terminal in the container
+ this.terminal.open(terminalContainer);
+
+ // Connect to WebSocket
+ await this.connectTerminal();
+
+ // Fit the terminal to the container
+ this.fitAddon.fit();
+
+ // Setup resize handler
+ window.addEventListener("resize", () => {
+ if (this.viewMode === "terminal" && this.fitAddon) {
+ this.fitAddon.fit();
+ // Send resize information to server
+ this.sendTerminalResize();
+ }
+ });
+
+ // Focus the terminal
+ this.terminal.focus();
+ }
+
+ /**
+ * Connect to terminal events stream
+ */
+ private async connectTerminal(): Promise<void> {
+ if (!this.terminal) {
+ return;
+ }
+
+ // Close existing connections if any
+ this.closeTerminalConnections();
+
+ try {
+ // Connect directly to the SSE endpoint for terminal 1
+ // Use relative URL based on current location
+ const baseUrl = window.location.pathname.endsWith('/') ? '.' : '.';
+ const eventsUrl = `${baseUrl}/terminal/events/${this.terminalId}`;
+ this.terminalEventSource = new EventSource(eventsUrl);
+
+ // Handle SSE events
+ this.terminalEventSource.onopen = () => {
+ console.log("Terminal SSE connection opened");
+ this.sendTerminalResize();
+ };
+
+ this.terminalEventSource.onmessage = (event) => {
+ if (this.terminal) {
+ // Decode base64 data before writing to terminal
+ try {
+ const decoded = atob(event.data);
+ this.terminal.write(decoded);
+ } catch (e) {
+ console.error('Error decoding terminal data:', e);
+ // Fallback to raw data if decoding fails
+ this.terminal.write(event.data);
+ }
+ }
+ };
+
+ this.terminalEventSource.onerror = (error) => {
+ console.error("Terminal SSE error:", error);
+ if (this.terminal) {
+ this.terminal.write("\r\n\x1b[1;31mConnection error\x1b[0m\r\n");
+ }
+ // Attempt to reconnect if the connection was lost
+ if (this.terminalEventSource?.readyState === EventSource.CLOSED) {
+ this.closeTerminalConnections();
+ }
+ };
+
+ // Send key inputs to the server via POST requests
+ if (this.terminal) {
+ this.terminal.onData((data) => {
+ this.sendTerminalInput(data);
+ });
+ }
+ } catch (error) {
+ console.error("Failed to connect to terminal:", error);
+ if (this.terminal) {
+ this.terminal.write(`\r\n\x1b[1;31mFailed to connect: ${error}\x1b[0m\r\n`);
+ }
+ }
+ }
+
+ /**
+ * Close any active terminal connections
+ */
+ private closeTerminalConnections(): void {
+ if (this.terminalEventSource) {
+ this.terminalEventSource.close();
+ this.terminalEventSource = null;
+ }
+ }
+
+ /**
+ * Send input to the terminal
+ * @param data The input data to send
+ */
+ private async sendTerminalInput(data: string): Promise<void> {
+ // Add the data to the queue
+ this.terminalInputQueue.push(data);
+
+ // If we're not already processing inputs, start processing
+ if (!this.processingTerminalInput) {
+ await this.processTerminalInputQueue();
+ }
+ }
+
+ /**
+ * Process the terminal input queue in order
+ */
+ private async processTerminalInputQueue(): Promise<void> {
+ if (this.terminalInputQueue.length === 0) {
+ this.processingTerminalInput = false;
+ return;
+ }
+
+ this.processingTerminalInput = true;
+
+ // Concatenate all available inputs from the queue into a single request
+ let combinedData = '';
+
+ // Take all currently available items from the queue
+ while (this.terminalInputQueue.length > 0) {
+ combinedData += this.terminalInputQueue.shift()!;
+ }
+
+ try {
+ // Use relative URL based on current location
+ const baseUrl = window.location.pathname.endsWith('/') ? '.' : '.';
+ const response = await fetch(`${baseUrl}/terminal/input/${this.terminalId}`, {
+ method: 'POST',
+ body: combinedData,
+ headers: {
+ 'Content-Type': 'text/plain'
+ }
+ });
+
+ if (!response.ok) {
+ console.error(`Failed to send terminal input: ${response.status} ${response.statusText}`);
+ }
+ } catch (error) {
+ console.error("Error sending terminal input:", error);
+ }
+
+ // Continue processing the queue (for any new items that may have been added)
+ await this.processTerminalInputQueue();
+ }
+
+ /**
+ * Send terminal resize information to the server
+ */
+ private async sendTerminalResize(): Promise<void> {
+ if (!this.terminal || !this.fitAddon) {
+ return;
+ }
+
+ // Get terminal dimensions
+ try {
+ // Send resize message in a format the server can understand
+ // Use relative URL based on current location
+ const baseUrl = window.location.pathname.endsWith('/') ? '.' : '.';
+ const response = await fetch(`${baseUrl}/terminal/input/${this.terminalId}`, {
+ method: 'POST',
+ body: JSON.stringify({
+ type: "resize",
+ cols: this.terminal.cols || 80, // Default to 80 if undefined
+ rows: this.terminal.rows || 24, // Default to 24 if undefined
+ }),
+ headers: {
+ 'Content-Type': 'application/json'
+ }
+ });
+
+ if (!response.ok) {
+ console.error(`Failed to send terminal resize: ${response.status} ${response.statusText}`);
+ }
+ } catch (error) {
+ console.error("Error sending terminal resize:", error);
+ }
+ }
+
+ /**
+ * Clean up resources when component is destroyed
+ */
+ public dispose(): void {
+ this.closeTerminalConnections();
+ if (this.terminal) {
+ this.terminal.dispose();
+ this.terminal = null;
+ }
+ this.fitAddon = null;
+ }
+}
diff --git a/loop/webui/src/timeline/toolcalls.ts b/loop/webui/src/timeline/toolcalls.ts
new file mode 100644
index 0000000..5df88bd
--- /dev/null
+++ b/loop/webui/src/timeline/toolcalls.ts
@@ -0,0 +1,259 @@
+/**
+ * Utility functions for rendering tool calls in the timeline
+ */
+
+import { ToolCall, TimelineMessage } from "./types";
+import { html, render } from "lit-html";
+
+/**
+ * Create a tool call card element for display in the timeline
+ * @param toolCall The tool call data to render
+ * @param toolResponse Optional tool response message if available
+ * @param toolCardId Unique ID for this tool card
+ * @returns The created tool card element
+ */
+export function createToolCallCard(
+ toolCall: ToolCall,
+ toolResponse?: TimelineMessage | null,
+ toolCardId?: string
+): HTMLElement {
+ // Create a unique ID for this tool card if not provided
+ const cardId =
+ toolCardId ||
+ `tool-card-${
+ toolCall.tool_call_id || Math.random().toString(36).substring(2, 11)
+ }`;
+
+ // Get input as compact string
+ let inputText = "";
+ try {
+ if (toolCall.input) {
+ const parsedInput = JSON.parse(toolCall.input);
+
+ // For bash commands, use a special format
+ if (toolCall.name === "bash" && parsedInput.command) {
+ inputText = parsedInput.command;
+ } else {
+ // For other tools, use the stringified JSON
+ inputText = JSON.stringify(parsedInput);
+ }
+ }
+ } catch (e) {
+ // Not valid JSON, use as-is
+ inputText = toolCall.input || "";
+ }
+
+ // Truncate input text for display
+ const displayInput =
+ inputText.length > 80 ? inputText.substring(0, 78) + "..." : inputText;
+
+ // Truncate for compact display
+ const shortInput =
+ displayInput.length > 30
+ ? displayInput.substring(0, 28) + "..."
+ : displayInput;
+
+ // Format input for expanded view
+ let formattedInput = displayInput;
+ try {
+ const parsedInput = JSON.parse(toolCall.input || "");
+ formattedInput = JSON.stringify(parsedInput, null, 2);
+ } catch (e) {
+ // Not valid JSON, use display input as-is
+ }
+
+ // Truncate result for compact display if available
+ let shortResult = "";
+ if (toolResponse && toolResponse.tool_result) {
+ shortResult =
+ toolResponse.tool_result.length > 40
+ ? toolResponse.tool_result.substring(0, 38) + "..."
+ : toolResponse.tool_result;
+ }
+
+ // State for collapsed/expanded view
+ let isCollapsed = true;
+
+ // Handler to copy text to clipboard
+ const copyToClipboard = (text: string, button: HTMLElement) => {
+ navigator.clipboard
+ .writeText(text)
+ .then(() => {
+ button.textContent = "Copied!";
+ setTimeout(() => {
+ button.textContent = "Copy";
+ }, 2000);
+ })
+ .catch((err) => {
+ console.error("Failed to copy text:", err);
+ button.textContent = "Failed";
+ setTimeout(() => {
+ button.textContent = "Copy";
+ }, 2000);
+ });
+ };
+
+ const cancelToolCall = async(tool_call_id: string, button: HTMLButtonElement) => {
+ console.log('cancelToolCall', tool_call_id, button);
+ button.innerText = 'Cancelling';
+ button.disabled = true;
+ try {
+ const response = await fetch("cancel", {
+ method: "POST",
+ headers: {
+ "Content-Type": "application/json",
+ },
+ body: JSON.stringify({tool_call_id: tool_call_id, reason: "user requested cancellation" }),
+ });
+ console.log('cancel', tool_call_id, response);
+ button.parentElement.removeChild(button);
+ } catch (e) {
+ console.error('cancel', tool_call_id,e);
+ }
+ };
+
+ // Create the container element
+ const container = document.createElement("div");
+ container.id = cardId;
+ container.className = "tool-call-card collapsed";
+
+ // Function to render the component
+ const renderComponent = () => {
+ const template = html`
+ <div
+ class="tool-call-compact-view"
+ @click=${() => {
+ isCollapsed = !isCollapsed;
+ container.classList.toggle("collapsed");
+ renderComponent();
+ }}
+ >
+ <span class="tool-call-status ${toolResponse ? "" : "spinner"}">
+ ${toolResponse ? (toolResponse.tool_error ? "ā" : "ā
") : "ā³"}
+ </span>
+ <span class="tool-call-name">${toolCall.name}</span>
+ <code class="tool-call-input-preview">${shortInput}</code>
+ ${toolResponse && toolResponse.tool_result
+ ? html`<code class="tool-call-result-preview">${shortResult}</code>`
+ : ""}
+ ${toolResponse && toolResponse.elapsed !== undefined
+ ? html`<span class="tool-call-time"
+ >${(toolResponse.elapsed / 1e9).toFixed(2)}s</span
+ >`
+ : ""}
+ ${toolResponse ? "" :
+ html`<button class="refresh-button stop-button" title="Cancel this operation" @click=${(e: Event) => {
+ e.stopPropagation(); // Don't toggle expansion when clicking cancel
+ const button = e.target as HTMLButtonElement;
+ cancelToolCall(toolCall.tool_call_id, button);
+ }}>Cancel</button>`}
+ <span class="tool-call-expand-icon">${isCollapsed ? "ā¼" : "ā²"}</span>
+ </div>
+
+ <div class="tool-call-expanded-view">
+ <div class="tool-call-section">
+ <div class="tool-call-section-label">
+ Input:
+ <button
+ class="tool-call-copy-btn"
+ title="Copy input to clipboard"
+ @click=${(e: Event) => {
+ e.stopPropagation(); // Don't toggle expansion when clicking copy
+ const button = e.target as HTMLElement;
+ copyToClipboard(toolCall.input || displayInput, button);
+ }}
+ >
+ Copy
+ </button>
+ </div>
+ <div class="tool-call-section-content">
+ <pre class="tool-call-input">${formattedInput}</pre>
+ </div>
+ </div>
+
+ ${toolResponse && toolResponse.tool_result
+ ? html`
+ <div class="tool-call-section">
+ <div class="tool-call-section-label">
+ Result:
+ <button
+ class="tool-call-copy-btn"
+ title="Copy result to clipboard"
+ @click=${(e: Event) => {
+ e.stopPropagation(); // Don't toggle expansion when clicking copy
+ const button = e.target as HTMLElement;
+ copyToClipboard(toolResponse.tool_result || "", button);
+ }}
+ >
+ Copy
+ </button>
+ </div>
+ <div class="tool-call-section-content">
+ <div class="tool-call-result">
+ ${toolResponse.tool_result.includes("\n")
+ ? html`<pre><code>${toolResponse.tool_result}</code></pre>`
+ : toolResponse.tool_result}
+ </div>
+ </div>
+ </div>
+ `
+ : ""}
+ </div>
+ `;
+
+ render(template, container);
+ };
+
+ // Initial render
+ renderComponent();
+
+ return container;
+}
+
+/**
+ * Update a tool call card with response data
+ * @param toolCard The tool card element to update
+ * @param toolMessage The tool response message
+ */
+export function updateToolCallCard(
+ toolCard: HTMLElement,
+ toolMessage: TimelineMessage
+): void {
+ if (!toolCard) return;
+
+ // Find the original tool call data to reconstruct the card
+ const toolName = toolCard.querySelector(".tool-call-name")?.textContent || "";
+ const inputPreview =
+ toolCard.querySelector(".tool-call-input-preview")?.textContent || "";
+
+ // Extract the original input from the expanded view
+ let originalInput = "";
+ const inputEl = toolCard.querySelector(".tool-call-input");
+ if (inputEl) {
+ originalInput = inputEl.textContent || "";
+ }
+
+ // Create a minimal ToolCall object from the existing data
+ const toolCall: Partial<ToolCall> = {
+ name: toolName,
+ // Try to reconstruct the original input if possible
+ input: originalInput,
+ };
+
+ // Replace the existing card with a new one
+ const newCard = createToolCallCard(
+ toolCall as ToolCall,
+ toolMessage,
+ toolCard.id
+ );
+
+ // Preserve the collapse state
+ if (!toolCard.classList.contains("collapsed")) {
+ newCard.classList.remove("collapsed");
+ }
+
+ // Replace the old card with the new one
+ if (toolCard.parentNode) {
+ toolCard.parentNode.replaceChild(newCard, toolCard);
+ }
+}
diff --git a/loop/webui/src/timeline/types.ts b/loop/webui/src/timeline/types.ts
new file mode 100644
index 0000000..81d47d0
--- /dev/null
+++ b/loop/webui/src/timeline/types.ts
@@ -0,0 +1,49 @@
/**
 * Interface for a Git commit as reported by the server in timeline
 * commit messages.
 */
export interface GitCommit {
  hash: string; // Full commit hash
  subject: string; // Commit subject line
  body: string; // Full commit message body
  pushed_branch?: string; // If set, this commit was pushed to this branch
}
+
/**
 * Interface for a tool call embedded in an agent message.
 */
export interface ToolCall {
  name: string; // Tool name, e.g. "bash"
  args?: string; // Raw argument string, if provided
  result?: string; // Result text, if already known
  input?: string; // JSON-encoded input; parsed by the renderer when valid
  tool_call_id?: string; // Server-assigned id linking call and response
}
+
/**
 * Interface for a timeline message of any type ("user", "agent",
 * "tool", "error", "commit", ...). Most fields are optional and only
 * populated for the relevant message type.
 */
export interface TimelineMessage {
  type: string;
  content?: string;
  timestamp?: string | number | Date;
  elapsed?: number; // Elapsed time; renderer divides by 1e9, so presumably nanoseconds
  turnDuration?: number; // Turn duration field
  end_of_turn?: boolean;
  conversation_id?: string;
  parent_conversation_id?: string;
  tool_calls?: ToolCall[];
  tool_name?: string;
  tool_error?: boolean; // True when a tool response reports failure
  tool_call_id?: string;
  commits?: GitCommit[]; // For commit messages
  input?: string; // Input property (tool messages)
  tool_result?: string; // Tool result property (tool messages)
  toolResponses?: any[]; // Tool responses array
  usage?: {
    input_tokens?: number;
    output_tokens?: number;
    cache_read_input_tokens?: number;
    cache_creation_input_tokens?: number;
    cost_usd?: number;
  };
}
diff --git a/loop/webui/src/timeline/utils.ts b/loop/webui/src/timeline/utils.ts
new file mode 100644
index 0000000..ff505f9
--- /dev/null
+++ b/loop/webui/src/timeline/utils.ts
@@ -0,0 +1,50 @@
+/**
+ * Escapes HTML special characters in a string
+ */
+export function escapeHTML(str: string): string {
+ return str
+ .replace(/&/g, "&")
+ .replace(/</g, "<")
+ .replace(/>/g, ">")
+ .replace(/"/g, """)
+ .replace(/'/g, "'");
+}
+
+/**
+ * Formats a number with locale-specific formatting
+ */
+export function formatNumber(
+ num: number | null | undefined,
+ defaultValue: string = "0",
+): string {
+ if (num === undefined || num === null) return defaultValue;
+ try {
+ return num.toLocaleString();
+ } catch (e) {
+ return String(num);
+ }
+}
+
+/**
+ * Generates a consistent color based on an ID string
+ */
+export function generateColorFromId(id: string | null | undefined): string {
+ if (!id) return "#7c7c7c"; // Default color for null/undefined
+
+ // Generate a hash from the ID
+ let hash = 0;
+ for (let i = 0; i < id.length; i++) {
+ hash = id.charCodeAt(i) + ((hash << 5) - hash);
+ }
+
+ // Convert hash to a hex color
+ let color = "#";
+ for (let i = 0; i < 3; i++) {
+ // Generate more muted colors by using only part of the range
+ // and adding a base value to avoid very dark colors
+ const value = ((hash >> (i * 8)) & 0xff);
+ const scaledValue = Math.floor(100 + (value * 100) / 255); // Range 100-200 for more muted colors
+ color += scaledValue.toString(16).padStart(2, "0");
+ }
+ return color;
+}
diff --git a/loop/webui/src/vega-types.d.ts b/loop/webui/src/vega-types.d.ts
new file mode 100644
index 0000000..97a4655
--- /dev/null
+++ b/loop/webui/src/vega-types.d.ts
@@ -0,0 +1,34 @@
+// Type definitions for Vega-Lite and related modules
+declare module "fast-json-patch/index.mjs";
+
// Global augmentation of ToolCall for modules that predate
// timeline/types.ts. NOTE(review): duplicates the exported interface in
// timeline/types.ts — keep the two in sync, or migrate callers.
interface ToolCall {
  name: string;
  args?: string;
  result?: string;
  input?: string; // JSON-encoded tool input
}
+
// Global augmentation of TimelineMessage; see the note on ToolCall
// above about keeping this in sync with timeline/types.ts.
interface TimelineMessage {
  type: string;
  content?: string;
  timestamp?: string | number | Date;
  elapsed?: number;
  end_of_turn?: boolean;
  conversation_id?: string;
  parent_conversation_id?: string;
  tool_calls?: ToolCall[];
  tool_name?: string;
  tool_error?: boolean;
  tool_result?: string;
  input?: string;
  start_time?: string | number | Date; // Message start time
  end_time?: string | number | Date; // Message end time
  usage?: {
    input_tokens?: number;
    output_tokens?: number;
    cache_read_input_tokens?: number;
    cache_creation_input_tokens?: number;
    cost_usd?: number;
  };
}
diff --git a/loop/webui/tailwind.config.js b/loop/webui/tailwind.config.js
new file mode 100644
index 0000000..91d9b4b
--- /dev/null
+++ b/loop/webui/tailwind.config.js
@@ -0,0 +1,10 @@
/** @type {import('tailwindcss').Config} */
module.exports = {
  // Scan all source files for class names so unused styles are purged.
  content: [
    "./src/**/*.{html,js,ts}",
  ],
  theme: {
    // No theme customizations yet.
    extend: {},
  },
  plugins: [],
};
diff --git a/loop/webui/tsconfig.json b/loop/webui/tsconfig.json
new file mode 100644
index 0000000..810eb41
--- /dev/null
+++ b/loop/webui/tsconfig.json
@@ -0,0 +1,17 @@
+{
+ "compilerOptions": {
+ "target": "ES2020",
+ "module": "ESNext",
+ "moduleResolution": "node",
+ "esModuleInterop": true,
+ "strict": false,
+ "sourceMap": true,
+ "outDir": "./dist",
+ "declaration": true,
+ "lib": ["DOM", "ES2020"],
+ "skipLibCheck": true,
+ "noImplicitAny": false
+ },
+ "include": ["src/**/*"],
+ "exclude": ["node_modules", "dist"]
+}
diff --git a/skabandclient/skabandclient.go b/skabandclient/skabandclient.go
new file mode 100644
index 0000000..1f1e92b
--- /dev/null
+++ b/skabandclient/skabandclient.go
@@ -0,0 +1,255 @@
+package skabandclient
+
+import (
+ "bufio"
+ "context"
+ "crypto/ed25519"
+ crand "crypto/rand"
+ "crypto/tls"
+ "crypto/x509"
+ "encoding/hex"
+ "encoding/json"
+ "encoding/pem"
+ "errors"
+ "fmt"
+ "io"
+ "log/slog"
+ "net"
+ "net/http"
+ "net/url"
+ "os"
+ "path/filepath"
+ "strings"
+ "sync/atomic"
+ "time"
+
+ "golang.org/x/net/http2"
+)
+
+// DialAndServeLoop is a redial loop around DialAndServe.
+func DialAndServeLoop(ctx context.Context, skabandAddr, sessionID, clientPubKey string, srv http.Handler, connectFn func(connected bool)) {
+ if _, err := os.Stat("/.dockerenv"); err == nil { // inDocker
+ if addr, err := LocalhostToDockerInternal(skabandAddr); err == nil {
+ skabandAddr = addr
+ }
+ }
+
+ var skabandConnected atomic.Bool
+ skabandHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if r.URL.Path == "/skabandinit" {
+ b, err := io.ReadAll(r.Body)
+ if err != nil {
+ fmt.Printf("skabandinit failed: %v\n", err)
+ return
+ }
+ m := map[string]string{}
+ if err := json.Unmarshal(b, &m); err != nil {
+ fmt.Printf("skabandinit failed: %v\n", err)
+ return
+ }
+ skabandConnected.Store(true)
+ if connectFn != nil {
+ connectFn(true)
+ }
+ return
+ }
+ srv.ServeHTTP(w, r)
+ })
+
+ var lastErrLog time.Time
+ for {
+ if err := DialAndServe(ctx, skabandAddr, sessionID, clientPubKey, skabandHandler); err != nil {
+ // NOTE: *just* backoff the logging. Backing off dialing
+ // is bad UX. Doing so saves negligble CPU and doing so
+ // without huring UX requires interrupting the backoff with
+ // wake-from-sleep and network-up events from the OS,
+ // which are a pain to plumb.
+ if time.Since(lastErrLog) > 1*time.Minute {
+ slog.DebugContext(ctx, "skaband connection failed", "err", err)
+ lastErrLog = time.Now()
+ }
+ }
+ if skabandConnected.CompareAndSwap(true, false) {
+ if connectFn != nil {
+ connectFn(false)
+ }
+ }
+ time.Sleep(200 * time.Millisecond)
+ }
+}
+
+func DialAndServe(ctx context.Context, hostURL, sessionID, clientPubKey string, h http.Handler) (err error) {
+ // Connect to the server.
+ var conn net.Conn
+ if strings.HasPrefix(hostURL, "https://") {
+ u, err := url.Parse(hostURL)
+ if err != nil {
+ return err
+ }
+ port := u.Port()
+ if port == "" {
+ port = "443"
+ }
+ dialer := tls.Dialer{}
+ conn, err = dialer.DialContext(ctx, "tcp4", u.Host+":"+port)
+ } else if strings.HasPrefix(hostURL, "http://") {
+ dialer := net.Dialer{}
+ conn, err = dialer.DialContext(ctx, "tcp4", strings.TrimPrefix(hostURL, "http://"))
+ } else {
+ return fmt.Errorf("skabandclient.Dial: bad url, needs to be http or https: %s", hostURL)
+ }
+ if err != nil {
+ return fmt.Errorf("skabandclient: %w", err)
+ }
+ defer conn.Close()
+
+ // "Upgrade" our connection, like a WebSocket does.
+ req, err := http.NewRequest("POST", hostURL+"/attach", nil)
+ if err != nil {
+ return fmt.Errorf("skabandclient.Dial: /attach: %w", err)
+ }
+ req.Header.Set("Connection", "Upgrade")
+ req.Header.Set("Upgrade", "ska")
+ req.Header.Set("Session-ID", sessionID)
+ req.Header.Set("Public-Key", clientPubKey)
+
+ if err := req.Write(conn); err != nil {
+ return fmt.Errorf("skabandclient.Dial: write upgrade request: %w", err)
+ }
+ reader := bufio.NewReader(conn)
+ resp, err := http.ReadResponse(reader, req)
+ if err != nil {
+ b, _ := io.ReadAll(resp.Body)
+ return fmt.Errorf("skabandclient.Dial: read upgrade response: %w: %s", err, b)
+ }
+ defer resp.Body.Close()
+ if resp.StatusCode != http.StatusSwitchingProtocols {
+ b, _ := io.ReadAll(resp.Body)
+ return fmt.Errorf("skabandclient.Dial: unexpected status code: %d: %s", resp.StatusCode, b)
+ }
+ if !strings.Contains(resp.Header.Get("Upgrade"), "ska") {
+ return errors.New("skabandclient.Dial: server did not upgrade to ska protocol")
+ }
+ if buf := reader.Buffered(); buf > 0 {
+ peek, _ := reader.Peek(buf)
+ return fmt.Errorf("skabandclient.Dial: buffered read after upgrade response: %d: %q", buf, string(peek))
+ }
+
+ // Send Magic.
+ const magic = "skaband\n"
+ if _, err := conn.Write([]byte(magic)); err != nil {
+ return fmt.Errorf("skabandclient.Dial: failed to send upgrade init message: %w", err)
+ }
+
+ // We have a TCP connection to the server and have been through the upgrade dance.
+ // Now we can run an HTTP server over that connection ("inverting" the HTTP flow).
+ server := &http2.Server{}
+ server.ServeConn(conn, &http2.ServeConnOpts{
+ Handler: h,
+ })
+
+ return nil
+}
+
+func decodePrivKey(privData []byte) (ed25519.PrivateKey, error) {
+ privBlock, _ := pem.Decode(privData)
+ if privBlock == nil || privBlock.Type != "PRIVATE KEY" {
+ return nil, fmt.Errorf("no valid private key block found")
+ }
+ parsedPriv, err := x509.ParsePKCS8PrivateKey(privBlock.Bytes)
+ if err != nil {
+ return nil, err
+ }
+ return parsedPriv.(ed25519.PrivateKey), nil
+}
+
+func encodePrivateKey(privKey ed25519.PrivateKey) ([]byte, error) {
+ privBytes, err := x509.MarshalPKCS8PrivateKey(privKey)
+ if err != nil {
+ return nil, err
+ }
+ return pem.EncodeToMemory(&pem.Block{Type: "PRIVATE KEY", Bytes: privBytes}), nil
+}
+
+func LoadOrCreatePrivateKey(path string) (ed25519.PrivateKey, error) {
+ privData, err := os.ReadFile(path)
+ if os.IsNotExist(err) {
+ _, privKey, err := ed25519.GenerateKey(crand.Reader)
+ if err != nil {
+ return nil, err
+ }
+ b, err := encodePrivateKey(privKey)
+ if err := os.WriteFile(path, b, 0o600); err != nil {
+ return nil, err
+ }
+ return privKey, nil
+ } else if err != nil {
+ return nil, fmt.Errorf("read key failed: %w", err)
+ }
+ key, err := decodePrivKey(privData)
+ if err != nil {
+ return nil, fmt.Errorf("%s: %w", path, err)
+ }
+ return key, nil
+}
+
// Login authenticates this client with skaband and returns the client's
// hex-encoded public key plus the API URL and API key skaband hands
// back in response headers. The session ID is signed with privKey so
// the server can verify possession of the key. The response body is
// streamed to stdout — even on error statuses — so the user sees any
// message skaband prints (e.g. login instructions).
func Login(stdout io.Writer, privKey ed25519.PrivateKey, skabandAddr, sessionID string) (pubKey, apiURL, apiKey string, err error) {
	sig := ed25519.Sign(privKey, []byte(sessionID))

	req, err := http.NewRequest("POST", skabandAddr+"/authclient", nil)
	if err != nil {
		return "", "", "", err
	}
	pubKey = hex.EncodeToString(privKey.Public().(ed25519.PublicKey))
	req.Header.Set("Public-Key", pubKey)
	req.Header.Set("Session-ID", sessionID)
	req.Header.Set("Session-ID-Sig", hex.EncodeToString(sig))
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return "", "", "", fmt.Errorf("skaband login: %w", err)
	}
	// Headers are read before draining the body; the status check is
	// deliberately after the body copy so the server's error text
	// reaches the user before we return the status error.
	apiURL = resp.Header.Get("X-API-URL")
	apiKey = resp.Header.Get("X-API-Key")
	defer resp.Body.Close()
	_, err = io.Copy(stdout, resp.Body)
	if err != nil {
		return "", "", "", fmt.Errorf("skaband login: %w", err)
	}
	if resp.StatusCode != 200 {
		return "", "", "", fmt.Errorf("skaband login failed: %d", resp.StatusCode)
	}
	if apiURL == "" {
		return "", "", "", fmt.Errorf("skaband returned no api url")
	}
	if apiKey == "" {
		return "", "", "", fmt.Errorf("skaband returned no api key")
	}
	return pubKey, apiURL, apiKey, nil
}
+
+func DefaultKeyPath() string {
+ homeDir, err := os.UserHomeDir()
+ if err != nil {
+ panic(err)
+ }
+ cacheDir := filepath.Join(homeDir, ".cache", "sketch")
+ os.MkdirAll(cacheDir, 0o777)
+ return filepath.Join(cacheDir, "sketch.ed25519")
+}
+
// LocalhostToDockerInternal rewrites a localhost/127.0.0.1 URL so it is
// reachable from inside a Docker container via host.docker.internal,
// preserving any explicit port. All other hosts pass through unchanged.
func LocalhostToDockerInternal(skabandURL string) (string, error) {
	u, err := url.Parse(skabandURL)
	if err != nil {
		return "", fmt.Errorf("localhostToDockerInternal: %w", err)
	}
	hostname := u.Hostname()
	if hostname != "localhost" && hostname != "127.0.0.1" {
		return skabandURL, nil
	}
	newHost := "host.docker.internal"
	if port := u.Port(); port != "" {
		newHost += ":" + port
	}
	u.Host = newHost
	return u.String(), nil
}
diff --git a/skribe/skribe.go b/skribe/skribe.go
new file mode 100644
index 0000000..d113bf0
--- /dev/null
+++ b/skribe/skribe.go
@@ -0,0 +1,100 @@
+// Package skribe defines sketch-wide logging types and functions.
+//
+// Logging happens via slog.
+package skribe
+
+import (
+ "context"
+ "io"
+ "log/slog"
+ "slices"
+ "strings"
+)
+
+// attrsKey is the private context key under which per-context slog
+// attributes are stored (see ContextWithAttr and Attrs).
+type attrsKey struct{}
+
+// Redact returns a copy of arr (typically environment variables in
+// KEY=value form) with the value of any ANTHROPIC_API_KEY entry replaced
+// by "[REDACTED]" so the secret never reaches logs. The input slice is
+// not modified, and the result is always non-nil.
+func Redact(arr []string) []string {
+	// Pre-size: the output has exactly one entry per input entry.
+	ret := make([]string, 0, len(arr))
+	for _, s := range arr {
+		if strings.HasPrefix(s, "ANTHROPIC_API_KEY=") {
+			ret = append(ret, "ANTHROPIC_API_KEY=[REDACTED]")
+		} else {
+			ret = append(ret, s)
+		}
+	}
+	return ret
+}
+
+// ContextWithAttr returns a child context carrying the parent's existing
+// slog attributes plus add. The parent's attribute slice is copied, never
+// mutated.
+func ContextWithAttr(ctx context.Context, add ...slog.Attr) context.Context {
+	merged := append([]slog.Attr(nil), Attrs(ctx)...)
+	merged = append(merged, add...)
+	return context.WithValue(ctx, attrsKey{}, merged)
+}
+
+// Attrs reports the slog attributes attached to ctx by ContextWithAttr,
+// or nil if none have been attached.
+func Attrs(ctx context.Context) []slog.Attr {
+	if attrs, ok := ctx.Value(attrsKey{}).([]slog.Attr); ok {
+		return attrs
+	}
+	return nil
+}
+
+// AttrsWrap decorates h so that every record handled is augmented with
+// the slog attributes stored in its context (see ContextWithAttr).
+func AttrsWrap(h slog.Handler) slog.Handler {
+	wrapped := &augmentHandler{Handler: h}
+	return wrapped
+}
+
+// augmentHandler decorates an slog.Handler: Handle adds context-carried
+// attributes to each record. All other slog.Handler methods are promoted
+// unchanged from the embedded handler.
+type augmentHandler struct {
+	slog.Handler
+}
+
+// Handle implements slog.Handler: it appends the attributes stored in ctx
+// (via ContextWithAttr) to r, then delegates to the wrapped handler.
+func (h *augmentHandler) Handle(ctx context.Context, r slog.Record) error {
+	r.AddAttrs(Attrs(ctx)...)
+	return h.Handler.Handle(ctx, r)
+}
+
+// multiHandler is an slog.Handler that routes each record to a handler
+// stored in the record's own context (see NewSlogHandlerCtx). When
+// AllHandler is set, every record is additionally delivered to it first.
+type multiHandler struct {
+	AllHandler slog.Handler
+}
+
+// Enabled implements slog.Handler. Ignores slog.Level - if there's a logger, this returns true.
+func (mh *multiHandler) Enabled(ctx context.Context, l slog.Level) bool {
+ _, ok := ctx.Value(skribeCtxHandlerKey).(slog.Handler)
+ return ok
+}
+
+// WithAttrs implements slog.Handler.
+// Deliberately unimplemented: multiHandler resolves its target handler
+// from the context at Handle time, so there is no stored handler to
+// pre-bind attributes onto. Attributes can instead be attached to the
+// context via ContextWithAttr; Handle adds them to each record.
+func (mh *multiHandler) WithAttrs(attrs []slog.Attr) slog.Handler {
+	panic("unimplemented")
+}
+
+// WithGroup implements slog.Handler.
+// Deliberately unimplemented: the target handler is resolved from the
+// context at Handle time, so there is no stored handler to wrap in a
+// group.
+func (mh *multiHandler) WithGroup(name string) slog.Handler {
+	panic("unimplemented")
+}
+
+// NewMultiHandler returns a multiHandler with no AllHandler configured;
+// callers may assign AllHandler to additionally mirror every record.
+func NewMultiHandler() *multiHandler {
+	return new(multiHandler)
+}
+
+// scribeCtxKeyType is a private key type so this package's context values
+// cannot collide with keys set by other packages.
+type scribeCtxKeyType string
+
+// skribeCtxHandlerKey is the context key under which NewSlogHandlerCtx
+// stores the per-context slog.Handler consulted by multiHandler.
+const skribeCtxHandlerKey scribeCtxKeyType = "skribe-handlerKey"
+
+// NewSlogHandlerCtx returns a context carrying a debug-level JSON slog
+// handler writing to logFile; multiHandler.Handle routes records to it.
+// The handler is wrapped with AttrsWrap so context attributes are
+// included in its output.
+func (mh *multiHandler) NewSlogHandlerCtx(ctx context.Context, logFile io.Writer) context.Context {
+	opts := &slog.HandlerOptions{Level: slog.LevelDebug}
+	jsonHandler := slog.NewJSONHandler(logFile, opts)
+	return context.WithValue(ctx, skribeCtxHandlerKey, AttrsWrap(jsonHandler))
+}
+
+// Handle implements slog.Handler. The record is first mirrored to
+// AllHandler (when set, and before context attributes are added), then
+// augmented with context attributes and delivered to the handler stored
+// in ctx by NewSlogHandlerCtx. It panics if the context carries no
+// handler; in normal slog usage Enabled prevents that.
+func (mh *multiHandler) Handle(ctx context.Context, r slog.Record) error {
+	if all := mh.AllHandler; all != nil {
+		if err := all.Handle(ctx, r); err != nil {
+			return err
+		}
+	}
+	r.AddAttrs(Attrs(ctx)...)
+	ctxHandler, ok := ctx.Value(skribeCtxHandlerKey).(slog.Handler)
+	if !ok {
+		panic("no skribeCtxHandlerKey value in ctx")
+	}
+	return ctxHandler.Handle(ctx, r)
+}
diff --git a/termui/termui.go b/termui/termui.go
new file mode 100644
index 0000000..d1aaf73
--- /dev/null
+++ b/termui/termui.go
@@ -0,0 +1,391 @@
+package termui
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "os/exec"
+ "os/signal"
+ "strings"
+ "sync"
+ "syscall"
+ "text/template"
+ "time"
+
+ "github.com/fatih/color"
+ "golang.org/x/term"
+ "sketch.dev/loop"
+)
+
+var (
+	// toolUseTemplTxt defines how tool invocations appear in the terminal UI.
+	// Keep this template in sync with the tools defined in claudetool package
+	// and registered in loop/agent.go.
+	// Add formatting for new tools are they are created.
+	// Add formatting for new tools as they are created.
+	// TODO: should this be part of tool definition to make it harder to forget to set up?
+	toolUseTemplTxt = `{{if .msg.ToolError}}š {{end -}}
+{{if eq .msg.ToolName "think" -}}
+	š§ {{.input.thoughts -}}
+{{else if eq .msg.ToolName "keyword_search" -}}
+	š {{ .input.query}}: {{.input.keywords -}}
+{{else if eq .msg.ToolName "bash" -}}
+	š„ļø {{ .input.command -}}
+{{else if eq .msg.ToolName "patch" -}}
+	āØļø {{.input.path -}}
+{{else if eq .msg.ToolName "done" -}}
+{{/* nothing to show here, the agent will write more in its next message */}}
+{{else if eq .msg.ToolName "title" -}}
+	š·ļø {{.input.title -}}
+{{else if eq .msg.ToolName "str_replace_editor" -}}
+	āļø {{.input.file_path -}}
+{{else if eq .msg.ToolName "codereview" -}}
+	š Running automated code review, may be slow
+{{else -}}
+	š ļø {{ .msg.ToolName}}: {{.msg.ToolInput -}}
+{{end -}}
+`
+	// toolUseTmpl is parsed once at package init; template.Must panics on
+	// template syntax errors, surfacing them at startup rather than at
+	// render time.
+	toolUseTmpl = template.Must(template.New("tool_use").Parse(toolUseTemplTxt))
+)
+
+// termUI is the interactive terminal frontend: it renders agent and
+// system messages, maintains the readline prompt, and accepts user
+// commands. All terminal writes are serialized through the channels
+// below by a single writer goroutine (started in initializeTerminalUI).
+type termUI struct {
+	stdin  *os.File
+	stdout *os.File
+	stderr *os.File
+
+	agent   loop.CodingAgent
+	httpURL string
+
+	trm *term.Terminal
+
+	// the chatMsgCh channel is for "conversation" messages, like responses to user input
+	// from the LLM, or output from executing slash-commands issued by the user.
+	chatMsgCh chan chatMessage
+
+	// the log channel is for secondary messages, like logging, errors, and debug information
+	// from local and remote subprocesses.
+	termLogCh chan string
+
+	// protects following
+	mu       sync.Mutex
+	oldState *term.State
+	// Tracks branches that were pushed during the session
+	pushedBranches map[string]struct{}
+}
+
+// chatMessage is one entry in the conversation stream shown to the user.
+type chatMessage struct {
+	// idx is the message index assigned by the agent.
+	idx int
+	// sender is a short label (emoji) identifying the speaker.
+	sender string
+	// content is the message text.
+	content string
+	// thinking reports whether the agent is still working; it is forwarded
+	// to updatePrompt, which marks the prompt accordingly.
+	thinking bool
+}
+
+// New constructs a terminal UI bound to the given coding agent and the
+// base URL of the web UI. Standard input/output/error default to the
+// process's own streams.
+func New(agent loop.CodingAgent, httpURL string) *termUI {
+	ui := &termUI{
+		agent:          agent,
+		httpURL:        httpURL,
+		stdin:          os.Stdin,
+		stdout:         os.Stdout,
+		stderr:         os.Stderr,
+		chatMsgCh:      make(chan chatMessage, 1),
+		termLogCh:      make(chan string, 1),
+		pushedBranches: make(map[string]struct{}),
+	}
+	return ui
+}
+
+// Run prints the welcome banner, switches the terminal into the
+// interactive UI, starts the goroutine that receives agent messages, and
+// then blocks in the input loop until the user exits or an error occurs.
+func (ui *termUI) Run(ctx context.Context) error {
+	fmt.Println(`šØ Welcome to Sketch`)
+	fmt.Println(`š ` + ui.httpURL + `/`)
+	fmt.Println(`š Initial Commit: ` + ui.agent.InitialCommit())
+	fmt.Println(`š¬ type 'help' for help`)
+	fmt.Println()
+
+	// Start up the main terminal UI:
+	if err := ui.initializeTerminalUI(ctx); err != nil {
+		return err
+	}
+	go ui.receiveMessagesLoop(ctx)
+	return ui.inputLoop(ctx)
+}
+
+func (ui *termUI) LogToolUse(resp loop.AgentMessage) {
+ inputData := map[string]any{}
+ if err := json.Unmarshal([]byte(resp.ToolInput), &inputData); err != nil {
+ ui.AppendSystemMessage("error: %v", err)
+ return
+ }
+ buf := bytes.Buffer{}
+ if err := toolUseTmpl.Execute(&buf, map[string]any{"msg": resp, "input": inputData, "output": resp.ToolResult}); err != nil {
+ ui.AppendSystemMessage("error: %v", err)
+ return
+ }
+ ui.AppendSystemMessage("%s\n", buf.String())
+}
+
+// receiveMessagesLoop pumps messages from the agent to the terminal until
+// ctx is canceled, dispatching each message type to its own rendering.
+// It is started once by Run and is the sole consumer of the agent's
+// message stream for this UI.
+func (ui *termUI) receiveMessagesLoop(ctx context.Context) {
+	bold := color.New(color.Bold).SprintFunc()
+	for {
+		// Non-blocking cancellation check between messages.
+		select {
+		case <-ctx.Done():
+			return
+		default:
+		}
+		// Blocks until the agent produces the next message.
+		resp := ui.agent.WaitForMessage(ctx)
+		// Typically a user message will start the thinking and a (top-level
+		// conversation) end of turn will stop it.
+		thinking := !(resp.EndOfTurn && resp.ParentConversationID == nil)
+
+		switch resp.Type {
+		case loop.AgentMessageType:
+			ui.AppendChatMessage(chatMessage{thinking: thinking, idx: resp.Idx, sender: "š“ļø", content: resp.Content})
+		case loop.ToolUseMessageType:
+			ui.LogToolUse(resp)
+		case loop.ErrorMessageType:
+			ui.AppendSystemMessage("ā %s", resp.Content)
+		case loop.BudgetMessageType:
+			ui.AppendSystemMessage("š° %s", resp.Content)
+		case loop.AutoMessageType:
+			ui.AppendSystemMessage("š§ %s", resp.Content)
+		case loop.UserMessageType:
+			ui.AppendChatMessage(chatMessage{thinking: thinking, idx: resp.Idx, sender: "š¤ļø", content: resp.Content})
+		case loop.CommitMessageType:
+			// Display each commit in the terminal
+			for _, commit := range resp.Commits {
+				if commit.PushedBranch != "" {
+					ui.AppendSystemMessage("š new commit: [%s] %s pushed to %s", commit.Hash[:8], commit.Subject, bold(commit.PushedBranch))
+
+					// Track the pushed branch in our map
+					ui.mu.Lock()
+					ui.pushedBranches[commit.PushedBranch] = struct{}{}
+					ui.mu.Unlock()
+				} else {
+					ui.AppendSystemMessage("š new commit: [%s] %s", commit.Hash[:8], commit.Subject)
+				}
+			}
+		default:
+			ui.AppendSystemMessage("ā Unexpected Message Type %s %v", resp.Type, resp)
+		}
+	}
+}
+
+// inputLoop reads user commands from the terminal until exit. Built-in
+// commands (help, budget, usage, stop, exit, ...) are handled locally;
+// lines starting with "!" run a shell command ("!!" additionally forwards
+// the command and its output to the agent); anything else is sent to the
+// agent as a chat message. Returns nil on user-requested exit.
+func (ui *termUI) inputLoop(ctx context.Context) error {
+	for {
+		line, err := ui.trm.ReadLine()
+		if errors.Is(err, io.EOF) {
+			// Ctrl-D: treat end-of-input as an "exit" command.
+			ui.AppendSystemMessage("\n")
+			line = "exit"
+		} else if err != nil {
+			return err
+		}
+
+		line = strings.TrimSpace(line)
+
+		switch line {
+		case "?", "help":
+			ui.AppendSystemMessage(`Available commands:
+- help, ? : Show this help message
+- budget : Show original budget
+- usage, cost : Show current token usage and cost
+- stop, cancel, abort : Cancel the current operation
+- exit, quit, q : Exit the application
+- ! <command> : Execute shell command (e.g. !ls -la)`)
+		case "budget":
+			originalBudget := ui.agent.OriginalBudget()
+			ui.AppendSystemMessage("š° Budget summary:")
+			if originalBudget.MaxResponses > 0 {
+				ui.AppendSystemMessage("- Max responses: %d", originalBudget.MaxResponses)
+			}
+			if originalBudget.MaxWallTime > 0 {
+				ui.AppendSystemMessage("- Max wall time: %v", originalBudget.MaxWallTime)
+			}
+			ui.AppendSystemMessage("- Max total cost: %0.2f", originalBudget.MaxDollars)
+		case "usage", "cost":
+			totalUsage := ui.agent.TotalUsage()
+			ui.AppendSystemMessage("š° Current usage summary:")
+			ui.AppendSystemMessage("- Input tokens: %d [Cache: read=%d, creation=%d]", totalUsage.InputTokens, totalUsage.CacheReadInputTokens, totalUsage.CacheCreationInputTokens)
+			ui.AppendSystemMessage("- Output tokens: %d", totalUsage.OutputTokens)
+			ui.AppendSystemMessage("- Responses: %d", totalUsage.Responses)
+			ui.AppendSystemMessage("- Wall time: %s", totalUsage.WallTime().Round(time.Second))
+			ui.AppendSystemMessage("- Total cost: $%0.2f", totalUsage.TotalCostUSD)
+		case "bye", "exit", "q", "quit":
+			// Clear the prompt before printing the exit summary.
+			ui.trm.SetPrompt("")
+			// Display final usage stats
+			totalUsage := ui.agent.TotalUsage()
+			ui.AppendSystemMessage("š° Final usage summary:")
+			ui.AppendSystemMessage("- Input tokens: %d [Cache: read=%d, creation=%d]", totalUsage.InputTokens, totalUsage.CacheReadInputTokens, totalUsage.CacheCreationInputTokens)
+			ui.AppendSystemMessage("- Output tokens: %d", totalUsage.OutputTokens)
+			ui.AppendSystemMessage("- Responses: %d", totalUsage.Responses)
+			ui.AppendSystemMessage("- Wall time: %s", totalUsage.WallTime().Round(time.Second))
+			ui.AppendSystemMessage("- Total cost: $%0.2f", totalUsage.TotalCostUSD)
+
+			// Display pushed branches
+			ui.mu.Lock()
+			if len(ui.pushedBranches) > 0 {
+				// Convert map keys to a slice for display
+				branches := make([]string, 0, len(ui.pushedBranches))
+				for branch := range ui.pushedBranches {
+					branches = append(branches, branch)
+				}
+
+				if len(branches) == 1 {
+					ui.AppendSystemMessage("\nš Branch pushed during session: %s", branches[0])
+					ui.AppendSystemMessage("š To add those changes to your branch: git cherry-pick %s..%s", ui.agent.InitialCommit(), branches[0])
+				} else {
+					ui.AppendSystemMessage("\nš Branches pushed during session:")
+					for _, branch := range branches {
+						ui.AppendSystemMessage("- %s", branch)
+					}
+					ui.AppendSystemMessage("\nš To add all those changes to your branch:")
+					for _, branch := range branches {
+						ui.AppendSystemMessage("git cherry-pick %s..%s", ui.agent.InitialCommit(), branch)
+					}
+				}
+			}
+			ui.mu.Unlock()
+
+			ui.AppendSystemMessage("\nš Goodbye!")
+			return nil
+		case "stop", "cancel", "abort":
+			ui.agent.CancelInnerLoop(fmt.Errorf("user canceled the operation"))
+		case "panic":
+			// Debug aid: deliberately crash the UI.
+			panic("user forced a panic")
+		default:
+			if line == "" {
+				continue
+			}
+			if strings.HasPrefix(line, "!") {
+				// Execute as shell command
+				line = line[1:] // remove the '!' prefix
+				sendToLLM := strings.HasPrefix(line, "!")
+				if sendToLLM {
+					line = line[1:] // remove the second '!'
+				}
+
+				// Create a cmd and run it
+				// TODO: ui.trm contains a mutex inside its write call.
+				// It is potentially safe to attach ui.trm directly to this
+				// cmd object's Stdout/Stderr and stream the output.
+				// That would make a big difference for, e.g. wget.
+				cmd := exec.Command("bash", "-c", line)
+				out, err := cmd.CombinedOutput()
+				ui.AppendSystemMessage("%s", out)
+				if err != nil {
+					ui.AppendSystemMessage("ā Command error: %v", err)
+				}
+				if sendToLLM {
+					// Send the command and its output to the agent
+					message := fmt.Sprintf("I ran the command: `%s`\nOutput:\n```\n%s```", line, out)
+					if err != nil {
+						message += fmt.Sprintf("\n\nError: %v", err)
+					}
+					ui.agent.UserMessage(ctx, message)
+				}
+				continue
+			}
+
+			// Send it to the LLM
+			// chatMsg := chatMessage{sender: "you", content: line}
+			// ui.sendChatMessage(chatMsg)
+			ui.agent.UserMessage(ctx, line)
+		}
+	}
+}
+
+func (ui *termUI) updatePrompt(thinking bool) {
+ var t string
+
+ if thinking {
+ // Emoji don't seem to work here? Messes up my terminal.
+ t = "*"
+ }
+ p := fmt.Sprintf("%s/ %s($%0.2f/%0.2f)%s> ",
+ ui.httpURL, ui.agent.WorkingDir(), ui.agent.TotalUsage().TotalCostUSD, ui.agent.OriginalBudget().MaxDollars, t)
+ ui.trm.SetPrompt(p)
+}
+
+// initializeTerminalUI switches stdin into raw mode, configures the
+// term.Terminal (including SIGWINCH resize handling), and starts the
+// goroutine that serializes all writes to the terminal. Callers should
+// invoke RestoreOldState on exit to undo the raw-mode change. Both
+// background goroutines exit when ctx is canceled.
+func (ui *termUI) initializeTerminalUI(ctx context.Context) error {
+	ui.mu.Lock()
+	defer ui.mu.Unlock()
+
+	if !term.IsTerminal(int(ui.stdin.Fd())) {
+		return fmt.Errorf("this command requires terminal I/O")
+	}
+
+	oldState, err := term.MakeRaw(int(ui.stdin.Fd()))
+	if err != nil {
+		return err
+	}
+	ui.oldState = oldState
+	ui.trm = term.NewTerminal(ui.stdin, "")
+	width, height, err := term.GetSize(int(ui.stdin.Fd()))
+	if err != nil {
+		// Lowercase, unpunctuated error string per Go convention; %w keeps
+		// the cause inspectable with errors.Is/As.
+		return fmt.Errorf("getting terminal size: %w", err)
+	}
+	ui.trm.SetSize(width, height)
+	// Handle terminal resizes. The goroutine deregisters the signal handler
+	// and exits when ctx is canceled, so it does not leak.
+	sig := make(chan os.Signal, 1)
+	signal.Notify(sig, syscall.SIGWINCH)
+	go func() {
+		defer signal.Stop(sig)
+		for {
+			select {
+			case <-ctx.Done():
+				return
+			case <-sig:
+			}
+			newWidth, newHeight, err := term.GetSize(int(ui.stdin.Fd()))
+			if err != nil {
+				continue
+			}
+			if newWidth != width || newHeight != height {
+				width, height = newWidth, newHeight
+				ui.trm.SetSize(width, height)
+			}
+		}
+	}()
+
+	ui.updatePrompt(false)
+
+	// This goroutine is the sole writer to ui.trm: chat messages and log
+	// lines are funneled through channels to avoid interleaved output.
+	go func() {
+		for {
+			select {
+			case <-ctx.Done():
+				return
+			case msg := <-ui.chatMsgCh:
+				// Sometimes claude doesn't say anything when it runs tools.
+				// No need to output anything in that case.
+				if strings.TrimSpace(msg.content) == "" {
+					break
+				}
+				s := fmt.Sprintf("%s %s\n", msg.sender, msg.content)
+				// Update prompt before writing, because otherwise it doesn't redraw the prompt.
+				ui.updatePrompt(msg.thinking)
+				ui.trm.Write([]byte(s))
+			case logLine := <-ui.termLogCh:
+				b := []byte(logLine + "\n")
+				ui.trm.Write(b)
+			}
+		}
+	}()
+
+	return nil
+}
+
+// RestoreOldState returns the terminal to the mode it was in before
+// initializeTerminalUI placed it into raw mode.
+func (ui *termUI) RestoreOldState() error {
+	ui.mu.Lock()
+	defer ui.mu.Unlock()
+	fd := int(ui.stdin.Fd())
+	return term.Restore(fd, ui.oldState)
+}
+
+// AppendChatMessage is for showing responses the user's request, conversational dialog etc
+// The message is queued on chatMsgCh and written by the single terminal
+// writer goroutine started in initializeTerminalUI; this call blocks once
+// the 1-element channel buffer is full.
+func (ui *termUI) AppendChatMessage(msg chatMessage) {
+	ui.chatMsgCh <- msg
+}
+
+// AppendSystemMessage is for debug information, errors and such that are not part of the "conversation" per se,
+// but still need to be shown to the user.
+// The formatted line is queued on termLogCh and written by the terminal
+// writer goroutine; this call blocks once the 1-element buffer is full.
+func (ui *termUI) AppendSystemMessage(fmtString string, args ...any) {
+	ui.termLogCh <- fmt.Sprintf(fmtString, args...)
+}