blob: e126698ed9a27de4b880b106c981c9a61c9c857c [file] [log] [blame]
package dockerimg

import (
	"context"
	"encoding/json"
	"fmt"
	"log/slog"
	"net/http"
	"os/exec"
	"strings"
	"sync"
	"time"

	"sketch.dev/loop"
)
15
// skipPorts lists well-known system ports that are never auto-tunneled.
var skipPorts = map[string]bool{
	"22":  true, // SSH
	"25":  true, // SMTP
	"53":  true, // DNS
	"80":  true, // HTTP (this is the main sketch web interface)
	"110": true, // POP3
	"143": true, // IMAP
	"443": true, // HTTPS
	"993": true, // IMAPS
	"995": true, // POP3S
}
28
29// TunnelManager manages automatic SSH tunnels for container ports
30type TunnelManager struct {
31 mu sync.Mutex
32 containerURL string // HTTP URL to container (e.g., "http://localhost:8080")
33 containerSSHHost string // SSH hostname for container (e.g., "sketch-abcd-efgh")
34 activeTunnels map[string]*sshTunnel // port -> tunnel mapping
35 lastPollTime time.Time
36 maxActiveTunnels int // maximum number of concurrent tunnels allowed
37}
38
// sshTunnel tracks one running ssh port-forward process.
type sshTunnel struct {
	containerPort string             // port listening inside the container
	hostPort      string             // forwarded port on the host
	cmd           *exec.Cmd          // the running ssh process
	cancel        context.CancelFunc // cancels the process's context to stop it
}
46
47// NewTunnelManager creates a new tunnel manager
48func NewTunnelManager(containerURL, containerSSHHost string, maxActiveTunnels int) *TunnelManager {
49 return &TunnelManager{
50 containerURL: containerURL,
51 containerSSHHost: containerSSHHost,
52 activeTunnels: make(map[string]*sshTunnel),
53 lastPollTime: time.Now(),
54 maxActiveTunnels: maxActiveTunnels,
55 }
56}
57
58// Start begins monitoring port events and managing tunnels
59func (tm *TunnelManager) Start(ctx context.Context) {
60 go func() {
61 ticker := time.NewTicker(10 * time.Second) // Poll every 10 seconds
62 defer ticker.Stop()
63
64 for {
65 select {
66 case <-ctx.Done():
67 tm.cleanupAllTunnels()
68 return
69 case <-ticker.C:
70 tm.pollPortEvents(ctx)
71 }
72 }
73 }()
74}
75
76// pollPortEvents fetches recent port events from container and updates tunnels
77func (tm *TunnelManager) pollPortEvents(ctx context.Context) {
78 // Build URL with since parameter
79 url := fmt.Sprintf("%s/port-events?since=%s", tm.containerURL, tm.lastPollTime.Format(time.RFC3339))
80
81 req, err := http.NewRequestWithContext(ctx, "GET", url, nil)
82 if err != nil {
83 slog.DebugContext(ctx, "Failed to create port events request", "error", err)
84 return
85 }
86
87 resp, err := http.DefaultClient.Do(req)
88 if err != nil {
89 slog.DebugContext(ctx, "Failed to fetch port events", "error", err)
90 return
91 }
92 defer resp.Body.Close()
93
94 if resp.StatusCode != http.StatusOK {
95 slog.DebugContext(ctx, "Port events request failed", "status", resp.StatusCode)
96 return
97 }
98
99 var events []loop.PortEvent
100 if err := json.NewDecoder(resp.Body).Decode(&events); err != nil {
101 slog.DebugContext(ctx, "Failed to decode port events", "error", err)
102 return
103 }
104
105 // Process each event
106 for _, event := range events {
107 tm.processPortEvent(ctx, event)
108 tm.mu.Lock()
109 // Update last poll time to the latest event timestamp
110 if event.Timestamp.After(tm.lastPollTime) {
111 tm.lastPollTime = event.Timestamp
112 }
113 tm.mu.Unlock()
114 }
115
116 // Update poll time even if no events, to avoid re-fetching old events
117 if len(events) == 0 {
118 tm.lastPollTime = time.Now()
119 }
120}
121
122// processPortEvent handles a single port event
123func (tm *TunnelManager) processPortEvent(ctx context.Context, event loop.PortEvent) {
124 // Extract port number from event.Port (format: "tcp:0.0.0.0:8080")
125 containerPort := tm.extractPortNumber(event.Port)
126 if containerPort == "" {
127 slog.DebugContext(ctx, "Could not extract port number", "port", event.Port)
128 return
129 }
130
131 // Skip common system ports that we don't want to tunnel
132 if tm.shouldSkipPort(containerPort) {
Sean McCullough138ec242025-06-02 22:42:06 +0000133 return
134 }
135
136 switch event.Type {
137 case "opened":
138 tm.createTunnel(ctx, containerPort)
139 case "closed":
140 tm.removeTunnel(ctx, containerPort)
141 default:
142 slog.DebugContext(ctx, "Unknown port event type", "type", event.Type)
143 }
144}
145
146// extractPortNumber extracts port number from ss format like "tcp:0.0.0.0:8080"
147func (tm *TunnelManager) extractPortNumber(portStr string) string {
148 // Expected format: "tcp:0.0.0.0:8080" or "tcp:[::]:8080"
149 // Find the last colon and extract the port
150 for i := len(portStr) - 1; i >= 0; i-- {
151 if portStr[i] == ':' {
152 return portStr[i+1:]
153 }
154 }
155 return ""
156}
157
158// shouldSkipPort returns true for ports we don't want to auto-tunnel
159func (tm *TunnelManager) shouldSkipPort(port string) bool {
160 return skipPorts[port]
161}
162
163// createTunnel creates an SSH tunnel for the given container port
164func (tm *TunnelManager) createTunnel(ctx context.Context, containerPort string) {
165 tm.mu.Lock()
166 // Check if tunnel already exists
167 if _, exists := tm.activeTunnels[containerPort]; exists {
168 tm.mu.Unlock()
169 slog.DebugContext(ctx, "Tunnel already exists for port", "port", containerPort)
170 return
171 }
172
173 // Check if we've reached the maximum number of active tunnels
174 if len(tm.activeTunnels) >= tm.maxActiveTunnels {
175 tm.mu.Unlock()
176 slog.WarnContext(ctx, "Maximum active tunnels reached, skipping port", "port", containerPort, "max", tm.maxActiveTunnels, "active", len(tm.activeTunnels))
177 return
178 }
179 tm.mu.Unlock()
180
181 // Use the same port on host as container for simplicity
182 hostPort := containerPort
183
184 // Create SSH tunnel command: ssh -L hostPort:127.0.0.1:containerPort containerSSHHost
185 tunnelCtx, cancel := context.WithCancel(ctx)
186 cmd := exec.CommandContext(tunnelCtx, "ssh",
187 "-L", fmt.Sprintf("%s:127.0.0.1:%s", hostPort, containerPort),
188 "-N", // Don't execute remote commands
189 "-T", // Don't allocate TTY
190 tm.containerSSHHost,
191 )
192
193 // Start the tunnel
194 if err := cmd.Start(); err != nil {
195 slog.ErrorContext(ctx, "Failed to start SSH tunnel", "port", containerPort, "error", err)
196 cancel()
197 return
198 }
199
200 // Store tunnel info
201 tunnel := &sshTunnel{
202 containerPort: containerPort,
203 hostPort: hostPort,
204 cmd: cmd,
205 cancel: cancel,
206 }
207 tm.mu.Lock()
208 tm.activeTunnels[containerPort] = tunnel
209 tm.mu.Unlock()
210
211 slog.InfoContext(ctx, "Created SSH tunnel", "container_port", containerPort, "host_port", hostPort)
212
213 // Monitor tunnel in background
214 go func() {
215 err := cmd.Wait()
216 tm.mu.Lock()
217 delete(tm.activeTunnels, containerPort)
218 tm.mu.Unlock()
219 if err != nil && tunnelCtx.Err() == nil {
220 slog.ErrorContext(ctx, "SSH tunnel exited with error", "port", containerPort, "error", err)
221 }
222 }()
223}
224
225// removeTunnel removes an SSH tunnel for the given container port
226func (tm *TunnelManager) removeTunnel(ctx context.Context, containerPort string) {
227 tunnel, exists := tm.activeTunnels[containerPort]
228 if !exists {
229 slog.DebugContext(ctx, "No tunnel to remove for port", "port", containerPort)
230 return
231 }
232
233 // Cancel the tunnel context and clean up
234 tunnel.cancel()
235 delete(tm.activeTunnels, containerPort)
236
237 slog.InfoContext(ctx, "Removed SSH tunnel", "container_port", containerPort, "host_port", tunnel.hostPort)
238}
239
240// cleanupAllTunnels stops all active tunnels
241func (tm *TunnelManager) cleanupAllTunnels() {
242 tm.mu.Lock()
243 defer tm.mu.Unlock()
244
245 for port, tunnel := range tm.activeTunnels {
246 tunnel.cancel()
247 delete(tm.activeTunnels, port)
248 }
249}
250
251// GetActiveTunnels returns a list of currently active tunnels
252func (tm *TunnelManager) GetActiveTunnels() map[string]string {
253 tm.mu.Lock()
254 defer tm.mu.Unlock()
255
256 result := make(map[string]string)
257 for containerPort, tunnel := range tm.activeTunnels {
258 result[containerPort] = tunnel.hostPort
259 }
260 return result
261}