| Sean McCullough | 138ec24 | 2025-06-02 22:42:06 +0000 | [diff] [blame] | 1 | package dockerimg |
| 2 | |
import (
	"context"
	"encoding/json"
	"fmt"
	"log/slog"
	"net/http"
	"os/exec"
	"strings"
	"sync"
	"time"

	"sketch.dev/loop"
)
| 15 | |
// skipPorts defines system ports that should not be auto-tunneled.
// These are well-known service ports; forwarding them would either
// shadow host services or serve no purpose (80 is already the sketch UI).
var skipPorts = map[string]bool{
	"22":  true, // SSH
	"80":  true, // HTTP (this is the main sketch web interface)
	"443": true, // HTTPS
	"25":  true, // SMTP
	"53":  true, // DNS
	"110": true, // POP3
	"143": true, // IMAP
	"993": true, // IMAPS
	"995": true, // POP3S
}
| 28 | |
// TunnelManager manages automatic SSH tunnels for container ports.
// It periodically polls the container's /port-events HTTP endpoint and
// opens an "ssh -L" tunnel for each newly listening container port,
// tearing the tunnel down when the port closes.
type TunnelManager struct {
	mu               sync.Mutex            // guards activeTunnels and lastPollTime
	containerURL     string                // HTTP URL to container (e.g., "http://localhost:8080")
	containerSSHHost string                // SSH hostname for container (e.g., "sketch-abcd-efgh")
	activeTunnels    map[string]*sshTunnel // port -> tunnel mapping
	lastPollTime     time.Time             // cursor for the "since" query parameter of /port-events
	maxActiveTunnels int                   // maximum number of concurrent tunnels allowed
}
| 38 | |
// sshTunnel represents an active SSH tunnel.
type sshTunnel struct {
	containerPort string             // port listening inside the container
	hostPort      string             // local host port forwarded to containerPort
	cmd           *exec.Cmd          // the running "ssh -L" process
	cancel        context.CancelFunc // cancels cmd's context, terminating the ssh process
}
| 46 | |
| 47 | // NewTunnelManager creates a new tunnel manager |
| 48 | func NewTunnelManager(containerURL, containerSSHHost string, maxActiveTunnels int) *TunnelManager { |
| 49 | return &TunnelManager{ |
| 50 | containerURL: containerURL, |
| 51 | containerSSHHost: containerSSHHost, |
| 52 | activeTunnels: make(map[string]*sshTunnel), |
| 53 | lastPollTime: time.Now(), |
| 54 | maxActiveTunnels: maxActiveTunnels, |
| 55 | } |
| 56 | } |
| 57 | |
| 58 | // Start begins monitoring port events and managing tunnels |
| 59 | func (tm *TunnelManager) Start(ctx context.Context) { |
| 60 | go func() { |
| 61 | ticker := time.NewTicker(10 * time.Second) // Poll every 10 seconds |
| 62 | defer ticker.Stop() |
| 63 | |
| 64 | for { |
| 65 | select { |
| 66 | case <-ctx.Done(): |
| 67 | tm.cleanupAllTunnels() |
| 68 | return |
| 69 | case <-ticker.C: |
| 70 | tm.pollPortEvents(ctx) |
| 71 | } |
| 72 | } |
| 73 | }() |
| 74 | } |
| 75 | |
| 76 | // pollPortEvents fetches recent port events from container and updates tunnels |
| 77 | func (tm *TunnelManager) pollPortEvents(ctx context.Context) { |
| 78 | // Build URL with since parameter |
| 79 | url := fmt.Sprintf("%s/port-events?since=%s", tm.containerURL, tm.lastPollTime.Format(time.RFC3339)) |
| 80 | |
| 81 | req, err := http.NewRequestWithContext(ctx, "GET", url, nil) |
| 82 | if err != nil { |
| 83 | slog.DebugContext(ctx, "Failed to create port events request", "error", err) |
| 84 | return |
| 85 | } |
| 86 | |
| 87 | resp, err := http.DefaultClient.Do(req) |
| 88 | if err != nil { |
| 89 | slog.DebugContext(ctx, "Failed to fetch port events", "error", err) |
| 90 | return |
| 91 | } |
| 92 | defer resp.Body.Close() |
| 93 | |
| 94 | if resp.StatusCode != http.StatusOK { |
| 95 | slog.DebugContext(ctx, "Port events request failed", "status", resp.StatusCode) |
| 96 | return |
| 97 | } |
| 98 | |
| 99 | var events []loop.PortEvent |
| 100 | if err := json.NewDecoder(resp.Body).Decode(&events); err != nil { |
| 101 | slog.DebugContext(ctx, "Failed to decode port events", "error", err) |
| 102 | return |
| 103 | } |
| 104 | |
| 105 | // Process each event |
| 106 | for _, event := range events { |
| 107 | tm.processPortEvent(ctx, event) |
| 108 | tm.mu.Lock() |
| 109 | // Update last poll time to the latest event timestamp |
| 110 | if event.Timestamp.After(tm.lastPollTime) { |
| 111 | tm.lastPollTime = event.Timestamp |
| 112 | } |
| 113 | tm.mu.Unlock() |
| 114 | } |
| 115 | |
| 116 | // Update poll time even if no events, to avoid re-fetching old events |
| 117 | if len(events) == 0 { |
| 118 | tm.lastPollTime = time.Now() |
| 119 | } |
| 120 | } |
| 121 | |
| 122 | // processPortEvent handles a single port event |
| 123 | func (tm *TunnelManager) processPortEvent(ctx context.Context, event loop.PortEvent) { |
| 124 | // Extract port number from event.Port (format: "tcp:0.0.0.0:8080") |
| 125 | containerPort := tm.extractPortNumber(event.Port) |
| 126 | if containerPort == "" { |
| 127 | slog.DebugContext(ctx, "Could not extract port number", "port", event.Port) |
| 128 | return |
| 129 | } |
| 130 | |
| 131 | // Skip common system ports that we don't want to tunnel |
| 132 | if tm.shouldSkipPort(containerPort) { |
| 133 | slog.DebugContext(ctx, "Skipping system port", "port", containerPort) |
| 134 | return |
| 135 | } |
| 136 | |
| 137 | switch event.Type { |
| 138 | case "opened": |
| 139 | tm.createTunnel(ctx, containerPort) |
| 140 | case "closed": |
| 141 | tm.removeTunnel(ctx, containerPort) |
| 142 | default: |
| 143 | slog.DebugContext(ctx, "Unknown port event type", "type", event.Type) |
| 144 | } |
| 145 | } |
| 146 | |
| 147 | // extractPortNumber extracts port number from ss format like "tcp:0.0.0.0:8080" |
| 148 | func (tm *TunnelManager) extractPortNumber(portStr string) string { |
| 149 | // Expected format: "tcp:0.0.0.0:8080" or "tcp:[::]:8080" |
| 150 | // Find the last colon and extract the port |
| 151 | for i := len(portStr) - 1; i >= 0; i-- { |
| 152 | if portStr[i] == ':' { |
| 153 | return portStr[i+1:] |
| 154 | } |
| 155 | } |
| 156 | return "" |
| 157 | } |
| 158 | |
| 159 | // shouldSkipPort returns true for ports we don't want to auto-tunnel |
| 160 | func (tm *TunnelManager) shouldSkipPort(port string) bool { |
| 161 | return skipPorts[port] |
| 162 | } |
| 163 | |
| 164 | // createTunnel creates an SSH tunnel for the given container port |
| 165 | func (tm *TunnelManager) createTunnel(ctx context.Context, containerPort string) { |
| 166 | tm.mu.Lock() |
| 167 | // Check if tunnel already exists |
| 168 | if _, exists := tm.activeTunnels[containerPort]; exists { |
| 169 | tm.mu.Unlock() |
| 170 | slog.DebugContext(ctx, "Tunnel already exists for port", "port", containerPort) |
| 171 | return |
| 172 | } |
| 173 | |
| 174 | // Check if we've reached the maximum number of active tunnels |
| 175 | if len(tm.activeTunnels) >= tm.maxActiveTunnels { |
| 176 | tm.mu.Unlock() |
| 177 | slog.WarnContext(ctx, "Maximum active tunnels reached, skipping port", "port", containerPort, "max", tm.maxActiveTunnels, "active", len(tm.activeTunnels)) |
| 178 | return |
| 179 | } |
| 180 | tm.mu.Unlock() |
| 181 | |
| 182 | // Use the same port on host as container for simplicity |
| 183 | hostPort := containerPort |
| 184 | |
| 185 | // Create SSH tunnel command: ssh -L hostPort:127.0.0.1:containerPort containerSSHHost |
| 186 | tunnelCtx, cancel := context.WithCancel(ctx) |
| 187 | cmd := exec.CommandContext(tunnelCtx, "ssh", |
| 188 | "-L", fmt.Sprintf("%s:127.0.0.1:%s", hostPort, containerPort), |
| 189 | "-N", // Don't execute remote commands |
| 190 | "-T", // Don't allocate TTY |
| 191 | tm.containerSSHHost, |
| 192 | ) |
| 193 | |
| 194 | // Start the tunnel |
| 195 | if err := cmd.Start(); err != nil { |
| 196 | slog.ErrorContext(ctx, "Failed to start SSH tunnel", "port", containerPort, "error", err) |
| 197 | cancel() |
| 198 | return |
| 199 | } |
| 200 | |
| 201 | // Store tunnel info |
| 202 | tunnel := &sshTunnel{ |
| 203 | containerPort: containerPort, |
| 204 | hostPort: hostPort, |
| 205 | cmd: cmd, |
| 206 | cancel: cancel, |
| 207 | } |
| 208 | tm.mu.Lock() |
| 209 | tm.activeTunnels[containerPort] = tunnel |
| 210 | tm.mu.Unlock() |
| 211 | |
| 212 | slog.InfoContext(ctx, "Created SSH tunnel", "container_port", containerPort, "host_port", hostPort) |
| 213 | |
| 214 | // Monitor tunnel in background |
| 215 | go func() { |
| 216 | err := cmd.Wait() |
| 217 | tm.mu.Lock() |
| 218 | delete(tm.activeTunnels, containerPort) |
| 219 | tm.mu.Unlock() |
| 220 | if err != nil && tunnelCtx.Err() == nil { |
| 221 | slog.ErrorContext(ctx, "SSH tunnel exited with error", "port", containerPort, "error", err) |
| 222 | } |
| 223 | }() |
| 224 | } |
| 225 | |
| 226 | // removeTunnel removes an SSH tunnel for the given container port |
| 227 | func (tm *TunnelManager) removeTunnel(ctx context.Context, containerPort string) { |
| 228 | tunnel, exists := tm.activeTunnels[containerPort] |
| 229 | if !exists { |
| 230 | slog.DebugContext(ctx, "No tunnel to remove for port", "port", containerPort) |
| 231 | return |
| 232 | } |
| 233 | |
| 234 | // Cancel the tunnel context and clean up |
| 235 | tunnel.cancel() |
| 236 | delete(tm.activeTunnels, containerPort) |
| 237 | |
| 238 | slog.InfoContext(ctx, "Removed SSH tunnel", "container_port", containerPort, "host_port", tunnel.hostPort) |
| 239 | } |
| 240 | |
| 241 | // cleanupAllTunnels stops all active tunnels |
| 242 | func (tm *TunnelManager) cleanupAllTunnels() { |
| 243 | tm.mu.Lock() |
| 244 | defer tm.mu.Unlock() |
| 245 | |
| 246 | for port, tunnel := range tm.activeTunnels { |
| 247 | tunnel.cancel() |
| 248 | delete(tm.activeTunnels, port) |
| 249 | } |
| 250 | } |
| 251 | |
| 252 | // GetActiveTunnels returns a list of currently active tunnels |
| 253 | func (tm *TunnelManager) GetActiveTunnels() map[string]string { |
| 254 | tm.mu.Lock() |
| 255 | defer tm.mu.Unlock() |
| 256 | |
| 257 | result := make(map[string]string) |
| 258 | for containerPort, tunnel := range tm.activeTunnels { |
| 259 | result[containerPort] = tunnel.hostPort |
| 260 | } |
| 261 | return result |
| 262 | } |