diff --git a/.agent/workflows/dev-environment.md b/.agent/workflows/dev-environment.md
index 890d17d94..737ef79c5 100644
--- a/.agent/workflows/dev-environment.md
+++ b/.agent/workflows/dev-environment.md
@@ -44,3 +44,20 @@ journalctl -u pulse-hot-dev -f
 - **Hot-dev script**: `/opt/pulse/scripts/hot-dev.sh`
 - **Systemd service**: `/etc/systemd/system/pulse-hot-dev.service`
 - **Makefile targets**: `make dev` or `make dev-hot`
+
+## Encryption Key Monitoring
+
+A watcher service monitors the encryption key file for changes and deletions:
+
+```bash
+# Check if the watcher is running
+systemctl status encryption-key-watcher
+
+# View recent encryption key events
+sudo journalctl -u encryption-key-watcher -n 50
+
+# View events around a specific time
+sudo journalctl -u encryption-key-watcher --since "2025-12-09 14:00" --until "2025-12-09 15:00"
+```
+
+If the key ever goes missing, the logs will show which event occurred and which processes had files open in `/etc/pulse` at the time.
diff --git a/frontend-modern/src/api/hostMetadata.ts b/frontend-modern/src/api/hostMetadata.ts
new file mode 100644
index 000000000..73026c0ca
--- /dev/null
+++ b/frontend-modern/src/api/hostMetadata.ts
@@ -0,0 +1,42 @@
+// Host Metadata API
+import { apiFetchJSON } from '@/utils/apiClient';
+
+export interface HostMetadata {
+  id: string;
+  customUrl?: string;
+  description?: string;
+  tags?: string[];
+  notes?: string[]; // User annotations for AI context
+}
+
+export class HostMetadataAPI {
+  private static baseUrl = '/api/hosts/metadata';
+
+  // Get metadata for a specific host
+  static async getMetadata(hostId: string): Promise<HostMetadata> {
+    return apiFetchJSON(`${this.baseUrl}/${encodeURIComponent(hostId)}`);
+  }
+
+  // Get all host metadata
+  static async getAllMetadata(): Promise<Record<string, HostMetadata>> {
+    return apiFetchJSON(this.baseUrl);
+  }
+
+  // Update metadata for a host
+  static async updateMetadata(
+    hostId: string,
+    metadata: Partial<HostMetadata>,
+  ): Promise<HostMetadata> {
+    return apiFetchJSON(`${this.baseUrl}/${encodeURIComponent(hostId)}`, {
+      method: 'PUT',
+      body: JSON.stringify(metadata),
+    });
+  }
+
+  // Delete metadata for a host
+  static async deleteMetadata(hostId: string): Promise<void> {
+    await apiFetchJSON(`${this.baseUrl}/${encodeURIComponent(hostId)}`, {
+      method: 'DELETE',
+    });
+  }
+}
diff --git a/frontend-modern/src/components/AI/AIChat.tsx b/frontend-modern/src/components/AI/AIChat.tsx
index a2b53f1ab..2ac837f9f 100644
--- a/frontend-modern/src/components/AI/AIChat.tsx
+++ b/frontend-modern/src/components/AI/AIChat.tsx
@@ -546,6 +546,19 @@ export const AIChat: Component = (props) => {
       // Move from pending approvals to completed tool calls
+      const currentMessages = messages();
+      const targetMessage = currentMessages.find((m) => m.id === messageId);
+      const pendingCount = targetMessage?.pendingApprovals?.length || 0;
+      const remainingAfterThis = (targetMessage?.pendingApprovals?.filter((a) => a.toolId !== approval.toolId) || []).length;
+
+      logger.info('[AIChat] Approval processed', {
+        messageId,
+        toolId: approval.toolId,
+        pendingCount,
+        remainingAfterThis,
+        pendingApprovals: targetMessage?.pendingApprovals?.map(a => a.toolId)
+      });
+
       setMessages((prev) =>
         prev.map((m) => {
           if (m.id !== messageId) return m;
@@ -570,10 +583,152 @@
-      if (result.success) {
-        notificationStore.success('Command executed successfully');
+      // No toast for success - the tool output shows the result inline
+      // Only show an error toast for failures, since those might need attention
+      if (!result.success && result.error) {
+        notificationStore.error(result.error);
+      }
+
+      // After the last approval is processed, automatically continue the conversation
+      // This lets the AI analyze the command output and provide a summary
+      if (remainingAfterThis === 0) {
+        logger.info('[AIChat] Last approval processed, triggering auto-continuation');
+        // Small delay to let the UI update first
+        setTimeout(async () => {
+          logger.info('[AIChat] Starting auto-continuation');
+          setIsLoading(true);
+
+          // Build history including the just-executed command
+          const currentMsgs = messages();
+          logger.debug('[AIChat] Building history for continuation', { messageCount: currentMsgs.length });
+
+          const historyForContinuation = currentMsgs
+            .filter((m) => !m.isStreaming)
+            .filter((m) => m.content || (m.toolCalls && m.toolCalls.length > 0))
+            .map((m) => {
+              let content = m.content || '';
+              if (m.role === 'assistant' && m.toolCalls && m.toolCalls.length > 0) {
+                const toolSummary = m.toolCalls
+                  .map((tc) => `Command: ${tc.input}\nOutput: ${tc.output}`)
+                  .join('\n\n');
+                content = toolSummary + (content ? '\n\n' + content : '');
+              }
+              return { role: m.role, content };
+            })
+            .filter((m) => m.content);
+
+          logger.debug('[AIChat] History for continuation built', { historyLength: historyForContinuation.length });
+
+          // Add a hidden continuation prompt - the AI will see it but the user won't
+          const continuationPrompt = 'Continue analyzing the command output above and provide a summary.';
+
+          // Create the streaming assistant response message (no visible user message)
+          // Show "Analyzing..." as initial content so the user sees inline feedback
+          const assistantId = generateId();
+          const streamingMessage: Message = {
+            id: assistantId,
+            role: 'assistant',
+            content: '*Analyzing results...*',
+            timestamp: new Date(),
+            isStreaming: true,
+            pendingTools: [],
+            pendingApprovals: [],
+            toolCalls: [],
+          };
+          setMessages((prev) => [...prev, streamingMessage]);
+
+          try {
+            logger.info('[AIChat] Calling executeStream for continuation');
+            await AIAPI.executeStream(
+              {
+                prompt: continuationPrompt,
+                target_type: targetType(),
+                target_id: targetId(),
+                context: contextData(),
+                history: historyForContinuation,
+              },
+              (event: AIStreamEvent) => {
+                logger.debug('[AIChat] Continuation event received', { type: event.type });
+                setMessages((prev) =>
+                  prev.map((msg) => {
+                    if (msg.id !== assistantId) return msg;
+                    switch (event.type) {
+                      case 'content':
+                        return { ...msg, content: event.data as string, isStreaming: false };
+                      case 'done':
+                        return { ...msg, isStreaming: false };
+                      case 'error':
+                        return { ...msg, content: `Error: ${event.data}`, isStreaming: false };
+                      case 'thinking':
+                        // Ignore thinking events for now
+                        return msg;
+                      case 'processing':
+                        // Ignore processing events
+                        return msg;
+                      case 'tool_start': {
+                        const data = event.data as { name: string; input: string };
+                        return {
+                          ...msg,
+                          pendingTools: [...(msg.pendingTools || []), { name: data.name, input: data.input }],
+                        };
+                      }
+                      case 'tool_end': {
+                        const data = event.data as { name: string; input: string; output: string; success: boolean };
+                        const pendingTools = msg.pendingTools || [];
+                        const matchingIndex = pendingTools.findIndex((t) => t.name === data.name);
+                        const updatedPending = matchingIndex >= 0
+                          ? [...pendingTools.slice(0, matchingIndex), ...pendingTools.slice(matchingIndex + 1)]
+                          : pendingTools;
+                        return {
+                          ...msg,
+                          pendingTools: updatedPending,
+                          toolCalls: [...(msg.toolCalls || []), {
+                            name: data.name,
+                            input: data.input,
+                            output: data.output,
+                            success: data.success,
+                          }],
+                        };
+                      }
+                      case 'approval_needed': {
+                        const data = event.data as AIStreamApprovalNeededData;
+                        logger.info('[AIChat] Approval needed in continuation', { command: data.command });
+                        return {
+                          ...msg,
+                          pendingApprovals: [...(msg.pendingApprovals || []), {
+                            command: data.command,
+                            toolId: data.tool_id,
+                            toolName: data.tool_name,
+                            runOnHost: data.run_on_host,
+                            targetHost: data.target_host,
+                          }],
+                          isStreaming: false, // Stop streaming when approval is needed
+                        };
+                      }
+                      default:
+                        logger.debug('[AIChat] Unhandled continuation event', { type: event.type, event });
+                        return msg;
+                    }
+                  })
+                );
+              }
+            );
+            logger.info('[AIChat] Continuation executeStream completed');
+          } catch (err) {
+            logger.error('[AIChat] Failed to continue after approval:', err);
+            setMessages((prev) =>
+              prev.map((msg) =>
+                msg.id === assistantId
+                  ? { ...msg, content: 'Failed to analyze results.', isStreaming: false }
+                  : msg
+              )
+            );
+          } finally {
+            setIsLoading(false);
+          }
+        }, 200);
       } else {
-        notificationStore.error(result.error || 'Command failed');
+        logger.debug('[AIChat] Approvals remaining, not triggering continuation', { remainingAfterThis });
       }
     } catch (error) {
       logger.error('[AIChat] Failed to execute approved command:', error);
diff --git a/frontend-modern/src/components/Docker/DockerUnifiedTable.tsx b/frontend-modern/src/components/Docker/DockerUnifiedTable.tsx
index 5f2dc24f9..87dad0f21 100644
--- a/frontend-modern/src/components/Docker/DockerUnifiedTable.tsx
+++ b/frontend-modern/src/components/Docker/DockerUnifiedTable.tsx
@@ -1195,13 +1195,10 @@ const DockerContainerRow: Component<{
+
                 {container.name || container.id}
@@ -1231,6 +1228,17 @@
+              {/* Edit URL button - shows on hover */}
+
+
                 {service.name || service.id || 'Service'}
@@ -2102,6 +2107,17 @@ const DockerServiceRow: Component<{
+              {/* Edit URL button - shows on hover */}
+
                 Stack: {service.stack}
diff --git a/frontend-modern/src/components/Hosts/HostsOverview.tsx b/frontend-modern/src/components/Hosts/HostsOverview.tsx
index dfd6da0ae..eb08cf438 100644
--- a/frontend-modern/src/components/Hosts/HostsOverview.tsx
+++ b/frontend-modern/src/components/Hosts/HostsOverview.tsx
@@ -1,5 +1,5 @@
 import type { Component } from 'solid-js';
-import { For, Show, createMemo, createSignal, onMount, onCleanup } from 'solid-js';
+import { For, Show, createMemo, createSignal, createEffect, onMount, onCleanup } from 'solid-js';
 import { Portal } from 'solid-js/web';
 import { useNavigate } from '@solidjs/router';
 import type { Host, HostRAIDArray } from '@/types/api';
@@ -19,6 +19,8 @@ import { useColumnVisibility } from '@/hooks/useColumnVisibility';
 import { aiChatStore } from '@/stores/aiChat';
 import { STORAGE_KEYS } from '@/utils/localStorage';
 import { useResourcesAsLegacy } from '@/hooks/useResources';
+import { HostMetadataAPI, type HostMetadata } from '@/api/hostMetadata';
+import { logger } from '@/utils/logger';
 
 // Column definition for hosts table
 export interface HostColumnDef {
@@ -492,6 +494,64 @@ export const HostsOverview: Component = () => {
   const visibleColumnIds = createMemo(() => visibleColumns().map(c => c.id));
   const isColVisible = (colId: string) => visibleColumnIds().includes(colId);
 
+  // Host metadata management (for custom URLs)
+  const [hostMetadata, setHostMetadata] = createSignal<Record<string, HostMetadata>>({});
+  const [hostMetadataVersion, setHostMetadataVersion] = createSignal(0);
+
+  // Load host metadata on mount
+  createEffect(() => {
+    HostMetadataAPI.getAllMetadata()
+      .then(data => {
+        setHostMetadata(data || {});
+        logger.debug('Loaded host metadata', { count: Object.keys(data || {}).length });
+      })
+      .catch(err => {
+        logger.warn('Failed to load host metadata', { error: err });
+      });
+  });
+
+  // Get the custom URL for a host
+  const getHostCustomUrl = (hostId: string): string | undefined => {
+    // Access version to trigger reactivity when metadata changes
+    hostMetadataVersion();
+    return hostMetadata()[hostId]?.customUrl;
+  };
+
+  // Update the custom URL for a host
+  const updateHostCustomUrl = async (hostId: string, url: string): Promise<boolean> => {
+    try {
+      await HostMetadataAPI.updateMetadata(hostId, { customUrl: url });
+      setHostMetadata(prev => ({
+        ...prev,
+        [hostId]: { ...prev[hostId], id: hostId, customUrl: url }
+      }));
+      setHostMetadataVersion(v => v + 1);
+      logger.info('Updated host custom URL', { hostId, url });
+      return true;
+    } catch (err) {
+      logger.error('Failed to update host custom URL', { hostId, url, error: err });
+      return false;
+    }
+  };
+
+  // Delete the custom URL for a host
+  const deleteHostCustomUrl = async (hostId: string): Promise<boolean> => {
+    try {
+      await HostMetadataAPI.deleteMetadata(hostId);
+      setHostMetadata(prev => {
+        const next = { ...prev };
+        delete next[hostId];
+        return next;
+      });
+      setHostMetadataVersion(v => v + 1);
+      logger.info('Deleted host custom URL', { hostId });
+      return true;
+    } catch (err) {
+      logger.error('Failed to delete host custom URL', { hostId, error: err });
+      return false;
+    }
+  };
+
   const handleSort = (key: SortKey) => {
     if (sortKey() === key) {
       setSortDirection(sortDirection() === 'asc' ? 'desc' : 'asc');
@@ -797,7 +857,7 @@
-              {(host) => <HostRow host={host} isColVisible={isColVisible} isMobile={isMobile} getDiskStats={getDiskStats} />}
+              {(host) => <HostRow host={host} isColVisible={isColVisible} isMobile={isMobile} customUrl={getHostCustomUrl(host.id)} onUpdateCustomUrl={updateHostCustomUrl} onDeleteCustomUrl={deleteHostCustomUrl} />}
@@ -846,8 +906,10 @@ interface HostRowProps {
   host: Host;
   isColVisible: (colId: string) => boolean;
   isMobile: () => boolean;
-  getDiskStats: (host: Host) => { percent: number; used: number; total: number };
+  customUrl?: string;
+  onUpdateCustomUrl: (hostId: string, url: string) => Promise<boolean>;
+  onDeleteCustomUrl: (hostId: string) => Promise<boolean>;
 }
 
 const HostRow: Component<HostRowProps> = (props) => {
@@ -856,6 +918,43 @@
   // Check if this host is in AI context
   const isInAIContext = createMemo(() => aiChatStore.enabled && aiChatStore.hasContextItem(host.id));
 
+  // URL editing state
+  const [isEditingUrl, setIsEditingUrl] = createSignal(false);
+  const [editingUrlValue, setEditingUrlValue] = createSignal('');
+  const [isSavingUrl, setIsSavingUrl] = createSignal(false);
+  let urlInputRef: HTMLInputElement | undefined;
+
+  // Start editing the URL
+  const startEditingUrl = (e: MouseEvent) => {
+    e.stopPropagation();
+    setEditingUrlValue(props.customUrl || '');
+    setIsEditingUrl(true);
+    // Focus the input after render
+    setTimeout(() => urlInputRef?.focus(), 0);
+  };
+
+  // Save the URL
+  const saveUrl = async () => {
+    const url = editingUrlValue().trim();
+    setIsSavingUrl(true);
+    try {
+      if (url) {
+        await props.onUpdateCustomUrl(host.id, url);
+      } else {
+        await props.onDeleteCustomUrl(host.id);
+      }
+      setIsEditingUrl(false);
+    } finally {
+      setIsSavingUrl(false);
+    }
+  };
+
+  // Cancel editing
+  const cancelEditingUrl = () => {
+    setIsEditingUrl(false);
+    setEditingUrlValue('');
+  };
+
   // Build context for AI - includes routing fields
   const buildHostContext = (): Record<string, unknown> => ({
     hostName: host.displayName || host.hostname,
@@ -873,7 +972,7 @@
   // Handle row click - toggle AI context selection
   const handleRowClick = (event: MouseEvent) => {
     const target = event.target as HTMLElement;
-    if (target.closest('a, button, [data-prevent-toggle]')) {
+    if (target.closest('a, button, [data-prevent-toggle], [data-url-editor]')) {
       return;
     }
@@ -914,31 +1013,119 @@
         ariaLabel={hostStatus().label}
         size="xs"
       />
-
-
-
-              {host.displayName || host.hostname || host.id}
-
-
-
-              {host.hostname}
-
-
-
-
-              Updated {formatRelativeTime(host.lastSeen!)}
-
-
+
+
+
+              {host.displayName || host.hostname || host.id}
+
+
+
+              {host.hostname}
+
+
+
+
+              Updated {formatRelativeTime(host.lastSeen!)}
+
+
+
+              {/* Custom URL link */}
+
+              event.stopPropagation()} >
+
+
+              {/* Edit URL button - shows on hover */}
+
+              {/* AI context indicator */}
+
+                  }
+                >
+                  {/* URL editing mode */}
+                  <input
+                    ref={urlInputRef}
+                    value={editingUrlValue()}
+                    onInput={(e) => setEditingUrlValue(e.currentTarget.value)}
+                    onKeyDown={(e) => {
+                      if (e.key === 'Enter') {
+                        e.preventDefault();
+                        saveUrl();
+                      } else if (e.key === 'Escape') {
+                        e.preventDefault();
+                        cancelEditingUrl();
+                      }
+                    }}
+                    onClick={(e) => e.stopPropagation()}
+                    placeholder="https://192.168.1.100:8080"
+                    class="w-40 px-2 py-0.5 text-xs border border-blue-500 rounded bg-white dark:bg-gray-700 text-gray-900 dark:text-gray-100 focus:outline-none focus:ring-1 focus:ring-blue-500"
+                    disabled={isSavingUrl()}
+                  />
+
-              {/* AI context indicator */}
-
-
-
-
-
-
-
+
diff --git a/internal/ai/resource_context.go b/internal/ai/resource_context.go
index 12cf4e95c..9e415c065 100644
--- a/internal/ai/resource_context.go
+++ b/internal/ai/resource_context.go
@@ -22,6 +22,9 @@ type ResourceProvider interface {
     GetTopByDisk(limit int, types []resources.ResourceType) []resources.Resource
     GetRelated(resourceID string) map[string][]resources.Resource
     GetResourceSummary() resources.ResourceSummary
+
+    // AI routing support
+    FindContainerHost(containerNameOrID string) string
 }
 
 // SetResourceProvider sets the resource provider for unified infrastructure context.
diff --git a/internal/ai/routing.go b/internal/ai/routing.go
index c97ddadfb..3907d4622 100644
--- a/internal/ai/routing.go
+++ b/internal/ai/routing.go
@@ -59,11 +59,10 @@ func (e *RoutingError) ForAI() string {
 // This is the authoritative routing function that should be used for all command execution.
 //
 // Routing priority:
-//  1. VMID lookup from state (most reliable for pct/qm commands)
-//  2. Explicit "node" field in context
-//  3. Explicit "guest_node" field in context
-//  4. "hostname" field for host targets
-//  5. VMID extracted from target ID (last resort)
+//  1. VMID lookup from command (for pct/qm commands)
+//  2. Unified ResourceProvider lookup (PRIMARY - uses the new infrastructure model)
+//  3. Explicit context fields (FALLBACK - for backwards compatibility)
+//  4. VMID extracted from target ID
 //
 // Agent matching is EXACT only - no substring matching to prevent false positives.
 // If no direct match, cluster peer routing is attempted.
@@ -140,51 +139,58 @@ func (s *Service) routeToAgent(req ExecuteRequest, command string, agents []agen
         }
     }
 
-    // Step 2: Try context-based routing (explicit node information)
+    // Step 2: Try unified ResourceProvider lookup (PRIMARY method for workloads)
+    // This uses the new redesigned infrastructure model, which knows the relationships
+    // between all resources (containers → hosts, VMs → nodes, etc.)
     if result.TargetNode == "" {
-        if node, ok := req.Context["node"].(string); ok && node != "" {
-            result.TargetNode = strings.ToLower(node)
-            result.RoutingMethod = "context_node"
-            log.Debug().
-                Str("node", node).
-                Str("command", command).
-                Msg("Routing via explicit 'node' in context")
-        } else if node, ok := req.Context["guest_node"].(string); ok && node != "" {
-            result.TargetNode = strings.ToLower(node)
-            result.RoutingMethod = "context_guest_node"
-            log.Debug().
-                Str("guest_node", node).
-                Str("command", command).
-                Msg("Routing via 'guest_node' in context")
-        } else if req.TargetType == "host" {
-            // Check multiple possible keys for hostname - frontend uses host_name
-            hostname := ""
-            if h, ok := req.Context["hostname"].(string); ok && h != "" {
-                hostname = h
-            } else if h, ok := req.Context["host_name"].(string); ok && h != "" {
-                hostname = h
+        s.mu.RLock()
+        rp := s.resourceProvider
+        s.mu.RUnlock()
+
+        if rp != nil {
+            // Try to find the host for this workload
+            resourceName := ""
+            if name, ok := req.Context["containerName"].(string); ok && name != "" {
+                resourceName = name
+            } else if name, ok := req.Context["name"].(string); ok && name != "" {
+                resourceName = name
+            } else if name, ok := req.Context["guestName"].(string); ok && name != "" {
+                resourceName = name
             }
-            if hostname != "" {
-                result.TargetNode = strings.ToLower(hostname)
-                result.RoutingMethod = "context_hostname"
-                log.Debug().
-                    Str("hostname", hostname).
-                    Str("command", command).
-                    Msg("Routing via hostname in context")
-            } else {
-                // For host target type with no node info, log a warning
-                // This is a common source of routing issues
-                log.Warn().
-                    Str("target_type", req.TargetType).
-                    Str("target_id", req.TargetID).
-                    Str("command", command).
-                    Msg("Host command with no node/hostname in context - may route to wrong agent")
-                result.Warnings = append(result.Warnings,
-                    "No target host specified in context. Use target_host parameter for reliable routing.")
+
+            if resourceName != "" {
+                if host := rp.FindContainerHost(resourceName); host != "" {
+                    result.TargetNode = strings.ToLower(host)
+                    result.RoutingMethod = "resource_provider_lookup"
+                    log.Info().
+                        Str("resource_name", resourceName).
+                        Str("host", host).
+                        Str("target_type", req.TargetType).
+                        Str("command", command).
+                        Msg("Routing via unified ResourceProvider")
+                }
             }
         }
     }
 
+    // Step 3: Fall back to explicit context fields (backwards compatibility)
+    // These are checked in order of specificity
+    if result.TargetNode == "" {
+        // Try the most specific fields first
+        hostFields := []string{"node", "host", "guest_node", "hostname", "host_name", "target_host"}
+        for _, field := range hostFields {
+            if value, ok := req.Context[field].(string); ok && value != "" {
+                result.TargetNode = strings.ToLower(value)
+                result.RoutingMethod = "context_" + field
+                log.Debug().
+                    Str("field", field).
+                    Str("value", value).
+                    Str("command", command).
+                    Msg("Routing via context field (fallback)")
+                break
+            }
+        }
+    }
+
     // Step 3: Extract VMID from target ID and look up in state
     if result.TargetNode == "" && req.TargetID != "" {
diff --git a/internal/ai/service.go b/internal/ai/service.go
index 3611fbba9..583fa1d38 100644
--- a/internal/ai/service.go
+++ b/internal/ai/service.go
@@ -215,6 +215,7 @@ func (s *Service) LoadConfig() error {
     log.Info().
         Str("provider", cfg.Provider).
         Str("model", cfg.GetModel()).
+        Bool("autonomous_mode", cfg.AutonomousMode).
         Msg("AI service initialized")
 
     return nil
@@ -397,6 +398,50 @@ func isDangerousCommand(cmd string) bool {
     }
 
     if dangerousCommands[baseCmd] {
+        // Special case: allow read-only apt/apt-get operations
+        if baseCmd == "apt" || baseCmd == "apt-get" {
+            // First, check if it's a dry-run/simulate command (safe even for upgrade/install)
+            for _, part := range parts {
+                if part == "--dry-run" || part == "-s" || part == "--simulate" || part == "--just-print" {
+                    return false // Dry-run is always safe
+                }
+            }
+            // Check for inherently read-only operations
+            safeAptOps := []string{"update", "list", "show", "search", "policy", "madison", "depends", "rdepends", "changelog"}
+            for _, safeOp := range safeAptOps {
+                if len(parts) > 1 && parts[1] == safeOp {
+                    return false // Safe read-only operation
+                }
+                // Also handle sudo apt
+                if len(parts) > 2 && parts[0] == "sudo" && parts[2] == safeOp {
+                    return false
+                }
+            }
+        }
+        // Special case: allow read-only systemctl operations
+        if baseCmd == "systemctl" {
+            safeSystemctlOps := []string{"status", "show", "list-units", "list-unit-files", "is-active", "is-enabled", "is-failed", "cat"}
+            for _, safeOp := range safeSystemctlOps {
+                if len(parts) > 1 && parts[1] == safeOp {
+                    return false
+                }
+                if len(parts) > 2 && parts[0] == "sudo" && parts[2] == safeOp {
+                    return false
+                }
+            }
+        }
+        // Special case: allow read-only dpkg operations
+        if baseCmd == "dpkg" {
+            safeDpkgOps := []string{"-l", "--list", "-L", "--listfiles", "-s", "--status", "-S", "--search", "-p", "--print-avail", "--get-selections"}
+            for _, safeOp := range safeDpkgOps {
+                if len(parts) > 1 && parts[1] == safeOp {
+                    return false
+                }
+                if len(parts) > 2 && parts[0] == "sudo" && parts[2] == safeOp {
+                    return false
+                }
+            }
+        }
         return true
     }
@@ -860,16 +905,30 @@ Always execute the commands rather than telling the user how to do it.`
         })
 
         // Execute each tool call and add results
+        // Track if any command needs approval - if so, we'll stop the loop after processing
+        anyNeedsApproval := false
         for _, tc := range resp.ToolCalls {
             toolInput := s.getToolInputDisplay(tc)
 
-            // Check if this command needs approval
+            // Check if this command needs approval
             needsApproval := false
             if tc.Name == "run_command" {
                 cmd, _ := tc.Input["command"].(string)
                 runOnHost, _ := tc.Input["run_on_host"].(bool)
                 targetHost, _ := tc.Input["target_host"].(string)
 
+                // If the AI didn't specify target_host, try to get it from the request context
+                // This is crucial for proper routing when the command is approved
+                if targetHost == "" {
+                    if node, ok := req.Context["node"].(string); ok && node != "" {
+                        targetHost = node
+                    } else if node, ok := req.Context["hostname"].(string); ok && node != "" {
+                        targetHost = node
+                    } else if node, ok := req.Context["host_name"].(string); ok && node != "" {
+                        targetHost = node
+                    }
+                }
+
                 isAuto := s.IsAutonomous()
                 isReadOnly := isReadOnlyCommand(cmd)
                 isDangerous := isDangerousCommand(cmd)
@@ -881,10 +940,13 @@
                     Str("target_host", targetHost).
                     Msg("Checking command approval")
 
-                // Dangerous commands ALWAYS need approval, even in autonomous mode
-                // In non-autonomous mode, non-read-only commands also need approval
-                if isDangerous || (!isAuto && !isReadOnly) {
+                // In autonomous mode, NO commands need approval - full trust
+                // In non-autonomous mode:
+                //   - Dangerous commands always need approval
+                //   - Non-read-only commands need approval
+                if !isAuto && (isDangerous || !isReadOnly) {
                     needsApproval = true
+                    anyNeedsApproval = true
                     // Send approval needed event
                     callback(StreamEvent{
                         Type: "approval_needed",
@@ -904,20 +966,11 @@
             var execution ToolExecution
             if needsApproval {
-                // Don't execute - tell the AI the command needs user approval
-                // The approval button has been sent to the frontend - tell AI to direct user to it
-                result = fmt.Sprintf("COMMAND_BLOCKED: This command (%s) requires user approval and was NOT executed. "+
-                    "An approval button has been displayed to the user in the chat. "+
-                    "DO NOT attempt to run this command again. "+
-                    "Tell the user to click the 'Run' button that appeared above to execute the command, "+
-                    "or explain what the command does if they need help deciding.", toolInput)
-                execution = ToolExecution{
-                    Name:    tc.Name,
-                    Input:   toolInput,
-                    Output:  result,
-                    Success: false,
-                }
-                toolExecutions = append(toolExecutions, execution)
+                // Don't execute - the command needs user approval
+                // We'll break out of the loop after processing all tool calls
+                // Note: We don't add to toolExecutions here because the approval_needed event
+                // already tells the frontend to show the approval UI
+                result = fmt.Sprintf("Awaiting user approval: %s", toolInput)
             } else {
                 // Stream tool start event
                 callback(StreamEvent{
@@ -960,6 +1013,20 @@
                 },
             })
         }
+
+        // If any command needed approval, stop the agentic loop here.
+        // Don't call the AI again with "COMMAND_BLOCKED" results - this causes duplicate
+        // approval requests and confusing "click the button" messages.
+        // The frontend will show approval buttons, and user action will continue the conversation.
+        if anyNeedsApproval {
+            log.Info().
+                Int("pending_approvals", len(resp.ToolCalls)).
+                Int("iteration", iteration).
+                Msg("Stopping AI loop - commands need user approval")
+            // Use the AI's current response as the final content (if any)
+            // This preserves any explanation the AI provided before requesting the command
+            break
+        }
     }
 
     // Stream the final content
diff --git a/internal/api/ai_handlers.go b/internal/api/ai_handlers.go
index d78ec8797..d5854552a 100644
--- a/internal/api/ai_handlers.go
+++ b/internal/api/ai_handlers.go
@@ -469,9 +469,10 @@ func (h *AISettingsHandler) HandleExecuteStream(w http.ResponseWriter, r *http.Request) {
     // Flush headers immediately
     flusher.Flush()
 
-    // Create context with timeout (5 minutes for complex analysis with multiple tool calls)
+    // Create context with timeout (15 minutes for complex analysis with multiple tool calls)
     // Use background context to avoid browser disconnect canceling the request
-    ctx, cancel := context.WithTimeout(context.Background(), 300*time.Second)
+    // DeepSeek reasoning models + multiple tool executions can easily take 5+ minutes
+    ctx, cancel := context.WithTimeout(context.Background(), 900*time.Second)
     defer cancel()
 
     // Set up heartbeat to keep connection alive during long tool executions
@@ -660,8 +661,8 @@ func (h *AISettingsHandler) HandleRunCommand(w http.ResponseWriter, r *http.Request) {
         Str("target_host", req.TargetHost).
         Msg("Executing approved command")
 
-    // Execute with timeout
-    ctx, cancel := context.WithTimeout(r.Context(), 120*time.Second)
+    // Execute with timeout (5 minutes for long-running commands)
+    ctx, cancel := context.WithTimeout(r.Context(), 300*time.Second)
     defer cancel()
 
     resp, err := h.aiService.RunCommand(ctx, ai.RunCommandRequest{
diff --git a/internal/api/host_metadata.go b/internal/api/host_metadata.go
new file mode 100644
index 000000000..b84c8ea41
--- /dev/null
+++ b/internal/api/host_metadata.go
@@ -0,0 +1,159 @@
+package api
+
+import (
+    "encoding/json"
+    "net/http"
+    "net/url"
+    "strings"
+
+    "github.com/rcourtman/pulse-go-rewrite/internal/config"
+    "github.com/rs/zerolog/log"
+)
+
+// HostMetadataHandler handles host metadata operations
+type HostMetadataHandler struct {
+    store *config.HostMetadataStore
+}
+
+// NewHostMetadataHandler creates a new host metadata handler
+func NewHostMetadataHandler(dataPath string) *HostMetadataHandler {
+    return &HostMetadataHandler{
+        store: config.NewHostMetadataStore(dataPath),
+    }
+}
+
+// HandleGetMetadata retrieves metadata for a specific host or all hosts
+func (h *HostMetadataHandler) HandleGetMetadata(w http.ResponseWriter, r *http.Request) {
+    if r.Method != http.MethodGet {
+        http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
+        return
+    }
+
+    // Check if a specific host is being requested
+    path := r.URL.Path
+    // Handle both /api/hosts/metadata and /api/hosts/metadata/
+    if path == "/api/hosts/metadata" || path == "/api/hosts/metadata/" {
+        // Get all metadata
+        w.Header().Set("Content-Type", "application/json")
+        allMeta := h.store.GetAll()
+        if allMeta == nil {
+            // Return an empty object instead of null
+            json.NewEncoder(w).Encode(make(map[string]*config.HostMetadata))
+        } else {
+            json.NewEncoder(w).Encode(allMeta)
+        }
+        return
+    }
+
+    // Get the specific host ID from the path
+    hostID := strings.TrimPrefix(path, "/api/hosts/metadata/")
+
+    w.Header().Set("Content-Type", "application/json")
+
+    if hostID != "" {
+        // Get specific host metadata
+        meta := h.store.Get(hostID)
+        if meta == nil {
+            // Return empty metadata instead of a 404
+            json.NewEncoder(w).Encode(&config.HostMetadata{ID: hostID})
+        } else {
+            json.NewEncoder(w).Encode(meta)
+        }
+    } else {
+        // This shouldn't happen with the current routing, but handle it anyway
+        http.Error(w, "Invalid request path", http.StatusBadRequest)
+    }
+}
+
+// HandleUpdateMetadata updates metadata for a host
+func (h *HostMetadataHandler) HandleUpdateMetadata(w http.ResponseWriter, r *http.Request) {
+    if r.Method != http.MethodPut && r.Method != http.MethodPost {
+        http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
+        return
+    }
+
+    hostID := strings.TrimPrefix(r.URL.Path, "/api/hosts/metadata/")
+    if hostID == "" || hostID == "metadata" {
+        http.Error(w, "Host ID required", http.StatusBadRequest)
+        return
+    }
+
+    // Limit the request body to 16KB to prevent memory exhaustion
+    r.Body = http.MaxBytesReader(w, r.Body, 16*1024)
+
+    var meta config.HostMetadata
+    if err := json.NewDecoder(r.Body).Decode(&meta); err != nil {
+        http.Error(w, "Invalid request body", http.StatusBadRequest)
+        return
+    }
+
+    // Validate the URL if provided
+    if meta.CustomURL != "" {
+        // Parse and validate the URL
+        parsedURL, err := url.Parse(meta.CustomURL)
+        if err != nil {
+            http.Error(w, "Invalid URL format: "+err.Error(), http.StatusBadRequest)
+            return
+        }
+
+        // Check the scheme
+        if parsedURL.Scheme != "http" && parsedURL.Scheme != "https" {
+            http.Error(w, "URL must use http:// or https:// scheme", http.StatusBadRequest)
+            return
+        }
+
+        // Check that a host is present and valid
+        if parsedURL.Host == "" {
+            http.Error(w, "Invalid URL: missing host/domain (e.g., use https://192.168.1.100:8006 or https://myhost.local)", http.StatusBadRequest)
+            return
+        }
+
+        // Check for incomplete URLs like "https://host."
+        if strings.HasSuffix(parsedURL.Host, ".") && !strings.Contains(parsedURL.Host, "..") {
+            http.Error(w, "Incomplete URL: '"+meta.CustomURL+"' - please enter a complete domain or IP address", http.StatusBadRequest)
+            return
+        }
+    }
+
+    if err := h.store.Set(hostID, &meta); err != nil {
+        log.Error().Err(err).Str("hostID", hostID).Msg("Failed to save host metadata")
+        // Provide a more specific error message
+        errMsg := "Failed to save metadata"
+        if strings.Contains(err.Error(), "permission") {
+            errMsg = "Permission denied - check file permissions"
+        } else if strings.Contains(err.Error(), "no space") {
+            errMsg = "Disk full - cannot save metadata"
+        }
+        http.Error(w, errMsg, http.StatusInternalServerError)
+        return
+    }
+
+    log.Info().Str("hostID", hostID).Str("url", meta.CustomURL).Msg("Updated host metadata")
+
+    w.Header().Set("Content-Type", "application/json")
+    json.NewEncoder(w).Encode(&meta)
+}
+
+// HandleDeleteMetadata removes metadata for a host
+func (h *HostMetadataHandler) HandleDeleteMetadata(w http.ResponseWriter, r *http.Request) {
+    if r.Method != http.MethodDelete {
+        http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
+        return
+    }
+
+    hostID := strings.TrimPrefix(r.URL.Path, "/api/hosts/metadata/")
+    if hostID == "" || hostID == "metadata" {
+        http.Error(w, "Host ID required", http.StatusBadRequest)
+        return
+    }
+
+    if err := h.store.Delete(hostID); err != nil {
+        log.Error().Err(err).Str("hostID", hostID).Msg("Failed to delete host metadata")
+        http.Error(w, "Failed to delete metadata", http.StatusInternalServerError)
+        return
+    }
+
+    log.Info().Str("hostID", hostID).Msg("Deleted host metadata")
+
+    w.WriteHeader(http.StatusNoContent)
+}
diff --git a/internal/api/router.go b/internal/api/router.go
index 89674b841..4fdfc5640 100644
--- a/internal/api/router.go
+++ b/internal/api/router.go
@@ -178,6 +178,7 @@ func (r *Router) setupRoutes() {
     r.notificationQueueHandlers = NewNotificationQueueHandlers(r.monitor)
     guestMetadataHandler := NewGuestMetadataHandler(r.config.DataPath)
     dockerMetadataHandler := NewDockerMetadataHandler(r.config.DataPath)
+    hostMetadataHandler := NewHostMetadataHandler(r.config.DataPath)
     r.configHandlers = NewConfigHandlers(r.config, r.monitor, r.reloadFunc, r.wsHub, guestMetadataHandler, r.reloadSystemSettings)
     updateHandlers := NewUpdateHandlers(r.updateManager, r.updateHistory)
     r.dockerAgentHandlers = NewDockerAgentHandlers(r.monitor, r.wsHub)
@@ -276,6 +277,30 @@
         }
     }))
 
+    // Host metadata routes
+    r.mux.HandleFunc("/api/hosts/metadata", RequireAuth(r.config, RequireScope(config.ScopeMonitoringRead, hostMetadataHandler.HandleGetMetadata)))
+    r.mux.HandleFunc("/api/hosts/metadata/", RequireAuth(r.config, func(w http.ResponseWriter, req *http.Request) {
+        switch req.Method {
+        case http.MethodGet:
+            if !ensureScope(w, req, config.ScopeMonitoringRead) {
+                return
+            }
+            hostMetadataHandler.HandleGetMetadata(w, req)
+        case http.MethodPut, http.MethodPost:
+            if !ensureScope(w, req, config.ScopeMonitoringWrite) {
+                return
+            }
+            hostMetadataHandler.HandleUpdateMetadata(w, req)
+        case http.MethodDelete:
+            if !ensureScope(w, req, config.ScopeMonitoringWrite) {
+                return
+            }
+            hostMetadataHandler.HandleDeleteMetadata(w, req)
+        default:
+            http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
+        }
+    }))
+
     // Update routes
     r.mux.HandleFunc("/api/updates/check", RequireAdmin(r.config, RequireScope(config.ScopeSettingsRead, updateHandlers.HandleCheckUpdates)))
     r.mux.HandleFunc("/api/updates/apply", RequireAdmin(r.config, RequireScope(config.ScopeSettingsWrite, updateHandlers.HandleApplyUpdate)))
diff --git a/internal/config/ai.go b/internal/config/ai.go
index 40bdf4566..28e1073e5 100644
--- a/internal/config/ai.go
+++ b/internal/config/ai.go
@@ -43,7 +43,7 @@ const (
     DefaultAIModelAnthropic = "claude-opus-4-5-20251101"
     DefaultAIModelOpenAI    = "gpt-4o"
     DefaultAIModelOllama    = "llama3"
-    DefaultAIModelDeepSeek  = "deepseek-reasoner"
+    DefaultAIModelDeepSeek  = "deepseek-chat" // V3.2 with tool-use support
     DefaultOllamaBaseURL    = "http://localhost:11434"
     DefaultDeepSeekBaseURL  = "https://api.deepseek.com/chat/completions"
 )
diff --git a/internal/config/host_metadata.go b/internal/config/host_metadata.go
new file mode 100644
index 000000000..01bc0c829
--- /dev/null
+++ b/internal/config/host_metadata.go
@@ -0,0 +1,179 @@
+package config
+
+import (
+    "encoding/json"
+    "fmt"
+    "os"
+    "path/filepath"
+    "sync"
+
+    "github.com/rs/zerolog/log"
+)
+
+// HostMetadata holds additional metadata for a host
+type HostMetadata struct {
+    ID          string   `json:"id"`          // Host ID
+    CustomURL   string   `json:"customUrl"`   // Custom URL for the host
+    Description string   `json:"description"` // Optional description
+    Tags        []string `json:"tags"`        // Optional tags for categorization
+    Notes       []string `json:"notes"`       // User annotations for AI context
+}
+
+// HostMetadataStore manages host metadata
+type HostMetadataStore struct {
+    mu       sync.RWMutex
+    metadata map[string]*HostMetadata // keyed by host ID
+    dataPath string
+}
+
+// NewHostMetadataStore creates a new host metadata store
+func NewHostMetadataStore(dataPath string) *HostMetadataStore {
+    store := &HostMetadataStore{
+        metadata: make(map[string]*HostMetadata),
+        dataPath: dataPath,
+    }
+
+    // Load existing metadata
+    if err := store.Load(); err != nil {
+        log.Warn().Err(err).Msg("Failed to load host metadata")
+    }
+
+    return store
+}
+
+// Get retrieves metadata for a host
+func (s *HostMetadataStore) Get(hostID string) *HostMetadata {
+    s.mu.RLock()
+    defer s.mu.RUnlock()
+
+    if meta, exists := s.metadata[hostID]; exists {
+        return meta
+    }
+    return nil
+}
+
+// GetAll retrieves all host metadata
+func (s *HostMetadataStore) GetAll() map[string]*HostMetadata {
+    s.mu.RLock()
+    defer s.mu.RUnlock()
+
+    // Return a copy of the map so callers cannot modify it (the values are shared pointers)
+    result := make(map[string]*HostMetadata)
+    for k, v := range s.metadata {
+        result[k] = v
+    }
+    return result
+}
+
+// Set updates or creates metadata for a host
+func (s *HostMetadataStore) Set(hostID string, meta *HostMetadata) error {
+    s.mu.Lock()
+    defer s.mu.Unlock()
+
+    if meta == nil {
+        return fmt.Errorf("metadata cannot be nil")
+    }
+
+    meta.ID = hostID
+    s.metadata[hostID] = meta
+
+    // Save to disk
+    return s.save()
+}
+
+// Delete removes metadata for a host
+func (s *HostMetadataStore) Delete(hostID string) error {
+    s.mu.Lock()
+    defer s.mu.Unlock()
+
+    delete(s.metadata, hostID)
+
+    // Save to disk
+    return s.save()
+}
+
+// ReplaceAll replaces all metadata entries and persists them to disk.
+func (s *HostMetadataStore) ReplaceAll(metadata map[string]*HostMetadata) error {
+    s.mu.Lock()
+    defer s.mu.Unlock()
+
+    s.metadata = make(map[string]*HostMetadata)
+
+    for hostID, meta := range metadata {
+        if meta == nil {
+            continue
+        }
+
+        clone := *meta
+        clone.ID = hostID
+        // Ensure the slice copy is not nil to allow JSON marshalling of empty tags
+        if clone.Tags == nil {
+            clone.Tags = []string{}
+        }
+        s.metadata[hostID] = &clone
+    }
+
+    return s.save()
+}
+
+// Load reads metadata from disk
+func (s *HostMetadataStore) Load() error {
+    filePath := filepath.Join(s.dataPath, "host_metadata.json")
+
+    log.Debug().Str("path", filePath).Msg("Loading host metadata from disk")
+
+    data, err := os.ReadFile(filePath)
+    if err != nil {
+        if os.IsNotExist(err) {
+            // The file doesn't exist yet; not an error
+            log.Debug().Str("path", filePath).Msg("Host metadata file does not exist yet")
+            return nil
+        }
+        return fmt.Errorf("failed to read metadata file: %w", err)
+    }
+
+    s.mu.Lock()
+    defer s.mu.Unlock()
+
+    if err := json.Unmarshal(data, &s.metadata); err != nil {
+        return fmt.Errorf("failed to unmarshal metadata: %w", err)
+    }
+
+    log.Info().
+        Int("hostCount", len(s.metadata)).
+        Msg("Loaded host metadata")
+
+    return nil
+}
+
+// save writes metadata to disk (must be called with the lock held)
+func (s *HostMetadataStore) save() error {
+    filePath := filepath.Join(s.dataPath, "host_metadata.json")
+
+    log.Debug().Str("path", filePath).Msg("Saving host metadata to disk")
+
+    data, err := json.Marshal(s.metadata)
+    if err != nil {
+        return fmt.Errorf("failed to marshal metadata: %w", err)
+    }
+
+    // Ensure the directory exists
+    if err := os.MkdirAll(s.dataPath, 0755); err != nil {
+        return fmt.Errorf("failed to create data directory: %w", err)
+    }
+
+    // Write to a temp file first for an atomic operation
+    tempFile := filePath + ".tmp"
+    if err := os.WriteFile(tempFile, data, 0644); err != nil {
+        return fmt.Errorf("failed to write metadata file: %w", err)
+    }
+
+    // Rename the temp file to the actual file (atomic on most systems)
+    if err := os.Rename(tempFile, filePath); err != nil {
+        return fmt.Errorf("failed to rename metadata file: %w", err)
+    }
+
+    log.Debug().Str("path", filePath).Int("hosts", len(s.metadata)).Msg("Host metadata saved successfully")
+
+    return nil
+}
diff --git a/internal/resources/store.go b/internal/resources/store.go
index af391a9e6..10ef426d2 100644
--- a/internal/resources/store.go
+++ b/internal/resources/store.go
@@ -163,6 +163,54 @@ func (s *Store) GetChildren(parentID string) []Resource {
     return result
 }
 
+// FindContainerHost looks up a Docker container by name or ID and returns the
+// hostname of its parent DockerHost. This is used by AI routing to automatically
+// determine which host should execute commands for a container.
+// It returns an empty string if the container is not found.
+func (s *Store) FindContainerHost(containerNameOrID string) string {
+    s.mu.RLock()
+    defer s.mu.RUnlock()
+
+    if containerNameOrID == "" {
+        return ""
+    }
+
+    containerNameLower := strings.ToLower(containerNameOrID)
+
+    // Find the container
+    var container *Resource
+    for _, r := range s.resources {
+        if r.Type != ResourceTypeDockerContainer {
+            continue
+        }
+        // Match by name or ID (case-insensitive)
+        if strings.EqualFold(r.Name, containerNameOrID) ||
+            strings.EqualFold(r.ID, containerNameOrID) ||
+            strings.Contains(strings.ToLower(r.Name), containerNameLower) ||
+            strings.Contains(strings.ToLower(r.ID), containerNameLower) {
+            container = r
+            break
+        }
+    }
+
+    if container == nil || container.ParentID == "" {
+        return ""
+    }
+
+    // Find the parent DockerHost
+    parent := s.resources[container.ParentID]
+    if parent == nil {
+        return ""
+    }
+
+    // Return the hostname from identity, or the name
+    if parent.Identity != nil && parent.Identity.Hostname != "" {
+        return parent.Identity.Hostname
+    }
+    return parent.Name
+}
+
 // Remove removes a resource from the store.
 func (s *Store) Remove(id string) {
     s.mu.Lock()
diff --git a/scripts/hot-dev.sh b/scripts/hot-dev.sh
index 9116373ef..4003906d2 100755
--- a/scripts/hot-dev.sh
+++ b/scripts/hot-dev.sh
@@ -241,6 +241,26 @@
 else
     log_info "Production mode: Using dev config directory: ${PULSE_DATA_DIR}"
 fi
+
+# Auto-restore the encryption key from a backup if it is missing
+if [[ ! -f "${PULSE_DATA_DIR}/.encryption.key" ]]; then
+    BACKUP_KEY=$(find "${PULSE_DATA_DIR}" -maxdepth 1 -name '.encryption.key.bak*' -type f 2>/dev/null | head -1)
+    if [[ -n "${BACKUP_KEY}" ]] && [[ -f "${BACKUP_KEY}" ]]; then
+        echo ""
+        log_error "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"
+        log_error "!! ENCRYPTION KEY WAS MISSING - AUTO-RESTORING FROM BACKUP !!"
+        log_error "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"
+        log_error "!! Backup used: ${BACKUP_KEY}"
+        log_error "!! "
+        log_error "!! To find out what deleted the key, run:"
+        log_error "!!   sudo journalctl -u encryption-key-watcher -n 100"
+        log_error "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"
+        echo ""
+        cp -f "${BACKUP_KEY}" "${PULSE_DATA_DIR}/.encryption.key"
+        chmod 600 "${PULSE_DATA_DIR}/.encryption.key"
+        log_info "Restored encryption key from backup"
+    fi
+fi
+
 if [[ -z ${PULSE_ENCRYPTION_KEY:-} ]]; then
     if [[ -f "${PULSE_DATA_DIR}/.encryption.key" ]]; then
         export PULSE_ENCRYPTION_KEY="$(<"${PULSE_DATA_DIR}/.encryption.key")"
diff --git a/scripts/sync-production-config.sh b/scripts/sync-production-config.sh
index 8e7dc488c..4c6cd527c 100755
--- a/scripts/sync-production-config.sh
+++ b/scripts/sync-production-config.sh
@@ -22,6 +22,29 @@ echo ""
 HAVE_PROD_KEY=false
 
 # CRITICAL: Always sync production encryption key to dev when it exists
+# First, check if the key is missing but a backup exists - auto-restore it
+if [ ! -f "$PROD_DIR/.encryption.key" ]; then
+    # Look for backup keys in the production directory
+    BACKUP_KEY=$(find "$PROD_DIR" -maxdepth 1 -name '.encryption.key.bak*' -type f 2>/dev/null | head -1)
+    if [ -n "$BACKUP_KEY" ] && [ -f "$BACKUP_KEY" ]; then
+        echo ""
+        echo "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"
+        echo "!! ENCRYPTION KEY WAS MISSING - AUTO-RESTORING FROM BACKUP !!"
+        echo "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"
+        echo "!! Backup used: $BACKUP_KEY"
+        echo "!! "
+        echo "!! To find out what deleted the key, run:"
+        echo "!!   sudo journalctl -u encryption-key-watcher -n 100"
+        echo "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"
+        echo ""
+        cp -f "$BACKUP_KEY" "$PROD_DIR/.encryption.key"
+        chmod 600 "$PROD_DIR/.encryption.key"
+        # Ensure proper ownership (may need root, so try but don't fail)
+        chown pulse:pulse "$PROD_DIR/.encryption.key" 2>/dev/null || true
+        echo "✓ Restored encryption key from backup"
+    fi
+fi
+
 if [ -f "$PROD_DIR/.encryption.key" ]; then
     if [ ! -f "$DEV_DIR/.encryption.key" ]; then
         cp -f "$PROD_DIR/.encryption.key" "$DEV_DIR/.encryption.key"
diff --git a/scripts/watch-encryption-key.sh b/scripts/watch-encryption-key.sh
new file mode 100755
index 000000000..ac71757f8
--- /dev/null
+++ b/scripts/watch-encryption-key.sh
@@ -0,0 +1,43 @@
+#!/bin/bash
+# Watch the encryption key file for any modifications or deletions
+# Logs to journald with full context about which process did it
+
+LOG_TAG="encryption-key-watcher"
+WATCH_DIR="/etc/pulse"
+WATCH_FILE=".encryption.key"
+
+log_event() {
+    local event="$1"
+    local file="$2"
+
+    # Log the event with a timestamp
+    echo "[$LOG_TAG] EVENT: $event on $file at $(date '+%Y-%m-%d %H:%M:%S')"
+
+    # Try to capture which processes are accessing /etc/pulse
+    echo "[$LOG_TAG] Processes with open files in /etc/pulse:"
+    lsof +D /etc/pulse 2>/dev/null | head -20 || echo "  (lsof failed)"
+
+    # Log the current state of the file
+    if [[ -f "$WATCH_DIR/$WATCH_FILE" ]]; then
+        echo "[$LOG_TAG] File still exists: $(ls -la "$WATCH_DIR/$WATCH_FILE")"
+    else
+        echo "[$LOG_TAG] *** FILE IS MISSING! ***"
+        echo "[$LOG_TAG] Contents of $WATCH_DIR:"
+        ls -la "$WATCH_DIR" | grep -i enc
+    fi
+
+    # Log recent sudo commands
+    echo "[$LOG_TAG] Recent sudo activity:"
+    journalctl -u sudo --since "2 minutes ago" --no-pager 2>/dev/null | tail -10 || true
+}
+
+echo "[$LOG_TAG] Starting encryption key watcher..."
+echo "[$LOG_TAG] Monitoring: $WATCH_DIR/$WATCH_FILE"
+
+# Watch for all relevant events on the directory
+inotifywait -m -e delete,move,modify,attrib,create "$WATCH_DIR" --format '%e %f' 2>/dev/null | while read -r event file; do
+    # Only log events related to the encryption key
+    if [[ "$file" == ".encryption.key"* ]]; then
+        log_event "$event" "$file"
+    fi
+done
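The docs and restore banners above reference an `encryption-key-watcher` systemd unit, but the unit file itself is not part of this diff. A minimal sketch of what that unit might look like, assuming the script is installed at `/opt/pulse/scripts/watch-encryption-key.sh` (alongside the other Pulse scripts referenced in dev-environment.md) and that `inotify-tools` is installed for `inotifywait`:

```bash
# Hypothetical unit file - not included in this diff; the path and names are assumptions.
sudo tee /etc/systemd/system/encryption-key-watcher.service >/dev/null <<'EOF'
[Unit]
Description=Watch /etc/pulse/.encryption.key for modification or deletion
After=local-fs.target

[Service]
# Requires inotifywait from inotify-tools
ExecStart=/opt/pulse/scripts/watch-encryption-key.sh
Restart=always
RestartSec=5

[Install]
WantedBy=multi-user.target
EOF
sudo systemctl daemon-reload
sudo systemctl enable --now encryption-key-watcher
```

With stdout going to the journal (the systemd default), the `journalctl -u encryption-key-watcher` commands from the docs section work as written.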
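For manually exercising the new host metadata endpoints, a rough curl session might look like the following. The base URL and host ID are placeholders, and since the routes sit behind `RequireAuth` with monitoring scopes, whatever authentication your Pulse deployment uses must be supplied as well:

```bash
PULSE_URL="http://localhost:7655"   # assumed default Pulse address - adjust as needed
HOST_ID="my-host-id"                # placeholder host ID

# List all stored host metadata (an empty JSON object when nothing is stored)
curl -s "$PULSE_URL/api/hosts/metadata"

# Set a custom URL for a host; it must parse as http:// or https:// per the validation above
curl -s -X PUT "$PULSE_URL/api/hosts/metadata/$HOST_ID" \
  -H 'Content-Type: application/json' \
  -d '{"customUrl": "https://192.168.1.100:8006"}'

# Remove the metadata again (responds with 204 No Content)
curl -s -X DELETE "$PULSE_URL/api/hosts/metadata/$HOST_ID"
```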
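As a quick reference for the new `isDangerousCommand` carve-outs in `internal/ai/service.go`, these examples illustrate how the allowlists above classify some common invocations (a sampling, not an exhaustive list):

```bash
# Treated as read-only by the new special cases (no approval required):
apt update                        # "update" is in safeAptOps
sudo apt-get install -s nginx     # -s/--dry-run/--simulate/--just-print are always safe
systemctl status pulse-hot-dev    # "status" is in safeSystemctlOps
dpkg -l                           # "-l" is in safeDpkgOps

# Still dangerous (approval required in non-autonomous mode):
apt upgrade -y                    # mutating, and no dry-run flag
systemctl restart pulse-hot-dev   # "restart" is not in the read-only allowlist
```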