diff --git a/cmd/pulse-docker-agent/main.go b/cmd/pulse-docker-agent/main.go index 6fc7142ff..60b9f99c4 100644 --- a/cmd/pulse-docker-agent/main.go +++ b/cmd/pulse-docker-agent/main.go @@ -55,6 +55,12 @@ func main() { logger := zerolog.New(os.Stdout).Level(cfg.LogLevel).With().Timestamp().Logger() cfg.Logger = &logger + // Deprecation warning + logger.Warn().Msg("pulse-docker-agent is DEPRECATED and will be removed in a future release") + logger.Warn().Msg("Please migrate to the unified 'pulse-agent' with --enable-docker flag") + logger.Warn().Msg("Example: pulse-agent --url --token --enable-docker") + logger.Warn().Msg("") + agent, err := dockeragent.New(cfg) if err != nil { logger.Fatal().Err(err).Msg("Failed to create docker agent") diff --git a/cmd/pulse-host-agent/main.go b/cmd/pulse-host-agent/main.go index e1fe2d6e4..e64dc46e7 100644 --- a/cmd/pulse-host-agent/main.go +++ b/cmd/pulse-host-agent/main.go @@ -61,6 +61,12 @@ func main() { g, ctx := errgroup.WithContext(ctx) + // Deprecation warning + logger.Warn().Msg("pulse-host-agent is DEPRECATED and will be removed in a future release") + logger.Warn().Msg("Please migrate to the unified 'pulse-agent' with --enable-host flag") + logger.Warn().Msg("Example: pulse-agent --url --token --enable-host") + logger.Warn().Msg("") + logger.Info(). Str("version", Version). Str("pulse_url", hostCfg.PulseURL). diff --git a/docs/API.md b/docs/API.md index 32f258492..7fcda8109 100644 --- a/docs/API.md +++ b/docs/API.md @@ -112,23 +112,6 @@ Triggers a test alert to all configured channels. --- ---- -## 🖥️ Host Agent - -### Submit Report -`POST /api/agents/host/report` -Used by the Pulse Host Agent to push system metrics. - -### Lookup Agent -`POST /api/agents/host/lookup` -Check if a host agent is already registered. - -### Delete Host -`DELETE /api/agents/host/` -Remove a host agent from monitoring. - ---- - ## ⚙️ System Settings ### Get Settings @@ -157,15 +140,23 @@ Initiate OIDC login flow. 
--- -## 🐳 Docker Agent +## 🤖 Agent Endpoints -### Submit Report -`POST /api/agents/docker/report` -Used by the Pulse Docker Agent to push container metrics. +### Unified Agent (Recommended) +`GET /download/pulse-agent` +Downloads the unified agent binary for the current platform. -### Download Agent -`GET /download/pulse-docker-agent` -Downloads the binary for the current platform. +The unified agent combines host and Docker monitoring. Use `--enable-docker` to enable Docker metrics. + +See [UNIFIED_AGENT.md](UNIFIED_AGENT.md) for installation instructions. + +### Legacy Agents (Deprecated) +`GET /download/pulse-host-agent` - *Deprecated, use pulse-agent* +`GET /download/pulse-docker-agent` - *Deprecated, use pulse-agent --enable-docker* + +### Submit Reports +`POST /api/agents/host/report` - Host metrics +`POST /api/agents/docker/report` - Docker container metrics --- diff --git a/frontend-modern/package-lock.json b/frontend-modern/package-lock.json index e09c7a68e..6ef332dbd 100644 --- a/frontend-modern/package-lock.json +++ b/frontend-modern/package-lock.json @@ -11,10 +11,12 @@ "dependencies": { "@solidjs/router": "^0.10.10", "lucide-solid": "^0.545.0", + "marked": "^17.0.1", "solid-js": "^1.8.0" }, "devDependencies": { "@solidjs/testing-library": "^0.8.5", + "@tailwindcss/typography": "^0.5.19", "@testing-library/jest-dom": "^6.5.0", "@types/node": "^20.10.0", "@typescript-eslint/eslint-plugin": "^8.0.0", @@ -1429,6 +1431,33 @@ } } }, + "node_modules/@tailwindcss/typography": { + "version": "0.5.19", + "resolved": "https://registry.npmjs.org/@tailwindcss/typography/-/typography-0.5.19.tgz", + "integrity": "sha512-w31dd8HOx3k9vPtcQh5QHP9GwKcgbMp87j58qi6xgiBnFFtKEAgCWnDw4qUT8aHwkCp8bKvb/KGKWWHedP0AAg==", + "dev": true, + "license": "MIT", + "dependencies": { + "postcss-selector-parser": "6.0.10" + }, + "peerDependencies": { + "tailwindcss": ">=3.0.0 || insiders || >=4.0.0-alpha.20 || >=4.0.0-beta.1" + } + }, + 
"node_modules/@tailwindcss/typography/node_modules/postcss-selector-parser": { + "version": "6.0.10", + "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-6.0.10.tgz", + "integrity": "sha512-IQ7TZdoaqbT+LCpShg46jnZVlhWD2w6iQYAcYXfHARZ7X1t/UGhhceQDs5X0cGqKvYlHNOuv7Oa1xmb0oQuA3w==", + "dev": true, + "license": "MIT", + "dependencies": { + "cssesc": "^3.0.0", + "util-deprecate": "^1.0.2" + }, + "engines": { + "node": ">=4" + } + }, "node_modules/@testing-library/dom": { "version": "10.4.1", "resolved": "https://registry.npmjs.org/@testing-library/dom/-/dom-10.4.1.tgz", @@ -3895,6 +3924,18 @@ "@jridgewell/sourcemap-codec": "^1.5.5" } }, + "node_modules/marked": { + "version": "17.0.1", + "resolved": "https://registry.npmjs.org/marked/-/marked-17.0.1.tgz", + "integrity": "sha512-boeBdiS0ghpWcSwoNm/jJBwdpFaMnZWRzjA6SkUMYb40SVaN1x7mmfGKp0jvexGcx+7y2La5zRZsYFZI6Qpypg==", + "license": "MIT", + "bin": { + "marked": "bin/marked.js" + }, + "engines": { + "node": ">= 20" + } + }, "node_modules/math-intrinsics": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/math-intrinsics/-/math-intrinsics-1.1.0.tgz", @@ -5113,6 +5154,7 @@ "integrity": "sha512-6A2rnmW5xZMdw11LYjhcI5846rt9pbLSabY5XPxo+XWdxwZaFEn47Go4NzFiHu9sNNmr/kXivP1vStfvMaK1GQ==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@alloc/quick-lru": "^5.2.0", "arg": "^5.0.2", diff --git a/frontend-modern/package.json b/frontend-modern/package.json index 666c8a95d..9dcc9258f 100644 --- a/frontend-modern/package.json +++ b/frontend-modern/package.json @@ -28,10 +28,12 @@ "dependencies": { "@solidjs/router": "^0.10.10", "lucide-solid": "^0.545.0", + "marked": "^17.0.1", "solid-js": "^1.8.0" }, "devDependencies": { "@solidjs/testing-library": "^0.8.5", + "@tailwindcss/typography": "^0.5.19", "@testing-library/jest-dom": "^6.5.0", "@types/node": "^20.10.0", "@typescript-eslint/eslint-plugin": "^8.0.0", diff --git a/frontend-modern/src/App.tsx 
b/frontend-modern/src/App.tsx index 5deca704a..ea1e57074 100644 --- a/frontend-modern/src/App.tsx +++ b/frontend-modern/src/App.tsx @@ -43,6 +43,8 @@ import { TokenRevealDialog } from './components/TokenRevealDialog'; import { useAlertsActivation } from './stores/alertsActivation'; import { UpdateProgressModal } from './components/UpdateProgressModal'; import type { UpdateStatus } from './api/updates'; +import { AIChat } from './components/AI/AIChat'; +import { aiChatStore } from './stores/aiChat'; const Dashboard = lazy(() => import('./components/Dashboard/Dashboard').then((module) => ({ default: module.Dashboard })), @@ -723,6 +725,39 @@ function App() { // Root layout component for Router const RootLayout = (props: { children?: JSX.Element }) => { + // Check AI settings on mount and setup keyboard shortcut + onMount(() => { + // Check if AI is enabled + import('./api/ai').then(({ AIAPI }) => { + AIAPI.getSettings() + .then((settings) => { + aiChatStore.setEnabled(settings.enabled && settings.configured); + }) + .catch(() => { + aiChatStore.setEnabled(false); + }); + }); + + // Keyboard shortcut: Cmd/Ctrl+K to toggle AI + const handleKeyDown = (e: KeyboardEvent) => { + if ((e.metaKey || e.ctrlKey) && e.key === 'k') { + e.preventDefault(); + if (aiChatStore.enabled) { + aiChatStore.toggle(); + } + } + // Escape to close + if (e.key === 'Escape' && aiChatStore.isOpen) { + aiChatStore.close(); + } + }; + + document.addEventListener('keydown', handleKeyDown); + onCleanup(() => { + document.removeEventListener('keydown', handleKeyDown); + }); + }); + return ( -
- - {props.children} - + {/* Main layout container - flexbox to allow AI panel to push content */} +
+ {/* Main content area - shrinks when AI panel is open, scrolls independently */} +
+ + {props.children} + +
+        {/* AI Panel - slides in from right, pushes content */}
+        <AIChat onClose={() => aiChatStore.close()} />
+ {/* Fixed AI Assistant Button - always visible on the side when AI is enabled */} + + {/* This component only shows when chat is closed */} + + diff --git a/frontend-modern/src/api/ai.ts b/frontend-modern/src/api/ai.ts new file mode 100644 index 000000000..57ca2d49b --- /dev/null +++ b/frontend-modern/src/api/ai.ts @@ -0,0 +1,127 @@ +import { apiFetchJSON, apiFetch } from '@/utils/apiClient'; +import type { + AISettings, + AISettingsUpdateRequest, + AITestResult, + AIExecuteRequest, + AIExecuteResponse, + AIStreamEvent, +} from '@/types/ai'; + +export class AIAPI { + private static baseUrl = '/api'; + + // Get AI settings + static async getSettings(): Promise { + return apiFetchJSON(`${this.baseUrl}/settings/ai`) as Promise; + } + + // Update AI settings + static async updateSettings(settings: AISettingsUpdateRequest): Promise { + return apiFetchJSON(`${this.baseUrl}/settings/ai/update`, { + method: 'PUT', + body: JSON.stringify(settings), + }) as Promise; + } + + // Test AI connection + static async testConnection(): Promise { + return apiFetchJSON(`${this.baseUrl}/ai/test`, { + method: 'POST', + }) as Promise; + } + + // Execute an AI prompt + static async execute(request: AIExecuteRequest): Promise { + return apiFetchJSON(`${this.baseUrl}/ai/execute`, { + method: 'POST', + body: JSON.stringify(request), + }) as Promise; + } + + // Run a single command (for approved commands) + static async runCommand(request: { + command: string; + target_type: string; + target_id: string; + run_on_host: boolean; + vmid?: string; + }): Promise<{ output: string; success: boolean; error?: string }> { + return apiFetchJSON(`${this.baseUrl}/ai/run-command`, { + method: 'POST', + body: JSON.stringify(request), + }) as Promise<{ output: string; success: boolean; error?: string }>; + } + + // Execute an AI prompt with streaming + // Returns an abort function to cancel the request + static async executeStream( + request: AIExecuteRequest, + onEvent: (event: AIStreamEvent) => void, + 
signal?: AbortSignal + ): Promise { + console.log('[AI SSE] Starting streaming request:', request); + + const response = await apiFetch(`${this.baseUrl}/ai/execute/stream`, { + method: 'POST', + body: JSON.stringify(request), + headers: { + 'Content-Type': 'application/json', + 'Accept': 'text/event-stream', + }, + signal, + }); + + console.log('[AI SSE] Response status:', response.status, response.headers.get('content-type')); + + if (!response.ok) { + const text = await response.text(); + console.error('[AI SSE] Request failed:', text); + throw new Error(text || `Request failed with status ${response.status}`); + } + + const reader = response.body?.getReader(); + if (!reader) { + console.error('[AI SSE] No response body'); + throw new Error('No response body'); + } + + const decoder = new TextDecoder(); + let buffer = ''; + + console.log('[AI SSE] Starting to read stream...'); + + try { + while (true) { + const { done, value } = await reader.read(); + if (done) { + console.log('[AI SSE] Stream ended'); + break; + } + + const chunk = decoder.decode(value, { stream: true }); + console.log('[AI SSE] Received chunk:', chunk.length, 'bytes'); + buffer += chunk; + + // Process complete SSE messages + const lines = buffer.split('\n\n'); + buffer = lines.pop() || ''; // Keep incomplete message in buffer + + for (const line of lines) { + if (line.startsWith('data: ')) { + try { + const data = JSON.parse(line.slice(6)); + console.log('[AI SSE] Parsed event:', data.type, data); + onEvent(data as AIStreamEvent); + } catch (e) { + console.error('[AI SSE] Failed to parse event:', e, line); + } + } + } + } + } finally { + reader.releaseLock(); + console.log('[AI SSE] Reader released'); + } + } +} diff --git a/frontend-modern/src/api/dockerMetadata.ts b/frontend-modern/src/api/dockerMetadata.ts index ab3d4ab8b..2b9d929b5 100644 --- a/frontend-modern/src/api/dockerMetadata.ts +++ b/frontend-modern/src/api/dockerMetadata.ts @@ -6,6 +6,7 @@ export interface DockerMetadata { 
customUrl?: string; description?: string; tags?: string[]; + notes?: string[]; // User annotations for AI context } export class DockerMetadataAPI { diff --git a/frontend-modern/src/api/guestMetadata.ts b/frontend-modern/src/api/guestMetadata.ts index 3a29ecb4f..b3d1971c1 100644 --- a/frontend-modern/src/api/guestMetadata.ts +++ b/frontend-modern/src/api/guestMetadata.ts @@ -6,6 +6,7 @@ export interface GuestMetadata { customUrl?: string; description?: string; tags?: string[]; + notes?: string[]; // User annotations for AI context } export class GuestMetadataAPI { diff --git a/frontend-modern/src/components/AI/AIChat.tsx b/frontend-modern/src/components/AI/AIChat.tsx new file mode 100644 index 000000000..5e305a695 --- /dev/null +++ b/frontend-modern/src/components/AI/AIChat.tsx @@ -0,0 +1,1001 @@ +import { Component, Show, createSignal, For, createEffect, createMemo } from 'solid-js'; +import { marked } from 'marked'; +import { AIAPI } from '@/api/ai'; +import { notificationStore } from '@/stores/notifications'; +import { logger } from '@/utils/logger'; +import { aiChatStore } from '@/stores/aiChat'; +import { useWebSocket } from '@/App'; +import type { + AIToolExecution, + AIStreamEvent, + AIStreamToolStartData, + AIStreamToolEndData, + AIStreamCompleteData, + AIStreamApprovalNeededData, +} from '@/types/ai'; + +// Configure marked for safe rendering +marked.setOptions({ + breaks: true, // Convert \n to
+ gfm: true, // GitHub Flavored Markdown +}); + +// Helper to render markdown safely +const renderMarkdown = (content: string): string => { + try { + return marked.parse(content) as string; + } catch { + return content; + } +}; + +// In-progress tool execution (before completion) +interface PendingTool { + name: string; + input: string; +} + +// Command awaiting user approval +interface PendingApproval { + command: string; + toolId: string; + toolName: string; + runOnHost: boolean; + isExecuting?: boolean; +} + +interface Message { + id: string; + role: 'user' | 'assistant'; + content: string; + timestamp: Date; + model?: string; + tokens?: { input: number; output: number }; + toolCalls?: AIToolExecution[]; + // Streaming state + isStreaming?: boolean; + pendingTools?: PendingTool[]; + pendingApprovals?: PendingApproval[]; +} + +interface AIChatProps { + onClose: () => void; +} + +// Extract guest name from context if available +const getGuestName = (context?: Record): string | undefined => { + if (!context) return undefined; + if (typeof context.guestName === 'string') return context.guestName; + if (typeof context.name === 'string') return context.name; + return undefined; +}; + +export const AIChat: Component = (props) => { + // Read all context from store for proper SolidJS reactivity + const isOpen = () => aiChatStore.isOpen; + const context = () => aiChatStore.context; + const targetType = () => context().targetType; + const targetId = () => context().targetId; + const contextData = () => context().context; + const initialPrompt = () => context().initialPrompt; + + // Access WebSocket state for listing available resources + const wsContext = useWebSocket(); + + // Context picker state + const [showContextPicker, setShowContextPicker] = createSignal(false); + const [contextSearch, setContextSearch] = createSignal(''); + + // Build a list of all available resources for the context picker + const availableResources = createMemo(() => { + const resources: Array<{ 
+ id: string; + type: 'vm' | 'container' | 'node' | 'host' | 'docker'; + name: string; + status: string; + node?: string; + data: Record; + }> = []; + + // Add VMs + for (const vm of wsContext.state.vms || []) { + resources.push({ + id: `${vm.node}-${vm.vmid}`, + type: 'vm', + name: vm.name || `VM ${vm.vmid}`, + status: vm.status, + node: vm.node, + data: { + guest_id: `${vm.node}-${vm.vmid}`, + guest_name: vm.name, + guest_vmid: vm.vmid, + guest_type: 'qemu', + guest_node: vm.node, + guest_status: vm.status, + cpu: vm.cpu, + mem: vm.mem, + maxmem: vm.maxmem, + disk: vm.disk, + maxdisk: vm.maxdisk, + }, + }); + } + + // Add containers + for (const ct of wsContext.state.containers || []) { + resources.push({ + id: `${ct.node}-${ct.vmid}`, + type: 'container', + name: ct.name || `CT ${ct.vmid}`, + status: ct.status, + node: ct.node, + data: { + guest_id: `${ct.node}-${ct.vmid}`, + guest_name: ct.name, + guest_vmid: ct.vmid, + guest_type: 'lxc', + guest_node: ct.node, + guest_status: ct.status, + cpu: ct.cpu, + mem: ct.mem, + maxmem: ct.maxmem, + disk: ct.disk, + maxdisk: ct.maxdisk, + }, + }); + } + + // Add Proxmox nodes + for (const node of wsContext.state.nodes || []) { + resources.push({ + id: `node-${node.node}`, + type: 'node', + name: node.node, + status: node.status, + data: { + node_name: node.node, + node_status: node.status, + cpu: node.cpu, + mem: node.mem, + maxmem: node.maxmem, + disk: node.disk, + maxdisk: node.maxdisk, + }, + }); + } + + // Add host agents + for (const host of wsContext.state.hosts || []) { + resources.push({ + id: `host-${host.hostname}`, + type: 'host', + name: host.hostname, + status: host.connected ? 
'online' : 'offline', + data: { + host_name: host.hostname, + host_platform: host.platform, + host_version: host.version, + connected: host.connected, + }, + }); + } + + return resources; + }); + + // Filtered resources based on search + const filteredResources = createMemo(() => { + const search = contextSearch().toLowerCase(); + if (!search) return availableResources(); + return availableResources().filter( + (r) => + r.name.toLowerCase().includes(search) || + r.type.toLowerCase().includes(search) || + (r.node && r.node.toLowerCase().includes(search)) + ); + }); + + // Add a resource to context + const addResourceToContext = (resource: ReturnType[number]) => { + aiChatStore.addContextItem(resource.type, resource.id, resource.name, resource.data); + setShowContextPicker(false); + setContextSearch(''); + }; + + // Initialize messages from store (for persistence across navigation) + const [messages, setMessagesLocal] = createSignal( + aiChatStore.messages as Message[] || [] + ); + const [input, setInput] = createSignal(''); + const [isLoading, setIsLoading] = createSignal(false); + let messagesEndRef: HTMLDivElement | undefined; + let inputRef: HTMLTextAreaElement | undefined; + let abortControllerRef: AbortController | null = null; + + // Wrapper to sync messages to global store + const setMessages = (updater: Message[] | ((prev: Message[]) => Message[])) => { + setMessagesLocal((prev) => { + const newMsgs = typeof updater === 'function' ? 
updater(prev) : updater; + // Sync to global store for persistence (debounce or defer to avoid too many updates) + setTimeout(() => aiChatStore.setMessages(newMsgs as any), 0); + return newMsgs; + }); + }; + + // Auto-scroll to bottom when new messages arrive + createEffect(() => { + if (messages().length > 0 && messagesEndRef) { + messagesEndRef.scrollIntoView({ behavior: 'smooth' }); + } + }); + + // Focus input when drawer opens + createEffect(() => { + if (isOpen() && inputRef) { + setTimeout(() => inputRef?.focus(), 100); + } + }); + + // Handle initial prompt if provided + createEffect(() => { + if (initialPrompt() && isOpen()) { + setInput(initialPrompt()!); + } + }); + + const generateId = () => Math.random().toString(36).substring(2, 9); + + // Stop/cancel the current AI request + const handleStop = () => { + if (abortControllerRef) { + abortControllerRef.abort(); + abortControllerRef = null; + } + // Mark any streaming message as stopped + setMessages((prev) => + prev.map((msg) => + msg.isStreaming + ? 
{ ...msg, isStreaming: false, content: msg.content || '(Stopped by user)' } + : msg + ) + ); + setIsLoading(false); + }; + + const handleSubmit = async (e?: Event) => { + e?.preventDefault(); + const prompt = input().trim(); + if (!prompt || isLoading()) return; + + // IMPORTANT: Capture the current messages BEFORE adding new ones to avoid race conditions + // SolidJS batches updates, so messages() may not be updated synchronously + const previousMessages = messages(); + + // Build conversation history from previous messages (before we add new ones) + const history = previousMessages + .filter((m) => m.content && !m.isStreaming) // Only include completed messages with content + .map((m) => ({ + role: m.role, + content: m.content, + })); + + // Add user message + const userMessage: Message = { + id: generateId(), + role: 'user', + content: prompt, + timestamp: new Date(), + }; + setMessages((prev) => [...prev, userMessage]); + setInput(''); + setIsLoading(true); + + // Create abort controller for this request + abortControllerRef = new AbortController(); + + // Create a streaming assistant message + const assistantId = generateId(); + const streamingMessage: Message = { + id: assistantId, + role: 'assistant', + content: '', + timestamp: new Date(), + isStreaming: true, + pendingTools: [], + pendingApprovals: [], + toolCalls: [], + }; + setMessages((prev) => [...prev, streamingMessage]); + + try { + await AIAPI.executeStream( + { + prompt, + target_type: targetType(), + target_id: targetId(), + context: contextData(), + history: history.length > 0 ? 
history : undefined, + }, + (event: AIStreamEvent) => { + console.log('[AIChat] Received event:', event.type, event); + // Update the streaming message based on event type + setMessages((prev) => + prev.map((msg) => { + if (msg.id !== assistantId) return msg; + + switch (event.type) { + case 'tool_start': { + const data = event.data as AIStreamToolStartData; + return { + ...msg, + pendingTools: [...(msg.pendingTools || []), { name: data.name, input: data.input }], + }; + } + case 'tool_end': { + const data = event.data as AIStreamToolEndData; + // Remove matching pending tool (by name since input won't match output) + const updatedPending = (msg.pendingTools || []).slice(0, -1); // Remove last pending + const newToolCall: AIToolExecution = { + name: data.name, + input: (msg.pendingTools || []).find((t) => t.name === data.name)?.input || data.name, + output: data.output, + success: data.success, + }; + return { + ...msg, + pendingTools: updatedPending, + toolCalls: [...(msg.toolCalls || []), newToolCall], + }; + } + case 'content': { + const content = event.data as string; + return { + ...msg, + content: content, + }; + } + case 'complete': { + // Complete event has flat structure (model, input_tokens at top level, not under data) + const completeEvent = event as unknown as AIStreamCompleteData & { type: string }; + return { + ...msg, + isStreaming: false, + pendingTools: [], + model: completeEvent.model, + tokens: { + input: completeEvent.input_tokens, + output: completeEvent.output_tokens, + }, + // Use tool_calls from complete if we missed any + toolCalls: msg.toolCalls?.length ? 
msg.toolCalls : completeEvent.tool_calls, + }; + } + case 'done': { + return { + ...msg, + isStreaming: false, + pendingTools: [], + }; + } + case 'error': { + const errorMsg = event.data as string; + return { + ...msg, + isStreaming: false, + pendingTools: [], + content: `Error: ${errorMsg}`, + }; + } + case 'approval_needed': { + const data = event.data as AIStreamApprovalNeededData; + return { + ...msg, + pendingApprovals: [...(msg.pendingApprovals || []), { + command: data.command, + toolId: data.tool_id, + toolName: data.tool_name, + runOnHost: data.run_on_host, + }], + }; + } + default: + return msg; + } + }) + ); + }, + abortControllerRef?.signal + ); + } catch (error) { + // Don't show error for user-initiated abort + if (error instanceof Error && error.name === 'AbortError') { + console.log('[AIChat] Request aborted by user'); + return; + } + logger.error('[AIChat] Execute failed:', error); + const errorMessage = error instanceof Error ? error.message : 'Failed to get AI response'; + notificationStore.error(errorMessage); + + // Update the streaming message to show error + setMessages((prev) => + prev.map((msg) => + msg.id === assistantId + ? { ...msg, isStreaming: false, content: `Error: ${errorMessage}` } + : msg + ) + ); + } finally { + abortControllerRef = null; + setIsLoading(false); + } + }; + + const handleKeyDown = (e: KeyboardEvent) => { + if (e.key === 'Enter' && !e.shiftKey) { + e.preventDefault(); + handleSubmit(); + } + }; + + const clearChat = () => { + setMessages([]); + aiChatStore.clearConversation(); + }; + + // Execute an approved command + const executeApprovedCommand = async (messageId: string, approval: PendingApproval) => { + // Mark as executing + setMessages((prev) => + prev.map((m) => + m.id === messageId + ? { + ...m, + pendingApprovals: m.pendingApprovals?.map((a) => + a.toolId === approval.toolId ? 
{ ...a, isExecuting: true } : a + ), + } + : m + ) + ); + + try { + // Extract VMID from context if available + const vmid = contextData()?.vmid as string | undefined; + + const result = await AIAPI.runCommand({ + command: approval.command, + target_type: targetType() || '', + target_id: targetId() || '', + run_on_host: approval.runOnHost, + vmid, + }); + + // Move from pending approvals to completed tool calls + setMessages((prev) => + prev.map((m) => { + if (m.id !== messageId) return m; + + const newToolCall: AIToolExecution = { + name: approval.toolName, + input: approval.command, + output: result.output || result.error || '', + success: result.success, + }; + + return { + ...m, + pendingApprovals: m.pendingApprovals?.filter((a) => a.toolId !== approval.toolId), + toolCalls: [...(m.toolCalls || []), newToolCall], + }; + }) + ); + + if (result.success) { + notificationStore.success('Command executed successfully'); + } else { + notificationStore.error(result.error || 'Command failed'); + } + } catch (error) { + logger.error('[AIChat] Failed to execute approved command:', error); + const errorMsg = error instanceof Error ? error.message : 'Failed to execute command'; + notificationStore.error(errorMsg); + + // Mark as no longer executing + setMessages((prev) => + prev.map((m) => + m.id === messageId + ? { + ...m, + pendingApprovals: m.pendingApprovals?.map((a) => + a.toolId === approval.toolId ? { ...a, isExecuting: false } : a + ), + } + : m + ) + ); + } + }; + + // Panel renders as flex child, width controlled by isOpen state + return ( +
+ + {/* Header */} +
+
+
+ + + +
+
+

+ + Ask AI about {getGuestName(contextData())} + +

+ +

+ {targetType() === 'vm' ? 'Virtual Machine' : targetType() === 'container' ? 'LXC Container' : targetType()} +

+
+
+
+
+ + +
+
+ + {/* Messages Area */} +
+ +
+ + + + +

Start a conversation

+

+ Ask about your infrastructure, diagnose issues, or get remediation suggestions. +

+ + }> +

Ask about {getGuestName(contextData())}

+

+ AI has access to this guest's current metrics and state. Try asking: +

+
+ + + +
+
+
+
+ + + {(message) => ( +
+
+ {/* Show completed tool calls */} + 0}> +
+ + {(tool) => ( +
+
+ + + + {tool.input} +
+ +
+                                {tool.output.length > 500 ? tool.output.substring(0, 500) + '...' : tool.output}
+                              
+
+
+ )} +
+
+
+ + {/* Show subtle analyzing indicator when running commands */} + 0}> +
+ + + + + Analyzing... +
+
+ + {/* Show commands awaiting approval */} + 0}> +
+ + {(approval) => ( +
+
+ + + + Approval Required + + HOST + +
+
+ {approval.command} +
+ + +
+
+
+ )} +
+
+
+ + {/* Show streaming indicator if no content yet but streaming */} + +
+ + + + + Analyzing... +
+
+ + +
+ + {/* Minimal footer - no model/token info shown */} +
+
+ )} + + +
+
+ + {/* Input Area */} +
+ {/* Context section - always show with Add button */} +
+
+
+ + + + + Context {aiChatStore.contextItems.length > 0 ? `(${aiChatStore.contextItems.length})` : ''} + +
+
+ 0}> + + +
+
+ + {/* Context items */} +
+ + {(item) => ( + + {item.type} + {item.name} + + + )} + + + {/* Add context button */} +
+ + + {/* Context picker dropdown */} + +
+ {/* Search input */} +
+ setContextSearch(e.currentTarget.value)} + placeholder="Search VMs, containers, hosts..." + class="w-full px-2 py-1.5 text-xs rounded border border-gray-300 dark:border-gray-600 bg-white dark:bg-gray-900 text-gray-900 dark:text-gray-100 placeholder-gray-400 focus:outline-none focus:ring-1 focus:ring-purple-500" + autofocus + /> +
+ + {/* Resource list */} +
+ 0} fallback={ +
+ No resources found +
+ }> + + {(resource) => { + const isAlreadyAdded = () => aiChatStore.hasContextItem(resource.id); + return ( + + ); + }} + +
+
+ + {/* Close button */} +
+ +
+
+
+
+
+ + {/* Empty state hint */} + +

+ Add VMs, containers, or hosts to provide context for your questions +

+
+
+
+