diff --git a/.gemini/tasks/persistent-metrics-storage.md b/.gemini/tasks/persistent-metrics-storage.md new file mode 100644 index 000000000..a4dd3c3d2 --- /dev/null +++ b/.gemini/tasks/persistent-metrics-storage.md @@ -0,0 +1,182 @@ +# Task: Persistent Metrics Storage for Sparklines + +## Problem +Currently, metrics history for sparklines is stored **in-memory only**. When the Pulse backend restarts, all historical metrics are lost. Users expect to see historical trends even after being away for days. + +## Goal +Implement SQLite-based persistent metrics storage that: +- Survives backend restarts +- Provides historical data for sparklines/trends view +- Supports configurable retention periods +- Minimizes disk I/O and storage footprint + +## Architecture + +### Storage Tiers (Data Rollup) +``` +┌─────────────────────────────────────────────────────────┐ +│ RAW (5s intervals) → Keep 2 hours → ~1,440 pts │ +│ MINUTE (1min avg) → Keep 24 hours → ~1,440 pts │ +│ HOURLY (1hr avg) → Keep 7 days → ~168 pts │ +│ DAILY (1day avg) → Keep 90 days → ~90 pts │ +└─────────────────────────────────────────────────────────┘ +``` + +### Database Schema +```sql +-- Main metrics table (partitioned by time for efficient pruning) +CREATE TABLE metrics ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + resource_type TEXT NOT NULL, -- 'node', 'vm', 'container', 'storage' + resource_id TEXT NOT NULL, + metric_type TEXT NOT NULL, -- 'cpu', 'memory', 'disk' + value REAL NOT NULL, + timestamp INTEGER NOT NULL, -- Unix timestamp in seconds + tier TEXT DEFAULT 'raw' -- 'raw', 'minute', 'hourly', 'daily' +); + +-- Indexes for efficient queries +CREATE INDEX idx_metrics_lookup ON metrics(resource_type, resource_id, metric_type, tier, timestamp); +CREATE INDEX idx_metrics_timestamp ON metrics(timestamp); +CREATE INDEX idx_metrics_tier_time ON metrics(tier, timestamp); +``` + +### Configuration +```yaml +metrics: + enabled: true + database_path: "${PULSE_DATA_DIR}/metrics.db" + retention: + raw: 2h # 2 
hours of raw data + minute: 24h # 24 hours of 1-minute averages + hourly: 168h # 7 days of hourly averages + daily: 2160h # 90 days of daily averages + write_buffer: 100 # Buffer size before batch write + rollup_interval: 5m # How often to run rollup job +``` + +## Implementation Steps + +### Phase 1: SQLite Foundation ✅ COMPLETED +- [x] Add SQLite dependency (`modernc.org/sqlite` - pure Go, no CGO) +- [x] Create `internal/metrics/store.go` with: + - `Store` struct + - `NewStore(config StoreConfig) (*Store, error)` + - `Close() error` + - Schema auto-migration on startup + +### Phase 2: Write Path ✅ COMPLETED +- [x] Create `Write(resourceType, resourceID, metricType string, value float64, timestamp time.Time)` +- [x] Implement write buffering (batch inserts every 100 records or 5 seconds) +- [x] Integrate with existing `AddGuestMetric`, `AddNodeMetric` calls in monitor.go and monitor_polling.go +- [x] Add graceful shutdown to flush buffer + +### Phase 3: Read Path ✅ COMPLETED +- [x] Create `Query(resourceType, resourceID, metricType string, start, end time.Time) ([]MetricPoint, error)` +- [x] Auto-select appropriate tier based on time range: + - < 2 hours → raw data + - 2-24 hours → minute data + - 1-7 days → hourly data + - 7+ days → daily data +- [x] Add `/api/metrics-store/stats` endpoint for monitoring + +### Phase 4: Rollup & Retention ✅ COMPLETED +- [x] Create background rollup job: + - Runs every 5 minutes + - Aggregates raw → minute (AVG, MIN, MAX) + - Aggregates minute → hourly + - Aggregates hourly → daily +- [x] Create retention pruning job: + - Runs every hour + - Deletes data older than configured retention +- [x] Use SQLite transactions for atomic operations + +### Phase 5: Integration +- [ ] Add configuration to `system.json` or new `metrics.json` +- [ ] Add Settings UI for metrics retention config +- [ ] Add database file size monitoring +- [ ] Add vacuum/optimize scheduled job (weekly) + +## Files to Create/Modify + +### New Files +``` 
+internal/metrics/ +├── store.go # MetricsStore implementation +├── store_test.go # Unit tests +├── rollup.go # Rollup/aggregation logic +├── retention.go # Retention/pruning logic +└── config.go # Metrics configuration +``` + +### Files to Modify +``` +internal/monitoring/monitor.go # Initialize MetricsStore, call Write() +internal/monitoring/metrics_history.go # Keep in-memory as cache, backed by SQLite +internal/api/router.go # Update handleCharts to query from store +internal/config/persistence.go # Add metrics config persistence +``` + +## API Changes + +### `/api/charts` Query Parameters +``` +GET /api/charts?range=1h # Last hour (raw/minute data) +GET /api/charts?range=24h # Last 24 hours (minute data) +GET /api/charts?range=7d # Last 7 days (hourly data) +GET /api/charts?range=30d # Last 30 days (daily data) +GET /api/charts?start=...&end=... # Custom range +``` + +### Response Enhancement +```json +{ + "data": { ... }, + "nodeData": { ... }, + "stats": { + "oldestDataTimestamp": 1699900000000, + "tier": "hourly", + "pointCount": 168 + } +} +``` + +## Performance Considerations + +1. **Write Buffering**: Batch inserts to reduce I/O +2. **WAL Mode**: Enable SQLite WAL for concurrent reads/writes +3. **Prepared Statements**: Reuse for repeated queries +4. **Index Strategy**: Composite index on (resource_type, resource_id, metric_type, tier, timestamp) +5. **Connection Pooling**: Single connection with proper locking for SQLite +6. **Memory Mapping**: Use `PRAGMA mmap_size` for faster reads + +## Storage Estimates +For a typical Pulse installation (5 nodes, 50 VMs, 20 containers, 10 storage): +- 85 resources × 3 metrics = 255 metric series +- Raw (2h at 5s): ~367,200 rows → ~40 MB +- Minute (24h): ~367,200 rows → ~40 MB +- Hourly (7d): ~42,840 rows → ~5 MB +- Daily (90d): ~22,950 rows → ~3 MB +- **Total: ~60-100 MB** for comprehensive historical data + +## Testing Plan +1. Unit tests for store CRUD operations +2. Unit tests for rollup logic +3. 
Integration tests with mock monitor +4. Performance tests with 100+ resources +5. Restart resilience tests + +## Rollout Plan +1. Implement as opt-in feature (disable by default initially) +2. Add migration path from in-memory to SQLite +3. Test in dev environment for 1 week +4. Enable by default in next minor release + +## Definition of Done +- [ ] SQLite metrics storage implemented +- [ ] Data survives backend restart +- [ ] Rollup/retention working correctly +- [ ] Charts endpoint serves historical data +- [ ] Documentation updated +- [ ] Settings UI for retention config +- [ ] Performance validated (no noticeable slowdown) diff --git a/cmd/pulse/main.go b/cmd/pulse/main.go index a6bb384d0..c0d003797 100644 --- a/cmd/pulse/main.go +++ b/cmd/pulse/main.go @@ -183,8 +183,8 @@ func runServer() { Addr: fmt.Sprintf("%s:%d", cfg.BackendHost, cfg.FrontendPort), Handler: router.Handler(), ReadHeaderTimeout: 15 * time.Second, - WriteTimeout: 60 * time.Second, // Increased from 15s to 60s to support large JSON responses (e.g., mock data) - IdleTimeout: 60 * time.Second, + WriteTimeout: 0, // Disabled to support SSE/streaming - each handler manages its own deadline + IdleTimeout: 120 * time.Second, } // Start config watcher for .env file changes diff --git a/frontend-modern/index.html b/frontend-modern/index.html index 78bd7f095..0ce2d4562 100644 --- a/frontend-modern/index.html +++ b/frontend-modern/index.html @@ -6,6 +6,34 @@ Pulse + + + + diff --git a/frontend-modern/src/App.tsx b/frontend-modern/src/App.tsx index ea1e57074..2a858cd44 100644 --- a/frontend-modern/src/App.tsx +++ b/frontend-modern/src/App.tsx @@ -35,6 +35,8 @@ import { createTooltipSystem } from './components/shared/Tooltip'; import type { State } from '@/types/api'; import { ProxmoxIcon } from '@/components/icons/ProxmoxIcon'; import { startMetricsSampler } from './stores/metricsSampler'; +import { seedFromBackend } from './stores/metricsHistory'; +import { getMetricsViewMode } from 
'./stores/metricsViewMode'; import BoxesIcon from 'lucide-solid/icons/boxes'; import MonitorIcon from 'lucide-solid/icons/monitor'; import BellIcon from 'lucide-solid/icons/bell'; @@ -213,6 +215,13 @@ function App() { // Start metrics sampler for sparklines onMount(() => { startMetricsSampler(); + + // If user already has sparklines mode enabled, seed historical data immediately + if (getMetricsViewMode() === 'sparklines') { + seedFromBackend('1h').catch(() => { + // Errors are already logged in seedFromBackend + }); + } }); let hasPreloadedRoutes = false; @@ -253,23 +262,23 @@ function App() { pmg: [], replicationJobs: [], metrics: [], - pveBackups: { - backupTasks: [], - storageBackups: [], - guestSnapshots: [], - }, - pbsBackups: [], - pmgBackups: [], - backups: { - pve: { + pveBackups: { backupTasks: [], storageBackups: [], guestSnapshots: [], }, - pbs: [], - pmg: [], - }, - performance: { + pbsBackups: [], + pmgBackups: [], + backups: { + pve: { + backupTasks: [], + storageBackups: [], + guestSnapshots: [], + }, + pbs: [], + pmg: [], + }, + performance: { apiCallDuration: {}, lastPollDuration: 0, pollingStartTime: '', @@ -500,9 +509,9 @@ function App() { // Detect legacy DISABLE_AUTH flag (now ignored) so we can surface a warning if (securityData.deprecatedDisableAuth === true) { - logger.warn( - '[App] Legacy DISABLE_AUTH flag detected; authentication remains enabled. Remove the flag and restart Pulse to silence this warning.', - ); + logger.warn( + '[App] Legacy DISABLE_AUTH flag detected; authentication remains enabled. 
Remove the flag and restart Pulse to silence this warning.', + ); } const authConfigured = securityData.hasAuthentication || false; @@ -727,16 +736,19 @@ function App() { const RootLayout = (props: { children?: JSX.Element }) => { // Check AI settings on mount and setup keyboard shortcut onMount(() => { - // Check if AI is enabled - import('./api/ai').then(({ AIAPI }) => { - AIAPI.getSettings() - .then((settings) => { - aiChatStore.setEnabled(settings.enabled && settings.configured); - }) - .catch(() => { - aiChatStore.setEnabled(false); - }); - }); + // Only check AI settings if already authenticated (not on login screen) + // Otherwise, the 401 response triggers a redirect loop + if (!needsAuth()) { + import('./api/ai').then(({ AIAPI }) => { + AIAPI.getSettings() + .then((settings) => { + aiChatStore.setEnabled(settings.enabled && settings.configured); + }) + .catch(() => { + aiChatStore.setEnabled(false); + }); + }); + } // Keyboard shortcut: Cmd/Ctrl+K to toggle AI const handleKeyDown = (e: KeyboardEvent) => { @@ -762,14 +774,18 @@ function App() { +
Loading...
} > - }> + }> - Initializing...}> + +
Initializing...
+ + }> @@ -870,13 +886,12 @@ function ConnectionStatusBadge(props: { }) { return (
@@ -902,11 +917,10 @@ function ConnectionStatusBadge(props: { {props.connected() ? 'Connected' @@ -1241,12 +1255,12 @@ function AppLayout(props: { const baseClasses = 'tab relative px-2 sm:px-3 py-1.5 text-xs sm:text-sm font-medium flex items-center gap-1 sm:gap-1.5 rounded-t border border-transparent transition-colors whitespace-nowrap cursor-pointer'; - const className = () => { - if (isActive()) { - return `${baseClasses} bg-white dark:bg-gray-800 text-blue-600 dark:text-blue-400 border-gray-300 dark:border-gray-700 border-b border-b-white dark:border-b-gray-800 shadow-sm font-semibold`; - } - return `${baseClasses} text-gray-500 dark:text-gray-400 hover:text-gray-700 dark:hover:text-gray-300 hover:bg-gray-200/60 dark:hover:bg-gray-700/60`; - }; + const className = () => { + if (isActive()) { + return `${baseClasses} bg-white dark:bg-gray-800 text-blue-600 dark:text-blue-400 border-gray-300 dark:border-gray-700 border-b border-b-white dark:border-b-gray-800 shadow-sm font-semibold`; + } + return `${baseClasses} text-gray-500 dark:text-gray-400 hover:text-gray-700 dark:hover:text-gray-300 hover:bg-gray-200/60 dark:hover:bg-gray-700/60`; + }; return (
{ + // Ensure run_on_host is explicitly a boolean (not undefined) + const sanitizedRequest = { + command: request.command, + target_type: request.target_type, + target_id: request.target_id, + run_on_host: Boolean(request.run_on_host), + ...(request.vmid ? { vmid: String(request.vmid) } : {}), + ...(request.target_host ? { target_host: request.target_host } : {}), + }; + const body = JSON.stringify(sanitizedRequest); + console.log('[AI] runCommand request:', request); + console.log('[AI] runCommand sanitized:', sanitizedRequest); + console.log('[AI] runCommand body:', body); + console.log('[AI] runCommand body length:', body.length); return apiFetchJSON(`${this.baseUrl}/ai/run-command`, { method: 'POST', - body: JSON.stringify(request), + body, }) as Promise<{ output: string; success: boolean; error?: string }>; } + + // Investigate an alert with AI (one-click investigation) + static async investigateAlert( + request: { + alert_id: string; + resource_id: string; + resource_name: string; + resource_type: string; + alert_type: string; + level: string; + value: number; + threshold: number; + message: string; + duration: string; + node?: string; + vmid?: number; + }, + onEvent: (event: AIStreamEvent) => void, + signal?: AbortSignal + ): Promise { + console.log('[AI] Starting alert investigation:', request); + + const response = await apiFetch(`${this.baseUrl}/ai/investigate-alert`, { + method: 'POST', + body: JSON.stringify(request), + headers: { + 'Content-Type': 'application/json', + Accept: 'text/event-stream', + }, + signal, + }); + + if (!response.ok) { + const text = await response.text(); + throw new Error(text || `Request failed with status ${response.status}`); + } + + const reader = response.body?.getReader(); + if (!reader) { + throw new Error('No response body'); + } + + const decoder = new TextDecoder(); + let buffer = ''; + // 5 minutes timeout - Opus models can take a long time + const STREAM_TIMEOUT_MS = 300000; + let lastEventTime = Date.now(); + + try 
{ + while (true) { + if (Date.now() - lastEventTime > STREAM_TIMEOUT_MS) { + console.warn('[AI] Alert investigation stream timeout'); + break; + } + + const readPromise = reader.read(); + const timeoutPromise = new Promise((_, reject) => { + setTimeout(() => reject(new Error('Read timeout')), STREAM_TIMEOUT_MS); + }); + + let result: ReadableStreamReadResult; + try { + result = await Promise.race([readPromise, timeoutPromise]); + } catch (e) { + if ((e as Error).message === 'Read timeout') break; + throw e; + } + + const { done, value } = result; + if (done) break; + + lastEventTime = Date.now(); + buffer += decoder.decode(value, { stream: true }); + + const normalizedBuffer = buffer.replace(/\r\n/g, '\n'); + const messages = normalizedBuffer.split('\n\n'); + buffer = messages.pop() || ''; + + for (const message of messages) { + if (!message.trim() || message.trim().startsWith(':')) continue; + + const dataLines = message.split('\n').filter((line) => line.startsWith('data: ')); + for (const line of dataLines) { + try { + const jsonStr = line.slice(6); + if (!jsonStr.trim()) continue; + const data = JSON.parse(jsonStr); + onEvent(data as AIStreamEvent); + } catch (e) { + console.error('[AI] Failed to parse investigation event:', e); + } + } + } + } + } finally { + reader.releaseLock(); + } + } + // Execute an AI prompt with streaming // Returns an abort function to cancel the request static async executeStream( @@ -88,30 +202,89 @@ export class AIAPI { const decoder = new TextDecoder(); let buffer = ''; + let lastEventTime = Date.now(); + let receivedComplete = false; + let receivedDone = false; + + // Timeout to detect stalled streams (5 minutes - Opus models can take a long time) + const STREAM_TIMEOUT_MS = 300000; console.log('[AI SSE] Starting to read stream...'); try { while (true) { - const { done, value } = await reader.read(); - if (done) { - console.log('[AI SSE] Stream ended'); + // Check for stream timeout + if (Date.now() - lastEventTime > 
STREAM_TIMEOUT_MS) { + console.warn('[AI SSE] Stream timeout - no data for', STREAM_TIMEOUT_MS / 1000, 'seconds'); break; } + // Create a promise with timeout for the read operation + const readPromise = reader.read(); + const timeoutPromise = new Promise((_, reject) => { + setTimeout(() => reject(new Error('Read timeout')), STREAM_TIMEOUT_MS); + }); + + let result: ReadableStreamReadResult; + try { + result = await Promise.race([readPromise, timeoutPromise]); + } catch (e) { + if ((e as Error).message === 'Read timeout') { + console.warn('[AI SSE] Read timeout, ending stream'); + break; + } + throw e; + } + + const { done, value } = result; + if (done) { + console.log('[AI SSE] Stream ended normally'); + break; + } + + lastEventTime = Date.now(); const chunk = decoder.decode(value, { stream: true }); - console.log('[AI SSE] Received chunk:', chunk.length, 'bytes'); + + // Log chunk info only if it's not just a heartbeat + if (!chunk.includes(': heartbeat')) { + console.log('[AI SSE] Received chunk:', chunk.length, 'bytes'); + } + buffer += chunk; - // Process complete SSE messages - const lines = buffer.split('\n\n'); - buffer = lines.pop() || ''; // Keep incomplete message in buffer + // Process complete SSE messages (separated by double newlines) + // Handle both \n\n and \r\n\r\n for cross-platform compatibility + const normalizedBuffer = buffer.replace(/\r\n/g, '\n'); + const messages = normalizedBuffer.split('\n\n'); + buffer = messages.pop() || ''; // Keep incomplete message in buffer - for (const line of lines) { - if (line.startsWith('data: ')) { + for (const message of messages) { + // Skip empty messages and heartbeat comments + if (!message.trim() || message.trim().startsWith(':')) { + if (message.includes('heartbeat')) { + console.debug('[AI SSE] Received heartbeat'); + } + continue; + } + + // Parse SSE message (can have multiple lines, look for data: prefix) + const dataLines = message.split('\n').filter(line => line.startsWith('data: ')); + for 
(const line of dataLines) { try { - const data = JSON.parse(line.slice(6)); + const jsonStr = line.slice(6); // Remove 'data: ' prefix + if (!jsonStr.trim()) continue; + + const data = JSON.parse(jsonStr); console.log('[AI SSE] Parsed event:', data.type, data); + + // Track completion events + if (data.type === 'complete') { + receivedComplete = true; + } + if (data.type === 'done') { + receivedDone = true; + } + onEvent(data as AIStreamEvent); } catch (e) { console.error('[AI SSE] Failed to parse event:', e, line); @@ -119,9 +292,33 @@ export class AIAPI { } } } + + // Process any remaining buffer content + if (buffer.trim() && buffer.trim().startsWith('data: ')) { + try { + const jsonStr = buffer.slice(6); + if (jsonStr.trim()) { + const data = JSON.parse(jsonStr); + console.log('[AI SSE] Parsed final buffered event:', data.type); + onEvent(data as AIStreamEvent); + if (data.type === 'complete') receivedComplete = true; + if (data.type === 'done') receivedDone = true; + } + } catch (e) { + console.warn('[AI SSE] Could not parse remaining buffer:', buffer.substring(0, 100)); + } + } + + // If we ended without receiving a done event, send a synthetic one + // This ensures the UI properly clears the streaming state + if (!receivedDone) { + console.warn('[AI SSE] Stream ended without done event, sending synthetic done'); + onEvent({ type: 'done', data: undefined }); + } + } finally { reader.releaseLock(); - console.log('[AI SSE] Reader released'); + console.log('[AI SSE] Reader released, receivedComplete:', receivedComplete, 'receivedDone:', receivedDone); } } } diff --git a/frontend-modern/src/api/charts.ts b/frontend-modern/src/api/charts.ts new file mode 100644 index 000000000..ced0d904f --- /dev/null +++ b/frontend-modern/src/api/charts.ts @@ -0,0 +1,65 @@ +/** + * Charts API + * + * Fetches historical metrics data from the backend for sparkline visualizations. + * The backend maintains proper historical data with 30s sample intervals. 
+ */ + +import { apiFetchJSON } from '@/utils/apiClient'; + +// Types matching backend response format +export interface MetricPoint { + timestamp: number; // Unix timestamp in milliseconds + value: number; +} + +export interface ChartData { + cpu?: MetricPoint[]; + memory?: MetricPoint[]; + disk?: MetricPoint[]; + diskread?: MetricPoint[]; + diskwrite?: MetricPoint[]; + netin?: MetricPoint[]; + netout?: MetricPoint[]; +} + +export interface ChartStats { + oldestDataTimestamp: number; +} + +export interface ChartsResponse { + data: Record; // VM/Container data keyed by ID + nodeData: Record; // Node data keyed by ID + storageData: Record; // Storage data keyed by ID + timestamp: number; + stats: ChartStats; +} + +export type TimeRange = '5m' | '15m' | '30m' | '1h' | '4h' | '12h' | '24h' | '7d'; + +export class ChartsAPI { + private static baseUrl = '/api'; + + /** + * Fetch historical chart data for all resources + * @param range Time range to fetch (default: 1h) + */ + static async getCharts(range: TimeRange = '1h'): Promise { + const url = `${this.baseUrl}/charts?range=${range}`; + return apiFetchJSON(url); + } + + /** + * Fetch storage-specific chart data + * @param rangeMinutes Range in minutes (default: 60) + */ + static async getStorageCharts(rangeMinutes: number = 60): Promise> { + const url = `${this.baseUrl}/storage/charts?range=${rangeMinutes}`; + return apiFetchJSON(url); + } +} diff --git a/frontend-modern/src/components/AI/AIChat.tsx b/frontend-modern/src/components/AI/AIChat.tsx index 302e1d289..3894adc57 100644 --- a/frontend-modern/src/components/AI/AIChat.tsx +++ b/frontend-modern/src/components/AI/AIChat.tsx @@ -5,6 +5,7 @@ import { notificationStore } from '@/stores/notifications'; import { logger } from '@/utils/logger'; import { aiChatStore } from '@/stores/aiChat'; import { useWebSocket } from '@/App'; +import { GuestNotes } from './GuestNotes'; import type { AIToolExecution, AIStreamEvent, @@ -41,13 +42,16 @@ interface PendingApproval { 
toolId: string; toolName: string; runOnHost: boolean; + targetHost?: string; // Explicit host for command routing isExecuting?: boolean; } + interface Message { id: string; role: 'user' | 'assistant'; content: string; + thinking?: string; // DeepSeek reasoning/thinking content timestamp: Date; model?: string; tokens?: { input: number; output: number }; @@ -325,6 +329,31 @@ export const AIChat: Component = (props) => { }; setMessages((prev) => [...prev, streamingMessage]); + // Safety timeout - clear streaming state if we don't get any completion event + // This prevents the UI from getting stuck in a streaming state + let lastEventTime = Date.now(); + const SAFETY_TIMEOUT_MS = 120000; // 2 minutes + + const safetyCheckInterval = setInterval(() => { + const timeSinceLastEvent = Date.now() - lastEventTime; + if (timeSinceLastEvent > SAFETY_TIMEOUT_MS) { + console.warn('[AIChat] Safety timeout - forcing stream completion after', SAFETY_TIMEOUT_MS / 1000, 'seconds of inactivity'); + clearInterval(safetyCheckInterval); + setMessages((prev) => + prev.map((msg) => + msg.id === assistantId && msg.isStreaming + ? { ...msg, isStreaming: false, content: msg.content || '(Request timed out - no response received)' } + : msg + ) + ); + setIsLoading(false); + if (abortControllerRef) { + abortControllerRef.abort(); + abortControllerRef = null; + } + } + }, 10000); // Check every 10 seconds + try { await AIAPI.executeStream( { @@ -335,6 +364,7 @@ export const AIChat: Component = (props) => { history: history.length > 0 ? 
history : undefined, }, (event: AIStreamEvent) => { + lastEventTime = Date.now(); // Update last event time console.log('[AIChat] Received event:', event.type, event); // Update the streaming message based on event type setMessages((prev) => @@ -370,6 +400,13 @@ export const AIChat: Component = (props) => { toolCalls: [...(msg.toolCalls || []), newToolCall], }; } + case 'thinking': { + const thinking = event.data as string; + return { + ...msg, + thinking: (msg.thinking || '') + thinking, + }; + } case 'content': { const content = event.data as string; return { @@ -409,6 +446,16 @@ export const AIChat: Component = (props) => { content: `Error: ${errorMsg}`, }; } + case 'processing': { + // Show processing status for multi-iteration calls + const status = event.data as string; + console.log('[AIChat] Processing:', status); + // Add as a pending tool for visual feedback + return { + ...msg, + pendingTools: [{ name: 'processing', input: status }], + }; + } case 'approval_needed': { const data = event.data as AIStreamApprovalNeededData; return { @@ -417,10 +464,12 @@ export const AIChat: Component = (props) => { command: data.command, toolId: data.tool_id, toolName: data.tool_name, - runOnHost: data.run_on_host, + runOnHost: data.run_on_host ?? false, // Default to false if undefined + targetHost: data.target_host, // Pass through the explicit routing target }], }; } + default: return msg; } @@ -448,6 +497,7 @@ export const AIChat: Component = (props) => { ) ); } finally { + clearInterval(safetyCheckInterval); abortControllerRef = null; setIsLoading(false); } @@ -472,11 +522,11 @@ export const AIChat: Component = (props) => { prev.map((m) => m.id === messageId ? { - ...m, - pendingApprovals: m.pendingApprovals?.map((a) => - a.toolId === approval.toolId ? { ...a, isExecuting: true } : a - ), - } + ...m, + pendingApprovals: m.pendingApprovals?.map((a) => + a.toolId === approval.toolId ? 
{ ...a, isExecuting: true } : a + ), + } : m ) ); @@ -491,8 +541,10 @@ export const AIChat: Component = (props) => { target_id: targetId() || '', run_on_host: approval.runOnHost, vmid, + target_host: approval.targetHost, // Pass through the explicit routing target }); + // Move from pending approvals to completed tool calls setMessages((prev) => prev.map((m) => { @@ -528,11 +580,11 @@ export const AIChat: Component = (props) => { prev.map((m) => m.id === messageId ? { - ...m, - pendingApprovals: m.pendingApprovals?.map((a) => - a.toolId === approval.toolId ? { ...a, isExecuting: false } : a - ), - } + ...m, + pendingApprovals: m.pendingApprovals?.map((a) => + a.toolId === approval.toolId ? { ...a, isExecuting: false } : a + ), + } : m ) ); @@ -542,9 +594,8 @@ export const AIChat: Component = (props) => { // Panel renders as flex child, width controlled by isOpen state return (
{/* Header */} @@ -673,23 +724,40 @@ export const AIChat: Component = (props) => { class={`flex ${message.role === 'user' ? 'justify-end' : 'justify-start'}`} >
+ {/* Show thinking/reasoning content (DeepSeek) */} + +
+ + + + + + + + Thinking... + ({message.thinking!.length} chars) + +
+ {message.thinking!.length > 2000 ? message.thinking!.substring(0, 2000) + '...' : message.thinking} +
+
+
+ {/* Show completed tool calls FIRST - chronological order */} 0}>
{(tool) => (
-
+
@@ -734,11 +802,10 @@ export const AIChat: Component = (props) => {
{/* Status indicator */} - + 'bg-yellow-500' + }`} /> {/* Check if already added */} @@ -956,6 +1020,15 @@ export const AIChat: Component = (props) => { Add VMs, containers, or hosts to provide context for your questions

+ + {/* Guest Notes - show for first context item */} + 0}> + +