wip: AI chat integration with multi-provider support
- Add AI service with Anthropic, OpenAI, and Ollama providers
- Add AI chat UI component with streaming responses
- Add AI settings page for configuration
- Add agent exec framework for command execution
- Add API endpoints for AI chat and configuration
@@ -55,6 +55,12 @@ func main() {
	logger := zerolog.New(os.Stdout).Level(cfg.LogLevel).With().Timestamp().Logger()
	cfg.Logger = &logger

	// Deprecation warning
	logger.Warn().Msg("pulse-docker-agent is DEPRECATED and will be removed in a future release")
	logger.Warn().Msg("Please migrate to the unified 'pulse-agent' with --enable-docker flag")
	logger.Warn().Msg("Example: pulse-agent --url <URL> --token <TOKEN> --enable-docker")
	logger.Warn().Msg("")

	agent, err := dockeragent.New(cfg)
	if err != nil {
		logger.Fatal().Err(err).Msg("Failed to create docker agent")
@@ -61,6 +61,12 @@ func main() {

	g, ctx := errgroup.WithContext(ctx)

	// Deprecation warning
	logger.Warn().Msg("pulse-host-agent is DEPRECATED and will be removed in a future release")
	logger.Warn().Msg("Please migrate to the unified 'pulse-agent' with --enable-host flag")
	logger.Warn().Msg("Example: pulse-agent --url <URL> --token <TOKEN> --enable-host")
	logger.Warn().Msg("")

	logger.Info().
		Str("version", Version).
		Str("pulse_url", hostCfg.PulseURL).
docs/API.md (39 lines changed)
@@ -112,23 +112,6 @@ Triggers a test alert to all configured channels.

---

---
## 🖥️ Host Agent

### Submit Report
`POST /api/agents/host/report`
Used by the Pulse Host Agent to push system metrics.

### Lookup Agent
`POST /api/agents/host/lookup`
Check if a host agent is already registered.

### Delete Host
`DELETE /api/agents/host/<id>`
Remove a host agent from monitoring.

---

## ⚙️ System Settings

### Get Settings
@@ -157,15 +140,23 @@ Initiate OIDC login flow.

---

## 🐳 Docker Agent
## 🤖 Agent Endpoints

### Submit Report
`POST /api/agents/docker/report`
Used by the Pulse Docker Agent to push container metrics.
### Unified Agent (Recommended)
`GET /download/pulse-agent`
Downloads the unified agent binary for the current platform.

### Download Agent
`GET /download/pulse-docker-agent`
Downloads the binary for the current platform.
The unified agent combines host and Docker monitoring. Use `--enable-docker` to enable Docker metrics.

See [UNIFIED_AGENT.md](UNIFIED_AGENT.md) for installation instructions.

### Legacy Agents (Deprecated)
`GET /download/pulse-host-agent` - *Deprecated, use pulse-agent*
`GET /download/pulse-docker-agent` - *Deprecated, use pulse-agent --enable-docker*

### Submit Reports
`POST /api/agents/host/report` - Host metrics
`POST /api/agents/docker/report` - Docker container metrics

---
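For orientation, a minimal sketch of calling the lookup endpoint above from TypeScript. Only the path and method come from the docs; the request and response schemas are not shown in this diff, so the `hostname` and `registered` fields are hypothetical:

```typescript
// Hypothetical sketch - field names are assumptions, not taken from this commit.
async function isHostRegistered(hostname: string): Promise<boolean> {
  const res = await fetch('/api/agents/host/lookup', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ hostname }), // assumed request field
  });
  if (!res.ok) throw new Error(`Lookup failed: ${res.status}`);
  const data = await res.json();
  return Boolean(data.registered); // assumed response field
}
```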
frontend-modern/package-lock.json (42 lines changed, generated)
@@ -11,10 +11,12 @@
    "dependencies": {
      "@solidjs/router": "^0.10.10",
      "lucide-solid": "^0.545.0",
      "marked": "^17.0.1",
      "solid-js": "^1.8.0"
    },
    "devDependencies": {
      "@solidjs/testing-library": "^0.8.5",
      "@tailwindcss/typography": "^0.5.19",
      "@testing-library/jest-dom": "^6.5.0",
      "@types/node": "^20.10.0",
      "@typescript-eslint/eslint-plugin": "^8.0.0",
@@ -1429,6 +1431,33 @@
        }
      }
    },
    "node_modules/@tailwindcss/typography": {
      "version": "0.5.19",
      "resolved": "https://registry.npmjs.org/@tailwindcss/typography/-/typography-0.5.19.tgz",
      "integrity": "sha512-w31dd8HOx3k9vPtcQh5QHP9GwKcgbMp87j58qi6xgiBnFFtKEAgCWnDw4qUT8aHwkCp8bKvb/KGKWWHedP0AAg==",
      "dev": true,
      "license": "MIT",
      "dependencies": {
        "postcss-selector-parser": "6.0.10"
      },
      "peerDependencies": {
        "tailwindcss": ">=3.0.0 || insiders || >=4.0.0-alpha.20 || >=4.0.0-beta.1"
      }
    },
    "node_modules/@tailwindcss/typography/node_modules/postcss-selector-parser": {
      "version": "6.0.10",
      "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-6.0.10.tgz",
      "integrity": "sha512-IQ7TZdoaqbT+LCpShg46jnZVlhWD2w6iQYAcYXfHARZ7X1t/UGhhceQDs5X0cGqKvYlHNOuv7Oa1xmb0oQuA3w==",
      "dev": true,
      "license": "MIT",
      "dependencies": {
        "cssesc": "^3.0.0",
        "util-deprecate": "^1.0.2"
      },
      "engines": {
        "node": ">=4"
      }
    },
    "node_modules/@testing-library/dom": {
      "version": "10.4.1",
      "resolved": "https://registry.npmjs.org/@testing-library/dom/-/dom-10.4.1.tgz",
@@ -3895,6 +3924,18 @@
        "@jridgewell/sourcemap-codec": "^1.5.5"
      }
    },
    "node_modules/marked": {
      "version": "17.0.1",
      "resolved": "https://registry.npmjs.org/marked/-/marked-17.0.1.tgz",
      "integrity": "sha512-boeBdiS0ghpWcSwoNm/jJBwdpFaMnZWRzjA6SkUMYb40SVaN1x7mmfGKp0jvexGcx+7y2La5zRZsYFZI6Qpypg==",
      "license": "MIT",
      "bin": {
        "marked": "bin/marked.js"
      },
      "engines": {
        "node": ">= 20"
      }
    },
    "node_modules/math-intrinsics": {
      "version": "1.1.0",
      "resolved": "https://registry.npmjs.org/math-intrinsics/-/math-intrinsics-1.1.0.tgz",
@@ -5113,6 +5154,7 @@
      "integrity": "sha512-6A2rnmW5xZMdw11LYjhcI5846rt9pbLSabY5XPxo+XWdxwZaFEn47Go4NzFiHu9sNNmr/kXivP1vStfvMaK1GQ==",
      "dev": true,
      "license": "MIT",
      "peer": true,
      "dependencies": {
        "@alloc/quick-lru": "^5.2.0",
        "arg": "^5.0.2",

@@ -28,10 +28,12 @@
  "dependencies": {
    "@solidjs/router": "^0.10.10",
    "lucide-solid": "^0.545.0",
    "marked": "^17.0.1",
    "solid-js": "^1.8.0"
  },
  "devDependencies": {
    "@solidjs/testing-library": "^0.8.5",
    "@tailwindcss/typography": "^0.5.19",
    "@testing-library/jest-dom": "^6.5.0",
    "@types/node": "^20.10.0",
    "@typescript-eslint/eslint-plugin": "^8.0.0",
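The new `marked` dependency (paired with `@tailwindcss/typography`) suggests the chat UI renders model output as Markdown. A minimal sketch of that use, assuming streamed text is accumulated into a single Markdown string:

```typescript
import { marked } from 'marked';

// Render accumulated assistant output to HTML. marked.parse() is synchronous
// unless async extensions are registered, hence the cast.
// Note: marked does not sanitize; sanitize the HTML before inserting into the DOM.
export function renderMarkdown(md: string): string {
  return marked.parse(md) as string;
}
```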
@@ -43,6 +43,8 @@ import { TokenRevealDialog } from './components/TokenRevealDialog';
import { useAlertsActivation } from './stores/alertsActivation';
import { UpdateProgressModal } from './components/UpdateProgressModal';
import type { UpdateStatus } from './api/updates';
import { AIChat } from './components/AI/AIChat';
import { aiChatStore } from './stores/aiChat';

const Dashboard = lazy(() =>
  import('./components/Dashboard/Dashboard').then((module) => ({ default: module.Dashboard })),
@@ -723,6 +725,39 @@ function App() {

  // Root layout component for Router
  const RootLayout = (props: { children?: JSX.Element }) => {
    // Check AI settings on mount and setup keyboard shortcut
    onMount(() => {
      // Check if AI is enabled
      import('./api/ai').then(({ AIAPI }) => {
        AIAPI.getSettings()
          .then((settings) => {
            aiChatStore.setEnabled(settings.enabled && settings.configured);
          })
          .catch(() => {
            aiChatStore.setEnabled(false);
          });
      });

      // Keyboard shortcut: Cmd/Ctrl+K to toggle AI
      const handleKeyDown = (e: KeyboardEvent) => {
        if ((e.metaKey || e.ctrlKey) && e.key === 'k') {
          e.preventDefault();
          if (aiChatStore.enabled) {
            aiChatStore.toggle();
          }
        }
        // Escape to close
        if (e.key === 'Escape' && aiChatStore.isOpen) {
          aiChatStore.close();
        }
      };

      document.addEventListener('keydown', handleKeyDown);
      onCleanup(() => {
        document.removeEventListener('keydown', handleKeyDown);
      });
    });

    return (
      <Show
        when={!isLoading()}
@@ -741,24 +776,62 @@ function App() {
        <DemoBanner />
        <UpdateBanner />
        <GlobalUpdateProgressWatcher />
        <div class="min-h-screen bg-gray-100 dark:bg-gray-900 text-gray-800 dark:text-gray-200 font-sans py-4 sm:py-6">
          <AppLayout
            connected={connected}
            reconnecting={reconnecting}
            dataUpdated={dataUpdated}
            lastUpdateText={lastUpdateText}
            versionInfo={versionInfo}
            hasAuth={hasAuth}
            needsAuth={needsAuth}
            proxyAuthInfo={proxyAuthInfo}
            handleLogout={handleLogout}
            state={state}
          >
            {props.children}
          </AppLayout>
        {/* Main layout container - flexbox to allow AI panel to push content */}
        <div class="flex h-screen overflow-hidden">
          {/* Main content area - shrinks when AI panel is open, scrolls independently */}
          <div class={`flex-1 min-w-0 overflow-y-auto bg-gray-100 dark:bg-gray-900 text-gray-800 dark:text-gray-200 font-sans py-4 sm:py-6 transition-all duration-300`}>
            <AppLayout
              connected={connected}
              reconnecting={reconnecting}
              dataUpdated={dataUpdated}
              lastUpdateText={lastUpdateText}
              versionInfo={versionInfo}
              hasAuth={hasAuth}
              needsAuth={needsAuth}
              proxyAuthInfo={proxyAuthInfo}
              handleLogout={handleLogout}
              state={state}
            >
              {props.children}
            </AppLayout>
          </div>
          {/* AI Panel - slides in from right, pushes content */}
          <AIChat onClose={() => aiChatStore.close()} />
        </div>
        <ToastContainer />
        <TokenRevealDialog />
        {/* Fixed AI Assistant Button - always visible on the side when AI is enabled */}
        <Show when={aiChatStore.enabled !== false && !aiChatStore.isOpen}>
          {/* This component only shows when chat is closed */}
          <button
            type="button"
            onClick={() => aiChatStore.toggle()}
            class="fixed right-0 top-1/2 -translate-y-1/2 z-40 flex items-center gap-1.5 pl-2 pr-1.5 py-3 rounded-l-xl bg-gradient-to-r from-purple-600 to-purple-700 text-white shadow-lg hover:from-purple-700 hover:to-purple-800 transition-all duration-200 group"
            title={aiChatStore.context.context?.name ? `AI Assistant - ${aiChatStore.context.context.name}` : 'AI Assistant (⌘K)'}
            aria-label="Expand AI Assistant"
          >
            {/* Double chevron left - expand */}
            <svg
              class="h-4 w-4 flex-shrink-0"
              fill="none"
              stroke="currentColor"
              viewBox="0 0 24 24"
              stroke-width="2"
            >
              <path
                stroke-linecap="round"
                stroke-linejoin="round"
                d="M11 19l-7-7 7-7M18 19l-7-7 7-7"
              />
            </svg>
            {/* Context indicator - shows count when items are in context */}
            <Show when={aiChatStore.contextItems.length > 0}>
              <span class="min-w-[18px] h-[18px] px-1 flex items-center justify-center text-[10px] font-bold bg-green-500 text-white rounded-full">
                {aiChatStore.contextItems.length}
              </span>
            </Show>
          </button>
        </Show>
        <TooltipRoot />
      </DarkModeContext.Provider>
    </WebSocketContext.Provider>
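The `aiChatStore` module itself is not part of this diff; its surface can be inferred from the call sites above (`setEnabled`, `toggle`, `close`, `isOpen`, `enabled`, `contextItems`, `context`, plus `openForTarget`/`setTargetContext` used in later files). A minimal sketch of that inferred shape, assuming a SolidJS store - names and types are reconstructions, not the real stores/aiChat.ts:

```typescript
import { createStore } from 'solid-js/store';

// Inferred sketch; the actual implementation may differ in fields and behavior.
interface AIContextItem { type: string; id: string; data: Record<string, unknown> }

const [state, setState] = createStore({
  enabled: false,
  isOpen: false,
  contextItems: [] as AIContextItem[],
  context: { context: undefined as { name?: string } | undefined },
});

export const aiChatStore = {
  get enabled() { return state.enabled; },
  get isOpen() { return state.isOpen; },
  get contextItems() { return state.contextItems; },
  get context() { return state.context; },
  setEnabled: (v: boolean) => setState('enabled', v),
  toggle: () => setState('isOpen', (open) => !open),
  close: () => setState('isOpen', false),
  // Attach a monitored resource to the conversation context (deduped by id).
  setTargetContext(type: string, id: string, data: Record<string, unknown>) {
    setState('contextItems', (items) => [...items.filter((i) => i.id !== id), { type, id, data }]);
  },
  // Same, but also opens the panel (used by the "Ask AI" buttons).
  openForTarget(type: string, id: string, data: Record<string, unknown>) {
    this.setTargetContext(type, id, data);
    setState('isOpen', true);
  },
};
```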
frontend-modern/src/api/ai.ts (new file, 127 lines)
@@ -0,0 +1,127 @@
import { apiFetchJSON, apiFetch } from '@/utils/apiClient';
import type {
  AISettings,
  AISettingsUpdateRequest,
  AITestResult,
  AIExecuteRequest,
  AIExecuteResponse,
  AIStreamEvent,
} from '@/types/ai';

export class AIAPI {
  private static baseUrl = '/api';

  // Get AI settings
  static async getSettings(): Promise<AISettings> {
    return apiFetchJSON(`${this.baseUrl}/settings/ai`) as Promise<AISettings>;
  }

  // Update AI settings
  static async updateSettings(settings: AISettingsUpdateRequest): Promise<AISettings> {
    return apiFetchJSON(`${this.baseUrl}/settings/ai/update`, {
      method: 'PUT',
      body: JSON.stringify(settings),
    }) as Promise<AISettings>;
  }

  // Test AI connection
  static async testConnection(): Promise<AITestResult> {
    return apiFetchJSON(`${this.baseUrl}/ai/test`, {
      method: 'POST',
    }) as Promise<AITestResult>;
  }

  // Execute an AI prompt
  static async execute(request: AIExecuteRequest): Promise<AIExecuteResponse> {
    return apiFetchJSON(`${this.baseUrl}/ai/execute`, {
      method: 'POST',
      body: JSON.stringify(request),
    }) as Promise<AIExecuteResponse>;
  }

  // Run a single command (for approved commands)
  static async runCommand(request: {
    command: string;
    target_type: string;
    target_id: string;
    run_on_host: boolean;
    vmid?: string;
  }): Promise<{ output: string; success: boolean; error?: string }> {
    return apiFetchJSON(`${this.baseUrl}/ai/run-command`, {
      method: 'POST',
      body: JSON.stringify(request),
    }) as Promise<{ output: string; success: boolean; error?: string }>;
  }

  // Execute an AI prompt with streaming
  // Pass an AbortSignal to cancel the request mid-stream
  static async executeStream(
    request: AIExecuteRequest,
    onEvent: (event: AIStreamEvent) => void,
    signal?: AbortSignal
  ): Promise<void> {
    console.log('[AI SSE] Starting streaming request:', request);

    const response = await apiFetch(`${this.baseUrl}/ai/execute/stream`, {
      method: 'POST',
      body: JSON.stringify(request),
      headers: {
        'Content-Type': 'application/json',
        'Accept': 'text/event-stream',
      },
      signal,
    });

    console.log('[AI SSE] Response status:', response.status, response.headers.get('content-type'));

    if (!response.ok) {
      const text = await response.text();
      console.error('[AI SSE] Request failed:', text);
      throw new Error(text || `Request failed with status ${response.status}`);
    }

    const reader = response.body?.getReader();
    if (!reader) {
      console.error('[AI SSE] No response body');
      throw new Error('No response body');
    }

    const decoder = new TextDecoder();
    let buffer = '';

    console.log('[AI SSE] Starting to read stream...');

    try {
      while (true) {
        const { done, value } = await reader.read();
        if (done) {
          console.log('[AI SSE] Stream ended');
          break;
        }

        const chunk = decoder.decode(value, { stream: true });
        console.log('[AI SSE] Received chunk:', chunk.length, 'bytes');
        buffer += chunk;

        // Process complete SSE messages
        const lines = buffer.split('\n\n');
        buffer = lines.pop() || ''; // Keep incomplete message in buffer

        for (const line of lines) {
          if (line.startsWith('data: ')) {
            try {
              const data = JSON.parse(line.slice(6));
              console.log('[AI SSE] Parsed event:', data.type, data);
              onEvent(data as AIStreamEvent);
            } catch (e) {
              console.error('[AI SSE] Failed to parse event:', e, line);
            }
          }
        }
      }
    } finally {
      reader.releaseLock();
      console.log('[AI SSE] Reader released');
    }
  }
}
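A minimal usage sketch for the streaming API above. The concrete `AIExecuteRequest` and `AIStreamEvent` shapes live in types/ai, which is not part of this diff, so the `prompt` field is an assumed placeholder:

```typescript
import { AIAPI } from '@/api/ai';
import type { AIExecuteRequest, AIStreamEvent } from '@/types/ai';

// 'prompt' is an assumed field name for illustration; see types/ai for the real shape.
const request = { prompt: 'Why is CPU high on this VM?' } as unknown as AIExecuteRequest;

const controller = new AbortController();
await AIAPI.executeStream(
  request,
  (event: AIStreamEvent) => {
    // One callback per parsed SSE "data:" frame.
    console.log(event);
  },
  controller.signal, // controller.abort() cancels the underlying fetch mid-stream
);
```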
@@ -6,6 +6,7 @@ export interface DockerMetadata {
  customUrl?: string;
  description?: string;
  tags?: string[];
  notes?: string[]; // User annotations for AI context
}

export class DockerMetadataAPI {

@@ -6,6 +6,7 @@ export interface GuestMetadata {
  customUrl?: string;
  description?: string;
  tags?: string[];
  notes?: string[]; // User annotations for AI context
}

export class GuestMetadataAPI {
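Both metadata APIs now persist free-form notes. The components later in this commit follow a read-modify-write flow on the whole `notes` array; a compact sketch (the guest ID here is illustrative):

```typescript
import { GuestMetadataAPI } from '@/api/guestMetadata';

// Append one annotation: load current notes, extend, write the full array back.
const guestId = 'pve1-101'; // illustrative `${instance}-${vmid}` id
const meta = await GuestMetadataAPI.getMetadata(guestId);
const notes = [...(meta.notes ?? []), 'Runs the family Nextcloud; backup nightly'];
await GuestMetadataAPI.updateMetadata(guestId, { notes });
```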
frontend-modern/src/components/AI/AIChat.tsx (new file, 1001 lines)
File diff suppressed because it is too large.
@@ -1,7 +1,11 @@
import { Component, Show, For } from 'solid-js';
import { Component, Show, For, createSignal, createEffect } from 'solid-js';
import { VM, Container } from '@/types/api';
import { formatBytes } from '@/utils/format';
import { formatBytes, formatPercent, formatSpeed, formatUptime } from '@/utils/format';
import { DiskList } from './DiskList';
import { aiChatStore } from '@/stores/aiChat';
import { AIAPI } from '@/api/ai';
import { GuestMetadataAPI } from '@/api/guestMetadata';
import { logger } from '@/utils/logger';

type Guest = VM | Container;

@@ -12,10 +16,185 @@ interface GuestDrawerProps {
}

export const GuestDrawer: Component<GuestDrawerProps> = (props) => {
  const [aiEnabled, setAiEnabled] = createSignal(false);
  const [annotations, setAnnotations] = createSignal<string[]>([]);
  const [newAnnotation, setNewAnnotation] = createSignal('');
  const [saving, setSaving] = createSignal(false);

  // Build guest ID for metadata
  const guestId = () => props.guest.id || `${props.guest.instance}-${props.guest.vmid}`;

  // Check if AI is enabled and load annotations on mount
  createEffect(() => {
    AIAPI.getSettings()
      .then((settings) => setAiEnabled(settings.enabled && settings.configured))
      .catch((err) => logger.debug('[GuestDrawer] AI settings check failed:', err));

    // Load existing annotations
    GuestMetadataAPI.getMetadata(guestId())
      .then((meta) => {
        if (meta.notes && Array.isArray(meta.notes)) setAnnotations(meta.notes);
      })
      .catch((err) => logger.debug('[GuestDrawer] Failed to load annotations:', err));
  });

  // Update AI context whenever guest changes or annotations are loaded
  createEffect(() => {
    const guestType = props.guest.type === 'qemu' ? 'vm' : 'container';
    // Track annotations to re-run when they change
    void annotations();
    aiChatStore.setTargetContext(guestType, guestId(), {
      guestName: props.guest.name,
      ...buildGuestContext(),
    });
  });

  // Note: We no longer clear context on unmount - context accumulates across navigation
  // Users can clear individual items or all context from the AI panel

  const saveAnnotations = async (updated: string[]) => {
    setSaving(true);
    try {
      await GuestMetadataAPI.updateMetadata(guestId(), { notes: updated });
      logger.debug('[GuestDrawer] Annotations saved');
    } catch (err) {
      logger.error('[GuestDrawer] Failed to save annotations:', err);
    } finally {
      setSaving(false);
    }
  };

  const addAnnotation = () => {
    const text = newAnnotation().trim();
    if (!text) return;
    const updated = [...annotations(), text];
    setAnnotations(updated);
    setNewAnnotation('');
    saveAnnotations(updated);
  };

  const removeAnnotation = (index: number) => {
    const updated = annotations().filter((_, i) => i !== index);
    setAnnotations(updated);
    saveAnnotations(updated);
  };

  const handleKeyDown = (e: KeyboardEvent) => {
    if (e.key === 'Enter') {
      e.preventDefault();
      addAnnotation();
    }
  };

  const isVM = (guest: Guest): guest is VM => {
    return guest.type === 'qemu';
  };

  // Build comprehensive context for AI
  const buildGuestContext = () => {
    const guest = props.guest;
    const context: Record<string, unknown> = {
      name: guest.name,
      type: guest.type === 'qemu' ? 'Virtual Machine' : 'LXC Container',
      vmid: guest.vmid,
      node: guest.node, // PVE node this guest lives on
      guest_node: guest.node, // For backend agent routing
      status: guest.status,
      uptime: guest.uptime ? formatUptime(guest.uptime) : 'Not running',
    };

    // CPU info
    if (guest.cpu !== undefined) {
      context.cpu_usage = formatPercent(guest.cpu * 100);
    }
    if (guest.cpus) {
      context.cpu_cores = guest.cpus;
    }

    // Memory info
    if (guest.memory) {
      context.memory_used = formatBytes(guest.memory.used || 0);
      context.memory_total = formatBytes(guest.memory.total || 0);
      context.memory_usage = formatPercent(guest.memory.usage || 0);
      if (guest.memory.balloon && guest.memory.balloon !== guest.memory.total) {
        context.memory_balloon = formatBytes(guest.memory.balloon);
      }
      if (guest.memory.swapTotal && guest.memory.swapTotal > 0) {
        context.swap_used = formatBytes(guest.memory.swapUsed || 0);
        context.swap_total = formatBytes(guest.memory.swapTotal);
      }
    }

    // Disk info
    if (guest.disk && guest.disk.total > 0) {
      context.disk_used = formatBytes(guest.disk.used || 0);
      context.disk_total = formatBytes(guest.disk.total || 0);
      context.disk_usage = formatPercent((guest.disk.used / guest.disk.total) * 100);
    }

    // I/O rates
    if (guest.diskRead !== undefined) {
      context.disk_read_rate = formatSpeed(guest.diskRead);
    }
    if (guest.diskWrite !== undefined) {
      context.disk_write_rate = formatSpeed(guest.diskWrite);
    }
    if (guest.networkIn !== undefined) {
      context.network_in_rate = formatSpeed(guest.networkIn);
    }
    if (guest.networkOut !== undefined) {
      context.network_out_rate = formatSpeed(guest.networkOut);
    }

    // OS info (VMs only)
    if (isVM(guest)) {
      if (guest.osName) context.os_name = guest.osName;
      if (guest.osVersion) context.os_version = guest.osVersion;
      if (guest.agentVersion) context.guest_agent = guest.agentVersion;
      if (guest.ipAddresses?.length) context.ip_addresses = guest.ipAddresses;
    }

    // Tags
    if (guest.tags?.length) {
      context.tags = guest.tags;
    }

    // Backup info - Pulse already has this from PBS
    if (guest.lastBackup) {
      const backupDate = new Date(guest.lastBackup);
      const now = new Date();
      const daysSinceBackup = Math.floor((now.getTime() - backupDate.getTime()) / (1000 * 60 * 60 * 24));
      context.last_backup = backupDate.toISOString();
      context.days_since_backup = daysSinceBackup;
      if (daysSinceBackup > 30) {
        context.backup_status = 'CRITICAL - No backup in over 30 days';
      } else if (daysSinceBackup > 7) {
        context.backup_status = 'Warning - Backup is over a week old';
      } else {
        context.backup_status = 'OK';
      }
    } else {
      context.backup_status = 'NEVER - No backup recorded';
    }

    // User annotations (for AI context)
    if (annotations().length > 0) {
      context.user_annotations = annotations();
    }

    return context;
  };

  const handleAskAI = () => {
    const guestType = props.guest.type === 'qemu' ? 'vm' : 'container';
    const guestId = props.guest.id || `${props.guest.instance}-${props.guest.vmid}`;

    aiChatStore.openForTarget(guestType, guestId, {
      guestName: props.guest.name,
      ...buildGuestContext(),
    });
  };

  const hasOsInfo = () => {
    if (!isVM(props.guest)) return false;
    return (props.guest.osName?.length ?? 0) > 0 || (props.guest.osVersion?.length ?? 0) > 0;
@@ -79,7 +258,9 @@ export const GuestDrawer: Component<GuestDrawerProps> = (props) => {
  };

  return (
    <div class="flex items-start gap-4">
    <div class="space-y-3">
      {/* Top row: metrics columns */}
      <div class="flex items-start gap-4">
        {/* Left Column: Guest Overview */}
        <div class="min-w-[220px] flex-1 rounded border border-gray-200 bg-white/70 p-2 shadow-sm dark:border-gray-600/70 dark:bg-gray-900/30">
          <div class="text-[11px] font-medium text-gray-700 dark:text-gray-200">Guest Overview</div>
@@ -194,6 +375,73 @@ export const GuestDrawer: Component<GuestDrawerProps> = (props) => {
            </div>
          </div>
        </Show>
      </div>

      {/* Bottom row: AI Context & Ask AI - only show when AI is enabled */}
      <Show when={aiEnabled()}>
        <div class="mt-3 pt-3 border-t border-gray-200 dark:border-gray-700 space-y-2">
          <div class="flex items-center gap-1.5">
            <span class="text-[10px] font-medium text-gray-500 dark:text-gray-400">AI Context</span>
            <Show when={saving()}>
              <span class="text-[9px] text-gray-400">saving...</span>
            </Show>
          </div>

          {/* Existing annotations */}
          <Show when={annotations().length > 0}>
            <div class="flex flex-wrap gap-1.5">
              <For each={annotations()}>
                {(annotation, index) => (
                  <span class="inline-flex items-center gap-1 px-2 py-1 text-[11px] rounded-md bg-purple-100 text-purple-800 dark:bg-purple-900/40 dark:text-purple-200">
                    <span class="max-w-[300px] truncate">{annotation}</span>
                    <button
                      type="button"
                      onClick={() => removeAnnotation(index())}
                      class="ml-0.5 p-0.5 rounded hover:bg-purple-200 dark:hover:bg-purple-800 transition-colors"
                      title="Remove"
                    >
                      <svg class="w-3 h-3" fill="none" stroke="currentColor" viewBox="0 0 24 24">
                        <path stroke-linecap="round" stroke-linejoin="round" stroke-width="2" d="M6 18L18 6M6 6l12 12" />
                      </svg>
                    </button>
                  </span>
                )}
              </For>
            </div>
          </Show>

          {/* Add new annotation + Ask AI */}
          <div class="flex items-center gap-2">
            <input
              type="text"
              value={newAnnotation()}
              onInput={(e) => setNewAnnotation(e.currentTarget.value)}
              onKeyDown={handleKeyDown}
              placeholder="Add context for AI (press Enter)..."
              class="flex-1 px-2 py-1.5 text-[11px] rounded border border-gray-200 dark:border-gray-600 bg-white dark:bg-gray-800 text-gray-700 dark:text-gray-200 placeholder-gray-400 dark:placeholder-gray-500 focus:outline-none focus:ring-1 focus:ring-purple-500 focus:border-purple-500"
            />
            <button
              type="button"
              onClick={addAnnotation}
              disabled={!newAnnotation().trim()}
              class="px-2 py-1.5 text-[11px] rounded border border-gray-200 dark:border-gray-600 bg-white dark:bg-gray-800 text-gray-700 dark:text-gray-200 hover:bg-gray-50 dark:hover:bg-gray-700 disabled:opacity-50 disabled:cursor-not-allowed transition-colors"
            >
              Add
            </button>
            <button
              type="button"
              onClick={handleAskAI}
              class="flex items-center gap-1.5 px-3 py-1.5 rounded-lg bg-gradient-to-r from-purple-500 to-pink-500 text-white text-[11px] font-medium shadow-sm hover:from-purple-600 hover:to-pink-600 transition-all"
              title={`Ask AI about ${props.guest.name}`}
            >
              <svg class="w-3.5 h-3.5" fill="none" stroke="currentColor" viewBox="0 0 24 24">
                <path stroke-linecap="round" stroke-linejoin="round" stroke-width="2" d="M9.75 3.104v5.714a2.25 2.25 0 01-.659 1.591L5 14.5M9.75 3.104c-.251.023-.501.05-.75.082m.75-.082a24.301 24.301 0 014.5 0m0 0v5.714c0 .597.237 1.17.659 1.591L19.8 15.3M14.25 3.104c.251.023.501.05.75.082M19.8 15.3l-1.57.393A9.065 9.065 0 0112 15a9.065 9.065 0 00-6.23.693L5 14.5m14.8.8l1.402 1.402c1.232 1.232.65 3.318-1.067 3.611l-2.576.43a18.003 18.003 0 01-5.118 0l-2.576-.43c-1.717-.293-2.299-2.379-1.067-3.611L5 14.5" />
              </svg>
              Ask AI
            </button>
          </div>
        </div>
      </Show>
    </div>
  );
};
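The backup-staleness thresholds above (7 and 30 days) are easy to unit-test if factored out. A possible extraction - a refactoring sketch, not code from this commit:

```typescript
// Pure classification mirroring the thresholds used in buildGuestContext().
export function classifyBackupAge(daysSinceBackup: number | null): string {
  if (daysSinceBackup === null) return 'NEVER - No backup recorded';
  if (daysSinceBackup > 30) return 'CRITICAL - No backup in over 30 days';
  if (daysSinceBackup > 7) return 'Warning - Backup is over a week old';
  return 'OK';
}

classifyBackupAge(3);  // 'OK'
classifyBackupAge(12); // 'Warning - Backup is over a week old'
```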
@@ -9,6 +9,8 @@ import { DockerMetadataAPI } from '@/api/dockerMetadata';
import { resolveHostRuntime } from './runtimeDisplay';
import { showSuccess, showError } from '@/utils/toast';
import { logger } from '@/utils/logger';
import { aiChatStore } from '@/stores/aiChat';
import { AIAPI } from '@/api/ai';
import { buildMetricKey } from '@/utils/metricsKeys';
import { StatusDot } from '@/components/shared/StatusDot';
import {
@@ -790,6 +792,85 @@ const DockerContainerRow: Component<{
  });
  let urlInputRef: HTMLInputElement | undefined;

  // Annotations and AI state
  const [aiEnabled, setAiEnabled] = createSignal(false);
  const [annotations, setAnnotations] = createSignal<string[]>([]);
  const [newAnnotation, setNewAnnotation] = createSignal('');
  const [saving, setSaving] = createSignal(false);

  // Check if AI is enabled and load annotations on mount
  createEffect(() => {
    AIAPI.getSettings()
      .then((settings) => setAiEnabled(settings.enabled && settings.configured))
      .catch((err) => logger.debug('[DockerContainer] AI settings check failed:', err));

    // Load existing annotations
    DockerMetadataAPI.getMetadata(resourceId())
      .then((meta) => {
        if (meta.notes && Array.isArray(meta.notes)) setAnnotations(meta.notes);
      })
      .catch((err) => logger.debug('[DockerContainer] Failed to load annotations:', err));
  });

  const saveAnnotations = async (newAnnotations: string[]) => {
    setSaving(true);
    try {
      await DockerMetadataAPI.updateMetadata(resourceId(), { notes: newAnnotations });
      logger.debug('[DockerContainer] Annotations saved');
    } catch (err) {
      logger.error('[DockerContainer] Failed to save annotations:', err);
    } finally {
      setSaving(false);
    }
  };

  const addAnnotation = () => {
    const text = newAnnotation().trim();
    if (!text) return;
    const updated = [...annotations(), text];
    setAnnotations(updated);
    setNewAnnotation('');
    saveAnnotations(updated);
  };

  const removeAnnotation = (index: number) => {
    const updated = annotations().filter((_, i) => i !== index);
    setAnnotations(updated);
    saveAnnotations(updated);
  };

  const handleKeyDown = (e: KeyboardEvent) => {
    if (e.key === 'Enter' && !e.shiftKey) {
      e.preventDefault();
      addAnnotation();
    }
  };

  const buildContainerContext = () => {
    const ctx: Record<string, unknown> = {
      name: container.name,
      type: 'Docker Container',
      host: host.hostname,
      status: container.status || container.state,
      image: container.image,
    };
    if (container.cpuPercent !== undefined) ctx.cpu_usage = formatPercent(container.cpuPercent);
    if (container.memoryUsageBytes !== undefined) ctx.memory_used = formatBytes(container.memoryUsageBytes);
    if (container.memoryLimitBytes !== undefined) ctx.memory_limit = formatBytes(container.memoryLimitBytes);
    if (container.memoryPercent !== undefined) ctx.memory_usage = formatPercent(container.memoryPercent);
    if (container.uptimeSeconds) ctx.uptime = formatUptime(container.uptimeSeconds);
    if (container.ports?.length) ctx.ports = container.ports.map(p => p.publicPort ? `${p.publicPort}:${p.privatePort}/${p.protocol}` : `${p.privatePort}/${p.protocol}`);
    if (annotations().length > 0) ctx.user_notes = annotations().join('; ');
    return ctx;
  };

  const handleAskAI = () => {
    aiChatStore.openForTarget('container', resourceId(), {
      containerName: container.name,
      ...buildContainerContext(),
    });
  };

  const writableLayerBytes = createMemo(() => container.writableLayerBytes ?? 0);
  const rootFilesystemBytes = createMemo(() => container.rootFilesystemBytes ?? 0);
  const hasDiskStats = createMemo(() => writableLayerBytes() > 0 || rootFilesystemBytes() > 0);
@@ -1630,6 +1711,72 @@ const DockerContainerRow: Component<{
          </div>
        </div>
      </Show>

      {/* Annotations & Ask AI row */}
      <Show when={aiEnabled()}>
        <div class="mt-3 pt-3 border-t border-gray-200 dark:border-gray-700 w-full space-y-2">
          <div class="flex items-center gap-1.5">
            <span class="text-[10px] font-medium text-gray-500 dark:text-gray-400">AI Context</span>
            <Show when={saving()}>
              <span class="text-[9px] text-gray-400">saving...</span>
            </Show>
          </div>

          {/* Existing annotations */}
          <Show when={annotations().length > 0}>
            <div class="flex flex-wrap gap-1.5">
              <For each={annotations()}>
                {(annotation, index) => (
                  <span class="inline-flex items-center gap-1 px-2 py-1 text-[11px] rounded-md bg-purple-100 text-purple-800 dark:bg-purple-900/40 dark:text-purple-200">
                    <span class="max-w-[300px] truncate">{annotation}</span>
                    <button
                      type="button"
                      onClick={() => removeAnnotation(index())}
                      class="ml-0.5 p-0.5 rounded hover:bg-purple-200 dark:hover:bg-purple-800 transition-colors"
                      title="Remove"
                    >
                      <svg class="w-3 h-3" fill="none" stroke="currentColor" viewBox="0 0 24 24">
                        <path stroke-linecap="round" stroke-linejoin="round" stroke-width="2" d="M6 18L18 6M6 6l12 12" />
                      </svg>
                    </button>
                  </span>
                )}
              </For>
            </div>
          </Show>

          {/* Add new annotation */}
          <div class="flex items-center gap-2">
            <input
              type="text"
              value={newAnnotation()}
              onInput={(e) => setNewAnnotation(e.currentTarget.value)}
              onKeyDown={handleKeyDown}
              placeholder="Add context for AI (press Enter)..."
              class="flex-1 px-2 py-1.5 text-[11px] rounded border border-gray-200 dark:border-gray-600 bg-white dark:bg-gray-800 text-gray-700 dark:text-gray-200 placeholder-gray-400 dark:placeholder-gray-500 focus:outline-none focus:ring-1 focus:ring-purple-500 focus:border-purple-500"
            />
            <button
              type="button"
              onClick={addAnnotation}
              disabled={!newAnnotation().trim()}
              class="px-2 py-1.5 text-[11px] rounded border border-purple-300 dark:border-purple-600 text-purple-700 dark:text-purple-300 hover:bg-purple-50 dark:hover:bg-purple-900/30 disabled:opacity-40 disabled:cursor-not-allowed transition-colors"
            >
              Add
            </button>
            <button
              type="button"
              onClick={handleAskAI}
              class="flex items-center gap-1.5 px-3 py-1.5 rounded-lg bg-gradient-to-r from-purple-500 to-pink-500 text-white text-[11px] font-medium shadow-sm hover:from-purple-600 hover:to-pink-600 transition-all"
              title={`Ask AI about ${container.name}`}
            >
              <svg class="w-3.5 h-3.5" fill="none" stroke="currentColor" viewBox="0 0 24 24">
                <path stroke-linecap="round" stroke-linejoin="round" stroke-width="2" d="M9.75 3.104v5.714a2.25 2.25 0 01-.659 1.591L5 14.5M9.75 3.104c-.251.023-.501.05-.75.082m.75-.082a24.301 24.301 0 014.5 0m0 0v5.714c0 .597.237 1.17.659 1.591L19.8 15.3M14.25 3.104c.251.023.501.05.75.082M19.8 15.3l-1.57.393A9.065 9.065 0 0112 15a9.065 9.065 0 00-6.23.693L5 14.5m14.8.8l1.402 1.402c1.232 1.232.65 3.318-1.067 3.611l-2.576.43a18.003 18.003 0 01-5.118 0l-2.576-.43c-1.717-.293-2.299-2.379-1.067-3.611L5 14.5" />
              </svg>
              Ask AI
            </button>
          </div>
        </div>
      </Show>
    </div>
  </div>
</td>
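The dense single-line ports mapping in buildContainerContext() produces Docker-style strings; the same expression shown standalone with a worked result:

```typescript
const ports: { publicPort?: number; privatePort: number; protocol: string }[] = [
  { publicPort: 8080, privatePort: 80, protocol: 'tcp' },
  { privatePort: 53, protocol: 'udp' }, // not published on the host
];
const formatted = ports.map(p =>
  p.publicPort ? `${p.publicPort}:${p.privatePort}/${p.protocol}` : `${p.privatePort}/${p.protocol}`,
);
// -> ['8080:80/tcp', '53/udp']
```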
@@ -16,6 +16,10 @@ import { StackedMemoryBar } from '@/components/Dashboard/StackedMemoryBar';
import { EnhancedCPUBar } from '@/components/Dashboard/EnhancedCPUBar';
import { TemperatureGauge } from '@/components/shared/TemperatureGauge';
import { useBreakpoint } from '@/hooks/useBreakpoint';
import { GuestMetadataAPI } from '@/api/guestMetadata';
import { AIAPI } from '@/api/ai';
import { aiChatStore } from '@/stores/aiChat';
import { logger } from '@/utils/logger';

// Global drawer state to persist across re-renders
const drawerState = new Map<string, boolean>();
@@ -335,6 +339,76 @@ export const HostsOverview: Component<HostsOverviewProps> = (props) => {
  // Drawer state
  const [drawerOpen, setDrawerOpen] = createSignal(drawerState.get(host.id) ?? false);

  // AI and annotations state
  const [aiEnabled, setAiEnabled] = createSignal(false);
  const [annotations, setAnnotations] = createSignal<string[]>([]);
  const [newAnnotation, setNewAnnotation] = createSignal('');
  const [saving, setSaving] = createSignal(false);

  // Load AI settings and annotations when drawer opens
  createEffect(() => {
    if (drawerOpen()) {
      AIAPI.getSettings()
        .then((settings) => setAiEnabled(settings.enabled && settings.configured))
        .catch((err) => logger.debug('[HostsOverview] AI settings check failed:', err));

      GuestMetadataAPI.getMetadata(`host-${host.id}`)
        .then((meta) => {
          if (meta.notes && Array.isArray(meta.notes)) setAnnotations(meta.notes);
        })
        .catch((err) => logger.debug('[HostsOverview] Failed to load annotations:', err));
    }
  });

  const saveAnnotations = async (updated: string[]) => {
    setSaving(true);
    try {
      await GuestMetadataAPI.updateMetadata(`host-${host.id}`, { notes: updated });
    } catch (err) {
      logger.error('[HostsOverview] Failed to save annotations:', err);
    } finally {
      setSaving(false);
    }
  };

  const addAnnotation = () => {
    const text = newAnnotation().trim();
    if (!text) return;
    const updated = [...annotations(), text];
    setAnnotations(updated);
    setNewAnnotation('');
    saveAnnotations(updated);
  };

  const removeAnnotation = (index: number) => {
    const updated = annotations().filter((_, i) => i !== index);
    setAnnotations(updated);
    saveAnnotations(updated);
  };

  const handleKeyDown = (e: KeyboardEvent) => {
    if (e.key === 'Enter') {
      e.preventDefault();
      addAnnotation();
    }
  };

  const handleAskAI = () => {
    const context: Record<string, unknown> = {
      hostName: host.displayName || host.hostname,
      hostname: host.hostname,
      platform: host.platform,
      osName: host.osName,
      osVersion: host.osVersion,
      cpuUsage: host.cpuUsage ? `${host.cpuUsage.toFixed(1)}%` : undefined,
      memoryUsage: host.memory?.usage ? `${host.memory.usage.toFixed(1)}%` : undefined,
      uptime: host.uptimeSeconds ? formatUptime(host.uptimeSeconds) : undefined,
    };
    if (annotations().length > 0) context.user_annotations = annotations();

    aiChatStore.openForTarget('host', host.id, context);
  };

  // Check if we have additional info to show in drawer
  const hasDrawerContent = createMemo(() => {
    return (
@@ -687,6 +761,72 @@ export const HostsOverview: Component<HostsOverviewProps> = (props) => {
        </For>
      </Show>
    </div>

    {/* AI Context & Ask AI row */}
    <Show when={aiEnabled()}>
      <div class="mt-3 pt-3 border-t border-gray-200 dark:border-gray-700 space-y-2">
        <div class="flex items-center gap-1.5">
          <span class="text-[10px] font-medium text-gray-500 dark:text-gray-400">AI Context</span>
          <Show when={saving()}>
            <span class="text-[9px] text-gray-400">saving...</span>
          </Show>
        </div>

        {/* Existing annotations */}
        <Show when={annotations().length > 0}>
          <div class="flex flex-wrap gap-1.5">
            <For each={annotations()}>
              {(annotation, index) => (
                <span class="inline-flex items-center gap-1 px-2 py-1 text-[11px] rounded-md bg-purple-100 text-purple-800 dark:bg-purple-900/40 dark:text-purple-200">
                  <span class="max-w-[300px] truncate">{annotation}</span>
                  <button
                    type="button"
                    onClick={() => removeAnnotation(index())}
                    class="ml-0.5 p-0.5 rounded hover:bg-purple-200 dark:hover:bg-purple-800 transition-colors"
                    title="Remove"
                  >
                    <svg class="w-3 h-3" fill="none" stroke="currentColor" viewBox="0 0 24 24">
                      <path stroke-linecap="round" stroke-linejoin="round" stroke-width="2" d="M6 18L18 6M6 6l12 12" />
                    </svg>
                  </button>
                </span>
              )}
            </For>
          </div>
        </Show>

        {/* Add new annotation + Ask AI */}
        <div class="flex items-center gap-2">
          <input
            type="text"
            value={newAnnotation()}
            onInput={(e) => setNewAnnotation(e.currentTarget.value)}
            onKeyDown={handleKeyDown}
            placeholder="Add context for AI (press Enter)..."
            class="flex-1 px-2 py-1.5 text-[11px] rounded border border-gray-200 dark:border-gray-600 bg-white dark:bg-gray-800 text-gray-700 dark:text-gray-200 placeholder-gray-400 dark:placeholder-gray-500 focus:outline-none focus:ring-1 focus:ring-purple-500 focus:border-purple-500"
          />
          <button
            type="button"
            onClick={addAnnotation}
            disabled={!newAnnotation().trim()}
            class="px-2 py-1.5 text-[11px] rounded border border-gray-200 dark:border-gray-600 bg-white dark:bg-gray-800 text-gray-700 dark:text-gray-200 hover:bg-gray-50 dark:hover:bg-gray-700 disabled:opacity-50 disabled:cursor-not-allowed transition-colors"
          >
            Add
          </button>
          <button
            type="button"
            onClick={handleAskAI}
            class="flex items-center gap-1.5 px-3 py-1.5 rounded-lg bg-gradient-to-r from-purple-500 to-pink-500 text-white text-[11px] font-medium shadow-sm hover:from-purple-600 hover:to-pink-600 transition-all"
            title={`Ask AI about ${host.displayName || host.hostname}`}
          >
            <svg class="w-3.5 h-3.5" fill="none" stroke="currentColor" viewBox="0 0 24 24">
              <path stroke-linecap="round" stroke-linejoin="round" stroke-width="2" d="M9.75 3.104v5.714a2.25 2.25 0 01-.659 1.591L5 14.5M9.75 3.104c-.251.023-.501.05-.75.082m.75-.082a24.301 24.301 0 014.5 0m0 0v5.714c0 .597.237 1.17.659 1.591L19.8 15.3M14.25 3.104c.251.023.501.05.75.082M19.8 15.3l-1.57.393A9.065 9.065 0 0112 15a9.065 9.065 0 00-6.23.693L5 14.5m14.8.8l1.402 1.402c1.232 1.232.65 3.318-1.067 3.611l-2.576.43a18.003 18.003 0 01-5.118 0l-2.576-.43c-1.717-.293-2.299-2.379-1.067-3.611L5 14.5" />
            </svg>
            Ask AI
          </button>
        </div>
      </div>
    </Show>
  </div>
</td>
</tr>
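The annotations block is now duplicated across GuestDrawer, the Docker container row, and this host drawer. One possible shared primitive - a refactoring sketch, not part of this commit; `load` and `persist` abstract over the two metadata APIs:

```typescript
import { createSignal } from 'solid-js';

// Shared optimistic-update flow: mutate local state first, then persist.
export function createAnnotations(
  load: () => Promise<string[] | undefined>,
  persist: (notes: string[]) => Promise<unknown>,
) {
  const [annotations, setAnnotations] = createSignal<string[]>([]);
  const [saving, setSaving] = createSignal(false);

  load().then((notes) => notes && setAnnotations(notes)).catch(() => {});

  const save = async (updated: string[]) => {
    setAnnotations(updated);
    setSaving(true);
    try { await persist(updated); } finally { setSaving(false); }
  };

  return {
    annotations, saving,
    add(text: string) {
      const t = text.trim();
      if (t) void save([...annotations(), t]);
    },
    remove: (index: number) => void save(annotations().filter((_, i) => i !== index)),
  };
}
```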
frontend-modern/src/components/Settings/AISettings.tsx (new file, 424 lines)
@@ -0,0 +1,424 @@
import { Component, Show, createSignal, onMount, For } from 'solid-js';
import { createStore } from 'solid-js/store';
import { Card } from '@/components/shared/Card';
import { SectionHeader } from '@/components/shared/SectionHeader';
import { Toggle } from '@/components/shared/Toggle';
import { formField, labelClass, controlClass, formHelpText } from '@/components/shared/Form';
import { notificationStore } from '@/stores/notifications';
import { logger } from '@/utils/logger';
import { AIAPI } from '@/api/ai';
import type { AISettings as AISettingsType, AIProvider } from '@/types/ai';
import { PROVIDER_NAMES, PROVIDER_DESCRIPTIONS, DEFAULT_MODELS } from '@/types/ai';

const PROVIDERS: AIProvider[] = ['anthropic', 'openai', 'ollama'];

export const AISettings: Component = () => {
  const [settings, setSettings] = createSignal<AISettingsType | null>(null);
  const [loading, setLoading] = createSignal(false);
  const [saving, setSaving] = createSignal(false);
  const [testing, setTesting] = createSignal(false);

  const [form, setForm] = createStore({
    enabled: false,
    provider: 'anthropic' as AIProvider,
    apiKey: '',
    model: '',
    baseUrl: '',
    clearApiKey: false,
    autonomousMode: false,
  });

  const resetForm = (data: AISettingsType | null) => {
    if (!data) {
      setForm({
        enabled: false,
        provider: 'anthropic',
        apiKey: '',
        model: DEFAULT_MODELS.anthropic,
        baseUrl: '',
        clearApiKey: false,
        autonomousMode: false,
      });
      return;
    }

    setForm({
      enabled: data.enabled,
      provider: data.provider,
      apiKey: '',
      model: data.model || DEFAULT_MODELS[data.provider],
      baseUrl: data.base_url || '',
      clearApiKey: false,
      autonomousMode: data.autonomous_mode || false,
    });
  };

  const loadSettings = async () => {
    setLoading(true);
    try {
      const data = await AIAPI.getSettings();
      setSettings(data);
      resetForm(data);
    } catch (error) {
      logger.error('[AISettings] Failed to load settings:', error);
      notificationStore.error('Failed to load AI settings');
      setSettings(null);
      resetForm(null);
    } finally {
      setLoading(false);
    }
  };

  onMount(() => {
    loadSettings();
  });

  const handleProviderChange = (provider: AIProvider) => {
    setForm('provider', provider);
    // Update model to default for new provider if current model doesn't look like it belongs
    const currentModel = form.model;
    if (!currentModel || currentModel === DEFAULT_MODELS[settings()?.provider || 'anthropic']) {
      setForm('model', DEFAULT_MODELS[provider]);
    }
  };

  const handleSave = async (event?: Event) => {
    event?.preventDefault();

    setSaving(true);
    try {
      const payload: Record<string, unknown> = {
        provider: form.provider,
        model: form.model.trim(),
      };

      // Only include base_url if it's set or if provider is ollama
      if (form.baseUrl.trim() || form.provider === 'ollama') {
        payload.base_url = form.baseUrl.trim();
      }

      // Handle API key
      if (form.apiKey.trim() !== '') {
        payload.api_key = form.apiKey.trim();
      } else if (form.clearApiKey) {
        payload.api_key = '';
      }

      // Only include enabled if we're toggling it
      if (form.enabled !== settings()?.enabled) {
        payload.enabled = form.enabled;
      }

      // Include autonomous mode if changed
      if (form.autonomousMode !== settings()?.autonomous_mode) {
        payload.autonomous_mode = form.autonomousMode;
      }

      const updated = await AIAPI.updateSettings(payload);
      setSettings(updated);
      resetForm(updated);
      notificationStore.success('AI settings saved');
    } catch (error) {
      logger.error('[AISettings] Failed to save settings:', error);
      const message = error instanceof Error ? error.message : 'Failed to save AI settings';
      notificationStore.error(message);
    } finally {
      setSaving(false);
    }
  };

  const handleTest = async () => {
    setTesting(true);
    try {
      const result = await AIAPI.testConnection();
      if (result.success) {
        notificationStore.success(result.message);
      } else {
        notificationStore.error(result.message);
      }
    } catch (error) {
      logger.error('[AISettings] Test failed:', error);
      const message = error instanceof Error ? error.message : 'Connection test failed';
      notificationStore.error(message);
    } finally {
      setTesting(false);
    }
  };

  const needsApiKey = () => form.provider !== 'ollama';
  const showBaseUrl = () => form.provider === 'ollama' || form.provider === 'openai';

  return (
    <Card
      padding="none"
      class="overflow-hidden border border-gray-200 dark:border-gray-700"
      border={false}
    >
      <div class="bg-gradient-to-r from-purple-50 to-pink-50 dark:from-purple-900/20 dark:to-pink-900/20 px-6 py-4 border-b border-gray-200 dark:border-gray-700">
        <div class="flex items-center gap-3">
          <div class="p-2 bg-purple-100 dark:bg-purple-900/40 rounded-lg">
            <svg
              class="w-5 h-5 text-purple-600 dark:text-purple-300"
              fill="none"
              stroke="currentColor"
              viewBox="0 0 24 24"
            >
              <path
                stroke-linecap="round"
                stroke-linejoin="round"
                stroke-width="1.8"
                d="M9.75 3.104v5.714a2.25 2.25 0 01-.659 1.591L5 14.5M9.75 3.104c-.251.023-.501.05-.75.082m.75-.082a24.301 24.301 0 014.5 0m0 0v5.714c0 .597.237 1.17.659 1.591L19.8 15.3M14.25 3.104c.251.023.501.05.75.082M19.8 15.3l-1.57.393A9.065 9.065 0 0112 15a9.065 9.065 0 00-6.23.693L5 14.5m14.8.8l1.402 1.402c1.232 1.232.65 3.318-1.067 3.611l-2.576.43a18.003 18.003 0 01-5.118 0l-2.576-.43c-1.717-.293-2.299-2.379-1.067-3.611L5 14.5"
              />
            </svg>
          </div>
          <SectionHeader
            title="AI Assistant"
            description="Configure AI-powered infrastructure analysis"
            size="sm"
            class="flex-1"
          />
          <Toggle
            checked={form.enabled}
            onChange={(event) => {
              setForm('enabled', event.currentTarget.checked);
            }}
            disabled={loading() || saving()}
            containerClass="items-center gap-2"
            label={
              <span class="text-xs font-medium text-gray-600 dark:text-gray-300">
                {form.enabled ? 'Enabled' : 'Disabled'}
              </span>
            }
          />
        </div>
      </div>

      <form class="p-6 space-y-5" onSubmit={handleSave}>
        <div class="bg-purple-50 dark:bg-purple-900/30 border border-purple-200 dark:border-purple-800 rounded-lg p-3 text-xs text-purple-800 dark:text-purple-200">
          <p class="font-medium mb-1">AI Assistant helps you:</p>
          <ul class="space-y-0.5 list-disc pl-4">
            <li>Diagnose infrastructure issues with context-aware analysis</li>
            <li>Get remediation suggestions based on your specific environment</li>
            <li>Understand alerts and metrics with plain-language explanations</li>
          </ul>
        </div>

        <Show when={loading()}>
          <div class="flex items-center gap-3 text-sm text-gray-600 dark:text-gray-300">
            <span class="h-4 w-4 border-2 border-current border-t-transparent rounded-full animate-spin" />
            Loading AI settings...
          </div>
        </Show>

        <Show when={!loading()}>
          <div class="space-y-4">
            {/* Provider Selection */}
            <div class={formField}>
              <label class={labelClass()}>AI Provider</label>
              <div class="grid grid-cols-3 gap-3">
                <For each={PROVIDERS}>
                  {(provider) => (
                    <button
                      type="button"
                      class={`p-3 rounded-lg border-2 text-left transition-all ${
                        form.provider === provider
                          ? 'border-purple-500 bg-purple-50 dark:bg-purple-900/30'
                          : 'border-gray-200 dark:border-gray-700 hover:border-gray-300 dark:hover:border-gray-600'
                      }`}
                      onClick={() => handleProviderChange(provider)}
                      disabled={saving()}
                    >
                      <div class="font-medium text-sm text-gray-900 dark:text-gray-100">
                        {PROVIDER_NAMES[provider]}
                      </div>
                      <div class="text-xs text-gray-500 dark:text-gray-400 mt-0.5">
                        {PROVIDER_DESCRIPTIONS[provider]}
                      </div>
                    </button>
                  )}
                </For>
              </div>
            </div>

            {/* API Key - not shown for Ollama */}
            <Show when={needsApiKey()}>
              <div class={formField}>
                <div class="flex items-center justify-between">
                  <label class={labelClass('mb-0')}>API Key</label>
                  <Show when={settings()?.api_key_set}>
                    <button
                      type="button"
                      class="text-xs text-purple-600 hover:underline dark:text-purple-300"
                      onClick={() => {
                        if (!saving()) {
                          setForm('apiKey', '');
                          setForm('clearApiKey', true);
                          notificationStore.info('API key will be cleared on save', 2500);
                        }
                      }}
                      disabled={saving()}
                    >
                      Clear stored key
                    </button>
                  </Show>
                </div>
                <input
                  type="password"
                  value={form.apiKey}
                  onInput={(event) => {
                    setForm('apiKey', event.currentTarget.value);
                    if (event.currentTarget.value.trim() !== '') {
                      setForm('clearApiKey', false);
                    }
                  }}
                  placeholder={
                    settings()?.api_key_set
                      ? '•••••••• (leave blank to keep existing)'
                      : `Enter ${PROVIDER_NAMES[form.provider]} API key`
                  }
                  class={controlClass()}
                  disabled={saving()}
                />
                <p class={formHelpText}>
                  {form.provider === 'anthropic'
                    ? 'Get your API key from console.anthropic.com'
                    : 'Get your API key from platform.openai.com'}
                </p>
              </div>
            </Show>

            {/* Model */}
            <div class={formField}>
              <label class={labelClass()}>Model</label>
              <input
                type="text"
                value={form.model}
                onInput={(event) => setForm('model', event.currentTarget.value)}
                placeholder={DEFAULT_MODELS[form.provider]}
                class={controlClass()}
                disabled={saving()}
              />
              <p class={formHelpText}>
                {form.provider === 'anthropic'
                  ? 'e.g., claude-opus-4-5-20251101, claude-sonnet-4-20250514'
                  : form.provider === 'openai'
                    ? 'e.g., gpt-4o, gpt-4-turbo'
                    : 'e.g., llama3, mixtral, codellama'}
              </p>
            </div>

            {/* Base URL - shown for Ollama (required) and OpenAI (optional) */}
            <Show when={showBaseUrl()}>
              <div class={formField}>
                <label class={labelClass()}>
                  {form.provider === 'ollama' ? 'Ollama Server URL' : 'API Base URL (optional)'}
                </label>
                <input
                  type="url"
                  value={form.baseUrl}
                  onInput={(event) => setForm('baseUrl', event.currentTarget.value)}
                  placeholder={
                    form.provider === 'ollama'
                      ? 'http://localhost:11434'
                      : 'https://api.openai.com/v1'
                  }
                  class={controlClass()}
                  disabled={saving()}
                />
                <p class={formHelpText}>
                  {form.provider === 'ollama'
                    ? 'URL where your Ollama server is running'
                    : 'Custom endpoint for Azure OpenAI or compatible APIs'}
                </p>
              </div>
            </Show>

            {/* Autonomous Mode */}
            <div class={`${formField} p-4 rounded-lg border ${form.autonomousMode ? 'bg-amber-50 dark:bg-amber-900/20 border-amber-200 dark:border-amber-800' : 'bg-gray-50 dark:bg-gray-800/50 border-gray-200 dark:border-gray-700'}`}>
              <div class="flex items-start justify-between gap-4">
                <div class="flex-1">
                  <label class={`${labelClass()} flex items-center gap-2`}>
                    Autonomous Mode
                    <Show when={form.autonomousMode}>
                      <span class="px-1.5 py-0.5 text-[10px] font-semibold bg-amber-200 dark:bg-amber-800 text-amber-800 dark:text-amber-200 rounded">
                        ENABLED
                      </span>
                    </Show>
                  </label>
                  <p class="text-xs text-gray-600 dark:text-gray-400 mt-1">
                    {form.autonomousMode
                      ? 'AI will execute all commands without asking for approval. Only enable if you trust your configured model.'
                      : 'AI will ask for approval before running commands that modify your system. Read-only commands (like df, ps, docker stats) run automatically.'}
                  </p>
                </div>
                <Toggle
                  checked={form.autonomousMode}
                  onChange={(event) => setForm('autonomousMode', event.currentTarget.checked)}
                  disabled={saving()}
                />
              </div>
            </div>

          </div>

          {/* Status indicator */}
          <Show when={settings()}>
            <div
              class={`flex items-center gap-2 p-3 rounded-lg ${
                settings()?.configured
                  ? 'bg-green-50 dark:bg-green-900/30 text-green-800 dark:text-green-200'
                  : 'bg-amber-50 dark:bg-amber-900/30 text-amber-800 dark:text-amber-200'
              }`}
            >
              <div
                class={`w-2 h-2 rounded-full ${
                  settings()?.configured ? 'bg-green-500' : 'bg-amber-500'
                }`}
              />
              <span class="text-xs font-medium">
                {settings()?.configured
                  ? `Ready to use with ${settings()?.model}`
                  : needsApiKey()
                    ? 'API key required to enable AI features'
                    : 'Configure Ollama server URL to enable AI features'}
              </span>
            </div>
          </Show>

          {/* Actions */}
          <div class="flex flex-wrap items-center justify-between gap-3 pt-4">
            <Show when={settings()?.api_key_set}>
              <button
                type="button"
                class="px-4 py-2 text-sm border border-purple-300 dark:border-purple-600 text-purple-700 dark:text-purple-300 rounded-md hover:bg-purple-50 dark:hover:bg-purple-900/30 disabled:opacity-50 disabled:cursor-not-allowed"
                onClick={handleTest}
                disabled={testing() || saving() || loading()}
              >
                {testing() ? 'Testing...' : 'Test Connection'}
              </button>
            </Show>
            <div class="flex gap-3 ml-auto">
              <button
                type="button"
                class="px-4 py-2 border border-gray-300 dark:border-gray-600 text-gray-700 dark:text-gray-300 rounded-md hover:bg-gray-50 dark:hover:bg-gray-700"
                onClick={() => resetForm(settings())}
                disabled={saving() || loading()}
              >
                Reset
              </button>
              <button
                type="submit"
                class="px-4 py-2 bg-purple-600 text-white rounded-md hover:bg-purple-700 disabled:opacity-50 disabled:cursor-not-allowed"
                disabled={saving() || loading()}
              >
                {saving() ? 'Saving...' : 'Save changes'}
              </button>
            </div>
          </div>
        </Show>
      </form>
    </Card>
  );
};

export default AISettings;
@@ -27,6 +27,7 @@ import { ChangePasswordModal } from './ChangePasswordModal';
import { UnifiedAgents } from './UnifiedAgents';
import APITokenManager from './APITokenManager';
import { OIDCPanel } from './OIDCPanel';
import { AISettings } from './AISettings';
import { QuickSecuritySetup } from './QuickSecuritySetup';
import { SecurityPostureSummary } from './SecurityPostureSummary';
import {
@@ -57,6 +58,7 @@ import Monitor from 'lucide-solid/icons/monitor';
import Sliders from 'lucide-solid/icons/sliders-horizontal';
import RefreshCw from 'lucide-solid/icons/refresh-cw';
import Clock from 'lucide-solid/icons/clock';
import Sparkles from 'lucide-solid/icons/sparkles';
import { ProxmoxIcon } from '@/components/icons/ProxmoxIcon';
import BadgeCheck from 'lucide-solid/icons/badge-check';
import type { NodeConfig } from '@/types/nodes';
@@ -324,6 +326,7 @@ type SettingsTab =
  | 'system-network'
  | 'system-updates'
  | 'system-backups'
  | 'system-ai'
  | 'api'
  | 'security-overview'
  | 'security-auth'
@@ -369,6 +372,10 @@ const SETTINGS_HEADER_META: Record<SettingsTab, { title: string; description: st
    title: 'Backup Polling',
    description: 'Control how often Pulse queries Proxmox for backup tasks and snapshots.',
  },
  'system-ai': {
    title: 'AI Assistant',
    description: 'Configure AI-powered infrastructure analysis and remediation suggestions.',
  },
  api: {
    title: 'API access',
    description:
@@ -1281,6 +1288,12 @@ const Settings: Component<SettingsProps> = (props) => {
        icon: Clock,
        iconProps: { strokeWidth: 2 },
      },
      {
        id: 'system-ai',
        label: 'AI Assistant',
        icon: Sparkles,
        iconProps: { strokeWidth: 2 },
      },
    ],
  },
  {
@@ -4915,6 +4928,11 @@ const Settings: Component<SettingsProps> = (props) => {
        </div>
      </Show>

      {/* AI Assistant Tab */}
      <Show when={activeTab() === 'system-ai'}>
        <AISettings />
      </Show>

      {/* API Access */}
      <Show when={activeTab() === 'api'}>
        <div class="space-y-6">
192
frontend-modern/src/stores/aiChat.ts
Normal file
@@ -0,0 +1,192 @@
import { createSignal } from 'solid-js';

interface AIChatContext {
  targetType?: string;
  targetId?: string;
  context?: Record<string, unknown>;
  initialPrompt?: string;
}

// A single context item that can be accumulated
interface ContextItem {
  id: string; // unique identifier (e.g., "vm-delly-101")
  type: string; // "vm", "container", "storage", "node", etc.
  name: string; // display name
  data: Record<string, unknown>; // the actual context data
  addedAt: Date;
}

// Message type for persisted conversation
interface Message {
  id: string;
  role: 'user' | 'assistant';
  content: string;
  timestamp: Date;
  model?: string;
  tokens?: { input: number; output: number };
  toolCalls?: Array<{
    name: string;
    input: string;
    output: string;
    success: boolean;
  }>;
}

// Global state for the AI chat drawer
const [isAIChatOpen, setIsAIChatOpen] = createSignal(false);
const [aiChatContext, setAIChatContext] = createSignal<AIChatContext>({});
const [contextItems, setContextItems] = createSignal<ContextItem[]>([]);
const [messages, setMessages] = createSignal<Message[]>([]);
const [aiEnabled, setAiEnabled] = createSignal<boolean | null>(null); // null = not checked yet

export const aiChatStore = {
  // Check if chat is open
  get isOpen() {
    return isAIChatOpen();
  },

  // Get current context (legacy single-item)
  get context() {
    return aiChatContext();
  },

  // Get all accumulated context items
  get contextItems() {
    return contextItems();
  },

  // Get messages (for persistence)
  get messages() {
    return messages();
  },

  // Get AI enabled state
  get enabled() {
    return aiEnabled();
  },

  // Check if a specific item is in context
  hasContextItem(id: string) {
    return contextItems().some(item => item.id === id);
  },

  // Set AI enabled state (called from settings check)
  setEnabled(enabled: boolean) {
    setAiEnabled(enabled);
  },

  // Set messages (for persistence from AIChat component)
  setMessages(msgs: Message[]) {
    setMessages(msgs);
  },

  // Toggle the AI chat panel
  toggle() {
    setIsAIChatOpen(!isAIChatOpen());
  },

  // Open the AI chat with optional context
  open(context?: AIChatContext) {
    if (context) {
      setAIChatContext(context);
    }
    setIsAIChatOpen(true);
  },

  // Close the AI chat
  close() {
    setIsAIChatOpen(false);
    // Keep context and messages for when the user reopens
  },

  // Update context without opening (for navigation-based context changes)
  setContext(context: AIChatContext) {
    setAIChatContext(context);
  },

  // Clear single-item context (legacy)
  clearContext() {
    setAIChatContext({});
  },

  // Add an item to the context (accumulative)
  addContextItem(type: string, id: string, name: string, data: Record<string, unknown>) {
    setContextItems(prev => {
      // Don't add duplicates
      if (prev.some(item => item.id === id)) {
        // Update existing item with new data
        return prev.map(item =>
          item.id === id
            ? { ...item, data, addedAt: new Date() }
            : item
        );
      }
      return [...prev, { id, type, name, data, addedAt: new Date() }];
    });
    // Also update legacy context to point to the most recently added item
    setAIChatContext({
      targetType: type,
      targetId: id,
      context: data,
    });
  },

  // Remove an item from context
  removeContextItem(id: string) {
    setContextItems(prev => prev.filter(item => item.id !== id));
    // Update legacy context if we removed the current one
    const current = aiChatContext();
    if (current.targetId === id) {
      const remaining = contextItems().filter(item => item.id !== id);
      if (remaining.length > 0) {
        const last = remaining[remaining.length - 1];
        setAIChatContext({
          targetType: last.type,
          targetId: last.id,
          context: last.data,
        });
      } else {
        setAIChatContext({});
      }
    }
  },

  // Clear all context items
  clearAllContext() {
    setContextItems([]);
    setAIChatContext({});
  },

  // Clear conversation (start fresh)
  clearConversation() {
    setMessages([]);
  },

  // Convenience method to update context for a specific target (host, VM, container, etc.)
  // This is called when the user selects/views a specific resource
  setTargetContext(targetType: string, targetId: string, additionalContext?: Record<string, unknown>) {
    // Use addContextItem instead of replacing
    const name = (additionalContext?.guestName as string) ||
      (additionalContext?.name as string) ||
      targetId;
    this.addContextItem(targetType, targetId, name, additionalContext || {});
  },

  // Open for a specific target - opens the panel and adds to context
  openForTarget(targetType: string, targetId: string, additionalContext?: Record<string, unknown>) {
    const name = (additionalContext?.guestName as string) ||
      (additionalContext?.name as string) ||
      targetId;
    this.addContextItem(targetType, targetId, name, additionalContext || {});
    setIsAIChatOpen(true);
  },

  // Open with a pre-filled prompt
  openWithPrompt(prompt: string, context?: Omit<AIChatContext, 'initialPrompt'>) {
    setAIChatContext({
      ...context,
      initialPrompt: prompt,
    });
    setIsAIChatOpen(true);
  },
};
115
frontend-modern/src/types/ai.ts
Normal file
@@ -0,0 +1,115 @@
// AI feature types

export type AIProvider = 'anthropic' | 'openai' | 'ollama';

export interface AISettings {
  enabled: boolean;
  provider: AIProvider;
  api_key_set: boolean; // API key is never exposed, just whether it's set
  model: string;
  base_url?: string;
  configured: boolean; // true if AI is ready to use
  autonomous_mode: boolean; // true if AI can execute commands without approval
  custom_context: string; // user-provided infrastructure context
}

export interface AISettingsUpdateRequest {
  enabled?: boolean;
  provider?: AIProvider;
  api_key?: string; // empty string clears, undefined preserves
  model?: string;
  base_url?: string;
  autonomous_mode?: boolean;
  custom_context?: string; // user-provided infrastructure context
}

export interface AITestResult {
  success: boolean;
  message: string;
  model?: string;
}

// Default models for each provider
export const DEFAULT_MODELS: Record<AIProvider, string> = {
  anthropic: 'claude-opus-4-5-20251101',
  openai: 'gpt-4o',
  ollama: 'llama3',
};

// Provider display names
export const PROVIDER_NAMES: Record<AIProvider, string> = {
  anthropic: 'Anthropic',
  openai: 'OpenAI',
  ollama: 'Ollama',
};

// Provider descriptions
export const PROVIDER_DESCRIPTIONS: Record<AIProvider, string> = {
  anthropic: 'Claude models from Anthropic',
  openai: 'GPT models from OpenAI',
  ollama: 'Local models via Ollama',
};

// Conversation history for multi-turn chats
export interface AIConversationMessage {
  role: 'user' | 'assistant';
  content: string;
}

// AI Execute request/response types
export interface AIExecuteRequest {
  prompt: string;
  target_type?: string; // "host", "container", "vm", "node"
  target_id?: string;
  context?: Record<string, unknown>;
  history?: AIConversationMessage[]; // Previous conversation messages
}

// Tool execution info
export interface AIToolExecution {
  name: string; // "run_command", "read_file"
  input: string; // The command or file path
  output: string; // Result of execution
  success: boolean;
}

export interface AIExecuteResponse {
  content: string;
  model: string;
  input_tokens: number;
  output_tokens: number;
  tool_calls?: AIToolExecution[]; // Commands that were executed
}

// Streaming event types
export type AIStreamEventType = 'tool_start' | 'tool_end' | 'content' | 'done' | 'error' | 'complete' | 'approval_needed';

export interface AIStreamToolStartData {
  name: string;
  input: string;
}

export interface AIStreamToolEndData {
  name: string;
  output: string;
  success: boolean;
}

export interface AIStreamApprovalNeededData {
  command: string;
  tool_id: string;
  tool_name: string;
  run_on_host: boolean;
}

export interface AIStreamEvent {
  type: AIStreamEventType;
  data?: string | AIStreamToolStartData | AIStreamToolEndData | AIStreamCompleteData | AIStreamApprovalNeededData;
}

export interface AIStreamCompleteData {
  model: string;
  input_tokens: number;
  output_tokens: number;
  tool_calls?: AIToolExecution[];
}
@@ -1,3 +1,5 @@
import typography from '@tailwindcss/typography';

export default {
  content: [
    "./index.html",
@@ -23,5 +25,5 @@ export default {
      }
    },
  },
  plugins: [],
  plugins: [typography],
}
@@ -52,6 +52,41 @@ export default defineConfig({
        ws: true,
        changeOrigin: true,
      },
      '/api/ai/execute/stream': {
        target: backendUrl,
        changeOrigin: true,
        // SSE requires special handling to prevent proxy timeouts
        // Set timeout to 10 minutes (600000ms) for long-running AI requests
        timeout: 600000,
        proxyTimeout: 600000,
        configure: (proxy, _options) => {
          // Set proxy-level timeouts
          proxy.on('proxyReq', (proxyReq, req, res) => {
            // Disable socket timeouts for SSE
            req.socket.setTimeout(0);
            req.socket.setNoDelay(true);
            req.socket.setKeepAlive(true);
            // Also set on the proxy request
            proxyReq.socket?.setTimeout(0);
          });
          proxy.on('proxyRes', (proxyRes, req, res) => {
            // Disable response socket timeout
            res.socket?.setTimeout(0);
            res.socket?.setNoDelay(true);
            res.socket?.setKeepAlive(true);
            // Also disable on proxy response socket
            proxyRes.socket?.setTimeout(0);
          });
          proxy.on('error', (err, req, res) => {
            console.error('[SSE Proxy Error]', err.message);
          });
        },
      },
      '/api/agent/ws': {
        target: backendWsUrl,
        ws: true,
        changeOrigin: true,
      },
      '/api': {
        target: backendUrl,
        changeOrigin: true,
249
internal/agentexec/policy.go
Normal file
@@ -0,0 +1,249 @@
package agentexec

import (
    "regexp"
    "strings"
)

// CommandPolicy defines what commands are allowed, blocked, or require approval
type CommandPolicy struct {
    // AutoApprove patterns - commands matching these are automatically allowed
    AutoApprove []string

    // RequireApproval patterns - commands matching these need user approval
    RequireApproval []string

    // Blocked patterns - commands matching these are never allowed
    Blocked []string

    // compiled regex patterns
    autoApproveRe     []*regexp.Regexp
    requireApprovalRe []*regexp.Regexp
    blockedRe         []*regexp.Regexp
}

// DefaultPolicy returns a sensible default command policy
func DefaultPolicy() *CommandPolicy {
    p := &CommandPolicy{
        AutoApprove: []string{
            // System inspection
            `^ps\s`,
            `^top\s+-bn`,
            `^df\s`,
            `^free\s`,
            `^uptime$`,
            `^hostname$`,
            `^uname\s`,
            `^cat\s+/proc/`,
            `^cat\s+/etc/os-release`,
            `^lsof\s`,
            `^netstat\s`,
            `^ss\s`,
            `^ip\s+(addr|route|link)`,
            `^ifconfig`,
            `^w$`,
            `^who$`,
            `^last\s`,

            // Log reading (read-only)
            `^cat\s+/var/log/`,
            `^tail\s+.*(/var/log/|/log/)`,
            `^head\s+.*(/var/log/|/log/)`,
            `^grep\s+.*(/var/log/|/log/)`,
            `^journalctl\s`,

            // Service status (read-only)
            `^systemctl\s+status\s`,
            `^systemctl\s+is-active\s`,
            `^systemctl\s+is-enabled\s`,
            `^systemctl\s+list-units`,
            `^service\s+\S+\s+status`,

            // Docker inspection (read-only)
            `^docker\s+ps`,
            `^docker\s+logs\s`,
            `^docker\s+inspect\s`,
            `^docker\s+stats\s+--no-stream`,
            `^docker\s+top\s`,
            `^docker\s+images`,
            `^docker\s+network\s+ls`,
            `^docker\s+volume\s+ls`,

            // Proxmox inspection (read-only)
            `^pct\s+list`,
            `^pct\s+config\s`,
            `^pct\s+status\s`,
            `^qm\s+list`,
            `^qm\s+config\s`,
            `^qm\s+status\s`,
            `^pvesh\s+get\s`,

            // Disk/storage info
            `^lsblk`,
            `^blkid`,
            `^fdisk\s+-l`,
            `^du\s`,
            `^ls\s`,
            `^stat\s`,
            `^file\s`,
            `^find\s+/.*-size`,  // Find large files
            `^find\s+/.*-mtime`, // Find by modification time
            `^find\s+/.*-type`,  // Find by type

            // Memory inspection
            `^vmstat`,
            `^sar\s`,
            `^iostat`,
            `^mpstat`,

            // Docker inspection (read-only)
            `^docker\s+system\s+df`,

            // APT inspection (read-only)
            `^apt\s+list`,
            `^apt-cache\s`,
            `^dpkg\s+-l`,
            `^dpkg\s+--list`,
        },

        RequireApproval: []string{
            // Service control
            `^systemctl\s+(restart|stop|start|reload)\s`,
            `^service\s+\S+\s+(restart|stop|start|reload)`,

            // Docker control
            `^docker\s+(restart|stop|start|kill)\s`,
            `^docker\s+exec\s`,
            `^docker\s+rm\s`,

            // Process control
            `^kill\s`,
            `^pkill\s`,
            `^killall\s`,

            // Package management
            `^apt\s`,
            `^apt-get\s`,
            `^yum\s`,
            `^dnf\s`,
            `^pacman\s`,

            // Proxmox control
            `^pct\s+(start|stop|shutdown|reboot|resize|set)\s`,
            `^qm\s+(start|stop|shutdown|reboot|reset|resize|set)\s`,
        },

        Blocked: []string{
            // Destructive filesystem operations
            `rm\s+-rf\s+/`,
            `rm\s+--no-preserve-root`,
            `mkfs`,
            `dd\s+.*of=/dev/`,
            `>\s*/dev/sd`,
            `>\s*/dev/nvme`,

            // System destruction
            `shutdown`,
            `reboot`,
            `init\s+0`,
            `poweroff`,
            `halt`,

            // Dangerous permissions
            `chmod\s+777`,
            `chmod\s+-R\s+777`,
            `chown\s+-R\s+.*:.*\s+/`,

            // Remote code execution
            `curl.*\|\s*(ba)?sh`,
            `wget.*\|\s*(ba)?sh`,
            `bash\s+-c\s+.*curl`,
            `bash\s+-c\s+.*wget`,

            // Crypto mining indicators
            `xmrig`,
            `minerd`,
            `cpuminer`,

            // Fork bomb patterns
            `:\(\)\s*{\s*:\s*\|\s*:`,

            // Clear system logs
            `>\s*/var/log/`,
            `truncate.*--size.*0.*/var/log/`,
        },
    }

    p.compile()
    return p
}

func (p *CommandPolicy) compile() {
    p.autoApproveRe = compilePatterns(p.AutoApprove)
    p.requireApprovalRe = compilePatterns(p.RequireApproval)
    p.blockedRe = compilePatterns(p.Blocked)
}

func compilePatterns(patterns []string) []*regexp.Regexp {
    result := make([]*regexp.Regexp, 0, len(patterns))
    for _, pattern := range patterns {
        re, err := regexp.Compile(pattern)
        if err == nil {
            result = append(result, re)
        }
    }
    return result
}

// PolicyDecision represents the policy decision for a command
type PolicyDecision string

const (
    PolicyAllow           PolicyDecision = "allow"
    PolicyRequireApproval PolicyDecision = "require_approval"
    PolicyBlock           PolicyDecision = "block"
)

// Evaluate checks a command against the policy
func (p *CommandPolicy) Evaluate(command string) PolicyDecision {
    command = strings.TrimSpace(command)

    // Check blocked first (highest priority)
    for _, re := range p.blockedRe {
        if re.MatchString(command) {
            return PolicyBlock
        }
    }

    // Check require approval
    for _, re := range p.requireApprovalRe {
        if re.MatchString(command) {
            return PolicyRequireApproval
        }
    }

    // Check auto-approve
    for _, re := range p.autoApproveRe {
        if re.MatchString(command) {
            return PolicyAllow
        }
    }

    // Default: require approval for unknown commands
    return PolicyRequireApproval
}

// IsBlocked returns true if a command is blocked
func (p *CommandPolicy) IsBlocked(command string) bool {
    return p.Evaluate(command) == PolicyBlock
}

// NeedsApproval returns true if a command needs user approval
func (p *CommandPolicy) NeedsApproval(command string) bool {
    return p.Evaluate(command) == PolicyRequireApproval
}

// IsAutoApproved returns true if a command can run without approval
func (p *CommandPolicy) IsAutoApproved(command string) bool {
    return p.Evaluate(command) == PolicyAllow
}
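For reference, a minimal usage sketch of the policy above (not part of the diff itself; it uses only the exported agentexec API shown in this file, and the precedence matches Evaluate: blocked beats approval, which beats auto-approve):

    package main

    import (
        "fmt"

        "github.com/rcourtman/pulse-go-rewrite/internal/agentexec"
    )

    func main() {
        policy := agentexec.DefaultPolicy()

        // Read-only inspection matches an AutoApprove pattern.
        fmt.Println(policy.Evaluate("df -h")) // allow

        // Service control matches a RequireApproval pattern.
        fmt.Println(policy.Evaluate("systemctl restart nginx")) // require_approval

        // Destructive commands match a Blocked pattern, which wins over everything else.
        fmt.Println(policy.IsBlocked("rm -rf /")) // true

        // Unknown commands fall back to requiring approval.
        fmt.Println(policy.Evaluate("run-custom-migration.sh")) // require_approval
    }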
360
internal/agentexec/server.go
Normal file
@@ -0,0 +1,360 @@
package agentexec

import (
    "context"
    "encoding/json"
    "fmt"
    "net/http"
    "sync"
    "time"

    "github.com/gorilla/websocket"
    "github.com/rs/zerolog/log"
)

var upgrader = websocket.Upgrader{
    ReadBufferSize:  1024,
    WriteBufferSize: 1024,
    CheckOrigin: func(r *http.Request) bool {
        return true // Agents can connect from anywhere
    },
}

// Server manages WebSocket connections from agents
type Server struct {
    mu            sync.RWMutex
    agents        map[string]*agentConn                // agentID -> connection
    pendingReqs   map[string]chan CommandResultPayload // requestID -> response channel
    validateToken func(token string) bool
}

type agentConn struct {
    conn    *websocket.Conn
    agent   ConnectedAgent
    writeMu sync.Mutex
    done    chan struct{}
}

// NewServer creates a new agent execution server
func NewServer(validateToken func(token string) bool) *Server {
    return &Server{
        agents:        make(map[string]*agentConn),
        pendingReqs:   make(map[string]chan CommandResultPayload),
        validateToken: validateToken,
    }
}

// HandleWebSocket handles incoming WebSocket connections from agents
func (s *Server) HandleWebSocket(w http.ResponseWriter, r *http.Request) {
    conn, err := upgrader.Upgrade(w, r, nil)
    if err != nil {
        log.Error().Err(err).Msg("Failed to upgrade WebSocket connection")
        return
    }

    // Read first message (must be agent_register)
    conn.SetReadDeadline(time.Now().Add(30 * time.Second))
    _, msgBytes, err := conn.ReadMessage()
    if err != nil {
        log.Error().Err(err).Msg("Failed to read registration message")
        conn.Close()
        return
    }

    var msg Message
    if err := json.Unmarshal(msgBytes, &msg); err != nil {
        log.Error().Err(err).Msg("Failed to parse registration message")
        conn.Close()
        return
    }

    if msg.Type != MsgTypeAgentRegister {
        log.Error().Str("type", string(msg.Type)).Msg("First message must be agent_register")
        conn.Close()
        return
    }

    // Parse registration payload
    payloadBytes, err := json.Marshal(msg.Payload)
    if err != nil {
        log.Error().Err(err).Msg("Failed to marshal registration payload")
        conn.Close()
        return
    }

    var reg AgentRegisterPayload
    if err := json.Unmarshal(payloadBytes, &reg); err != nil {
        log.Error().Err(err).Msg("Failed to parse registration payload")
        conn.Close()
        return
    }

    // Validate token
    if s.validateToken != nil && !s.validateToken(reg.Token) {
        log.Warn().Str("agent_id", reg.AgentID).Msg("Agent registration rejected: invalid token")
        s.sendMessage(conn, Message{
            Type:      MsgTypeRegistered,
            Timestamp: time.Now(),
            Payload:   RegisteredPayload{Success: false, Message: "Invalid token"},
        })
        conn.Close()
        return
    }

    // Create agent connection
    ac := &agentConn{
        conn: conn,
        agent: ConnectedAgent{
            AgentID:     reg.AgentID,
            Hostname:    reg.Hostname,
            Version:     reg.Version,
            Platform:    reg.Platform,
            Tags:        reg.Tags,
            ConnectedAt: time.Now(),
        },
        done: make(chan struct{}),
    }

    // Register agent
    s.mu.Lock()
    // Close existing connection if any
    if existing, ok := s.agents[reg.AgentID]; ok {
        close(existing.done)
        existing.conn.Close()
    }
    s.agents[reg.AgentID] = ac
    s.mu.Unlock()

    log.Info().
        Str("agent_id", reg.AgentID).
        Str("hostname", reg.Hostname).
        Str("version", reg.Version).
        Str("platform", reg.Platform).
        Msg("Agent connected")

    // Send registration success
    s.sendMessage(conn, Message{
        Type:      MsgTypeRegistered,
        Timestamp: time.Now(),
        Payload:   RegisteredPayload{Success: true, Message: "Registered"},
    })

    // Clear deadline for normal operation
    conn.SetReadDeadline(time.Time{})

    // Run read loop (blocking) - don't use a goroutine, or the HTTP handler will close the connection
    s.readLoop(ac)
}

func (s *Server) readLoop(ac *agentConn) {
    defer func() {
        s.mu.Lock()
        if existing, ok := s.agents[ac.agent.AgentID]; ok && existing == ac {
            delete(s.agents, ac.agent.AgentID)
        }
        s.mu.Unlock()
        ac.conn.Close()
        log.Info().Str("agent_id", ac.agent.AgentID).Msg("Agent disconnected")
    }()

    log.Debug().Str("agent_id", ac.agent.AgentID).Msg("Starting read loop for agent")

    for {
        select {
        case <-ac.done:
            log.Debug().Str("agent_id", ac.agent.AgentID).Msg("Read loop exiting: done channel closed")
            return
        default:
        }

        _, msgBytes, err := ac.conn.ReadMessage()
        if err != nil {
            log.Debug().Err(err).Str("agent_id", ac.agent.AgentID).Msg("Read loop exiting: read error")
            if websocket.IsUnexpectedCloseError(err, websocket.CloseGoingAway, websocket.CloseAbnormalClosure) {
                log.Error().Err(err).Str("agent_id", ac.agent.AgentID).Msg("WebSocket read error")
            }
            return
        }

        var msg Message
        if err := json.Unmarshal(msgBytes, &msg); err != nil {
            log.Error().Err(err).Str("agent_id", ac.agent.AgentID).Msg("Failed to parse message")
            continue
        }

        switch msg.Type {
        case MsgTypeAgentPing:
            s.sendMessage(ac.conn, Message{
                Type:      MsgTypePong,
                Timestamp: time.Now(),
            })

        case MsgTypeCommandResult:
            payloadBytes, _ := json.Marshal(msg.Payload)
            var result CommandResultPayload
            if err := json.Unmarshal(payloadBytes, &result); err != nil {
                log.Error().Err(err).Msg("Failed to parse command result")
                continue
            }

            s.mu.RLock()
            ch, ok := s.pendingReqs[result.RequestID]
            s.mu.RUnlock()

            if ok {
                select {
                case ch <- result:
                default:
                    log.Warn().Str("request_id", result.RequestID).Msg("Result channel full, dropping")
                }
            } else {
                log.Warn().Str("request_id", result.RequestID).Msg("No pending request for result")
            }
        }
    }
}

func (s *Server) sendMessage(conn *websocket.Conn, msg Message) error {
    msgBytes, err := json.Marshal(msg)
    if err != nil {
        return err
    }
    return conn.WriteMessage(websocket.TextMessage, msgBytes)
}

// ExecuteCommand sends a command to an agent and waits for the result
func (s *Server) ExecuteCommand(ctx context.Context, agentID string, cmd ExecuteCommandPayload) (*CommandResultPayload, error) {
    s.mu.RLock()
    ac, ok := s.agents[agentID]
    s.mu.RUnlock()

    if !ok {
        return nil, fmt.Errorf("agent %s not connected", agentID)
    }

    // Create response channel
    respCh := make(chan CommandResultPayload, 1)
    s.mu.Lock()
    s.pendingReqs[cmd.RequestID] = respCh
    s.mu.Unlock()

    defer func() {
        s.mu.Lock()
        delete(s.pendingReqs, cmd.RequestID)
        s.mu.Unlock()
    }()

    // Send command
    msg := Message{
        Type:      MsgTypeExecuteCmd,
        ID:        cmd.RequestID,
        Timestamp: time.Now(),
        Payload:   cmd,
    }

    ac.writeMu.Lock()
    err := s.sendMessage(ac.conn, msg)
    ac.writeMu.Unlock()

    if err != nil {
        return nil, fmt.Errorf("failed to send command: %w", err)
    }

    // Wait for result
    timeout := time.Duration(cmd.Timeout) * time.Second
    if timeout <= 0 {
        timeout = 60 * time.Second
    }

    select {
    case result := <-respCh:
        return &result, nil
    case <-time.After(timeout):
        return nil, fmt.Errorf("command timed out after %v", timeout)
    case <-ctx.Done():
        return nil, ctx.Err()
    }
}

// ReadFile reads a file from an agent
func (s *Server) ReadFile(ctx context.Context, agentID string, req ReadFilePayload) (*CommandResultPayload, error) {
    s.mu.RLock()
    ac, ok := s.agents[agentID]
    s.mu.RUnlock()

    if !ok {
        return nil, fmt.Errorf("agent %s not connected", agentID)
    }

    // Create response channel
    respCh := make(chan CommandResultPayload, 1)
    s.mu.Lock()
    s.pendingReqs[req.RequestID] = respCh
    s.mu.Unlock()

    defer func() {
        s.mu.Lock()
        delete(s.pendingReqs, req.RequestID)
        s.mu.Unlock()
    }()

    // Send request
    msg := Message{
        Type:      MsgTypeReadFile,
        ID:        req.RequestID,
        Timestamp: time.Now(),
        Payload:   req,
    }

    ac.writeMu.Lock()
    err := s.sendMessage(ac.conn, msg)
    ac.writeMu.Unlock()

    if err != nil {
        return nil, fmt.Errorf("failed to send read_file request: %w", err)
    }

    // Wait for result
    timeout := 30 * time.Second
    select {
    case result := <-respCh:
        return &result, nil
    case <-time.After(timeout):
        return nil, fmt.Errorf("read_file timed out after %v", timeout)
    case <-ctx.Done():
        return nil, ctx.Err()
    }
}

// GetConnectedAgents returns a list of currently connected agents
func (s *Server) GetConnectedAgents() []ConnectedAgent {
    s.mu.RLock()
    defer s.mu.RUnlock()

    agents := make([]ConnectedAgent, 0, len(s.agents))
    for _, ac := range s.agents {
        agents = append(agents, ac.agent)
    }
    return agents
}

// IsAgentConnected checks if an agent is currently connected
func (s *Server) IsAgentConnected(agentID string) bool {
    s.mu.RLock()
    defer s.mu.RUnlock()
    _, ok := s.agents[agentID]
    return ok
}

// GetAgentForHost finds the agent for a given hostname
func (s *Server) GetAgentForHost(hostname string) (string, bool) {
    s.mu.RLock()
    defer s.mu.RUnlock()

    for _, ac := range s.agents {
        if ac.agent.Hostname == hostname {
            return ac.agent.AgentID, true
        }
    }
    return "", false
}
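A sketch of the request/response flow ExecuteCommand implements (the agent ID, token check, port, and request-ID scheme here are illustrative, not from the diff):

    package main

    import (
        "context"
        "fmt"
        "net/http"
        "time"

        "github.com/rcourtman/pulse-go-rewrite/internal/agentexec"
    )

    func main() {
        // Accept agent connections (any non-empty token, for illustration only).
        srv := agentexec.NewServer(func(token string) bool { return token != "" })
        http.HandleFunc("/api/agent/ws", srv.HandleWebSocket)
        go http.ListenAndServe(":7655", nil)

        // Later, once "agent-01" has registered, run a read-only command on it.
        ctx, cancel := context.WithTimeout(context.Background(), 90*time.Second)
        defer cancel()
        result, err := srv.ExecuteCommand(ctx, "agent-01", agentexec.ExecuteCommandPayload{
            RequestID:  fmt.Sprintf("req-%d", time.Now().UnixNano()), // any unique ID works
            Command:    "df -h",
            TargetType: "host",
            Timeout:    30, // seconds; 0 falls back to the 60s default
        })
        if err != nil {
            fmt.Println("exec failed:", err)
            return
        }
        fmt.Println(result.Stdout)
    }

The RequestID correlates the outgoing execute_command message with the command_result the agent sends back, which is why ExecuteCommand parks a channel in pendingReqs keyed by that ID.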
84
internal/agentexec/types.go
Normal file
@@ -0,0 +1,84 @@
package agentexec

import (
    "time"
)

// MessageType identifies the type of WebSocket message
type MessageType string

const (
    // Agent -> Server messages
    MsgTypeAgentRegister MessageType = "agent_register"
    MsgTypeAgentPing     MessageType = "agent_ping"
    MsgTypeCommandResult MessageType = "command_result"

    // Server -> Agent messages
    MsgTypeRegistered MessageType = "registered"
    MsgTypePong       MessageType = "pong"
    MsgTypeExecuteCmd MessageType = "execute_command"
    MsgTypeReadFile   MessageType = "read_file"
)

// Message is the envelope for all WebSocket messages
type Message struct {
    Type      MessageType `json:"type"`
    ID        string      `json:"id,omitempty"` // Unique message ID for request/response correlation
    Timestamp time.Time   `json:"timestamp"`
    Payload   interface{} `json:"payload,omitempty"`
}

// AgentRegisterPayload is sent by the agent on connection
type AgentRegisterPayload struct {
    AgentID  string   `json:"agent_id"`
    Hostname string   `json:"hostname"`
    Version  string   `json:"version"`
    Platform string   `json:"platform"` // "linux", "windows", "darwin"
    Tags     []string `json:"tags,omitempty"`
    Token    string   `json:"token"` // API token for authentication
}

// RegisteredPayload is sent by the server after successful registration
type RegisteredPayload struct {
    Success bool   `json:"success"`
    Message string `json:"message,omitempty"`
}

// ExecuteCommandPayload is sent by the server to request command execution
type ExecuteCommandPayload struct {
    RequestID  string `json:"request_id"`
    Command    string `json:"command"`
    TargetType string `json:"target_type"`         // "host", "container", "vm"
    TargetID   string `json:"target_id,omitempty"` // VMID for container/VM
    Timeout    int    `json:"timeout,omitempty"`   // seconds, 0 = default
}

// ReadFilePayload is sent by the server to request file content
type ReadFilePayload struct {
    RequestID  string `json:"request_id"`
    Path       string `json:"path"`
    TargetType string `json:"target_type"`
    TargetID   string `json:"target_id,omitempty"`
    MaxBytes   int64  `json:"max_bytes,omitempty"` // 0 = default (1MB)
}

// CommandResultPayload is sent by the agent with the command execution result
type CommandResultPayload struct {
    RequestID string `json:"request_id"`
    Success   bool   `json:"success"`
    Stdout    string `json:"stdout,omitempty"`
    Stderr    string `json:"stderr,omitempty"`
    ExitCode  int    `json:"exit_code"`
    Error     string `json:"error,omitempty"`
    Duration  int64  `json:"duration_ms"`
}

// ConnectedAgent represents an agent connected via WebSocket
type ConnectedAgent struct {
    AgentID     string
    Hostname    string
    Version     string
    Platform    string
    Tags        []string
    ConnectedAt time.Time
}
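Concretely, an execute_command envelope built from these types marshals to JSON like this (a sketch; the IDs and command are arbitrary):

    package main

    import (
        "encoding/json"
        "fmt"
        "time"

        "github.com/rcourtman/pulse-go-rewrite/internal/agentexec"
    )

    func main() {
        msg := agentexec.Message{
            Type:      agentexec.MsgTypeExecuteCmd,
            ID:        "req-123",
            Timestamp: time.Now(),
            Payload: agentexec.ExecuteCommandPayload{
                RequestID:  "req-123", // echoed back in the matching CommandResultPayload
                Command:    "uptime",
                TargetType: "host",
                Timeout:    30,
            },
        }
        b, _ := json.Marshal(msg)
        fmt.Println(string(b))
        // {"type":"execute_command","id":"req-123","timestamp":"...","payload":
        //   {"request_id":"req-123","command":"uptime","target_type":"host","timeout":30}}
    }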
19
internal/ai/config.go
Normal file
@@ -0,0 +1,19 @@
// Package ai provides AI-powered infrastructure investigation and remediation.
package ai

import "github.com/rcourtman/pulse-go-rewrite/internal/config"

// Re-export config types for convenience
type Config = config.AIConfig

// Provider constants (re-exported for convenience)
const (
    ProviderAnthropic = config.AIProviderAnthropic
    ProviderOpenAI    = config.AIProviderOpenAI
    ProviderOllama    = config.AIProviderOllama
)

// NewDefaultConfig returns a new AI config with sensible defaults
func NewDefaultConfig() *Config {
    return config.NewDefaultAIConfig()
}
267
internal/ai/providers/anthropic.go
Normal file
@@ -0,0 +1,267 @@
package providers

import (
    "bytes"
    "context"
    "encoding/json"
    "fmt"
    "io"
    "net/http"
    "time"
)

const (
    anthropicAPIURL     = "https://api.anthropic.com/v1/messages"
    anthropicAPIVersion = "2023-06-01"
)

// AnthropicClient implements the Provider interface for Anthropic's Claude API
type AnthropicClient struct {
    apiKey string
    model  string
    client *http.Client
}

// NewAnthropicClient creates a new Anthropic API client
func NewAnthropicClient(apiKey, model string) *AnthropicClient {
    return &AnthropicClient{
        apiKey: apiKey,
        model:  model,
        client: &http.Client{
            Timeout: 120 * time.Second, // LLM responses can take a while
        },
    }
}

// Name returns the provider name
func (c *AnthropicClient) Name() string {
    return "anthropic"
}

// anthropicRequest is the request body for the Anthropic API
type anthropicRequest struct {
    Model       string             `json:"model"`
    Messages    []anthropicMessage `json:"messages"`
    MaxTokens   int                `json:"max_tokens"`
    System      string             `json:"system,omitempty"`
    Temperature float64            `json:"temperature,omitempty"`
    Tools       []anthropicTool    `json:"tools,omitempty"`
}

type anthropicMessage struct {
    Role    string      `json:"role"`
    Content interface{} `json:"content"` // Can be string or []anthropicContent
}

type anthropicTool struct {
    Name        string                 `json:"name"`
    Description string                 `json:"description"`
    InputSchema map[string]interface{} `json:"input_schema"`
}

// anthropicResponse is the response from the Anthropic API
type anthropicResponse struct {
    ID           string             `json:"id"`
    Type         string             `json:"type"`
    Role         string             `json:"role"`
    Content      []anthropicContent `json:"content"`
    Model        string             `json:"model"`
    StopReason   string             `json:"stop_reason"`
    StopSequence string             `json:"stop_sequence,omitempty"`
    Usage        anthropicUsage     `json:"usage"`
}

type anthropicContent struct {
    Type      string                 `json:"type"` // "text" or "tool_use" or "tool_result"
    Text      string                 `json:"text,omitempty"`
    ID        string                 `json:"id,omitempty"`          // For tool_use
    Name      string                 `json:"name,omitempty"`        // For tool_use
    Input     map[string]interface{} `json:"input,omitempty"`       // For tool_use
    ToolUseID string                 `json:"tool_use_id,omitempty"` // For tool_result
    Content   string                 `json:"content,omitempty"`     // For tool_result (when it's a string)
    IsError   bool                   `json:"is_error,omitempty"`    // For tool_result
}

type anthropicUsage struct {
    InputTokens  int `json:"input_tokens"`
    OutputTokens int `json:"output_tokens"`
}

type anthropicError struct {
    Type  string               `json:"type"`
    Error anthropicErrorDetail `json:"error"`
}

type anthropicErrorDetail struct {
    Type    string `json:"type"`
    Message string `json:"message"`
}

// Chat sends a chat request to the Anthropic API
func (c *AnthropicClient) Chat(ctx context.Context, req ChatRequest) (*ChatResponse, error) {
    // Convert messages to Anthropic format
    messages := make([]anthropicMessage, 0, len(req.Messages))
    for _, m := range req.Messages {
        // Anthropic doesn't use the "system" role in the messages array
        if m.Role == "system" {
            continue
        }

        // Handle tool results specially
        if m.ToolResult != nil {
            // Tool result message
            messages = append(messages, anthropicMessage{
                Role: "user",
                Content: []anthropicContent{
                    {
                        Type:      "tool_result",
                        ToolUseID: m.ToolResult.ToolUseID,
                        Content:   m.ToolResult.Content,
                        IsError:   m.ToolResult.IsError,
                    },
                },
            })
            continue
        }

        // Handle assistant messages with tool calls
        if m.Role == "assistant" && len(m.ToolCalls) > 0 {
            contentBlocks := make([]anthropicContent, 0)
            if m.Content != "" {
                contentBlocks = append(contentBlocks, anthropicContent{
                    Type: "text",
                    Text: m.Content,
                })
            }
            for _, tc := range m.ToolCalls {
                contentBlocks = append(contentBlocks, anthropicContent{
                    Type:  "tool_use",
                    ID:    tc.ID,
                    Name:  tc.Name,
                    Input: tc.Input,
                })
            }
            messages = append(messages, anthropicMessage{
                Role:    "assistant",
                Content: contentBlocks,
            })
            continue
        }

        // Simple text message
        messages = append(messages, anthropicMessage{
            Role:    m.Role,
            Content: m.Content,
        })
    }

    // Use the provided model or fall back to the client default
    model := req.Model
    if model == "" {
        model = c.model
    }

    // Set max tokens (Anthropic requires this)
    maxTokens := req.MaxTokens
    if maxTokens == 0 {
        maxTokens = 4096
    }

    anthropicReq := anthropicRequest{
        Model:     model,
        Messages:  messages,
        MaxTokens: maxTokens,
        System:    req.System,
    }

    if req.Temperature > 0 {
        anthropicReq.Temperature = req.Temperature
    }

    // Add tools if provided
    if len(req.Tools) > 0 {
        anthropicReq.Tools = make([]anthropicTool, len(req.Tools))
        for i, t := range req.Tools {
            anthropicReq.Tools[i] = anthropicTool{
                Name:        t.Name,
                Description: t.Description,
                InputSchema: t.InputSchema,
            }
        }
    }

    body, err := json.Marshal(anthropicReq)
    if err != nil {
        return nil, fmt.Errorf("failed to marshal request: %w", err)
    }

    httpReq, err := http.NewRequestWithContext(ctx, "POST", anthropicAPIURL, bytes.NewReader(body))
    if err != nil {
        return nil, fmt.Errorf("failed to create request: %w", err)
    }

    httpReq.Header.Set("Content-Type", "application/json")
    httpReq.Header.Set("x-api-key", c.apiKey)
    httpReq.Header.Set("anthropic-version", anthropicAPIVersion)

    resp, err := c.client.Do(httpReq)
    if err != nil {
        return nil, fmt.Errorf("request failed: %w", err)
    }
    defer resp.Body.Close()

    respBody, err := io.ReadAll(resp.Body)
    if err != nil {
        return nil, fmt.Errorf("failed to read response: %w", err)
    }

    if resp.StatusCode != http.StatusOK {
        var errResp anthropicError
        if err := json.Unmarshal(respBody, &errResp); err == nil && errResp.Error.Message != "" {
            return nil, fmt.Errorf("API error (%d): %s", resp.StatusCode, errResp.Error.Message)
        }
        return nil, fmt.Errorf("API error (%d): %s", resp.StatusCode, string(respBody))
    }

    var anthropicResp anthropicResponse
    if err := json.Unmarshal(respBody, &anthropicResp); err != nil {
        return nil, fmt.Errorf("failed to parse response: %w", err)
    }

    // Extract content and tool calls from the response
    var textContent string
    var toolCalls []ToolCall
    for _, block := range anthropicResp.Content {
        switch block.Type {
        case "text":
            textContent += block.Text
        case "tool_use":
            toolCalls = append(toolCalls, ToolCall{
                ID:    block.ID,
                Name:  block.Name,
                Input: block.Input,
            })
        }
    }

    return &ChatResponse{
        Content:      textContent,
        Model:        anthropicResp.Model,
        StopReason:   anthropicResp.StopReason,
        ToolCalls:    toolCalls,
        InputTokens:  anthropicResp.Usage.InputTokens,
        OutputTokens: anthropicResp.Usage.OutputTokens,
    }, nil
}

// TestConnection validates the API key by making a minimal request
func (c *AnthropicClient) TestConnection(ctx context.Context) error {
    // Make a minimal request to validate the API key
    _, err := c.Chat(ctx, ChatRequest{
        Messages: []Message{
            {Role: "user", Content: "Hi"},
        },
        MaxTokens: 10, // Minimal tokens to save cost
    })
    return err
}
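The tool-calling contract above is multi-turn: when StopReason is "tool_use", the caller executes each ToolCall, feeds a ToolResult back as a user message, and re-sends the conversation. A sketch of the loop the service layer is expected to drive (executeTool and the tools slice are hypothetical placeholders, not part of the diff):

    func runToolLoop(ctx context.Context, client *providers.AnthropicClient, tools []providers.Tool, prompt string) (string, error) {
        msgs := []providers.Message{{Role: "user", Content: prompt}}
        for {
            resp, err := client.Chat(ctx, providers.ChatRequest{Messages: msgs, Tools: tools})
            if err != nil {
                return "", err
            }
            if resp.StopReason != "tool_use" {
                return resp.Content, nil // final answer
            }
            // Echo the assistant turn, including its tool_use blocks...
            msgs = append(msgs, providers.Message{Role: "assistant", Content: resp.Content, ToolCalls: resp.ToolCalls})
            // ...then answer each call with a tool_result block keyed by the tool-use ID.
            for _, tc := range resp.ToolCalls {
                output, execErr := executeTool(ctx, tc) // hypothetical executor (e.g. agentexec)
                msgs = append(msgs, providers.Message{
                    Role:       "user",
                    ToolResult: &providers.ToolResult{ToolUseID: tc.ID, Content: output, IsError: execErr != nil},
                })
            }
        }
    }

Each ToolResult goes in its own user message because the Chat conversion above emits exactly one tool_result block per Message.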
38
internal/ai/providers/factory.go
Normal file
@@ -0,0 +1,38 @@
package providers

import (
    "fmt"

    "github.com/rcourtman/pulse-go-rewrite/internal/config"
)

// NewFromConfig creates a Provider based on the AIConfig settings
func NewFromConfig(cfg *config.AIConfig) (Provider, error) {
    if cfg == nil {
        return nil, fmt.Errorf("AI config is nil")
    }

    if !cfg.Enabled {
        return nil, fmt.Errorf("AI is not enabled")
    }

    switch cfg.Provider {
    case config.AIProviderAnthropic:
        if cfg.APIKey == "" {
            return nil, fmt.Errorf("Anthropic API key is required")
        }
        return NewAnthropicClient(cfg.APIKey, cfg.GetModel()), nil

    case config.AIProviderOpenAI:
        if cfg.APIKey == "" {
            return nil, fmt.Errorf("OpenAI API key is required")
        }
        return NewOpenAIClient(cfg.APIKey, cfg.GetModel(), cfg.GetBaseURL()), nil

    case config.AIProviderOllama:
        return NewOllamaClient(cfg.GetModel(), cfg.GetBaseURL()), nil

    default:
        return nil, fmt.Errorf("unknown provider: %s", cfg.Provider)
    }
}
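A sketch of the factory in use (it assumes the Provider interface exposes Chat, as all three clients implement it, and that cfg is the saved *config.AIConfig):

    provider, err := providers.NewFromConfig(cfg)
    if err != nil {
        return err
    }
    resp, err := provider.Chat(ctx, providers.ChatRequest{
        System:    "You are Pulse's infrastructure assistant.",
        Messages:  []providers.Message{{Role: "user", Content: "Summarize cluster health."}},
        MaxTokens: 512,
    })
    if err != nil {
        return err
    }
    fmt.Printf("%s (%d in / %d out tokens)\n", resp.Content, resp.InputTokens, resp.OutputTokens)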
173
internal/ai/providers/ollama.go
Normal file
@@ -0,0 +1,173 @@
package providers

import (
    "bytes"
    "context"
    "encoding/json"
    "fmt"
    "io"
    "net/http"
    "time"
)

// OllamaClient implements the Provider interface for Ollama's local API
type OllamaClient struct {
    model   string
    baseURL string
    client  *http.Client
}

// NewOllamaClient creates a new Ollama API client
func NewOllamaClient(model, baseURL string) *OllamaClient {
    if baseURL == "" {
        baseURL = "http://localhost:11434"
    }
    return &OllamaClient{
        model:   model,
        baseURL: baseURL,
        client: &http.Client{
            Timeout: 300 * time.Second, // Local models can be slow
        },
    }
}

// Name returns the provider name
func (c *OllamaClient) Name() string {
    return "ollama"
}

// ollamaRequest is the request body for the Ollama API
type ollamaRequest struct {
    Model    string          `json:"model"`
    Messages []ollamaMessage `json:"messages"`
    Stream   bool            `json:"stream"`
    Options  *ollamaOptions  `json:"options,omitempty"`
}

type ollamaMessage struct {
    Role    string `json:"role"`
    Content string `json:"content"`
}

type ollamaOptions struct {
    NumPredict  int     `json:"num_predict,omitempty"`
    Temperature float64 `json:"temperature,omitempty"`
}

// ollamaResponse is the response from the Ollama API
type ollamaResponse struct {
    Model           string        `json:"model"`
    CreatedAt       string        `json:"created_at"`
    Message         ollamaMessage `json:"message"`
    Done            bool          `json:"done"`
    DoneReason      string        `json:"done_reason,omitempty"`
    TotalDuration   int64         `json:"total_duration,omitempty"`
    LoadDuration    int64         `json:"load_duration,omitempty"`
    PromptEvalCount int           `json:"prompt_eval_count,omitempty"`
    EvalCount       int           `json:"eval_count,omitempty"`
}

// Chat sends a chat request to the Ollama API
func (c *OllamaClient) Chat(ctx context.Context, req ChatRequest) (*ChatResponse, error) {
    // Convert messages to Ollama format
    messages := make([]ollamaMessage, 0, len(req.Messages)+1)

    // Add system message if provided
    if req.System != "" {
        messages = append(messages, ollamaMessage{
            Role:    "system",
            Content: req.System,
        })
    }

    for _, m := range req.Messages {
        messages = append(messages, ollamaMessage{
            Role:    m.Role,
            Content: m.Content,
        })
    }

    // Use the provided model or fall back to the client default
    model := req.Model
    if model == "" {
        model = c.model
    }

    ollamaReq := ollamaRequest{
        Model:    model,
        Messages: messages,
        Stream:   false, // Non-streaming for now
    }

    if req.MaxTokens > 0 || req.Temperature > 0 {
        ollamaReq.Options = &ollamaOptions{}
        if req.MaxTokens > 0 {
            ollamaReq.Options.NumPredict = req.MaxTokens
        }
        if req.Temperature > 0 {
            ollamaReq.Options.Temperature = req.Temperature
        }
    }

    body, err := json.Marshal(ollamaReq)
    if err != nil {
        return nil, fmt.Errorf("failed to marshal request: %w", err)
    }

    url := c.baseURL + "/api/chat"
    httpReq, err := http.NewRequestWithContext(ctx, "POST", url, bytes.NewReader(body))
    if err != nil {
        return nil, fmt.Errorf("failed to create request: %w", err)
    }

    httpReq.Header.Set("Content-Type", "application/json")

    resp, err := c.client.Do(httpReq)
    if err != nil {
        return nil, fmt.Errorf("request failed: %w", err)
    }
    defer resp.Body.Close()

    respBody, err := io.ReadAll(resp.Body)
    if err != nil {
        return nil, fmt.Errorf("failed to read response: %w", err)
    }

    if resp.StatusCode != http.StatusOK {
        return nil, fmt.Errorf("API error (%d): %s", resp.StatusCode, string(respBody))
    }

    var ollamaResp ollamaResponse
    if err := json.Unmarshal(respBody, &ollamaResp); err != nil {
        return nil, fmt.Errorf("failed to parse response: %w", err)
    }

    return &ChatResponse{
        Content:      ollamaResp.Message.Content,
        Model:        ollamaResp.Model,
        StopReason:   ollamaResp.DoneReason,
        InputTokens:  ollamaResp.PromptEvalCount,
        OutputTokens: ollamaResp.EvalCount,
    }, nil
}

// TestConnection validates connectivity by checking the Ollama version endpoint
func (c *OllamaClient) TestConnection(ctx context.Context) error {
    url := c.baseURL + "/api/version"
    httpReq, err := http.NewRequestWithContext(ctx, "GET", url, nil)
    if err != nil {
        return fmt.Errorf("failed to create request: %w", err)
    }

    resp, err := c.client.Do(httpReq)
    if err != nil {
        return fmt.Errorf("failed to connect to Ollama at %s: %w", c.baseURL, err)
    }
    defer resp.Body.Close()

    if resp.StatusCode != http.StatusOK {
        return fmt.Errorf("Ollama returned status %d", resp.StatusCode)
    }

    return nil
}
188
internal/ai/providers/openai.go
Normal file
188
internal/ai/providers/openai.go
Normal file
@@ -0,0 +1,188 @@
|
||||
package providers
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
openaiAPIURL = "https://api.openai.com/v1/chat/completions"
|
||||
)
|
||||
|
||||
// OpenAIClient implements the Provider interface for OpenAI's API
|
||||
type OpenAIClient struct {
|
||||
apiKey string
|
||||
model string
|
||||
baseURL string
|
||||
client *http.Client
|
||||
}
|
||||
|
||||
// NewOpenAIClient creates a new OpenAI API client
|
||||
func NewOpenAIClient(apiKey, model, baseURL string) *OpenAIClient {
|
||||
if baseURL == "" {
|
||||
baseURL = openaiAPIURL
|
||||
}
|
||||
return &OpenAIClient{
|
		apiKey:  apiKey,
		model:   model,
		baseURL: baseURL,
		client: &http.Client{
			Timeout: 120 * time.Second,
		},
	}
}

// Name returns the provider name
func (c *OpenAIClient) Name() string {
	return "openai"
}

// openaiRequest is the request body for the OpenAI API
type openaiRequest struct {
	Model       string          `json:"model"`
	Messages    []openaiMessage `json:"messages"`
	MaxTokens   int             `json:"max_tokens,omitempty"`
	Temperature float64         `json:"temperature,omitempty"`
}

type openaiMessage struct {
	Role    string `json:"role"`
	Content string `json:"content"`
}

// openaiResponse is the response from the OpenAI API
type openaiResponse struct {
	ID      string         `json:"id"`
	Object  string         `json:"object"`
	Created int64          `json:"created"`
	Model   string         `json:"model"`
	Choices []openaiChoice `json:"choices"`
	Usage   openaiUsage    `json:"usage"`
}

type openaiChoice struct {
	Index        int           `json:"index"`
	Message      openaiMessage `json:"message"`
	FinishReason string        `json:"finish_reason"`
}

type openaiUsage struct {
	PromptTokens     int `json:"prompt_tokens"`
	CompletionTokens int `json:"completion_tokens"`
	TotalTokens      int `json:"total_tokens"`
}

type openaiError struct {
	Error openaiErrorDetail `json:"error"`
}

type openaiErrorDetail struct {
	Message string `json:"message"`
	Type    string `json:"type"`
	Code    string `json:"code"`
}

// Chat sends a chat request to the OpenAI API
func (c *OpenAIClient) Chat(ctx context.Context, req ChatRequest) (*ChatResponse, error) {
	// Convert messages to OpenAI format
	messages := make([]openaiMessage, 0, len(req.Messages)+1)

	// Add system message if provided
	if req.System != "" {
		messages = append(messages, openaiMessage{
			Role:    "system",
			Content: req.System,
		})
	}

	for _, m := range req.Messages {
		messages = append(messages, openaiMessage{
			Role:    m.Role,
			Content: m.Content,
		})
	}

	// Use provided model or fall back to client default
	model := req.Model
	if model == "" {
		model = c.model
	}

	openaiReq := openaiRequest{
		Model:    model,
		Messages: messages,
	}

	if req.MaxTokens > 0 {
		openaiReq.MaxTokens = req.MaxTokens
	}

	if req.Temperature > 0 {
		openaiReq.Temperature = req.Temperature
	}

	body, err := json.Marshal(openaiReq)
	if err != nil {
		return nil, fmt.Errorf("failed to marshal request: %w", err)
	}

	httpReq, err := http.NewRequestWithContext(ctx, "POST", c.baseURL, bytes.NewReader(body))
	if err != nil {
		return nil, fmt.Errorf("failed to create request: %w", err)
	}

	httpReq.Header.Set("Content-Type", "application/json")
	httpReq.Header.Set("Authorization", "Bearer "+c.apiKey)

	resp, err := c.client.Do(httpReq)
	if err != nil {
		return nil, fmt.Errorf("request failed: %w", err)
	}
	defer resp.Body.Close()

	respBody, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("failed to read response: %w", err)
	}

	if resp.StatusCode != http.StatusOK {
		var errResp openaiError
		if err := json.Unmarshal(respBody, &errResp); err == nil && errResp.Error.Message != "" {
			return nil, fmt.Errorf("API error (%d): %s", resp.StatusCode, errResp.Error.Message)
		}
		return nil, fmt.Errorf("API error (%d): %s", resp.StatusCode, string(respBody))
	}

	var openaiResp openaiResponse
	if err := json.Unmarshal(respBody, &openaiResp); err != nil {
		return nil, fmt.Errorf("failed to parse response: %w", err)
	}

	if len(openaiResp.Choices) == 0 {
		return nil, fmt.Errorf("no response choices returned")
	}

	return &ChatResponse{
		Content:      openaiResp.Choices[0].Message.Content,
		Model:        openaiResp.Model,
		StopReason:   openaiResp.Choices[0].FinishReason,
		InputTokens:  openaiResp.Usage.PromptTokens,
		OutputTokens: openaiResp.Usage.CompletionTokens,
	}, nil
}

// TestConnection validates the API key by making a minimal request
func (c *OpenAIClient) TestConnection(ctx context.Context) error {
	_, err := c.Chat(ctx, ChatRequest{
		Messages: []Message{
			{Role: "user", Content: "Hi"},
		},
		MaxTokens: 10,
	})
	return err
}
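For orientation, a minimal sketch of driving this client end to end within the module. The `NewOpenAIClient(apiKey, model, baseURL string)` signature is an assumption, since the constructor's opening lines fall outside this hunk; only the struct fields above are confirmed, and all values are placeholders.

```go
// Sketch only: exercises Chat as defined above.
// NewOpenAIClient's exact signature is assumed.
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/rcourtman/pulse-go-rewrite/internal/ai/providers"
)

func main() {
	client := providers.NewOpenAIClient(
		"sk-placeholder", // API key (placeholder)
		"gpt-4o",         // default model
		"https://api.openai.com/v1/chat/completions", // URL the Chat method POSTs to
	)

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	resp, err := client.Chat(ctx, providers.ChatRequest{
		System:    "You are a concise infrastructure assistant.",
		Messages:  []providers.Message{{Role: "user", Content: "Summarize CPU usage."}},
		MaxTokens: 256,
	})
	if err != nil {
		fmt.Println("chat failed:", err)
		return
	}
	fmt.Printf("%s (%d in / %d out tokens)\n", resp.Content, resp.InputTokens, resp.OutputTokens)
}
```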
67 internal/ai/providers/provider.go Normal file
@@ -0,0 +1,67 @@
// Package providers contains AI provider client implementations
package providers

import (
	"context"
)

// Message represents a chat message
type Message struct {
	Role       string      `json:"role"`                  // "user", "assistant", "system"
	Content    string      `json:"content"`               // Text content (simple case)
	ToolCalls  []ToolCall  `json:"tool_calls,omitempty"`  // For assistant messages with tool calls
	ToolResult *ToolResult `json:"tool_result,omitempty"` // For user messages with tool results
}

// ToolCall represents a tool invocation from the AI
type ToolCall struct {
	ID    string                 `json:"id"`
	Name  string                 `json:"name"`
	Input map[string]interface{} `json:"input"`
}

// ToolResult represents the result of a tool execution
type ToolResult struct {
	ToolUseID string `json:"tool_use_id"`
	Content   string `json:"content"`
	IsError   bool   `json:"is_error,omitempty"`
}

// Tool represents an AI tool definition
type Tool struct {
	Name        string                 `json:"name"`
	Description string                 `json:"description"`
	InputSchema map[string]interface{} `json:"input_schema"`
}

// ChatRequest represents a request to the AI provider
type ChatRequest struct {
	Messages    []Message `json:"messages"`
	Model       string    `json:"model"`
	MaxTokens   int       `json:"max_tokens,omitempty"`
	Temperature float64   `json:"temperature,omitempty"`
	System      string    `json:"system,omitempty"` // System prompt (Anthropic style)
	Tools       []Tool    `json:"tools,omitempty"`  // Available tools
}

// ChatResponse represents a response from the AI provider
type ChatResponse struct {
	Content      string     `json:"content"`
	Model        string     `json:"model"`
	StopReason   string     `json:"stop_reason,omitempty"` // "end_turn", "tool_use"
	ToolCalls    []ToolCall `json:"tool_calls,omitempty"`  // Tool invocations
	InputTokens  int        `json:"input_tokens,omitempty"`
	OutputTokens int        `json:"output_tokens,omitempty"`
}

// Provider defines the interface for AI providers
type Provider interface {
	// Chat sends a chat request and returns the response
	Chat(ctx context.Context, req ChatRequest) (*ChatResponse, error)

	// TestConnection validates the API key and connectivity
	TestConnection(ctx context.Context) error

	// Name returns the provider name
	Name() string
}
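Because the rest of the commit depends only on this interface, a stub is enough to test the plumbing without a real model. A hedged sketch of such a test double inside the same package (the stub type and its canned reply are illustrative, not part of the commit):

```go
// stubProvider is a hypothetical test double satisfying Provider.
type stubProvider struct{}

func (s *stubProvider) Chat(ctx context.Context, req ChatRequest) (*ChatResponse, error) {
	// Echo the last user message so tests assert on plumbing, not on a model.
	last := ""
	if n := len(req.Messages); n > 0 {
		last = req.Messages[n-1].Content
	}
	return &ChatResponse{Content: "echo: " + last, Model: "stub", StopReason: "end_turn"}, nil
}

func (s *stubProvider) TestConnection(ctx context.Context) error { return nil }

func (s *stubProvider) Name() string { return "stub" }

// Compile-time check that stubProvider implements Provider.
var _ Provider = (*stubProvider)(nil)
```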
1617 internal/ai/service.go Normal file
File diff suppressed because it is too large
614 internal/api/ai_handlers.go Normal file
@@ -0,0 +1,614 @@
package api

import (
	"context"
	"encoding/json"
	"net/http"
	"strings"
	"time"

	"github.com/rcourtman/pulse-go-rewrite/internal/agentexec"
	"github.com/rcourtman/pulse-go-rewrite/internal/ai"
	"github.com/rcourtman/pulse-go-rewrite/internal/config"
	"github.com/rcourtman/pulse-go-rewrite/internal/utils"
	"github.com/rs/zerolog/log"
)

// AISettingsHandler handles AI settings endpoints
type AISettingsHandler struct {
	config      *config.Config
	persistence *config.ConfigPersistence
	aiService   *ai.Service
	agentServer *agentexec.Server
}

// NewAISettingsHandler creates a new AI settings handler
func NewAISettingsHandler(cfg *config.Config, persistence *config.ConfigPersistence, agentServer *agentexec.Server) *AISettingsHandler {
	aiService := ai.NewService(persistence, agentServer)
	if err := aiService.LoadConfig(); err != nil {
		log.Warn().Err(err).Msg("Failed to load AI config on startup")
	}

	return &AISettingsHandler{
		config:      cfg,
		persistence: persistence,
		aiService:   aiService,
		agentServer: agentServer,
	}
}

// SetConfig updates the configuration reference used by the handler.
func (h *AISettingsHandler) SetConfig(cfg *config.Config) {
	if cfg == nil {
		return
	}
	h.config = cfg
}

// SetStateProvider sets the state provider for infrastructure context
func (h *AISettingsHandler) SetStateProvider(sp ai.StateProvider) {
	h.aiService.SetStateProvider(sp)
}

// AISettingsResponse is returned by GET /api/settings/ai
// API key is masked for security
type AISettingsResponse struct {
	Enabled        bool   `json:"enabled"`
	Provider       string `json:"provider"`
	APIKeySet      bool   `json:"api_key_set"` // true if API key is configured (never expose actual key)
	Model          string `json:"model"`
	BaseURL        string `json:"base_url,omitempty"`
	Configured     bool   `json:"configured"`      // true if AI is ready to use
	AutonomousMode bool   `json:"autonomous_mode"` // true if AI can execute without approval
	CustomContext  string `json:"custom_context"`  // user-provided infrastructure context
}

// AISettingsUpdateRequest is the request body for PUT /api/settings/ai
type AISettingsUpdateRequest struct {
	Enabled        *bool   `json:"enabled,omitempty"`
	Provider       *string `json:"provider,omitempty"`
	APIKey         *string `json:"api_key,omitempty"` // empty string clears, null preserves
	Model          *string `json:"model,omitempty"`
	BaseURL        *string `json:"base_url,omitempty"`
	AutonomousMode *bool   `json:"autonomous_mode,omitempty"`
	CustomContext  *string `json:"custom_context,omitempty"` // user-provided infrastructure context
}

// HandleGetAISettings returns the current AI settings (GET /api/settings/ai)
func (h *AISettingsHandler) HandleGetAISettings(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodGet {
		http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
		return
	}

	settings, err := h.persistence.LoadAIConfig()
	if err != nil {
		log.Error().Err(err).Msg("Failed to load AI settings")
		http.Error(w, "Failed to load AI settings", http.StatusInternalServerError)
		return
	}

	if settings == nil {
		settings = config.NewDefaultAIConfig()
	}

	response := AISettingsResponse{
		Enabled:        settings.Enabled,
		Provider:       settings.Provider,
		APIKeySet:      settings.APIKey != "",
		Model:          settings.GetModel(),
		BaseURL:        settings.BaseURL,
		Configured:     settings.IsConfigured(),
		AutonomousMode: settings.AutonomousMode,
		CustomContext:  settings.CustomContext,
	}

	if err := utils.WriteJSONResponse(w, response); err != nil {
		log.Error().Err(err).Msg("Failed to write AI settings response")
	}
}

// HandleUpdateAISettings updates AI settings (PUT /api/settings/ai)
func (h *AISettingsHandler) HandleUpdateAISettings(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodPut && r.Method != http.MethodPost {
		http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
		return
	}

	// Require admin authentication
	if !CheckAuth(h.config, w, r) {
		return
	}

	// Check proxy auth admin status if applicable
	if h.config.ProxyAuthSecret != "" {
		if valid, username, isAdmin := CheckProxyAuth(h.config, r); valid && !isAdmin {
			log.Warn().
				Str("ip", r.RemoteAddr).
				Str("path", r.URL.Path).
				Str("method", r.Method).
				Str("username", username).
				Msg("Non-admin user attempted to update AI settings")
			w.Header().Set("Content-Type", "application/json")
			w.WriteHeader(http.StatusForbidden)
			_ = json.NewEncoder(w).Encode(map[string]string{"error": "Admin privileges required"})
			return
		}
	}

	// Load existing settings
	settings, err := h.persistence.LoadAIConfig()
	if err != nil {
		log.Error().Err(err).Msg("Failed to load existing AI settings")
		settings = config.NewDefaultAIConfig()
	}
	if settings == nil {
		settings = config.NewDefaultAIConfig()
	}

	// Parse request
	r.Body = http.MaxBytesReader(w, r.Body, 16*1024)
	var req AISettingsUpdateRequest
	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		http.Error(w, "Invalid request body", http.StatusBadRequest)
		return
	}

	// Validate and apply updates
	if req.Provider != nil {
		provider := strings.ToLower(strings.TrimSpace(*req.Provider))
		switch provider {
		case config.AIProviderAnthropic, config.AIProviderOpenAI, config.AIProviderOllama:
			settings.Provider = provider
		default:
			http.Error(w, "Invalid provider. Must be 'anthropic', 'openai', or 'ollama'", http.StatusBadRequest)
			return
		}
	}

	if req.APIKey != nil {
		// Empty string clears the API key
		settings.APIKey = strings.TrimSpace(*req.APIKey)
	}

	if req.Model != nil {
		settings.Model = strings.TrimSpace(*req.Model)
	}

	if req.BaseURL != nil {
		settings.BaseURL = strings.TrimSpace(*req.BaseURL)
	}

	if req.AutonomousMode != nil {
		settings.AutonomousMode = *req.AutonomousMode
	}

	if req.CustomContext != nil {
		settings.CustomContext = strings.TrimSpace(*req.CustomContext)
	}

	if req.Enabled != nil {
		// Only allow enabling if properly configured
		if *req.Enabled {
			switch settings.Provider {
			case config.AIProviderAnthropic, config.AIProviderOpenAI:
				if settings.APIKey == "" {
					http.Error(w, "Cannot enable AI: API key is required for "+settings.Provider, http.StatusBadRequest)
					return
				}
			case config.AIProviderOllama:
				// Ollama doesn't need API key, but needs base URL (or will use default)
				if settings.BaseURL == "" {
					settings.BaseURL = config.DefaultOllamaBaseURL
				}
			}
		}
		settings.Enabled = *req.Enabled
	}

	// Save settings
	if err := h.persistence.SaveAIConfig(*settings); err != nil {
		log.Error().Err(err).Msg("Failed to save AI settings")
		http.Error(w, "Failed to save settings", http.StatusInternalServerError)
		return
	}

	// Reload the AI service with new settings
	if err := h.aiService.Reload(); err != nil {
		log.Warn().Err(err).Msg("Failed to reload AI service after settings update")
	}

	log.Info().
		Bool("enabled", settings.Enabled).
		Str("provider", settings.Provider).
		Str("model", settings.GetModel()).
		Msg("AI settings updated")

	// Return updated settings
	response := AISettingsResponse{
		Enabled:        settings.Enabled,
		Provider:       settings.Provider,
		APIKeySet:      settings.APIKey != "",
		Model:          settings.GetModel(),
		BaseURL:        settings.BaseURL,
		Configured:     settings.IsConfigured(),
		AutonomousMode: settings.AutonomousMode,
		CustomContext:  settings.CustomContext,
	}

	if err := utils.WriteJSONResponse(w, response); err != nil {
		log.Error().Err(err).Msg("Failed to write AI settings update response")
	}
}
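As a rough illustration of the update flow, a client can PUT a partial body and rely on the pointer fields above to leave everything else untouched. A hedged sketch; `pulseURL` and `apiToken` are placeholders, and the `X-API-Token` header name is taken from the CORS allow-list later in this diff:

```go
// enableOllama is a sketch: enables the Ollama provider via the settings
// endpoint added in this commit. Omitted JSON fields (nil pointers server-side)
// are preserved; "api_key": "" would clear a stored key.
func enableOllama(pulseURL, apiToken string) error {
	body := []byte(`{"enabled":true,"provider":"ollama","model":"llama3","base_url":"http://localhost:11434"}`)

	req, err := http.NewRequest(http.MethodPut, pulseURL+"/api/settings/ai/update", bytes.NewReader(body))
	if err != nil {
		return err
	}
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("X-API-Token", apiToken)

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("update failed: %s", resp.Status)
	}
	return nil
}
```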
// HandleTestAIConnection tests the AI provider connection (POST /api/ai/test)
func (h *AISettingsHandler) HandleTestAIConnection(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodPost {
		http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
		return
	}

	// Require admin authentication
	if !CheckAuth(h.config, w, r) {
		return
	}

	ctx, cancel := context.WithTimeout(r.Context(), 30*time.Second)
	defer cancel()

	var testResult struct {
		Success bool   `json:"success"`
		Message string `json:"message"`
		Model   string `json:"model,omitempty"`
	}

	err := h.aiService.TestConnection(ctx)
	if err != nil {
		testResult.Success = false
		testResult.Message = err.Error()
	} else {
		cfg := h.aiService.GetConfig()
		testResult.Success = true
		testResult.Message = "Connection successful"
		if cfg != nil {
			testResult.Model = cfg.GetModel()
		}
	}

	if err := utils.WriteJSONResponse(w, testResult); err != nil {
		log.Error().Err(err).Msg("Failed to write AI test response")
	}
}
// AIConversationMessage represents a message in conversation history
type AIConversationMessage struct {
	Role    string `json:"role"` // "user" or "assistant"
	Content string `json:"content"`
}

// AIExecuteRequest is the request body for POST /api/ai/execute
type AIExecuteRequest struct {
	Prompt     string                  `json:"prompt"`
	TargetType string                  `json:"target_type,omitempty"` // "host", "container", "vm", "node"
	TargetID   string                  `json:"target_id,omitempty"`
	Context    map[string]interface{}  `json:"context,omitempty"` // Current metrics, state, etc.
	History    []AIConversationMessage `json:"history,omitempty"` // Previous conversation messages
}
// AIExecuteResponse is the response from POST /api/ai/execute
type AIExecuteResponse struct {
	Content      string             `json:"content"`
	Model        string             `json:"model"`
	InputTokens  int                `json:"input_tokens"`
	OutputTokens int                `json:"output_tokens"`
	ToolCalls    []ai.ToolExecution `json:"tool_calls,omitempty"` // Commands that were executed
}

// HandleExecute executes an AI prompt (POST /api/ai/execute)
func (h *AISettingsHandler) HandleExecute(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodPost {
		http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
		return
	}

	// Require authentication
	if !CheckAuth(h.config, w, r) {
		return
	}

	// Check if AI is enabled
	if !h.aiService.IsEnabled() {
		http.Error(w, "AI is not enabled or configured", http.StatusBadRequest)
		return
	}

	// Parse request
	r.Body = http.MaxBytesReader(w, r.Body, 64*1024) // 64KB max
	var req AIExecuteRequest
	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		http.Error(w, "Invalid request body", http.StatusBadRequest)
		return
	}

	if strings.TrimSpace(req.Prompt) == "" {
		http.Error(w, "Prompt is required", http.StatusBadRequest)
		return
	}

	// Execute the prompt with a timeout
	ctx, cancel := context.WithTimeout(r.Context(), 120*time.Second)
	defer cancel()

	resp, err := h.aiService.Execute(ctx, ai.ExecuteRequest{
		Prompt:     req.Prompt,
		TargetType: req.TargetType,
		TargetID:   req.TargetID,
		Context:    req.Context,
	})
	if err != nil {
		log.Error().Err(err).Msg("AI execution failed")
		http.Error(w, "AI request failed: "+err.Error(), http.StatusInternalServerError)
		return
	}

	response := AIExecuteResponse{
		Content:      resp.Content,
		Model:        resp.Model,
		InputTokens:  resp.InputTokens,
		OutputTokens: resp.OutputTokens,
		ToolCalls:    resp.ToolCalls,
	}

	if err := utils.WriteJSONResponse(w, response); err != nil {
		log.Error().Err(err).Msg("Failed to write AI execute response")
	}
}

// HandleExecuteStream executes an AI prompt with SSE streaming (POST /api/ai/execute/stream)
func (h *AISettingsHandler) HandleExecuteStream(w http.ResponseWriter, r *http.Request) {
	// Handle CORS for dev mode (frontend on different port)
	origin := r.Header.Get("Origin")
	if origin != "" {
		// Must use specific origin (not *) when credentials are included
		w.Header().Set("Access-Control-Allow-Origin", origin)
		w.Header().Set("Access-Control-Allow-Credentials", "true")
		w.Header().Set("Access-Control-Allow-Methods", "POST, OPTIONS")
		w.Header().Set("Access-Control-Allow-Headers", "Content-Type, Accept, Cookie")
		w.Header().Set("Vary", "Origin")
	}

	// Handle preflight
	if r.Method == http.MethodOptions {
		w.WriteHeader(http.StatusOK)
		return
	}

	if r.Method != http.MethodPost {
		http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
		return
	}

	// Require authentication
	if !CheckAuth(h.config, w, r) {
		return
	}

	// Check if AI is enabled
	if !h.aiService.IsEnabled() {
		http.Error(w, "AI is not enabled or configured", http.StatusBadRequest)
		return
	}

	// Parse request
	r.Body = http.MaxBytesReader(w, r.Body, 64*1024) // 64KB max
	var req AIExecuteRequest
	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		http.Error(w, "Invalid request body", http.StatusBadRequest)
		return
	}

	if strings.TrimSpace(req.Prompt) == "" {
		http.Error(w, "Prompt is required", http.StatusBadRequest)
		return
	}

	log.Info().
		Str("prompt", req.Prompt).
		Str("target_type", req.TargetType).
		Str("target_id", req.TargetID).
		Msg("AI streaming request started")

	// Set up SSE headers
	// IMPORTANT: Set headers BEFORE any writes to prevent Go from auto-adding Transfer-Encoding: chunked
	w.Header().Set("Content-Type", "text/event-stream")
	w.Header().Set("Cache-Control", "no-cache")
	w.Header().Set("Connection", "keep-alive")
	w.Header().Set("X-Accel-Buffering", "no") // Disable nginx buffering
	// Prevent chunked encoding which causes "Invalid character in chunk size" errors in Vite proxy
	w.Header().Set("Transfer-Encoding", "identity")

	flusher, ok := w.(http.Flusher)
	if !ok {
		http.Error(w, "Streaming not supported", http.StatusInternalServerError)
		return
	}

	// Disable the server's write deadline for this SSE connection
	// This is critical for long-running AI requests that can take several minutes
	rc := http.NewResponseController(w)
	if err := rc.SetWriteDeadline(time.Time{}); err != nil {
		log.Warn().Err(err).Msg("Failed to disable write deadline for SSE")
		// Continue anyway - heartbeats should help keep connection alive
	}

	// Flush headers immediately
	flusher.Flush()

	// Create context with timeout (5 minutes for complex analysis with multiple tool calls)
	// Use background context to avoid browser disconnect canceling the request
	ctx, cancel := context.WithTimeout(context.Background(), 300*time.Second)
	defer cancel()

	// Set up heartbeat to keep connection alive during long tool executions
	// NOTE: We don't check r.Context().Done() because Vite proxy may close
	// the request context prematurely. We detect real disconnection via write failures.
	heartbeatDone := make(chan struct{})
	go func() {
		ticker := time.NewTicker(5 * time.Second)
		defer ticker.Stop()
		for {
			select {
			case <-ticker.C:
				// Send SSE comment as heartbeat
				_, err := w.Write([]byte(": heartbeat\n\n"))
				if err != nil {
					log.Debug().Err(err).Msg("Heartbeat write failed, client disconnected")
					cancel() // Cancel the AI request
					return
				}
				flusher.Flush()
				log.Debug().Msg("Sent SSE heartbeat")
			case <-heartbeatDone:
				return
			}
		}
	}()
	defer close(heartbeatDone)

	// Stream callback - write SSE events
	callback := func(event ai.StreamEvent) {
		data, err := json.Marshal(event)
		if err != nil {
			log.Error().Err(err).Msg("Failed to marshal stream event")
			return
		}

		log.Debug().
			Str("event_type", event.Type).
			Msg("Streaming AI event")

		// SSE format: data: <json>\n\n
		_, writeErr := w.Write([]byte("data: " + string(data) + "\n\n"))
		if writeErr != nil {
			log.Debug().Err(writeErr).Msg("Failed to write SSE event (client may have disconnected)")
			return
		}
		flusher.Flush()
	}

	// Convert history from API type to service type
	var history []ai.ConversationMessage
	for _, msg := range req.History {
		history = append(history, ai.ConversationMessage{
			Role:    msg.Role,
			Content: msg.Content,
		})
	}

	// Execute with streaming
	resp, err := h.aiService.ExecuteStream(ctx, ai.ExecuteRequest{
		Prompt:     req.Prompt,
		TargetType: req.TargetType,
		TargetID:   req.TargetID,
		Context:    req.Context,
		History:    history,
	}, callback)

	if err != nil {
		log.Error().Err(err).Msg("AI streaming execution failed")
		// Send error event
		errEvent := ai.StreamEvent{Type: "error", Data: err.Error()}
		data, _ := json.Marshal(errEvent)
		_, _ = w.Write([]byte("data: " + string(data) + "\n\n"))
		flusher.Flush()
		return
	}

	log.Info().
		Str("model", resp.Model).
		Int("input_tokens", resp.InputTokens).
		Int("output_tokens", resp.OutputTokens).
		Int("tool_calls", len(resp.ToolCalls)).
		Msg("AI streaming request completed")

	// Send final response with metadata
	finalEvent := struct {
		Type         string             `json:"type"`
		Model        string             `json:"model"`
		InputTokens  int                `json:"input_tokens"`
		OutputTokens int                `json:"output_tokens"`
		ToolCalls    []ai.ToolExecution `json:"tool_calls,omitempty"`
	}{
		Type:         "complete",
		Model:        resp.Model,
		InputTokens:  resp.InputTokens,
		OutputTokens: resp.OutputTokens,
		ToolCalls:    resp.ToolCalls,
	}
	data, _ := json.Marshal(finalEvent)
	_, _ = w.Write([]byte("data: " + string(data) + "\n\n"))
	flusher.Flush()
}
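On the wire, each event is a `data: <json>` line followed by a blank line, with `: heartbeat` comment lines interleaved every 5 seconds. A minimal Go consumer, relying only on the framing shown above; authentication is omitted, and the exact StreamEvent fields beyond `type` are an assumption based on the error and complete events in this handler:

```go
// streamPrompt is a sketch of a client for the SSE endpoint above.
func streamPrompt(pulseURL, prompt string) {
	body, _ := json.Marshal(map[string]string{"prompt": prompt})
	resp, err := http.Post(pulseURL+"/api/ai/execute/stream", "application/json", bytes.NewReader(body))
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	scanner := bufio.NewScanner(resp.Body)
	for scanner.Scan() {
		line := scanner.Text()
		if line == "" || strings.HasPrefix(line, ":") {
			continue // event separator or ": heartbeat" comment
		}
		payload := strings.TrimPrefix(line, "data: ")

		var event struct {
			Type string `json:"type"`
		}
		if err := json.Unmarshal([]byte(payload), &event); err != nil {
			continue
		}
		if event.Type == "complete" {
			break // final record carries model and token counts
		}
		fmt.Println(payload)
	}
}
```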
// AIRunCommandRequest is the request body for POST /api/ai/run-command
type AIRunCommandRequest struct {
	Command    string `json:"command"`
	TargetType string `json:"target_type"`
	TargetID   string `json:"target_id"`
	RunOnHost  bool   `json:"run_on_host"`
	VMID       string `json:"vmid,omitempty"`
}

// HandleRunCommand executes a single approved command (POST /api/ai/run-command)
func (h *AISettingsHandler) HandleRunCommand(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodPost {
		http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
		return
	}

	// Require authentication
	if !CheckAuth(h.config, w, r) {
		return
	}

	// Parse request
	r.Body = http.MaxBytesReader(w, r.Body, 16*1024)
	var req AIRunCommandRequest
	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		http.Error(w, "Invalid request body", http.StatusBadRequest)
		return
	}

	if strings.TrimSpace(req.Command) == "" {
		http.Error(w, "Command is required", http.StatusBadRequest)
		return
	}

	log.Info().
		Str("command", req.Command).
		Str("target_type", req.TargetType).
		Str("target_id", req.TargetID).
		Bool("run_on_host", req.RunOnHost).
		Msg("Executing approved command")

	// Execute with timeout
	ctx, cancel := context.WithTimeout(r.Context(), 120*time.Second)
	defer cancel()

	resp, err := h.aiService.RunCommand(ctx, ai.RunCommandRequest{
		Command:    req.Command,
		TargetType: req.TargetType,
		TargetID:   req.TargetID,
		RunOnHost:  req.RunOnHost,
		VMID:       req.VMID,
	})
	if err != nil {
		log.Error().Err(err).Msg("Failed to execute command")
		http.Error(w, "Failed to execute command: "+err.Error(), http.StatusInternalServerError)
		return
	}

	if err := utils.WriteJSONResponse(w, resp); err != nil {
		log.Error().Err(err).Msg("Failed to write run command response")
	}
}
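The approval flow therefore reduces to the UI replaying a proposed command through this endpoint. A hedged fragment of that call, reusing the request type above (target values are examples only, and auth is omitted for brevity):

```go
// Sketch: replay an AI-proposed command through the approval endpoint.
payload, _ := json.Marshal(AIRunCommandRequest{
	Command:    "df -h",
	TargetType: "container",
	TargetID:   "101", // example container ID
})
resp, err := http.Post(pulseURL+"/api/ai/run-command", "application/json", bytes.NewReader(payload))
if err != nil {
	log.Fatal(err)
}
defer resp.Body.Close()
```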
@@ -26,6 +26,7 @@ import (
	"time"

	"github.com/rcourtman/pulse-go-rewrite/internal/agentbinaries"
	"github.com/rcourtman/pulse-go-rewrite/internal/agentexec"
	"github.com/rcourtman/pulse-go-rewrite/internal/auth"
	"github.com/rcourtman/pulse-go-rewrite/internal/config"
	"github.com/rcourtman/pulse-go-rewrite/internal/models"
@@ -51,6 +52,8 @@ type Router struct {
	hostAgentHandlers        *HostAgentHandlers
	temperatureProxyHandlers *TemperatureProxyHandlers
	systemSettingsHandler    *SystemSettingsHandler
	aiSettingsHandler        *AISettingsHandler
	agentExecServer          *agentexec.Server
	wsHub                    *websocket.Hub
	reloadFunc               func() error
	updateManager            *updates.Manager
@@ -994,6 +997,39 @@ func (r *Router) setupRoutes() {
	r.mux.HandleFunc("/api/system/proxy-public-key", r.handleProxyPublicKey)
	// Old API token endpoints removed - now using /api/security/regenerate-token

	// Agent execution server for AI tool use
	r.agentExecServer = agentexec.NewServer(func(token string) bool {
		// Validate agent tokens using the API tokens system
		if r.config == nil {
			return false
		}
		// First check the new API tokens system
		if _, ok := r.config.ValidateAPIToken(token); ok {
			return true
		}
		// Fall back to legacy single token if set
		if r.config.APIToken != "" {
			return auth.CompareAPIToken(token, r.config.APIToken)
		}
		return false
	})

	// AI settings endpoints
	r.aiSettingsHandler = NewAISettingsHandler(r.config, r.persistence, r.agentExecServer)
	// Inject state provider so AI has access to full infrastructure context (VMs, containers, IPs)
	if r.monitor != nil {
		r.aiSettingsHandler.SetStateProvider(r.monitor)
	}
	r.mux.HandleFunc("/api/settings/ai", RequireAdmin(r.config, RequireScope(config.ScopeSettingsRead, r.aiSettingsHandler.HandleGetAISettings)))
	r.mux.HandleFunc("/api/settings/ai/update", RequireAdmin(r.config, RequireScope(config.ScopeSettingsWrite, r.aiSettingsHandler.HandleUpdateAISettings)))
	r.mux.HandleFunc("/api/ai/test", RequireAdmin(r.config, RequireScope(config.ScopeSettingsWrite, r.aiSettingsHandler.HandleTestAIConnection)))
	r.mux.HandleFunc("/api/ai/execute", RequireAuth(r.config, r.aiSettingsHandler.HandleExecute))
	r.mux.HandleFunc("/api/ai/execute/stream", RequireAuth(r.config, r.aiSettingsHandler.HandleExecuteStream))
	r.mux.HandleFunc("/api/ai/run-command", RequireAuth(r.config, r.aiSettingsHandler.HandleRunCommand))

	// Agent WebSocket for AI command execution
	r.mux.HandleFunc("/api/agent/ws", r.handleAgentWebSocket)

	// Docker agent download endpoints
	r.mux.HandleFunc("/install-docker-agent.sh", r.handleDownloadInstallScript) // Serves the Docker agent install script
	r.mux.HandleFunc("/install-container-agent.sh", r.handleDownloadContainerAgentInstallScript)
@@ -1029,6 +1065,15 @@ func (r *Router) setupRoutes() {

}

// handleAgentWebSocket handles WebSocket connections from agents for AI command execution
func (r *Router) handleAgentWebSocket(w http.ResponseWriter, req *http.Request) {
	if r.agentExecServer == nil {
		http.Error(w, "Agent execution not available", http.StatusServiceUnavailable)
		return
	}
	r.agentExecServer.HandleWebSocket(w, req)
}

func (r *Router) handleVerifyTemperatureSSH(w http.ResponseWriter, req *http.Request) {
	if r.configHandlers == nil {
		http.Error(w, "Service unavailable", http.StatusServiceUnavailable)
@@ -1289,6 +1334,10 @@ func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) {
	w.Header().Set("Access-Control-Allow-Methods", "GET, POST, PUT, DELETE, OPTIONS")
	w.Header().Set("Access-Control-Allow-Headers", "Content-Type, Authorization, X-API-Token, X-CSRF-Token, X-Setup-Token")
	w.Header().Set("Access-Control-Expose-Headers", "X-CSRF-Token, X-Authenticated-User, X-Auth-Method")
	// Allow credentials when origin is specific (not *)
	if r.config.AllowedOrigins != "*" {
		w.Header().Set("Access-Control-Allow-Credentials", "true")
	}
}

// Handle preflight requests
@@ -1351,6 +1400,7 @@ func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) {
	"/install.ps1",          // Unified agent Windows installer
	"/download/pulse-agent", // Unified agent binary
	"/api/agent/version",    // Agent update checks need to work before auth
	"/api/agent/ws",         // Agent WebSocket has its own auth via registration
	"/api/server/info",      // Server info for installer script
	"/api/install/install-sensor-proxy.sh", // Temperature proxy installer fallback
	"/api/install/pulse-sensor-proxy",      // Temperature proxy binary fallback
82 internal/config/ai.go Normal file
@@ -0,0 +1,82 @@
package config

// AIConfig holds AI feature configuration
// This is stored in ai.enc (encrypted) in the config directory
type AIConfig struct {
	Enabled        bool   `json:"enabled"`
	Provider       string `json:"provider"`        // "anthropic", "openai", "ollama"
	APIKey         string `json:"api_key"`         // encrypted at rest (not needed for ollama)
	Model          string `json:"model"`           // e.g., "claude-opus-4-5-20250514", "gpt-4o", "llama3"
	BaseURL        string `json:"base_url"`        // custom endpoint (required for ollama, optional for openai)
	AutonomousMode bool   `json:"autonomous_mode"` // when true, AI executes commands without approval
	CustomContext  string `json:"custom_context"`  // user-provided context about their infrastructure
}

// AIProvider constants
const (
	AIProviderAnthropic = "anthropic"
	AIProviderOpenAI    = "openai"
	AIProviderOllama    = "ollama"
)

// Default models per provider
const (
	DefaultAIModelAnthropic = "claude-opus-4-5-20251101"
	DefaultAIModelOpenAI    = "gpt-4o"
	DefaultAIModelOllama    = "llama3"
	DefaultOllamaBaseURL    = "http://localhost:11434"
)

// NewDefaultAIConfig returns an AIConfig with sensible defaults
func NewDefaultAIConfig() *AIConfig {
	return &AIConfig{
		Enabled:  false,
		Provider: AIProviderAnthropic,
		Model:    DefaultAIModelAnthropic,
	}
}

// IsConfigured returns true if the AI config has enough info to make API calls
func (c *AIConfig) IsConfigured() bool {
	if !c.Enabled {
		return false
	}

	switch c.Provider {
	case AIProviderAnthropic, AIProviderOpenAI:
		return c.APIKey != ""
	case AIProviderOllama:
		// Ollama doesn't need an API key
		return true
	default:
		return false
	}
}

// GetBaseURL returns the base URL, using defaults where appropriate
func (c *AIConfig) GetBaseURL() string {
	if c.BaseURL != "" {
		return c.BaseURL
	}
	if c.Provider == AIProviderOllama {
		return DefaultOllamaBaseURL
	}
	return ""
}

// GetModel returns the model, using defaults where appropriate
func (c *AIConfig) GetModel() string {
	if c.Model != "" {
		return c.Model
	}
	switch c.Provider {
	case AIProviderAnthropic:
		return DefaultAIModelAnthropic
	case AIProviderOpenAI:
		return DefaultAIModelOpenAI
	case AIProviderOllama:
		return DefaultAIModelOllama
	default:
		return ""
	}
}
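The zero-value fallbacks compose, so a sparsely populated config still resolves to something usable. A small sketch of the resulting behaviour, grounded in the accessors above:

```go
// Sketch: defaulting behaviour of the AIConfig accessors.
cfg := config.AIConfig{Enabled: true, Provider: config.AIProviderOllama}

fmt.Println(cfg.GetModel())     // "llama3" (DefaultAIModelOllama)
fmt.Println(cfg.GetBaseURL())   // "http://localhost:11434" (DefaultOllamaBaseURL)
fmt.Println(cfg.IsConfigured()) // true: Ollama needs no API key

cfg.Provider = config.AIProviderAnthropic
fmt.Println(cfg.IsConfigured()) // false until APIKey is set
```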
@@ -16,6 +16,7 @@ type DockerMetadata struct {
	CustomURL   string   `json:"customUrl"`   // Custom URL for the resource
	Description string   `json:"description"` // Optional description
	Tags        []string `json:"tags"`        // Optional tags for categorization
	Notes       []string `json:"notes"`       // User annotations for AI context
}

// DockerHostMetadata holds additional metadata for a Docker host

@@ -16,6 +16,7 @@ type GuestMetadata struct {
	CustomURL   string   `json:"customUrl"`   // Custom URL for the guest
	Description string   `json:"description"` // Optional description
	Tags        []string `json:"tags"`        // Optional tags for categorization
	Notes       []string `json:"notes"`       // User annotations for AI context (e.g., "Runs PBS in Docker")
	// Last-known identity (persisted even after guest deletion)
	LastKnownName string `json:"lastKnownName,omitempty"` // Last known guest name
	LastKnownType string `json:"lastKnownType,omitempty"` // Last known guest type (qemu, lxc)
@@ -31,6 +31,7 @@ type ConfigPersistence struct {
	systemFile    string
	oidcFile      string
	apiTokensFile string
	aiFile        string
	crypto        *crypto.CryptoManager
}

@@ -73,6 +74,7 @@ func newConfigPersistence(configDir string) (*ConfigPersistence, error) {
		systemFile:    filepath.Join(configDir, "system.json"),
		oidcFile:      filepath.Join(configDir, "oidc.enc"),
		apiTokensFile: filepath.Join(configDir, "api_tokens.json"),
		aiFile:        filepath.Join(configDir, "ai.enc"),
		crypto:        cryptoMgr,
	}
@@ -1291,6 +1293,67 @@ func (c *ConfigPersistence) LoadOIDCConfig() (*OIDCConfig, error) {
	return &settings, nil
}

// SaveAIConfig stores AI settings, encrypting them when a crypto manager is available.
func (c *ConfigPersistence) SaveAIConfig(settings AIConfig) error {
	c.mu.Lock()
	defer c.mu.Unlock()

	if err := c.EnsureConfigDir(); err != nil {
		return err
	}

	data, err := json.Marshal(settings)
	if err != nil {
		return err
	}

	if c.crypto != nil {
		encrypted, err := c.crypto.Encrypt(data)
		if err != nil {
			return err
		}
		data = encrypted
	}

	if err := c.writeConfigFileLocked(c.aiFile, data, 0600); err != nil {
		return err
	}

	log.Info().Str("file", c.aiFile).Bool("enabled", settings.Enabled).Msg("AI configuration saved")
	return nil
}

// LoadAIConfig retrieves the persisted AI settings. It returns the default config when no configuration exists yet.
func (c *ConfigPersistence) LoadAIConfig() (*AIConfig, error) {
	c.mu.RLock()
	defer c.mu.RUnlock()

	data, err := os.ReadFile(c.aiFile)
	if err != nil {
		if os.IsNotExist(err) {
			// Return default config if file doesn't exist
			return NewDefaultAIConfig(), nil
		}
		return nil, err
	}

	if c.crypto != nil {
		decrypted, err := c.crypto.Decrypt(data)
		if err != nil {
			return nil, err
		}
		data = decrypted
	}

	var settings AIConfig
	if err := json.Unmarshal(data, &settings); err != nil {
		return nil, err
	}

	log.Info().Str("file", c.aiFile).Bool("enabled", settings.Enabled).Msg("AI configuration loaded")
	return &settings, nil
}

// LoadSystemSettings loads system settings from file
func (c *ConfigPersistence) LoadSystemSettings() (*SystemSettings, error) {
	c.mu.RLock()
@@ -1440,3 +1503,60 @@ func (c *ConfigPersistence) cleanupOldBackups(pattern string) {
		}
	}
}

// LoadGuestMetadata loads all guest metadata from disk (for AI context)
func (c *ConfigPersistence) LoadGuestMetadata() (map[string]*GuestMetadata, error) {
	c.mu.RLock()
	defer c.mu.RUnlock()

	filePath := filepath.Join(c.configDir, "guest_metadata.json")
	data, err := os.ReadFile(filePath)
	if err != nil {
		if os.IsNotExist(err) {
			return make(map[string]*GuestMetadata), nil
		}
		return nil, err
	}

	var metadata map[string]*GuestMetadata
	if err := json.Unmarshal(data, &metadata); err != nil {
		return nil, err
	}

	return metadata, nil
}

// LoadDockerMetadata loads all docker metadata from disk (for AI context)
func (c *ConfigPersistence) LoadDockerMetadata() (map[string]*DockerMetadata, error) {
	c.mu.RLock()
	defer c.mu.RUnlock()

	filePath := filepath.Join(c.configDir, "docker_metadata.json")
	data, err := os.ReadFile(filePath)
	if err != nil {
		if os.IsNotExist(err) {
			return make(map[string]*DockerMetadata), nil
		}
		return nil, err
	}

	// Try versioned format first
	var fileData struct {
		Containers map[string]*DockerMetadata `json:"containers,omitempty"`
	}
	if err := json.Unmarshal(data, &fileData); err != nil {
		return nil, err
	}

	if fileData.Containers != nil {
		return fileData.Containers, nil
	}

	// Fall back to legacy format (direct map)
	var metadata map[string]*DockerMetadata
	if err := json.Unmarshal(data, &metadata); err != nil {
		return nil, err
	}

	return metadata, nil
}
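Both on-disk shapes for `docker_metadata.json` are accepted: the versioned wrapper and the legacy flat map. A sketch mirroring the fallback logic above (the container key `host1/nginx` is illustrative only):

```go
// Sketch: the two JSON shapes LoadDockerMetadata accepts.
for _, raw := range []string{
	`{"containers":{"host1/nginx":{"description":"edge proxy"}}}`, // versioned wrapper
	`{"host1/nginx":{"description":"edge proxy"}}`,                // legacy flat map
} {
	var file struct {
		Containers map[string]*DockerMetadata `json:"containers,omitempty"`
	}
	_ = json.Unmarshal([]byte(raw), &file)
	meta := file.Containers
	if meta == nil {
		_ = json.Unmarshal([]byte(raw), &meta) // legacy: the document itself is the map
	}
	fmt.Println(meta["host1/nginx"].Description) // "edge proxy" in both cases
}
```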
@@ -57,6 +57,7 @@ type Agent struct {
	interval        time.Duration
	trimmedPulseURL string
	reportBuffer    *buffer.Queue[agentshost.Report]
	commandClient   *CommandClient
}

const defaultInterval = 30 * time.Second

@@ -168,7 +169,7 @@ func New(cfg Config) (*Agent, error) {

	const bufferCapacity = 60

	return &Agent{
	agent := &Agent{
		cfg:        cfg,
		logger:     logger,
		httpClient: client,
@@ -186,7 +187,12 @@ func New(cfg Config) (*Agent, error) {
		interval:        cfg.Interval,
		trimmedPulseURL: pulseURL,
		reportBuffer:    buffer.New[agentshost.Report](bufferCapacity),
	}, nil
	}

	// Create command client for AI command execution
	agent.commandClient = NewCommandClient(cfg, agentID, hostname, platform, agentVersion)

	return agent, nil
}

// Run executes the agent until the context is cancelled.
@@ -195,6 +201,15 @@ func (a *Agent) Run(ctx context.Context) error {
	return a.runOnce(ctx)
}

// Start command client in background for AI command execution
if a.commandClient != nil {
	go func() {
		if err := a.commandClient.Run(ctx); err != nil && !errors.Is(err, context.Canceled) {
			a.logger.Error().Err(err).Msg("Command client stopped with error")
		}
	}()
}

ticker := time.NewTicker(a.interval)
defer ticker.Stop()
437 internal/hostagent/commands.go Normal file
@@ -0,0 +1,437 @@
package hostagent

import (
	"bytes"
	"context"
	"crypto/tls"
	"encoding/json"
	"fmt"
	"net/url"
	"os/exec"
	"runtime"
	"strings"
	"sync"
	"time"

	"github.com/gorilla/websocket"
	"github.com/rs/zerolog"
)

// CommandClient handles WebSocket connection to Pulse for AI command execution
type CommandClient struct {
	pulseURL           string
	apiToken           string
	agentID            string
	hostname           string
	platform           string
	version            string
	insecureSkipVerify bool
	logger             zerolog.Logger

	conn   *websocket.Conn
	connMu sync.Mutex
	done   chan struct{}
}

// NewCommandClient creates a new command execution client
func NewCommandClient(cfg Config, agentID, hostname, platform, version string) *CommandClient {
	logger := cfg.Logger.With().Str("component", "command-client").Logger()

	return &CommandClient{
		pulseURL:           strings.TrimRight(cfg.PulseURL, "/"),
		apiToken:           cfg.APIToken,
		agentID:            agentID,
		hostname:           hostname,
		platform:           platform,
		version:            version,
		insecureSkipVerify: cfg.InsecureSkipVerify,
		logger:             logger,
		done:               make(chan struct{}),
	}
}

// Message types matching agentexec package
type messageType string

const (
	msgTypeAgentRegister messageType = "agent_register"
	msgTypeAgentPing     messageType = "agent_ping"
	msgTypeCommandResult messageType = "command_result"
	msgTypeRegistered    messageType = "registered"
	msgTypePong          messageType = "pong"
	msgTypeExecuteCmd    messageType = "execute_command"
	msgTypeReadFile      messageType = "read_file"
)

type wsMessage struct {
	Type      messageType     `json:"type"`
	ID        string          `json:"id,omitempty"`
	Timestamp time.Time       `json:"timestamp"`
	Payload   json.RawMessage `json:"payload,omitempty"`
}

type registerPayload struct {
	AgentID  string   `json:"agent_id"`
	Hostname string   `json:"hostname"`
	Version  string   `json:"version"`
	Platform string   `json:"platform"`
	Tags     []string `json:"tags,omitempty"`
	Token    string   `json:"token"`
}

type registeredPayload struct {
	Success bool   `json:"success"`
	Message string `json:"message,omitempty"`
}

type executeCommandPayload struct {
	RequestID  string `json:"request_id"`
	Command    string `json:"command"`
	TargetType string `json:"target_type"`
	TargetID   string `json:"target_id,omitempty"`
	Timeout    int    `json:"timeout,omitempty"`
}

type commandResultPayload struct {
	RequestID string `json:"request_id"`
	Success   bool   `json:"success"`
	Stdout    string `json:"stdout,omitempty"`
	Stderr    string `json:"stderr,omitempty"`
	ExitCode  int    `json:"exit_code"`
	Error     string `json:"error,omitempty"`
	Duration  int64  `json:"duration_ms"`
}
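For concreteness, a sketch of the registration frame these types serialize to. Field values are placeholders; the token is checked server-side by the validator passed to `agentexec.NewServer` earlier in this diff:

```go
// Sketch: building an agent_register frame (values are placeholders).
payload, _ := json.Marshal(registerPayload{
	AgentID:  "host-abc123",
	Hostname: "pve1",
	Version:  "1.2.3",
	Platform: "linux",
	Token:    "pulse-api-token",
})
frame, _ := json.Marshal(wsMessage{
	Type:      msgTypeAgentRegister,
	Timestamp: time.Now(),
	Payload:   payload,
})
fmt.Println(string(frame))
// {"type":"agent_register","timestamp":"...","payload":{"agent_id":"host-abc123",...}}
```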
// Run starts the command client and maintains the WebSocket connection
func (c *CommandClient) Run(ctx context.Context) error {
	for {
		select {
		case <-ctx.Done():
			return ctx.Err()
		default:
		}

		if err := c.connectAndHandle(ctx); err != nil {
			if ctx.Err() != nil {
				return ctx.Err()
			}
			c.logger.Warn().Err(err).Msg("WebSocket connection failed, reconnecting in 10s")
			select {
			case <-ctx.Done():
				return ctx.Err()
			case <-time.After(10 * time.Second):
			}
		}
	}
}

func (c *CommandClient) connectAndHandle(ctx context.Context) error {
	// Build WebSocket URL
	wsURL, err := c.buildWebSocketURL()
	if err != nil {
		return fmt.Errorf("build websocket url: %w", err)
	}

	c.logger.Info().Str("url", wsURL).Msg("Connecting to Pulse command server")

	// Create dialer with TLS config
	dialer := websocket.Dialer{
		TLSClientConfig: &tls.Config{
			MinVersion:         tls.VersionTLS12,
			InsecureSkipVerify: c.insecureSkipVerify,
		},
		HandshakeTimeout: 10 * time.Second,
	}

	// Connect
	conn, _, err := dialer.DialContext(ctx, wsURL, nil)
	if err != nil {
		return fmt.Errorf("dial websocket: %w", err)
	}

	c.connMu.Lock()
	c.conn = conn
	c.connMu.Unlock()

	defer func() {
		c.connMu.Lock()
		c.conn = nil
		c.connMu.Unlock()
		conn.Close()
	}()

	// Send registration
	if err := c.sendRegistration(conn); err != nil {
		return fmt.Errorf("send registration: %w", err)
	}

	// Wait for registration response
	if err := c.waitForRegistration(conn); err != nil {
		return fmt.Errorf("registration failed: %w", err)
	}

	c.logger.Info().Msg("Connected and registered with Pulse command server")

	// Start ping loop
	pingDone := make(chan struct{})
	go c.pingLoop(ctx, conn, pingDone)
	defer close(pingDone)

	// Handle incoming messages
	return c.handleMessages(ctx, conn)
}

func (c *CommandClient) buildWebSocketURL() (string, error) {
	parsed, err := url.Parse(c.pulseURL)
	if err != nil {
		return "", err
	}

	// Convert http(s) to ws(s)
	switch parsed.Scheme {
	case "https":
		parsed.Scheme = "wss"
	case "http":
		parsed.Scheme = "ws"
	case "wss", "ws":
		// Already WebSocket scheme
	default:
		parsed.Scheme = "ws"
	}

	parsed.Path = "/api/agent/ws"
	return parsed.String(), nil
}

func (c *CommandClient) sendRegistration(conn *websocket.Conn) error {
	payload, _ := json.Marshal(registerPayload{
		AgentID:  c.agentID,
		Hostname: c.hostname,
		Version:  c.version,
		Platform: c.platform,
		Token:    c.apiToken,
	})

	msg := wsMessage{
		Type:      msgTypeAgentRegister,
		Timestamp: time.Now(),
		Payload:   payload,
	}

	return conn.WriteJSON(msg)
}

func (c *CommandClient) waitForRegistration(conn *websocket.Conn) error {
	conn.SetReadDeadline(time.Now().Add(30 * time.Second))
	defer conn.SetReadDeadline(time.Time{})

	var msg wsMessage
	if err := conn.ReadJSON(&msg); err != nil {
		return fmt.Errorf("read registration response: %w", err)
	}

	if msg.Type != msgTypeRegistered {
		return fmt.Errorf("unexpected message type: %s", msg.Type)
	}

	var payload registeredPayload
	if err := json.Unmarshal(msg.Payload, &payload); err != nil {
		return fmt.Errorf("parse registration response: %w", err)
	}

	if !payload.Success {
		return fmt.Errorf("registration rejected: %s", payload.Message)
	}

	return nil
}

func (c *CommandClient) pingLoop(ctx context.Context, conn *websocket.Conn, done chan struct{}) {
	ticker := time.NewTicker(30 * time.Second)
	defer ticker.Stop()

	for {
		select {
		case <-ctx.Done():
			return
		case <-done:
			return
		case <-ticker.C:
			c.connMu.Lock()
			if c.conn != nil {
				msg := wsMessage{
					Type:      msgTypeAgentPing,
					Timestamp: time.Now(),
				}
				if err := conn.WriteJSON(msg); err != nil {
					c.logger.Debug().Err(err).Msg("Failed to send ping")
				}
			}
			c.connMu.Unlock()
		}
	}
}

func (c *CommandClient) handleMessages(ctx context.Context, conn *websocket.Conn) error {
	for {
		select {
		case <-ctx.Done():
			return ctx.Err()
		default:
		}

		var msg wsMessage
		if err := conn.ReadJSON(&msg); err != nil {
			return fmt.Errorf("read message: %w", err)
		}

		switch msg.Type {
		case msgTypePong:
			// Ignore pong responses

		case msgTypeExecuteCmd:
			var payload executeCommandPayload
			if err := json.Unmarshal(msg.Payload, &payload); err != nil {
				c.logger.Error().Err(err).Msg("Failed to parse execute_command payload")
				continue
			}

			// Execute command in background
			go c.handleExecuteCommand(ctx, conn, payload)

		case msgTypeReadFile:
			// Handle read_file similarly (uses cat command internally)
			var payload executeCommandPayload
			if err := json.Unmarshal(msg.Payload, &payload); err != nil {
				c.logger.Error().Err(err).Msg("Failed to parse read_file payload")
				continue
			}
			go c.handleExecuteCommand(ctx, conn, payload)

		default:
			c.logger.Debug().Str("type", string(msg.Type)).Msg("Unknown message type")
		}
	}
}

func (c *CommandClient) handleExecuteCommand(ctx context.Context, conn *websocket.Conn, payload executeCommandPayload) {
	startTime := time.Now()

	c.logger.Info().
		Str("request_id", payload.RequestID).
		Str("command", payload.Command).
		Str("target_type", payload.TargetType).
		Str("target_id", payload.TargetID).
		Msg("Executing command")

	result := c.executeCommand(ctx, payload)
	result.Duration = time.Since(startTime).Milliseconds()

	// Send result back
	resultPayload, _ := json.Marshal(result)
	msg := wsMessage{
		Type:      msgTypeCommandResult,
		ID:        payload.RequestID,
		Timestamp: time.Now(),
		Payload:   resultPayload,
	}

	c.connMu.Lock()
	err := conn.WriteJSON(msg)
	c.connMu.Unlock()

	if err != nil {
		c.logger.Error().Err(err).Str("request_id", payload.RequestID).Msg("Failed to send command result")
	} else {
		c.logger.Info().
			Str("request_id", payload.RequestID).
			Bool("success", result.Success).
			Int("exit_code", result.ExitCode).
			Int64("duration_ms", result.Duration).
			Msg("Command completed")
	}
}

func (c *CommandClient) executeCommand(ctx context.Context, payload executeCommandPayload) commandResultPayload {
	result := commandResultPayload{
		RequestID: payload.RequestID,
	}

	// Determine timeout
	timeout := time.Duration(payload.Timeout) * time.Second
	if timeout <= 0 {
		timeout = 60 * time.Second
	}

	cmdCtx, cancel := context.WithTimeout(ctx, timeout)
	defer cancel()

	// Build the command based on target type
	command := payload.Command

	// If targeting a container or VM, wrap the command
	if payload.TargetType == "container" && payload.TargetID != "" {
		// Use pct exec for LXC containers
		command = fmt.Sprintf("pct exec %s -- %s", payload.TargetID, payload.Command)
	} else if payload.TargetType == "vm" && payload.TargetID != "" {
		// Use qm guest exec for VMs (requires QEMU guest agent)
		command = fmt.Sprintf("qm guest exec %s -- %s", payload.TargetID, payload.Command)
	}

	// Execute the command
	var cmd *exec.Cmd
	if runtime.GOOS == "windows" {
		cmd = exec.CommandContext(cmdCtx, "cmd", "/C", command)
	} else {
		cmd = exec.CommandContext(cmdCtx, "sh", "-c", command)
	}

	var stdout, stderr bytes.Buffer
	cmd.Stdout = &stdout
	cmd.Stderr = &stderr

	err := cmd.Run()

	result.Stdout = stdout.String()
	result.Stderr = stderr.String()

	if err != nil {
		if exitErr, ok := err.(*exec.ExitError); ok {
			result.ExitCode = exitErr.ExitCode()
			result.Success = false
		} else if cmdCtx.Err() == context.DeadlineExceeded {
			result.Error = "command timed out"
			result.ExitCode = -1
			result.Success = false
		} else {
			result.Error = err.Error()
			result.ExitCode = -1
			result.Success = false
		}
	} else {
		result.ExitCode = 0
		result.Success = true
	}

	// Truncate output if too large (1MB limit)
	const maxOutputSize = 1024 * 1024
	if len(result.Stdout) > maxOutputSize {
		result.Stdout = result.Stdout[:maxOutputSize] + "\n... (output truncated)"
	}
	if len(result.Stderr) > maxOutputSize {
		result.Stderr = result.Stderr[:maxOutputSize] + "\n... (output truncated)"
	}

	return result
}
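The wrapping above means the agent always runs on a Proxmox node and reaches into guests through the node's own tooling. Illustrative command strings (IDs are placeholders; `qm guest exec` additionally requires the QEMU guest agent inside the VM):

```go
// Sketch: strings produced by the target-type wrapping above.
fmt.Printf("pct exec %s -- %s\n", "101", "df -h")       // LXC: pct exec 101 -- df -h
fmt.Printf("qm guest exec %s -- %s\n", "200", "uptime") // VM:  qm guest exec 200 -- uptime
```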
// Close closes the command client connection
func (c *CommandClient) Close() error {
	c.connMu.Lock()
	defer c.connMu.Unlock()

	if c.conn != nil {
		return c.conn.Close()
	}
	return nil
}
2 mock.env
@@ -1,4 +1,4 @@
PULSE_MOCK_MODE=true
PULSE_MOCK_MODE=false
PULSE_MOCK_NODES=7
PULSE_MOCK_VMS_PER_NODE=5
PULSE_MOCK_LXCS_PER_NODE=8
@@ -78,7 +78,9 @@ if [[ -z ${PULSE_DEV_WS_URL:-} ]]; then
	fi
fi

ALLOWED_ORIGINS="*"
# Set specific allowed origin for CORS with credentials
# Use the frontend dev URL so cross-port SSE requests work with auth cookies
ALLOWED_ORIGINS="http://${PULSE_DEV_API_HOST:-127.0.0.1}:${FRONTEND_DEV_PORT:-7655}"

export FRONTEND_PORT PORT
export FRONTEND_DEV_HOST FRONTEND_DEV_PORT
@@ -325,18 +325,26 @@ chmod +x "${INSTALL_DIR}/${BINARY_NAME}"
# --- Legacy Cleanup ---
# Remove old agents if they exist to prevent conflicts
# This is critical because legacy agents use the same system ID and will cause
# connection conflicts (rapid connect/disconnect cycles) with the unified agent.
log_info "Checking for legacy agents..."

# Legacy Host Agent
# Kill any running legacy agent processes first (even if started manually)
# This prevents WebSocket connection conflicts during installation
pkill -f "pulse-host-agent" 2>/dev/null || true
pkill -f "pulse-docker-agent" 2>/dev/null || true
sleep 1

# Legacy Host Agent - systemd cleanup
if command -v systemctl >/dev/null 2>&1; then
    if systemctl is-active --quiet pulse-host-agent 2>/dev/null || systemctl is-enabled --quiet pulse-host-agent 2>/dev/null; then
    if systemctl is-active --quiet pulse-host-agent 2>/dev/null || systemctl is-enabled --quiet pulse-host-agent 2>/dev/null || [[ -f /etc/systemd/system/pulse-host-agent.service ]]; then
        log_warn "Removing legacy pulse-host-agent..."
        systemctl stop pulse-host-agent 2>/dev/null || true
        systemctl disable pulse-host-agent 2>/dev/null || true
        rm -f /etc/systemd/system/pulse-host-agent.service
        rm -f /usr/local/bin/pulse-host-agent
    fi
    if systemctl is-active --quiet pulse-docker-agent 2>/dev/null || systemctl is-enabled --quiet pulse-docker-agent 2>/dev/null; then
    if systemctl is-active --quiet pulse-docker-agent 2>/dev/null || systemctl is-enabled --quiet pulse-docker-agent 2>/dev/null || [[ -f /etc/systemd/system/pulse-docker-agent.service ]]; then
        log_warn "Removing legacy pulse-docker-agent..."
        systemctl stop pulse-docker-agent 2>/dev/null || true
        systemctl disable pulse-docker-agent 2>/dev/null || true