mirror of
https://github.com/rcourtman/Pulse.git
synced 2026-02-18 00:17:39 +01:00
feat: Add manual Docker update check button. Related to #955
This commit is contained in:
@@ -623,8 +623,51 @@ export class MonitoringAPI {
|
||||
throw new Error((err as Error).message || 'Failed to parse update container response');
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Triggers an immediate update check for all containers on a specific Docker host.
|
||||
*/
|
||||
static async checkDockerUpdates(hostId: string): Promise<{ success: boolean; commandId?: string }> {
|
||||
const url = `${this.baseUrl}/agents/docker/hosts/${encodeURIComponent(hostId)}/check-updates`;
|
||||
|
||||
const response = await apiFetch(url, {
|
||||
method: 'POST',
|
||||
});
|
||||
|
||||
if (!response.ok) {
|
||||
let message = `Failed with status ${response.status}`;
|
||||
try {
|
||||
const text = await response.text();
|
||||
if (text?.trim()) {
|
||||
try {
|
||||
const parsed = JSON.parse(text);
|
||||
if (typeof parsed?.error === 'string' && parsed.error.trim()) {
|
||||
message = parsed.error.trim();
|
||||
}
|
||||
} catch (_jsonErr) {
|
||||
message = text.trim();
|
||||
}
|
||||
}
|
||||
} catch (_err) {
|
||||
// ignore read error
|
||||
}
|
||||
throw new Error(message);
|
||||
}
|
||||
|
||||
const text = await response.text();
|
||||
if (!text?.trim()) {
|
||||
return { success: true };
|
||||
}
|
||||
|
||||
try {
|
||||
return JSON.parse(text) as { success: boolean; commandId?: string };
|
||||
} catch (err) {
|
||||
throw new Error((err as Error).message || 'Failed to parse check updates response');
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
export interface DeleteDockerHostResponse {
|
||||
success?: boolean;
|
||||
hostId?: string;
|
||||
|
||||
@@ -18,6 +18,8 @@ interface DockerFilterProps {
|
||||
onClearHost?: () => void;
|
||||
updateAvailableCount?: number;
|
||||
onUpdateAll?: () => void;
|
||||
onCheckUpdates?: (hostId: string) => void;
|
||||
activeHostId?: string | null;
|
||||
}
|
||||
|
||||
const UpdateAllButton: Component<{ count: number; onUpdate: () => void }> = (props) => {
|
||||
@@ -56,6 +58,50 @@ const UpdateAllButton: Component<{ count: number; onUpdate: () => void }> = (pro
|
||||
);
|
||||
};
|
||||
|
||||
const RefreshButton: Component<{ onRefresh: () => void }> = (props) => {
|
||||
const [loading, setLoading] = createSignal(false);
|
||||
|
||||
const handleClick = async () => {
|
||||
if (loading()) return;
|
||||
setLoading(true);
|
||||
try {
|
||||
await props.onRefresh();
|
||||
} finally {
|
||||
// Keep it loading for a bit to show it happened
|
||||
setTimeout(() => setLoading(false), 1000);
|
||||
}
|
||||
};
|
||||
|
||||
return (
|
||||
<button
|
||||
type="button"
|
||||
onClick={handleClick}
|
||||
disabled={loading()}
|
||||
class={`flex items-center gap-1.5 px-3 py-1 text-xs font-medium rounded-lg transition-all ${loading()
|
||||
? 'bg-gray-100 text-gray-400 dark:bg-gray-800 dark:text-gray-500 cursor-not-allowed'
|
||||
: 'bg-indigo-50 text-indigo-700 dark:bg-indigo-900/40 dark:text-indigo-300 hover:bg-indigo-100 dark:hover:bg-indigo-900/60'
|
||||
}`}
|
||||
title="Force check for container updates on this host"
|
||||
>
|
||||
<svg
|
||||
class={`h-3.5 w-3.5 ${loading() ? 'animate-spin' : ''}`}
|
||||
fill="none"
|
||||
viewBox="0 0 24 24"
|
||||
stroke="currentColor"
|
||||
>
|
||||
<path
|
||||
stroke-linecap="round"
|
||||
stroke-linejoin="round"
|
||||
stroke-width="2"
|
||||
d="M4 4v5h.582m15.356 2A8.001 8.001 0 004.582 9m0 0H9m11 11v-5h-.581m0 0a8.003 8.003 0 01-15.357-2m15.357 2H15"
|
||||
/>
|
||||
</svg>
|
||||
<span>{loading() ? 'Checking...' : 'Check Updates'}</span>
|
||||
</button>
|
||||
);
|
||||
};
|
||||
|
||||
|
||||
export const DockerFilter: Component<DockerFilterProps> = (props) => {
|
||||
const historyManager = createSearchHistoryManager(STORAGE_KEYS.DOCKER_SEARCH_HISTORY);
|
||||
const [searchHistory, setSearchHistory] = createSignal<string[]>([]);
|
||||
@@ -441,6 +487,7 @@ export const DockerFilter: Component<DockerFilterProps> = (props) => {
|
||||
</div>
|
||||
</Show>
|
||||
|
||||
{/* Metrics View Toggle */}
|
||||
{/* Metrics View Toggle */}
|
||||
<MetricsViewToggle />
|
||||
|
||||
@@ -452,7 +499,16 @@ export const DockerFilter: Component<DockerFilterProps> = (props) => {
|
||||
/>
|
||||
</Show>
|
||||
|
||||
|
||||
<Show when={props.onCheckUpdates && props.activeHostId}>
|
||||
<div class="h-5 w-px bg-gray-200 dark:bg-gray-600 hidden sm:block" aria-hidden="true"></div>
|
||||
<RefreshButton
|
||||
onRefresh={() => props.onCheckUpdates!(props.activeHostId!)}
|
||||
/>
|
||||
</Show>
|
||||
|
||||
<Show when={hasActiveFilters()}>
|
||||
|
||||
<div class="h-5 w-px bg-gray-200 dark:bg-gray-600 hidden sm:block" aria-hidden="true"></div>
|
||||
<button
|
||||
type="button"
|
||||
|
||||
@@ -368,6 +368,9 @@ export const DockerHosts: Component<DockerHostsProps> = (props) => {
|
||||
return containers;
|
||||
});
|
||||
|
||||
// Track batch update status: key is hostId:containerId
|
||||
const [batchUpdateState, setBatchUpdateState] = createStore<Record<string, 'updating' | 'queued' | 'error'>>({});
|
||||
|
||||
const handleUpdateAll = async () => {
|
||||
const targets = updateableContainers();
|
||||
if (targets.length === 0) return;
|
||||
@@ -380,6 +383,11 @@ export const DockerHosts: Component<DockerHostsProps> = (props) => {
|
||||
10000,
|
||||
);
|
||||
|
||||
// Mark all as updating
|
||||
targets.forEach(t => {
|
||||
setBatchUpdateState(`${t.hostId}:${t.containerId}`, 'updating');
|
||||
});
|
||||
|
||||
let successCount = 0;
|
||||
let failCount = 0;
|
||||
|
||||
@@ -388,19 +396,19 @@ export const DockerHosts: Component<DockerHostsProps> = (props) => {
|
||||
for (let i = 0; i < targets.length; i += chunkSize) {
|
||||
const chunk = targets.slice(i, i + chunkSize);
|
||||
|
||||
// Update progress toast (if we had a way to update existing toasts, which we might not have in this simple util, but we can emit new ones or just rely on the final report)
|
||||
// For now, let's just process.
|
||||
|
||||
await Promise.all(chunk.map(async (target) => {
|
||||
const key = `${target.hostId}:${target.containerId}`;
|
||||
try {
|
||||
await MonitoringAPI.updateDockerContainer(
|
||||
target.hostId,
|
||||
target.containerId,
|
||||
target.containerName,
|
||||
);
|
||||
setBatchUpdateState(key, 'queued');
|
||||
successCount++;
|
||||
} catch (err) {
|
||||
failCount++;
|
||||
setBatchUpdateState(key, 'error');
|
||||
logger.error(`Failed to trigger update for ${target.containerName}`, err);
|
||||
}
|
||||
}));
|
||||
@@ -408,6 +416,15 @@ export const DockerHosts: Component<DockerHostsProps> = (props) => {
|
||||
|
||||
if (failCount === 0) {
|
||||
showSuccess(`Successfully queued updates for all ${targets.length} containers.`);
|
||||
// Clear success states after delay
|
||||
setTimeout(() => {
|
||||
targets.forEach(t => {
|
||||
const key = `${t.hostId}:${t.containerId}`;
|
||||
if (batchUpdateState[key] === 'queued') {
|
||||
setBatchUpdateState(key, undefined as any);
|
||||
}
|
||||
});
|
||||
}, 5000);
|
||||
} else if (successCount === 0) {
|
||||
showError(`Failed to queue any updates. Check console for details.`);
|
||||
} else {
|
||||
@@ -415,6 +432,15 @@ export const DockerHosts: Component<DockerHostsProps> = (props) => {
|
||||
}
|
||||
};
|
||||
|
||||
const handleCheckUpdates = async (hostId: string) => {
|
||||
try {
|
||||
await MonitoringAPI.checkDockerUpdates(hostId);
|
||||
showSuccess('Update check triggered. The host will refresh container information shortly.');
|
||||
} catch (err) {
|
||||
showError(`Failed to trigger update check: ${err instanceof Error ? err.message : String(err)}`);
|
||||
}
|
||||
};
|
||||
|
||||
const renderFilter = () => (
|
||||
<DockerFilter
|
||||
search={search}
|
||||
@@ -431,9 +457,12 @@ export const DockerHosts: Component<DockerHostsProps> = (props) => {
|
||||
}}
|
||||
updateAvailableCount={updateableContainers().length}
|
||||
onUpdateAll={handleUpdateAll}
|
||||
onCheckUpdates={handleCheckUpdates}
|
||||
activeHostId={selectedHostId()}
|
||||
/>
|
||||
);
|
||||
|
||||
|
||||
return (
|
||||
<div class="space-y-0">
|
||||
<Show when={isLoading()}>
|
||||
@@ -558,6 +587,7 @@ export const DockerHosts: Component<DockerHostsProps> = (props) => {
|
||||
dockerMetadata={dockerMetadata()}
|
||||
dockerHostMetadata={dockerHostMetadata()}
|
||||
onCustomUrlUpdate={handleCustomUrlUpdate}
|
||||
batchUpdateState={batchUpdateState}
|
||||
/>
|
||||
</Show>
|
||||
</Show>
|
||||
|
||||
@@ -75,6 +75,7 @@ interface DockerUnifiedTableProps {
|
||||
dockerMetadata?: Record<string, DockerMetadata>;
|
||||
dockerHostMetadata?: Record<string, DockerHostMetadata>;
|
||||
onCustomUrlUpdate?: (resourceId: string, url: string) => void;
|
||||
batchUpdateState?: Record<string, 'updating' | 'queued' | 'error'>;
|
||||
}
|
||||
|
||||
type SortKey =
|
||||
@@ -802,6 +803,7 @@ const DockerContainerRow: Component<{
|
||||
resourceIndentClass?: string;
|
||||
aiEnabled?: boolean;
|
||||
initialNotes?: string[];
|
||||
batchUpdateState?: Record<string, 'updating' | 'queued' | 'error'>;
|
||||
}> = (props) => {
|
||||
const { host, container } = props.row;
|
||||
const runtimeInfo = resolveHostRuntime(host);
|
||||
@@ -822,6 +824,12 @@ const DockerContainerRow: Component<{
|
||||
});
|
||||
let urlInputRef: HTMLInputElement | undefined;
|
||||
|
||||
const batchState = createMemo(() => {
|
||||
if (!props.batchUpdateState) return undefined;
|
||||
const key = `${host.id}:${container.id}`;
|
||||
return props.batchUpdateState[key];
|
||||
});
|
||||
|
||||
// Annotations and AI state - use props passed from parent to avoid per-row API calls
|
||||
const aiEnabled = () => props.aiEnabled ?? false;
|
||||
// Check if this container is in AI context
|
||||
@@ -1310,6 +1318,7 @@ const DockerContainerRow: Component<{
|
||||
containerId={container.id}
|
||||
containerName={container.name}
|
||||
compact
|
||||
externalState={batchState()}
|
||||
/>
|
||||
</div>
|
||||
</div>
|
||||
@@ -2786,6 +2795,7 @@ const DockerUnifiedTable: Component<DockerUnifiedTableProps> = (props) => {
|
||||
resourceIndentClass={grouped ? GROUPED_RESOURCE_INDENT : UNGROUPED_RESOURCE_INDENT}
|
||||
aiEnabled={aiEnabled()}
|
||||
initialNotes={metadata?.notes}
|
||||
batchUpdateState={props.batchUpdateState}
|
||||
/>
|
||||
|
||||
) : (
|
||||
|
||||
@@ -1,7 +1,15 @@
|
||||
import { Component, Show, createSignal } from 'solid-js';
|
||||
import { Component, Show, createSignal, createEffect, createMemo } from 'solid-js';
|
||||
import type { DockerContainerUpdateStatus } from '@/types/api';
|
||||
import { showTooltip, hideTooltip } from '@/components/shared/Tooltip';
|
||||
import { MonitoringAPI } from '@/api/monitoring';
|
||||
import {
|
||||
getContainerUpdateState,
|
||||
markContainerQueued,
|
||||
markContainerUpdateSuccess,
|
||||
markContainerUpdateError,
|
||||
clearContainerUpdateState,
|
||||
updateStates
|
||||
} from '@/stores/containerUpdates';
|
||||
|
||||
|
||||
interface UpdateBadgeProps {
|
||||
@@ -115,48 +123,93 @@ interface UpdateButtonProps {
|
||||
containerName: string;
|
||||
compact?: boolean;
|
||||
onUpdateTriggered?: () => void;
|
||||
externalState?: 'updating' | 'queued' | 'error';
|
||||
}
|
||||
|
||||
type UpdateState = 'idle' | 'confirming' | 'updating' | 'success' | 'error';
|
||||
|
||||
/**
|
||||
* UpdateButton displays a clickable button to trigger container updates.
|
||||
* Includes confirmation, loading states, and error handling.
|
||||
* Uses a persistent store to maintain state across WebSocket refreshes.
|
||||
*/
|
||||
export const UpdateButton: Component<UpdateButtonProps> = (props) => {
|
||||
const [state, setState] = createSignal<UpdateState>('idle');
|
||||
const [localState, setLocalState] = createSignal<'idle' | 'confirming'>('idle');
|
||||
const [errorMessage, setErrorMessage] = createSignal<string>('');
|
||||
|
||||
const hasUpdate = () => props.updateStatus?.updateAvailable === true;
|
||||
// Get persistent state from store - this survives WebSocket updates
|
||||
const storeState = createMemo(() => {
|
||||
// Access updateStates() to create reactive dependency
|
||||
updateStates();
|
||||
return getContainerUpdateState(props.hostId, props.containerId);
|
||||
});
|
||||
|
||||
// Derived state: check store first, then external prop, then local state
|
||||
const currentState = (): UpdateState => {
|
||||
const stored = storeState();
|
||||
if (stored) {
|
||||
switch (stored.state) {
|
||||
case 'queued':
|
||||
case 'updating':
|
||||
return 'updating';
|
||||
case 'success':
|
||||
return 'success';
|
||||
case 'error':
|
||||
return 'error';
|
||||
}
|
||||
}
|
||||
if (props.externalState === 'updating') return 'updating';
|
||||
if (props.externalState === 'queued') return 'updating';
|
||||
if (props.externalState === 'error') return 'error';
|
||||
return localState();
|
||||
};
|
||||
|
||||
// Watch for update completion - when updateAvailable becomes false, the update succeeded
|
||||
createEffect(() => {
|
||||
const stored = storeState();
|
||||
if (stored && (stored.state === 'queued' || stored.state === 'updating')) {
|
||||
// If the container no longer has an update available, the update succeeded!
|
||||
if (props.updateStatus?.updateAvailable === false) {
|
||||
markContainerUpdateSuccess(props.hostId, props.containerId);
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
const hasUpdate = () => props.updateStatus?.updateAvailable === true || currentState() !== 'idle';
|
||||
|
||||
const handleClick = async (e: MouseEvent) => {
|
||||
e.stopPropagation();
|
||||
e.preventDefault();
|
||||
|
||||
if (state() === 'idle') {
|
||||
const state = currentState();
|
||||
|
||||
// Prevent clicking if already updating
|
||||
if (state === 'updating' || state === 'success' || state === 'error') return;
|
||||
|
||||
if (state === 'idle') {
|
||||
// Show confirmation
|
||||
setState('confirming');
|
||||
setLocalState('confirming');
|
||||
return;
|
||||
}
|
||||
|
||||
if (state() === 'confirming') {
|
||||
if (state === 'confirming') {
|
||||
// User confirmed, trigger update
|
||||
setState('updating');
|
||||
// Immediately set store state so it persists
|
||||
markContainerQueued(props.hostId, props.containerId);
|
||||
setLocalState('idle'); // Reset local state
|
||||
|
||||
try {
|
||||
await MonitoringAPI.updateDockerContainer(
|
||||
props.hostId,
|
||||
props.containerId,
|
||||
props.containerName
|
||||
);
|
||||
setState('success');
|
||||
// Command queued successfully - store already has 'queued' state
|
||||
// The effect above will detect when updateAvailable becomes false
|
||||
props.onUpdateTriggered?.();
|
||||
// Reset after 3 seconds
|
||||
setTimeout(() => setState('idle'), 3000);
|
||||
} catch (err) {
|
||||
setErrorMessage((err as Error).message || 'Failed to trigger update');
|
||||
setState('error');
|
||||
// Reset after 5 seconds
|
||||
setTimeout(() => setState('idle'), 5000);
|
||||
const message = (err as Error).message || 'Failed to trigger update';
|
||||
setErrorMessage(message);
|
||||
markContainerUpdateError(props.hostId, props.containerId, message);
|
||||
}
|
||||
}
|
||||
};
|
||||
@@ -164,12 +217,14 @@ export const UpdateButton: Component<UpdateButtonProps> = (props) => {
|
||||
const handleCancel = (e: MouseEvent) => {
|
||||
e.stopPropagation();
|
||||
e.preventDefault();
|
||||
setState('idle');
|
||||
setLocalState('idle');
|
||||
// Also clear any store state if canceling
|
||||
clearContainerUpdateState(props.hostId, props.containerId);
|
||||
};
|
||||
|
||||
const getButtonClass = () => {
|
||||
const base = 'inline-flex items-center gap-1 rounded-full px-2 py-0.5 text-xs font-medium transition-all';
|
||||
switch (state()) {
|
||||
switch (currentState()) {
|
||||
case 'confirming':
|
||||
return `${base} bg-amber-100 text-amber-700 dark:bg-amber-900/40 dark:text-amber-300 cursor-pointer hover:bg-amber-200 dark:hover:bg-amber-900/60`;
|
||||
case 'updating':
|
||||
@@ -184,15 +239,23 @@ export const UpdateButton: Component<UpdateButtonProps> = (props) => {
|
||||
};
|
||||
|
||||
const getTooltip = () => {
|
||||
switch (state()) {
|
||||
const stored = storeState();
|
||||
switch (currentState()) {
|
||||
case 'confirming':
|
||||
return 'Click again to confirm update';
|
||||
case 'updating':
|
||||
return 'Update in progress...';
|
||||
case 'updating': {
|
||||
const elapsed = stored ? Math.round((Date.now() - stored.startedAt) / 1000) : 0;
|
||||
// Show the current step if available from backend
|
||||
const step = stored?.message || 'Processing...';
|
||||
if (elapsed > 60) {
|
||||
return `${step} (${Math.floor(elapsed / 60)}m ${elapsed % 60}s)`;
|
||||
}
|
||||
return `${step} (${elapsed}s)`;
|
||||
}
|
||||
case 'success':
|
||||
return 'Update command sent! Container will restart shortly.';
|
||||
return '✓ Update completed successfully!';
|
||||
case 'error':
|
||||
return `Error: ${errorMessage()}`;
|
||||
return `✗ Update failed: ${stored?.message || errorMessage() || 'Unknown error'}`;
|
||||
default:
|
||||
if (!props.updateStatus) return 'Update container';
|
||||
const current = props.updateStatus.currentDigest?.slice(0, 12) || 'unknown';
|
||||
@@ -209,7 +272,7 @@ export const UpdateButton: Component<UpdateButtonProps> = (props) => {
|
||||
class={getButtonClass()}
|
||||
onClick={handleClick}
|
||||
onMouseDown={(e) => { e.stopPropagation(); }}
|
||||
disabled={state() === 'updating'}
|
||||
disabled={currentState() === 'updating'}
|
||||
data-prevent-toggle
|
||||
onMouseEnter={(e) => {
|
||||
const rect = e.currentTarget.getBoundingClientRect();
|
||||
@@ -220,26 +283,26 @@ export const UpdateButton: Component<UpdateButtonProps> = (props) => {
|
||||
}}
|
||||
onMouseLeave={() => hideTooltip()}
|
||||
>
|
||||
<Show when={state() === 'updating'}>
|
||||
<Show when={currentState() === 'updating'}>
|
||||
{/* Spinner */}
|
||||
<svg class="w-3 h-3 animate-spin" fill="none" viewBox="0 0 24 24">
|
||||
<circle class="opacity-25" cx="12" cy="12" r="10" stroke="currentColor" stroke-width="4" />
|
||||
<path class="opacity-75" fill="currentColor" d="M4 12a8 8 0 018-8V0C5.373 0 0 5.373 0 12h4zm2 5.291A7.962 7.962 0 014 12H0c0 3.042 1.135 5.824 3 7.938l3-2.647z" />
|
||||
</svg>
|
||||
</Show>
|
||||
<Show when={state() === 'success'}>
|
||||
<Show when={currentState() === 'success'}>
|
||||
{/* Check icon */}
|
||||
<svg class="w-3 h-3" fill="none" viewBox="0 0 24 24" stroke="currentColor" stroke-width="2">
|
||||
<path stroke-linecap="round" stroke-linejoin="round" d="M5 13l4 4L19 7" />
|
||||
</svg>
|
||||
</Show>
|
||||
<Show when={state() === 'error'}>
|
||||
<Show when={currentState() === 'error'}>
|
||||
{/* X icon */}
|
||||
<svg class="w-3 h-3" fill="none" viewBox="0 0 24 24" stroke="currentColor" stroke-width="2">
|
||||
<path stroke-linecap="round" stroke-linejoin="round" d="M6 18L18 6M6 6l12 12" />
|
||||
</svg>
|
||||
</Show>
|
||||
<Show when={state() === 'idle' || state() === 'confirming'}>
|
||||
<Show when={currentState() === 'idle' || currentState() === 'confirming'}>
|
||||
{/* Upload/update icon */}
|
||||
<svg class="w-3 h-3" fill="none" viewBox="0 0 24 24" stroke="currentColor" stroke-width="2">
|
||||
<path stroke-linecap="round" stroke-linejoin="round" d="M4 16v1a3 3 0 003 3h10a3 3 0 003-3v-1m-4-8l-4-4m0 0L8 8m4-4v12" />
|
||||
@@ -247,14 +310,14 @@ export const UpdateButton: Component<UpdateButtonProps> = (props) => {
|
||||
</Show>
|
||||
<Show when={!props.compact}>
|
||||
<span>
|
||||
{state() === 'confirming' ? 'Confirm?' :
|
||||
state() === 'updating' ? 'Updating...' :
|
||||
state() === 'success' ? 'Queued!' :
|
||||
state() === 'error' ? 'Failed' : 'Update'}
|
||||
{currentState() === 'confirming' ? 'Confirm?' :
|
||||
currentState() === 'updating' ? 'Updating...' :
|
||||
currentState() === 'success' ? 'Queued!' :
|
||||
currentState() === 'error' ? 'Failed' : 'Update'}
|
||||
</span>
|
||||
</Show>
|
||||
</button>
|
||||
<Show when={state() === 'confirming'}>
|
||||
<Show when={currentState() === 'confirming'}>
|
||||
<button
|
||||
type="button"
|
||||
class="inline-flex items-center justify-center w-5 h-5 rounded-full bg-gray-200 text-gray-600 dark:bg-gray-700 dark:text-gray-300 hover:bg-gray-300 dark:hover:bg-gray-600 transition-colors"
|
||||
|
||||
184
frontend-modern/src/stores/containerUpdates.ts
Normal file
184
frontend-modern/src/stores/containerUpdates.ts
Normal file
@@ -0,0 +1,184 @@
|
||||
/**
|
||||
* Container Update Store
|
||||
*
|
||||
* Tracks the state of container updates across WebSocket refreshes.
|
||||
* This ensures the UI shows consistent "updating" state until the update completes.
|
||||
*/
|
||||
import { createSignal } from 'solid-js';
|
||||
import type { DockerHostCommand } from '@/types/api';
|
||||
|
||||
export type ContainerUpdateState = 'queued' | 'updating' | 'success' | 'error';
|
||||
|
||||
interface UpdateEntry {
|
||||
state: ContainerUpdateState;
|
||||
startedAt: number;
|
||||
message?: string;
|
||||
commandId?: string;
|
||||
// Real-time progress from backend
|
||||
backendStatus?: string;
|
||||
acknowledgedAt?: number;
|
||||
}
|
||||
|
||||
// Global store for container update states
|
||||
// Key format: "hostId:containerId"
|
||||
const [updateStates, setUpdateStates] = createSignal<Record<string, UpdateEntry>>({});
|
||||
|
||||
/**
|
||||
* Get the update state for a specific container
|
||||
*/
|
||||
export function getContainerUpdateState(hostId: string, containerId: string): UpdateEntry | undefined {
|
||||
const key = `${hostId}:${containerId}`;
|
||||
return updateStates()[key];
|
||||
}
|
||||
|
||||
/**
|
||||
* Mark a container as updating
|
||||
*/
|
||||
export function markContainerUpdating(hostId: string, containerId: string, commandId?: string): void {
|
||||
const key = `${hostId}:${containerId}`;
|
||||
setUpdateStates(prev => ({
|
||||
...prev,
|
||||
[key]: {
|
||||
state: 'updating',
|
||||
startedAt: Date.now(),
|
||||
commandId,
|
||||
}
|
||||
}));
|
||||
}
|
||||
|
||||
/**
|
||||
* Mark a container update as queued (command sent, waiting for agent)
|
||||
*/
|
||||
export function markContainerQueued(hostId: string, containerId: string, commandId?: string): void {
|
||||
const key = `${hostId}:${containerId}`;
|
||||
setUpdateStates(prev => ({
|
||||
...prev,
|
||||
[key]: {
|
||||
state: 'queued',
|
||||
startedAt: Date.now(),
|
||||
commandId,
|
||||
}
|
||||
}));
|
||||
}
|
||||
|
||||
/**
|
||||
* Sync container update state with backend command status from WebSocket.
|
||||
* This provides real-time progress tracking.
|
||||
*/
|
||||
export function syncWithHostCommand(hostId: string, command: DockerHostCommand | undefined): void {
|
||||
if (!command) return;
|
||||
|
||||
// Only update if we have a matching entry or if the command is for update_container
|
||||
if (command.type !== 'update_container') return;
|
||||
|
||||
const containerId = command.id.split(':')[1] || ''; // Extract containerId if encoded in commandID
|
||||
const key = `${hostId}:${containerId}`;
|
||||
|
||||
// Check if we're tracking this update
|
||||
const existing = updateStates()[key];
|
||||
if (!existing) return;
|
||||
|
||||
// Update based on backend status
|
||||
if (command.status === 'completed' || command.completedAt) {
|
||||
markContainerUpdateSuccess(hostId, containerId);
|
||||
} else if (command.status === 'failed' || command.failedAt) {
|
||||
markContainerUpdateError(hostId, containerId, command.failureReason || command.message);
|
||||
} else if (command.status === 'in_progress' || command.acknowledgedAt) {
|
||||
// Agent is actively working on the update - show the current step
|
||||
setUpdateStates(prev => ({
|
||||
...prev,
|
||||
[key]: {
|
||||
...prev[key],
|
||||
state: 'updating',
|
||||
backendStatus: command.status,
|
||||
acknowledgedAt: command.acknowledgedAt,
|
||||
message: command.message, // This contains the current step (e.g., "Pulling image...")
|
||||
}
|
||||
}));
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Mark a container update as successful
|
||||
*/
|
||||
export function markContainerUpdateSuccess(hostId: string, containerId: string): void {
|
||||
const key = `${hostId}:${containerId}`;
|
||||
setUpdateStates(prev => ({
|
||||
...prev,
|
||||
[key]: {
|
||||
state: 'success',
|
||||
startedAt: prev[key]?.startedAt || Date.now(),
|
||||
}
|
||||
}));
|
||||
|
||||
// Auto-clear success state after 5 seconds
|
||||
setTimeout(() => {
|
||||
clearContainerUpdateState(hostId, containerId);
|
||||
}, 5000);
|
||||
}
|
||||
|
||||
/**
|
||||
* Mark a container update as failed
|
||||
*/
|
||||
export function markContainerUpdateError(hostId: string, containerId: string, message?: string): void {
|
||||
const key = `${hostId}:${containerId}`;
|
||||
setUpdateStates(prev => ({
|
||||
...prev,
|
||||
[key]: {
|
||||
state: 'error',
|
||||
startedAt: prev[key]?.startedAt || Date.now(),
|
||||
message,
|
||||
}
|
||||
}));
|
||||
|
||||
// Auto-clear error state after 10 seconds
|
||||
setTimeout(() => {
|
||||
clearContainerUpdateState(hostId, containerId);
|
||||
}, 10000);
|
||||
}
|
||||
|
||||
/**
|
||||
* Clear the update state for a container
|
||||
*/
|
||||
export function clearContainerUpdateState(hostId: string, containerId: string): void {
|
||||
const key = `${hostId}:${containerId}`;
|
||||
setUpdateStates(prev => {
|
||||
const next = { ...prev };
|
||||
delete next[key];
|
||||
return next;
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if update state is stale (older than 5 minutes) and should be auto-cleared
|
||||
*/
|
||||
export function cleanupStaleUpdates(): void {
|
||||
const now = Date.now();
|
||||
const staleThreshold = 5 * 60 * 1000; // 5 minutes
|
||||
|
||||
setUpdateStates(prev => {
|
||||
const next: Record<string, UpdateEntry> = {};
|
||||
for (const [key, entry] of Object.entries(prev)) {
|
||||
if (now - entry.startedAt < staleThreshold) {
|
||||
next[key] = entry;
|
||||
}
|
||||
}
|
||||
return next;
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Get all current update states (useful for debugging)
|
||||
*/
|
||||
export function getAllUpdateStates(): Record<string, UpdateEntry> {
|
||||
return updateStates();
|
||||
}
|
||||
|
||||
/**
|
||||
* Reactive accessor for all update states
|
||||
*/
|
||||
export { updateStates };
|
||||
|
||||
// Cleanup stale entries every minute
|
||||
setInterval(cleanupStaleUpdates, 60000);
|
||||
|
||||
@@ -22,6 +22,7 @@ import { eventBus } from './events';
|
||||
import { ALERTS_ACTIVATION_EVENT, isAlertsActivationEnabled } from '@/utils/alertsActivation';
|
||||
import { pruneMetricsByPrefix } from './metricsHistory';
|
||||
import { getMetricKeyPrefix } from '@/utils/metricsKeys';
|
||||
import { syncWithHostCommand } from './containerUpdates';
|
||||
|
||||
// Type-safe WebSocket store
|
||||
export function createWebSocketStore(url: string) {
|
||||
@@ -581,6 +582,15 @@ export function createWebSocketStore(url: string) {
|
||||
setState('hosts', reconcile(processedHosts, { key: 'id' }));
|
||||
}
|
||||
}
|
||||
|
||||
// Sync container update states with host command statuses for real-time progress
|
||||
if (shouldApplyDockerHosts && processedDockerHosts !== null) {
|
||||
processedDockerHosts.forEach((host: DockerHost) => {
|
||||
if (host.command) {
|
||||
syncWithHostCommand(host.id, host.command);
|
||||
}
|
||||
});
|
||||
}
|
||||
if (message.data.removedDockerHosts !== undefined) {
|
||||
const removed = Array.isArray(message.data.removedDockerHosts)
|
||||
? (message.data.removedDockerHosts as RemovedDockerHost[])
|
||||
|
||||
@@ -145,6 +145,12 @@ func (h *DockerAgentHandlers) HandleDockerHostActions(w http.ResponseWriter, r *
|
||||
h.HandleSetCustomDisplayName(w, r)
|
||||
return
|
||||
}
|
||||
|
||||
// Check if this is a check updates request
|
||||
if strings.HasSuffix(r.URL.Path, "/check-updates") && r.Method == http.MethodPost {
|
||||
h.HandleCheckUpdates(w, r)
|
||||
return
|
||||
}
|
||||
|
||||
// Otherwise, handle as delete/hide request
|
||||
if r.Method == http.MethodDelete {
|
||||
@@ -514,3 +520,40 @@ func (h *DockerAgentHandlers) HandleContainerUpdate(w http.ResponseWriter, r *ht
|
||||
}
|
||||
}
|
||||
|
||||
// HandleCheckUpdates triggers an immediate update check for all containers on a Docker host.
|
||||
// POST /api/agents/docker/hosts/{hostId}/check-updates
|
||||
func (h *DockerAgentHandlers) HandleCheckUpdates(w http.ResponseWriter, r *http.Request) {
|
||||
if r.Method != http.MethodPost {
|
||||
writeErrorResponse(w, http.StatusMethodNotAllowed, "method_not_allowed", "Only POST is allowed", nil)
|
||||
return
|
||||
}
|
||||
|
||||
trimmedPath := strings.TrimPrefix(r.URL.Path, "/api/agents/docker/hosts/")
|
||||
trimmedPath = strings.TrimSuffix(trimmedPath, "/check-updates")
|
||||
hostID := strings.TrimSpace(trimmedPath)
|
||||
if hostID == "" {
|
||||
writeErrorResponse(w, http.StatusBadRequest, "missing_host_id", "Docker host ID is required", nil)
|
||||
return
|
||||
}
|
||||
|
||||
// Queue the check updates command
|
||||
commandStatus, err := h.monitor.QueueDockerCheckUpdatesCommand(hostID)
|
||||
if err != nil {
|
||||
writeErrorResponse(w, http.StatusBadRequest, "check_updates_command_failed", err.Error(), nil)
|
||||
return
|
||||
}
|
||||
|
||||
go h.wsHub.BroadcastState(h.monitor.GetState().ToFrontend())
|
||||
|
||||
if err := utils.WriteJSONResponse(w, map[string]any{
|
||||
"success": true,
|
||||
"commandId": commandStatus.ID,
|
||||
"hostId": hostID,
|
||||
"message": "Check for updates command queued",
|
||||
"note": "The agent will clear its registry cache and check for updates on the next report cycle",
|
||||
}); err != nil {
|
||||
log.Error().Err(err).Msg("Failed to serialize check updates response")
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
@@ -182,26 +182,63 @@ func (h *UpdateDetectionHandlers) HandleTriggerInfraUpdateCheck(w http.ResponseW
|
||||
return
|
||||
}
|
||||
|
||||
// For now, return a placeholder response - the actual check will be performed
|
||||
// by agents on their next cycle
|
||||
response := map[string]any{
|
||||
"success": true,
|
||||
"message": "Update check will be performed on next agent report cycle",
|
||||
"note": "Docker agents check registries periodically (every 6 hours by default)",
|
||||
}
|
||||
|
||||
// Handle host-level check
|
||||
if req.HostID != "" {
|
||||
response["hostId"] = req.HostID
|
||||
}
|
||||
if req.ResourceID != "" {
|
||||
response["resourceId"] = req.ResourceID
|
||||
commandStatus, err := h.monitor.QueueDockerCheckUpdatesCommand(req.HostID)
|
||||
if err != nil {
|
||||
writeErrorResponse(w, http.StatusBadRequest, "check_updates_failed", err.Error(), nil)
|
||||
return
|
||||
}
|
||||
|
||||
if err := utils.WriteJSONResponse(w, map[string]any{
|
||||
"success": true,
|
||||
"commandId": commandStatus.ID,
|
||||
"hostId": req.HostID,
|
||||
"message": "Update check command queued for host",
|
||||
}); err != nil {
|
||||
log.Error().Err(err).Msg("Failed to serialize check response")
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
if err := utils.WriteJSONResponse(w, response); err != nil {
|
||||
log.Error().Err(err).Msg("Failed to serialize check response")
|
||||
// Handle resource-level check (currently we just check the whole host)
|
||||
if req.ResourceID != "" {
|
||||
// Try to find which host this resource belongs to
|
||||
updates := h.collectDockerUpdates("")
|
||||
var hostID string
|
||||
for _, update := range updates {
|
||||
if update.ContainerID == req.ResourceID || ("docker:"+update.HostID+"/"+update.ContainerID) == req.ResourceID {
|
||||
hostID = update.HostID
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if hostID == "" {
|
||||
writeErrorResponse(w, http.StatusNotFound, "not_found", "Resource not found or has no update status", nil)
|
||||
return
|
||||
}
|
||||
|
||||
commandStatus, err := h.monitor.QueueDockerCheckUpdatesCommand(hostID)
|
||||
if err != nil {
|
||||
writeErrorResponse(w, http.StatusBadRequest, "check_updates_failed", err.Error(), nil)
|
||||
return
|
||||
}
|
||||
|
||||
if err := utils.WriteJSONResponse(w, map[string]any{
|
||||
"success": true,
|
||||
"commandId": commandStatus.ID,
|
||||
"hostId": hostID,
|
||||
"message": "Update check command queued for host",
|
||||
}); err != nil {
|
||||
log.Error().Err(err).Msg("Failed to serialize check response")
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
writeErrorResponse(w, http.StatusBadRequest, "missing_params", "Either hostId or resourceId is required", nil)
|
||||
}
|
||||
|
||||
|
||||
// HandleGetInfraUpdatesForHost returns all updates for a specific host.
|
||||
// GET /api/infra-updates/host/{hostId}
|
||||
func (h *UpdateDetectionHandlers) HandleGetInfraUpdatesForHost(w http.ResponseWriter, r *http.Request, hostID string) {
|
||||
|
||||
@@ -530,6 +530,10 @@ func (a *Agent) Run(ctx context.Context) error {
|
||||
updateTimer := newTimerFn(initialDelay)
|
||||
defer stopTimer(updateTimer)
|
||||
|
||||
// Periodic cleanup of orphaned backups (every 15 minutes)
|
||||
cleanupTicker := newTickerFn(15 * time.Minute)
|
||||
defer cleanupTicker.Stop()
|
||||
|
||||
// Perform cleanup of orphaned backup containers on startup
|
||||
go a.cleanupOrphanedBackups(ctx)
|
||||
|
||||
@@ -559,6 +563,8 @@ func (a *Agent) Run(ctx context.Context) error {
|
||||
nextDelay = updateInterval
|
||||
}
|
||||
updateTimer.Reset(nextDelay)
|
||||
case <-cleanupTicker.C:
|
||||
go a.cleanupOrphanedBackups(ctx)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -730,12 +736,37 @@ func (a *Agent) handleCommand(ctx context.Context, target TargetConfig, command
|
||||
return a.handleStopCommand(ctx, target, command)
|
||||
case agentsdocker.CommandTypeUpdateContainer:
|
||||
return a.handleUpdateContainerCommand(ctx, target, command)
|
||||
case agentsdocker.CommandTypeCheckUpdates:
|
||||
return a.handleCheckUpdatesCommand(ctx, target, command)
|
||||
default:
|
||||
a.logger.Warn().Str("command", command.Type).Msg("Received unsupported control command")
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// handleCheckUpdatesCommand services a "check_updates" control command from
// Pulse: it clears the registry checker's cache so stale results are not
// reused, acknowledges the command, and kicks off an immediate collection
// cycle so fresh update information is reported without waiting for the
// next scheduled report.
//
// Returns an error only if the acknowledgement cannot be sent back to Pulse.
func (a *Agent) handleCheckUpdatesCommand(ctx context.Context, target TargetConfig, command agentsdocker.Command) error {
	a.logger.Info().Str("commandID", command.ID).Msg("Received check updates command from Pulse")

	// registryChecker may be nil when registry checks are disabled/unconfigured.
	if a.registryChecker != nil {
		a.registryChecker.ForceCheck()
	}

	// Acknowledge with a final "completed" status: the cache clear itself is
	// done; the actual registry lookups happen during the collection cycle
	// triggered below.
	if err := a.sendCommandAck(ctx, target, command.ID, agentsdocker.CommandStatusCompleted, "Registry cache cleared; checking for updates on next report cycle"); err != nil {
		return fmt.Errorf("send check updates acknowledgement: %w", err)
	}

	// Trigger an immediate collection cycle to report updates
	go func() {
		// Small delay to ensure the ack response completes
		sleepFn(1 * time.Second)
		a.collectOnce(ctx)
	}()

	return nil
}
|
||||
|
||||
|
||||
func (a *Agent) handleStopCommand(ctx context.Context, target TargetConfig, command agentsdocker.Command) error {
|
||||
a.logger.Info().Str("commandID", command.ID).Msg("Received stop command from Pulse")
|
||||
|
||||
@@ -936,12 +967,29 @@ func readMachineID() (string, error) {
|
||||
for _, path := range machineIDPaths {
|
||||
data, err := osReadFileFn(path)
|
||||
if err == nil {
|
||||
return strings.TrimSpace(string(data)), nil
|
||||
id := strings.TrimSpace(string(data))
|
||||
// Format as UUID if it's a 32-char hex string (like machine-id typically is),
|
||||
// to match the behavior of the host agent.
|
||||
if len(id) == 32 && isHexString(id) {
|
||||
return fmt.Sprintf("%s-%s-%s-%s-%s",
|
||||
id[0:8], id[8:12], id[12:16],
|
||||
id[16:20], id[20:32]), nil
|
||||
}
|
||||
return id, nil
|
||||
}
|
||||
}
|
||||
return "", errors.New("machine-id not found")
|
||||
}
|
||||
|
||||
// isHexString reports whether s consists entirely of ASCII hexadecimal
// digits (0-9, a-f, A-F). An empty string vacuously satisfies this;
// callers (e.g. readMachineID) gate on length before calling.
func isHexString(s string) bool {
	isHexDigit := func(r rune) bool {
		switch {
		case r >= '0' && r <= '9':
			return true
		case r >= 'a' && r <= 'f':
			return true
		case r >= 'A' && r <= 'F':
			return true
		default:
			return false
		}
	}
	// True exactly when no rune fails the hex-digit test.
	return strings.IndexFunc(s, func(r rune) bool { return !isHexDigit(r) }) == -1
}
|
||||
|
||||
func readSystemUptime() int64 {
|
||||
seconds, err := readProcUptime()
|
||||
if err != nil {
|
||||
|
||||
@@ -9,7 +9,7 @@ import (
|
||||
)
|
||||
|
||||
// cleanupOrphanedBackups searches for and removes any Pulse backup containers
|
||||
// (created during updates) that are older than 1 hour.
|
||||
// (created during updates) that are older than 15 minutes.
|
||||
func (a *Agent) cleanupOrphanedBackups(ctx context.Context) {
|
||||
a.logger.Debug().Msg("Checking for orphaned backup containers")
|
||||
|
||||
@@ -37,12 +37,28 @@ func (a *Agent) cleanupOrphanedBackups(ctx context.Context) {
|
||||
continue
|
||||
}
|
||||
|
||||
// Check age
|
||||
created := time.Unix(c.Created, 0)
|
||||
if time.Since(created) > 1*time.Hour {
|
||||
// Check age based on the timestamp in the name, not the container's creation date
|
||||
// (Renaming a container does not change its creation date)
|
||||
parts := strings.Split(c.Names[0], "_pulse_backup_")
|
||||
if len(parts) < 2 {
|
||||
continue
|
||||
}
|
||||
|
||||
timestampStr := parts[len(parts)-1]
|
||||
backupTime, err := time.Parse("20060102_150405", timestampStr)
|
||||
if err != nil {
|
||||
// If we can't parse the timestamp, fall back to creation date as a safety
|
||||
created := time.Unix(c.Created, 0)
|
||||
if time.Since(created) < 15*time.Minute {
|
||||
continue
|
||||
}
|
||||
backupTime = created
|
||||
}
|
||||
|
||||
if time.Since(backupTime) > 15*time.Minute {
|
||||
a.logger.Info().
|
||||
Str("container", c.Names[0]).
|
||||
Time("created", created).
|
||||
Time("backupTime", backupTime).
|
||||
Msg("Removing orphaned backup container")
|
||||
|
||||
if err := a.docker.ContainerRemove(ctx, c.ID, container.RemoveOptions{Force: true}); err != nil {
|
||||
|
||||
@@ -55,10 +55,19 @@ func (a *Agent) buildReport(ctx context.Context) (agentsdocker.Report, error) {
|
||||
|
||||
agentID := a.cfg.AgentID
|
||||
if agentID == "" {
|
||||
// In unified mode, prefer machineID (which matches what hostagent uses)
|
||||
// over daemonID to ensure a single agent entry in the backend.
|
||||
// For standalone mode, we prefer daemonID for backward compatibility.
|
||||
if a.cfg.AgentType == "unified" {
|
||||
agentID = a.machineID
|
||||
}
|
||||
|
||||
// Use cached daemon ID from init rather than info.ID from current call.
|
||||
// Podman can return different/empty IDs across calls, causing token
|
||||
// binding conflicts on the server.
|
||||
agentID = a.daemonID
|
||||
if agentID == "" {
|
||||
agentID = a.daemonID
|
||||
}
|
||||
}
|
||||
if agentID == "" {
|
||||
agentID = a.machineID
|
||||
@@ -172,6 +181,18 @@ func (a *Agent) collectContainers(ctx context.Context) ([]agentsdocker.Container
|
||||
}
|
||||
}
|
||||
|
||||
// Skip backup containers created during updates - they're temporary
|
||||
isBackup := false
|
||||
for _, name := range summary.Names {
|
||||
if strings.Contains(name, "_pulse_backup_") {
|
||||
isBackup = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if isBackup {
|
||||
continue
|
||||
}
|
||||
|
||||
active[summary.ID] = struct{}{}
|
||||
|
||||
container, err := a.collectContainer(ctx, summary)
|
||||
@@ -360,19 +381,46 @@ func (a *Agent) collectContainer(ctx context.Context, summary containertypes.Sum
|
||||
// We also get the architecture details to correctly resolve manifest lists from the registry.
|
||||
digestForComparison, arch, os, variant := a.getImageRepoDigest(containerCtx, summary.ImageID, summary.Image)
|
||||
|
||||
if digestForComparison == "" {
|
||||
// Fall back to ImageID if we can't get RepoDigest (shouldn't compare as equal)
|
||||
digestForComparison = summary.ImageID
|
||||
var imageToCheck string
|
||||
// Always prefer the image name from inspect config as it's the authoritative source
|
||||
// and avoids issues with short IDs or digests in summary.
|
||||
// HOWEVER, if the config image IS a digest (starts with sha256:), fall back to container.Image
|
||||
// which usually contains the human-readable tag/name.
|
||||
imageToCheck = container.Image
|
||||
if inspect.Config != nil && inspect.Config.Image != "" {
|
||||
if !strings.HasPrefix(inspect.Config.Image, "sha256:") {
|
||||
imageToCheck = inspect.Config.Image
|
||||
}
|
||||
}
|
||||
|
||||
result := a.registryChecker.CheckImageUpdate(ctx, container.Image, digestForComparison, arch, os, variant)
|
||||
if result != nil {
|
||||
|
||||
// Additional safety: if imageToCheck is still a SHA, we can't check it
|
||||
if strings.HasPrefix(imageToCheck, "sha256:") {
|
||||
container.UpdateStatus = &agentsdocker.UpdateStatus{
|
||||
UpdateAvailable: result.UpdateAvailable,
|
||||
CurrentDigest: result.CurrentDigest,
|
||||
LatestDigest: result.LatestDigest,
|
||||
LastChecked: result.CheckedAt,
|
||||
Error: result.Error,
|
||||
UpdateAvailable: false,
|
||||
CurrentDigest: digestForComparison,
|
||||
LastChecked: time.Now(),
|
||||
Error: "digest-pinned image",
|
||||
}
|
||||
// Skip to end of update check block - don't call registry
|
||||
} else {
|
||||
a.logger.Debug().
|
||||
Str("container", container.Name).
|
||||
Str("image", imageToCheck).
|
||||
Str("compareDigest", digestForComparison).
|
||||
Str("arch", arch).
|
||||
Str("os", os).
|
||||
Str("variant", variant).
|
||||
Msg("Checking update for container")
|
||||
|
||||
result := a.registryChecker.CheckImageUpdate(ctx, imageToCheck, digestForComparison, arch, os, variant)
|
||||
if result != nil {
|
||||
container.UpdateStatus = &agentsdocker.UpdateStatus{
|
||||
UpdateAvailable: result.UpdateAvailable,
|
||||
CurrentDigest: result.CurrentDigest,
|
||||
LatestDigest: result.LatestDigest,
|
||||
LastChecked: result.CheckedAt,
|
||||
Error: result.Error,
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -47,8 +47,16 @@ func (a *Agent) handleUpdateContainerCommand(ctx context.Context, target TargetC
|
||||
return nil
|
||||
}
|
||||
|
||||
// Perform the update
|
||||
result := a.updateContainer(ctx, containerID)
|
||||
// Create a progress callback to send step updates to Pulse
|
||||
progressFn := func(step string) {
|
||||
// Send progress update (using "in_progress" status with step message)
|
||||
if err := a.sendCommandAck(ctx, target, command.ID, agentsdocker.CommandStatusInProgress, step); err != nil {
|
||||
a.logger.Warn().Err(err).Str("step", step).Msg("Failed to send progress update")
|
||||
}
|
||||
}
|
||||
|
||||
// Perform the update with progress tracking
|
||||
result := a.updateContainerWithProgress(ctx, containerID, progressFn)
|
||||
|
||||
// Send completion status
|
||||
status := agentsdocker.CommandStatusCompleted
|
||||
@@ -65,7 +73,7 @@ func (a *Agent) handleUpdateContainerCommand(ctx context.Context, target TargetC
|
||||
return nil
|
||||
}
|
||||
|
||||
// updateContainer performs the actual container update operation.
|
||||
// updateContainerWithProgress performs the actual container update operation with progress reporting.
|
||||
// This is the core logic that:
|
||||
// 1. Inspects the current container configuration
|
||||
// 2. Pulls the latest image
|
||||
@@ -73,11 +81,20 @@ func (a *Agent) handleUpdateContainerCommand(ctx context.Context, target TargetC
|
||||
// 4. Creates a new container with the same config
|
||||
// 5. Starts the new container
|
||||
// 6. Cleans up on success or rolls back on failure
|
||||
func (a *Agent) updateContainer(ctx context.Context, containerID string) ContainerUpdateResult {
|
||||
//
|
||||
// The progressFn callback is called at each step to report progress to Pulse.
|
||||
func (a *Agent) updateContainerWithProgress(ctx context.Context, containerID string, progressFn func(step string)) ContainerUpdateResult {
|
||||
result := ContainerUpdateResult{
|
||||
ContainerID: containerID,
|
||||
}
|
||||
|
||||
// Helper to report progress (handles nil progressFn)
|
||||
reportProgress := func(step string) {
|
||||
if progressFn != nil {
|
||||
progressFn(step)
|
||||
}
|
||||
}
|
||||
|
||||
// 1. Inspect the current container to get its full configuration
|
||||
inspect, err := a.docker.ContainerInspect(ctx, containerID)
|
||||
if err != nil {
|
||||
@@ -89,6 +106,15 @@ func (a *Agent) updateContainer(ctx context.Context, containerID string) Contain
|
||||
result.ContainerName = strings.TrimPrefix(inspect.Name, "/")
|
||||
result.OldImageDigest = inspect.Image
|
||||
|
||||
// Reject updates for backup containers (created during previous updates)
|
||||
if strings.Contains(result.ContainerName, "_pulse_backup_") {
|
||||
result.Error = "Cannot update backup containers - these are temporary and should be cleaned up"
|
||||
a.logger.Warn().
|
||||
Str("container", result.ContainerName).
|
||||
Msg("Rejecting update request for backup container")
|
||||
return result
|
||||
}
|
||||
|
||||
a.logger.Info().
|
||||
Str("container", result.ContainerName).
|
||||
Str("image", inspect.Config.Image).
|
||||
@@ -96,6 +122,7 @@ func (a *Agent) updateContainer(ctx context.Context, containerID string) Contain
|
||||
|
||||
// 2. Pull the latest image
|
||||
imageName := inspect.Config.Image
|
||||
reportProgress(fmt.Sprintf("Pulling image %s...", imageName))
|
||||
a.logger.Info().Str("image", imageName).Msg("Pulling latest image")
|
||||
|
||||
pullResp, err := a.docker.ImagePull(ctx, imageName, image.PullOptions{})
|
||||
@@ -111,6 +138,7 @@ func (a *Agent) updateContainer(ctx context.Context, containerID string) Contain
|
||||
a.logger.Info().Str("image", imageName).Msg("Successfully pulled latest image")
|
||||
|
||||
// 3. Stop the current container
|
||||
reportProgress(fmt.Sprintf("Stopping container %s...", result.ContainerName))
|
||||
stopTimeout := 30 // seconds
|
||||
if err := a.docker.ContainerStop(ctx, containerID, container.StopOptions{Timeout: &stopTimeout}); err != nil {
|
||||
result.Error = fmt.Sprintf("Failed to stop container: %v", err)
|
||||
@@ -134,6 +162,8 @@ func (a *Agent) updateContainer(ctx context.Context, containerID string) Contain
|
||||
result.BackupContainer = backupName
|
||||
a.logger.Info().Str("backup", backupName).Msg("Container renamed for backup")
|
||||
|
||||
reportProgress(fmt.Sprintf("Creating new container %s...", result.ContainerName))
|
||||
|
||||
// 5. Prepare network configuration
|
||||
// We need to handle network settings carefully
|
||||
var networkingConfig *network.NetworkingConfig
|
||||
@@ -199,6 +229,7 @@ func (a *Agent) updateContainer(ctx context.Context, containerID string) Contain
|
||||
}
|
||||
|
||||
// 8. Start the new container
|
||||
reportProgress(fmt.Sprintf("Starting container %s...", result.ContainerName))
|
||||
if err := a.docker.ContainerStart(ctx, newContainerID, container.StartOptions{}); err != nil {
|
||||
result.Error = fmt.Sprintf("Failed to start new container: %v", err)
|
||||
a.logger.Error().Err(err).Str("container", result.ContainerName).Msg("Failed to start new container")
|
||||
@@ -212,6 +243,7 @@ func (a *Agent) updateContainer(ctx context.Context, containerID string) Contain
|
||||
a.logger.Info().Str("container", result.ContainerName).Msg("New container started, verifying stability...")
|
||||
|
||||
// 9. Verify container stability
|
||||
reportProgress("Verifying container stability...")
|
||||
// Wait a few seconds to ensure it doesn't crash immediately
|
||||
sleepFn(5 * time.Second)
|
||||
|
||||
|
||||
@@ -116,6 +116,18 @@ func (r *RegistryChecker) MarkChecked() {
|
||||
r.lastFullCheck = time.Now()
|
||||
}
|
||||
|
||||
// ForceCheck clears the cache and resets the last check timestamp.
|
||||
func (r *RegistryChecker) ForceCheck() {
|
||||
r.mu.Lock()
|
||||
r.lastFullCheck = time.Time{}
|
||||
r.mu.Unlock()
|
||||
|
||||
r.cache.mu.Lock()
|
||||
defer r.cache.mu.Unlock()
|
||||
r.cache.entries = make(map[string]cacheEntry)
|
||||
}
|
||||
|
||||
|
||||
// CheckImageUpdate checks if a newer version of the image is available.
|
||||
func (r *RegistryChecker) CheckImageUpdate(ctx context.Context, image, currentDigest, arch, os, variant string) *ImageUpdateResult {
|
||||
if !r.Enabled() {
|
||||
@@ -135,10 +147,15 @@ func (r *RegistryChecker) CheckImageUpdate(ctx context.Context, image, currentDi
|
||||
}
|
||||
}
|
||||
|
||||
// Check cache first
|
||||
// internal/dockeragent/registry.go
|
||||
// Check cache first
|
||||
// internal/dockeragent/registry.go
|
||||
cacheKey := fmt.Sprintf("%s/%s:%s|%s/%s/%s", registry, repository, tag, arch, os, variant)
|
||||
r.logger.Debug().Str("image", image).Str("cacheKey", cacheKey).Msg("Checking update (internal)")
|
||||
|
||||
if cached := r.getCached(cacheKey); cached != nil {
|
||||
r.logger.Debug().Str("image", image).Msg("Cache hit for update check")
|
||||
if cached.err != "" {
|
||||
return &ImageUpdateResult{
|
||||
Image: image,
|
||||
@@ -158,7 +175,7 @@ func (r *RegistryChecker) CheckImageUpdate(ctx context.Context, image, currentDi
|
||||
}
|
||||
|
||||
// Fetch latest digest from registry
|
||||
latestDigest, err := r.fetchDigest(ctx, registry, repository, tag, arch, os, variant)
|
||||
latestDigest, headDigest, err := r.fetchDigest(ctx, registry, repository, tag, arch, os, variant)
|
||||
if err != nil {
|
||||
// Cache the error to avoid hammering the registry
|
||||
r.cacheError(cacheKey, err.Error())
|
||||
@@ -178,14 +195,33 @@ func (r *RegistryChecker) CheckImageUpdate(ctx context.Context, image, currentDi
|
||||
}
|
||||
}
|
||||
|
||||
// Store both digests in cache (comma separated) to allow matching against either
|
||||
cacheValue := latestDigest
|
||||
if headDigest != latestDigest && headDigest != "" {
|
||||
cacheValue = latestDigest + "," + headDigest
|
||||
}
|
||||
|
||||
// Cache the successful result
|
||||
r.cacheDigest(cacheKey, latestDigest)
|
||||
r.cacheDigest(cacheKey, cacheValue)
|
||||
|
||||
updateAvailable := r.digestsDiffer(currentDigest, cacheValue)
|
||||
|
||||
r.logger.Info().
|
||||
Str("image", image).
|
||||
Str("currentDigest", currentDigest).
|
||||
Str("latestDigest", latestDigest).
|
||||
Str("headDigest", headDigest).
|
||||
Str("arch", arch).
|
||||
Str("os", os).
|
||||
Str("variant", variant).
|
||||
Bool("updateAvailable", updateAvailable).
|
||||
Msg("Checked image update")
|
||||
|
||||
return &ImageUpdateResult{
|
||||
Image: image,
|
||||
CurrentDigest: currentDigest,
|
||||
LatestDigest: latestDigest,
|
||||
UpdateAvailable: r.digestsDiffer(currentDigest, latestDigest),
|
||||
UpdateAvailable: updateAvailable,
|
||||
CheckedAt: time.Now(),
|
||||
}
|
||||
}
|
||||
@@ -196,19 +232,27 @@ func (r *RegistryChecker) digestsDiffer(current, latest string) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// Normalize digests - remove "sha256:" prefix for comparison if present in only one
|
||||
normCurrent := strings.TrimPrefix(current, "sha256:")
|
||||
normLatest := strings.TrimPrefix(latest, "sha256:")
|
||||
// Normalize digests - lowercase and remove "sha256:" prefix
|
||||
normCurrent := strings.ToLower(strings.TrimPrefix(current, "sha256:"))
|
||||
|
||||
return normCurrent != normLatest
|
||||
// latest may contain multiple comma-separated digests (resolved + head)
|
||||
for _, l := range strings.Split(latest, ",") {
|
||||
normLatest := strings.ToLower(strings.TrimPrefix(strings.TrimSpace(l), "sha256:"))
|
||||
if normCurrent == normLatest {
|
||||
return false // Match found
|
||||
}
|
||||
}
|
||||
|
||||
return true // No match found
|
||||
}
|
||||
|
||||
// fetchDigest retrieves the digest for an image from the registry.
|
||||
func (r *RegistryChecker) fetchDigest(ctx context.Context, registry, repository, tag, arch, os, variant string) (string, error) {
|
||||
// Returns the resolved platform-specific digest AND the raw HEAD digest (which might be a manifest list).
|
||||
func (r *RegistryChecker) fetchDigest(ctx context.Context, registry, repository, tag, arch, os, variant string) (string, string, error) {
|
||||
// Get auth token if needed
|
||||
token, err := r.getAuthToken(ctx, registry, repository)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("auth: %w", err)
|
||||
return "", "", fmt.Errorf("auth: %w", err)
|
||||
}
|
||||
|
||||
// Construct the manifest URL
|
||||
@@ -216,7 +260,7 @@ func (r *RegistryChecker) fetchDigest(ctx context.Context, registry, repository,
|
||||
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodHead, manifestURL, nil)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("create request: %w", err)
|
||||
return "", "", fmt.Errorf("create request: %w", err)
|
||||
}
|
||||
|
||||
// Accept headers for multi-arch manifest support
|
||||
@@ -233,21 +277,21 @@ func (r *RegistryChecker) fetchDigest(ctx context.Context, registry, repository,
|
||||
|
||||
resp, err := r.httpClient.Do(req)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("request: %w", err)
|
||||
return "", "", fmt.Errorf("request: %w", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode == http.StatusUnauthorized {
|
||||
return "", fmt.Errorf("authentication required")
|
||||
return "", "", fmt.Errorf("authentication required")
|
||||
}
|
||||
if resp.StatusCode == http.StatusNotFound {
|
||||
return "", fmt.Errorf("image not found")
|
||||
return "", "", fmt.Errorf("image not found")
|
||||
}
|
||||
if resp.StatusCode == http.StatusTooManyRequests {
|
||||
return "", fmt.Errorf("rate limited")
|
||||
return "", "", fmt.Errorf("rate limited")
|
||||
}
|
||||
if resp.StatusCode >= 400 {
|
||||
return "", fmt.Errorf("registry error: %d", resp.StatusCode)
|
||||
return "", "", fmt.Errorf("registry error: %d", resp.StatusCode)
|
||||
}
|
||||
|
||||
// Get digest from Docker-Content-Digest header
|
||||
@@ -266,14 +310,15 @@ func (r *RegistryChecker) fetchDigest(ctx context.Context, registry, repository,
|
||||
|
||||
// If it's a manifest list and we have arch info, we need to resolve it
|
||||
if isManifestList && arch != "" && os != "" {
|
||||
return r.resolveManifestList(ctx, registry, repository, tag, arch, os, variant, token)
|
||||
resolved, err := r.resolveManifestList(ctx, registry, repository, tag, arch, os, variant, token)
|
||||
return resolved, digest, err
|
||||
}
|
||||
|
||||
if digest == "" {
|
||||
return "", fmt.Errorf("no digest in response")
|
||||
return "", "", fmt.Errorf("no digest in response")
|
||||
}
|
||||
|
||||
return digest, nil
|
||||
return digest, digest, nil
|
||||
}
|
||||
|
||||
// resolveManifestList fetches the manifest list and finds the matching digest for the architecture.
|
||||
@@ -327,6 +372,13 @@ func (r *RegistryChecker) resolveManifestList(ctx context.Context, registry, rep
|
||||
if variant != "" && m.Platform.Variant != "" && variant != m.Platform.Variant {
|
||||
continue
|
||||
}
|
||||
r.logger.Debug().
|
||||
Str("image", repository+":"+tag).
|
||||
Str("arch", arch).
|
||||
Str("variant", variant).
|
||||
Str("foundDigest", m.Digest).
|
||||
Str("foundVariant", m.Platform.Variant).
|
||||
Msg("Resolved manifest list digest")
|
||||
return m.Digest, nil
|
||||
}
|
||||
}
|
||||
@@ -452,11 +504,25 @@ func parseImageReference(image string) (registry, repository, tag string) {
|
||||
registry = "registry-1.docker.io"
|
||||
tag = "latest"
|
||||
|
||||
// Check if this is a digest-pinned image (image@sha256:...)
|
||||
if strings.Contains(image, "@sha256:") {
|
||||
// Check if this is a digest or digest-pinned image (image@sha256: or sha256:...)
|
||||
if strings.Contains(image, "@sha256:") || strings.HasPrefix(image, "sha256:") || isValidDigest(image) {
|
||||
return "", "", ""
|
||||
}
|
||||
|
||||
// Also check for 64-character hex strings (often used as image IDs)
|
||||
if len(image) == 64 {
|
||||
isHex := true
|
||||
for _, c := range image {
|
||||
if !((c >= '0' && c <= '9') || (c >= 'a' && c <= 'f') || (c >= 'A' && c <= 'F')) {
|
||||
isHex = false
|
||||
break
|
||||
}
|
||||
}
|
||||
if isHex {
|
||||
return "", "", ""
|
||||
}
|
||||
}
|
||||
|
||||
// Split off the tag first
|
||||
parts := strings.Split(image, ":")
|
||||
if len(parts) > 1 {
|
||||
|
||||
@@ -30,6 +30,12 @@ func (a *Agent) checkForUpdates(ctx context.Context) {
|
||||
return
|
||||
}
|
||||
|
||||
// Skip updates if running as part of the unified agent
|
||||
if a.cfg.AgentType == "unified" {
|
||||
a.logger.Debug().Msg("Skipping update check - running in unified agent mode")
|
||||
return
|
||||
}
|
||||
|
||||
a.logger.Debug().Msg("Checking for agent updates")
|
||||
|
||||
target := a.primaryTarget()
|
||||
|
||||
@@ -15,6 +15,8 @@ const (
|
||||
DockerCommandTypeStop = "stop"
|
||||
// DockerCommandTypeUpdateContainer instructs the agent to update a container to its latest image.
|
||||
DockerCommandTypeUpdateContainer = "update_container"
|
||||
// DockerCommandTypeCheckUpdates instructs the agent to check for container updates immediately.
|
||||
DockerCommandTypeCheckUpdates = "check_updates"
|
||||
|
||||
// DockerCommandStatusQueued indicates the command is queued and waiting to be dispatched.
|
||||
DockerCommandStatusQueued = "queued"
|
||||
@@ -26,6 +28,8 @@ const (
|
||||
DockerCommandStatusCompleted = "completed"
|
||||
// DockerCommandStatusFailed indicates the command failed.
|
||||
DockerCommandStatusFailed = "failed"
|
||||
// DockerCommandStatusInProgress indicates the command is actively running.
|
||||
DockerCommandStatusInProgress = "in_progress"
|
||||
// DockerCommandStatusExpired indicates Pulse abandoned the command.
|
||||
DockerCommandStatusExpired = "expired"
|
||||
)
|
||||
@@ -69,6 +73,15 @@ func (cmd *dockerHostCommand) markDispatched() {
|
||||
cmd.status.UpdatedAt = now
|
||||
}
|
||||
|
||||
func (cmd *dockerHostCommand) markInProgress(message string) {
|
||||
now := time.Now().UTC()
|
||||
cmd.status.Status = DockerCommandStatusInProgress
|
||||
cmd.status.UpdatedAt = now
|
||||
if message != "" {
|
||||
cmd.status.Message = message
|
||||
}
|
||||
}
|
||||
|
||||
func (cmd *dockerHostCommand) markAcknowledged(message string) {
|
||||
now := time.Now().UTC()
|
||||
cmd.status.Status = DockerCommandStatusAcknowledged
|
||||
@@ -195,6 +208,9 @@ func (m *Monitor) QueueDockerContainerUpdateCommand(hostID, containerID, contain
|
||||
}
|
||||
|
||||
cmd := newDockerHostCommand(DockerCommandTypeUpdateContainer, fmt.Sprintf("Updating container %s", containerName), dockerCommandDefaultTTL, payload)
|
||||
// Encode container ID in command ID so frontend can track it
|
||||
cmd.status.ID = fmt.Sprintf("%s:%s", cmd.status.ID, containerID)
|
||||
|
||||
if m.dockerCommands == nil {
|
||||
m.dockerCommands = make(map[string]*dockerHostCommand)
|
||||
}
|
||||
@@ -215,6 +231,57 @@ func (m *Monitor) QueueDockerContainerUpdateCommand(hostID, containerID, contain
|
||||
return cmd.status, nil
|
||||
}
|
||||
|
||||
// QueueDockerCheckUpdatesCommand queues a command to check for container updates on a Docker host.
// The command is delivered to the host's agent on its next poll; the returned
// status carries the generated command ID so callers can track progress.
//
// Returns an error when hostID is empty (after normalization), the host is
// unknown, or the host already has a non-terminal command pending.
func (m *Monitor) QueueDockerCheckUpdatesCommand(hostID string) (models.DockerHostCommandStatus, error) {
	// All command bookkeeping (dockerCommands, dockerCommandIndex, state) is
	// mutated under the monitor lock.
	m.mu.Lock()
	defer m.mu.Unlock()

	hostID = normalizeDockerHostID(hostID)
	if hostID == "" {
		return models.DockerHostCommandStatus{}, fmt.Errorf("docker host id is required")
	}

	// Ensure the host exists
	var hostExists bool
	for _, host := range m.state.GetDockerHosts() {
		if host.ID == hostID {
			hostExists = true
			break
		}
	}
	if !hostExists {
		return models.DockerHostCommandStatus{}, fmt.Errorf("docker host %q not found", hostID)
	}

	// Check for existing commands in progress for this host.
	// Only one command per host may be active at a time; terminal states
	// (completed/failed/expired) allow a new command to replace the old entry.
	if existing, ok := m.dockerCommands[hostID]; ok {
		switch existing.status.Status {
		case DockerCommandStatusQueued, DockerCommandStatusDispatched, DockerCommandStatusAcknowledged, DockerCommandStatusInProgress:
			return existing.status, fmt.Errorf("docker host %q already has a command in progress", hostID)
		}
	}

	// No payload is needed: "check updates" applies to the whole host.
	cmd := newDockerHostCommand(DockerCommandTypeCheckUpdates, "Checking for container updates", dockerCommandDefaultTTL, nil)

	// Save the command, lazily initializing the maps on first use.
	if m.dockerCommands == nil {
		m.dockerCommands = make(map[string]*dockerHostCommand)
	}
	m.dockerCommands[hostID] = &cmd
	if m.dockerCommandIndex == nil {
		m.dockerCommandIndex = make(map[string]string)
	}
	// Index by command ID so acknowledgements can be routed back to the host.
	m.dockerCommandIndex[cmd.status.ID] = hostID

	// Publish the status so it is visible in the shared monitoring state.
	m.state.SetDockerHostCommand(hostID, &cmd.status)
	log.Info().
		Str("dockerHostID", hostID).
		Str("commandID", cmd.status.ID).
		Msg("Queued docker check updates command")

	return cmd.status, nil
}
|
||||
|
||||
func (m *Monitor) getDockerCommandPayload(hostID string) (map[string]any, *models.DockerHostCommandStatus) {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
@@ -290,6 +357,8 @@ func (m *Monitor) acknowledgeDockerCommand(commandID, hostID, status, message st
|
||||
switch status {
|
||||
case DockerCommandStatusAcknowledged:
|
||||
cmd.markAcknowledged(message)
|
||||
case DockerCommandStatusInProgress:
|
||||
cmd.markInProgress(message)
|
||||
case DockerCommandStatusCompleted:
|
||||
cmd.markAcknowledged(message)
|
||||
cmd.markCompleted(message)
|
||||
|
||||
@@ -1191,3 +1191,31 @@ func TestAcknowledgeDockerCommandErrorPaths(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestQueueDockerContainerUpdateCommand(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
monitor := newTestMonitorForCommands(t)
|
||||
host := models.DockerHost{
|
||||
ID: "host-1",
|
||||
Hostname: "node-1",
|
||||
Status: "online",
|
||||
}
|
||||
monitor.state.UpsertDockerHost(host)
|
||||
|
||||
containerID := "test-container"
|
||||
containerName := "my-app"
|
||||
|
||||
cmdStatus, err := monitor.QueueDockerContainerUpdateCommand(host.ID, containerID, containerName)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to queue update command: %v", err)
|
||||
}
|
||||
|
||||
if !strings.HasSuffix(cmdStatus.ID, ":"+containerID) {
|
||||
t.Errorf("Expected command ID to end with :%s, got %s", containerID, cmdStatus.ID)
|
||||
}
|
||||
|
||||
if cmdStatus.Type != DockerCommandTypeUpdateContainer {
|
||||
t.Errorf("Expected type %s, got %s", DockerCommandTypeUpdateContainer, cmdStatus.Type)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -25,9 +25,13 @@ const (
|
||||
CommandTypeStop = "stop"
|
||||
// CommandTypeUpdateContainer instructs the agent to update a specific container to its latest image.
|
||||
CommandTypeUpdateContainer = "update_container"
|
||||
// CommandTypeCheckUpdates instructs the agent to clear its registry cache and check for updates immediately.
|
||||
CommandTypeCheckUpdates = "check_updates"
|
||||
|
||||
// CommandStatusAcknowledged indicates a command was received and is in progress.
|
||||
CommandStatusAcknowledged = "acknowledged"
|
||||
// CommandStatusInProgress indicates an intermediate progress update during command execution.
|
||||
CommandStatusInProgress = "in_progress"
|
||||
// CommandStatusCompleted indicates the command completed successfully.
|
||||
CommandStatusCompleted = "completed"
|
||||
// CommandStatusFailed indicates the command failed.
|
||||
|
||||
Reference in New Issue
Block a user