diff --git a/internal/monitoring/diagnostic_snapshots_test.go b/internal/monitoring/diagnostic_snapshots_test.go
index e4d259bc5..6d83eab7c 100644
--- a/internal/monitoring/diagnostic_snapshots_test.go
+++ b/internal/monitoring/diagnostic_snapshots_test.go
@@ -758,6 +758,213 @@ func TestLogNodeMemorySource(t *testing.T) {
 	})
 }
 
+func TestRecordNodeSnapshot(t *testing.T) {
+	tests := []struct {
+		name             string
+		monitor          *Monitor
+		instance         string
+		node             string
+		snapshot         NodeMemorySnapshot
+		wantPanic        bool
+		wantMapLen       int
+		wantInstance     string
+		wantNode         string
+		checkRetrievedAt string // "set" (auto-set), "preserved" (kept from input), "skip" (don't check)
+		inputRetrievedAt time.Time
+	}{
+		{
+			name:             "nil Monitor returns early without panic",
+			monitor:          nil,
+			instance:         "pve1",
+			node:             "node1",
+			snapshot:         NodeMemorySnapshot{},
+			wantPanic:        false,
+			checkRetrievedAt: "skip",
+		},
+		{
+			name: "nil nodeSnapshots map gets initialized",
+			monitor: &Monitor{
+				nodeSnapshots:  nil,
+				guestSnapshots: make(map[string]GuestMemorySnapshot),
+			},
+			instance:         "pve1",
+			node:             "node1",
+			snapshot:         NodeMemorySnapshot{MemorySource: "test"},
+			wantMapLen:       1,
+			wantInstance:     "pve1",
+			wantNode:         "node1",
+			checkRetrievedAt: "set",
+		},
+		{
+			name: "Instance and Node are set from parameters",
+			monitor: &Monitor{
+				nodeSnapshots:  make(map[string]NodeMemorySnapshot),
+				guestSnapshots: make(map[string]GuestMemorySnapshot),
+			},
+			instance: "my-instance",
+			node:     "my-node",
+			snapshot: NodeMemorySnapshot{
+				Instance: "wrong-instance",
+				Node:     "wrong-node",
+			},
+			wantMapLen:       1,
+			wantInstance:     "my-instance",
+			wantNode:         "my-node",
+			checkRetrievedAt: "set",
+		},
+		{
+			name: "zero RetrievedAt gets set to current time",
+			monitor: &Monitor{
+				nodeSnapshots:  make(map[string]NodeMemorySnapshot),
+				guestSnapshots: make(map[string]GuestMemorySnapshot),
+			},
+			instance:         "pve1",
+			node:             "node1",
+			snapshot:         NodeMemorySnapshot{},
+			wantMapLen:       1,
+			wantInstance:     "pve1",
+			wantNode:         "node1",
+			checkRetrievedAt: "set",
+		},
+		{
+			name: "non-zero RetrievedAt is preserved",
+			monitor: &Monitor{
+				nodeSnapshots:  make(map[string]NodeMemorySnapshot),
+				guestSnapshots: make(map[string]GuestMemorySnapshot),
+			},
+			instance:         "pve1",
+			node:             "node1",
+			snapshot:         NodeMemorySnapshot{RetrievedAt: time.Date(2024, 6, 15, 12, 0, 0, 0, time.UTC)},
+			wantMapLen:       1,
+			wantInstance:     "pve1",
+			wantNode:         "node1",
+			checkRetrievedAt: "preserved",
+			inputRetrievedAt: time.Date(2024, 6, 15, 12, 0, 0, 0, time.UTC),
+		},
+		{
+			name: "snapshot is stored with correct key",
+			monitor: &Monitor{
+				nodeSnapshots:  make(map[string]NodeMemorySnapshot),
+				guestSnapshots: make(map[string]GuestMemorySnapshot),
+			},
+			instance:         "pve1",
+			node:             "node1",
+			snapshot:         NodeMemorySnapshot{MemorySource: "rrd-available"},
+			wantMapLen:       1,
+			wantInstance:     "pve1",
+			wantNode:         "node1",
+			checkRetrievedAt: "set",
+		},
+	}
+
+	for _, tc := range tests {
+		t.Run(tc.name, func(t *testing.T) {
+			beforeRecord := time.Now()
+
+			// Call recordNodeSnapshot - should not panic
+			tc.monitor.recordNodeSnapshot(tc.instance, tc.node, tc.snapshot)
+
+			afterRecord := time.Now()
+
+			// For nil monitor, we just verify no panic occurred
+			if tc.monitor == nil {
+				return
+			}
+
+			// Verify map was initialized and has expected length
+			if tc.monitor.nodeSnapshots == nil {
+				t.Fatal("nodeSnapshots should have been initialized")
+			}
+			if len(tc.monitor.nodeSnapshots) != tc.wantMapLen {
+				t.Errorf("nodeSnapshots length = %d, want %d", len(tc.monitor.nodeSnapshots), tc.wantMapLen)
+			}
+
+			// Verify snapshot was stored with correct key
+			key := makeNodeSnapshotKey(tc.instance, tc.node)
+			stored, ok := tc.monitor.nodeSnapshots[key]
+			if !ok {
+				t.Fatalf("snapshot not found with key %q", key)
+			}
+
+			// Verify Instance and Node were set from parameters
+			if stored.Instance != tc.wantInstance {
+				t.Errorf("Instance = %q, want %q", stored.Instance, tc.wantInstance)
+			}
+			if stored.Node != tc.wantNode {
+				t.Errorf("Node = %q, want %q", stored.Node, tc.wantNode)
+			}
+
+			// Verify RetrievedAt handling
+			switch tc.checkRetrievedAt {
+			case "set":
+				if stored.RetrievedAt.Before(beforeRecord) || stored.RetrievedAt.After(afterRecord) {
+					t.Errorf("RetrievedAt = %v, want between %v and %v", stored.RetrievedAt, beforeRecord, afterRecord)
+				}
+			case "preserved":
+				if !stored.RetrievedAt.Equal(tc.inputRetrievedAt) {
+					t.Errorf("RetrievedAt = %v, want %v", stored.RetrievedAt, tc.inputRetrievedAt)
+				}
+			}
+		})
+	}
+}
+
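+// Taken together, the cases above pin down the behaviour these tests assume
+// recordNodeSnapshot has: it tolerates a nil receiver, lazily initializes the
+// nodeSnapshots map, overwrites Instance/Node from its parameters, defaults a
+// zero RetrievedAt to time.Now(), and stores the snapshot under
+// makeNodeSnapshotKey(instance, node). The key format itself is treated as
+// opaque here; lookups always go through makeNodeSnapshotKey.
+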
+func TestRecordNodeSnapshot_MultipleSnapshots(t *testing.T) {
+	t.Run("records multiple nodes with different keys", func(t *testing.T) {
+		m := &Monitor{
+			nodeSnapshots:  make(map[string]NodeMemorySnapshot),
+			guestSnapshots: make(map[string]GuestMemorySnapshot),
+		}
+
+		m.recordNodeSnapshot("pve1", "node1", NodeMemorySnapshot{MemorySource: "src1"})
+		m.recordNodeSnapshot("pve1", "node2", NodeMemorySnapshot{MemorySource: "src2"})
+		m.recordNodeSnapshot("pve2", "node1", NodeMemorySnapshot{MemorySource: "src3"})
+
+		if len(m.nodeSnapshots) != 3 {
+			t.Fatalf("Expected 3 node snapshots, got %d", len(m.nodeSnapshots))
+		}
+
+		// Verify each one exists with correct data
+		key1 := makeNodeSnapshotKey("pve1", "node1")
+		key2 := makeNodeSnapshotKey("pve1", "node2")
+		key3 := makeNodeSnapshotKey("pve2", "node1")
+
+		if m.nodeSnapshots[key1].MemorySource != "src1" {
+			t.Errorf("Snapshot 1 MemorySource = %q, want %q", m.nodeSnapshots[key1].MemorySource, "src1")
+		}
+		if m.nodeSnapshots[key2].MemorySource != "src2" {
+			t.Errorf("Snapshot 2 MemorySource = %q, want %q", m.nodeSnapshots[key2].MemorySource, "src2")
+		}
+		if m.nodeSnapshots[key3].MemorySource != "src3" {
+			t.Errorf("Snapshot 3 MemorySource = %q, want %q", m.nodeSnapshots[key3].MemorySource, "src3")
+		}
+	})
+
+	t.Run("overwrites existing snapshot with same key", func(t *testing.T) {
+		m := &Monitor{
+			nodeSnapshots:  make(map[string]NodeMemorySnapshot),
+			guestSnapshots: make(map[string]GuestMemorySnapshot),
+		}
+
+		m.recordNodeSnapshot("pve1", "node1", NodeMemorySnapshot{
+			MemorySource: "old-source",
+		})
+
+		m.recordNodeSnapshot("pve1", "node1", NodeMemorySnapshot{
+			MemorySource: "new-source",
+		})
+
+		if len(m.nodeSnapshots) != 1 {
+			t.Fatalf("Expected 1 node snapshot after overwrite, got %d", len(m.nodeSnapshots))
+		}
+
+		key := makeNodeSnapshotKey("pve1", "node1")
+		if m.nodeSnapshots[key].MemorySource != "new-source" {
+			t.Errorf("MemorySource = %q, want %q", m.nodeSnapshots[key].MemorySource, "new-source")
+		}
+	})
+}
+
 func TestGetDiagnosticSnapshots(t *testing.T) {
 	t.Run("nil Monitor returns empty set with non-nil slices", func(t *testing.T) {
 		var m *Monitor
diff --git a/internal/monitoring/monitor_host_agents_test.go b/internal/monitoring/monitor_host_agents_test.go
index c38e43c84..adf10d9b4 100644
--- a/internal/monitoring/monitor_host_agents_test.go
+++ b/internal/monitoring/monitor_host_agents_test.go
@@ -197,3 +197,321 @@ func TestRemoveHostAgentUnbindsToken(t *testing.T) {
 		t.Fatalf("expected token binding to be cleared after host removal")
 	}
 }
+
+func TestEvaluateHostAgentsEmptyHostsList(t *testing.T) {
+	monitor := &Monitor{
+		state:        models.NewState(),
+		alertManager: alerts.NewManager(),
+		config:       &config.Config{},
+	}
+	t.Cleanup(func() { monitor.alertManager.Stop() })
+
+	// No hosts in state - should complete without error or state changes
+	monitor.evaluateHostAgents(time.Now())
+
+	snapshot := monitor.state.GetSnapshot()
+	if len(snapshot.Hosts) != 0 {
+		t.Errorf("expected 0 hosts, got %d", len(snapshot.Hosts))
+	}
+	if len(snapshot.ConnectionHealth) != 0 {
+		t.Errorf("expected 0 connection health entries, got %d", len(snapshot.ConnectionHealth))
+	}
+}
+
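+// The window arithmetic exercised by the tests below assumes evaluateHostAgents
+// derives its staleness window roughly as follows (a sketch of the assumed
+// behaviour, not necessarily the exact implementation):
+//
+//	interval := time.Duration(host.IntervalSeconds) * time.Second
+//	if interval <= 0 {
+//		interval = 30 * time.Second // default agent interval
+//	}
+//	window := 4 * interval
+//	if window < 30*time.Second {
+//		window = 30 * time.Second
+//	}
+//	if window > 10*time.Minute {
+//		window = 10 * time.Minute
+//	}
+//	healthy := !host.LastSeen.IsZero() && now.Sub(host.LastSeen) <= window
+//
+// A healthy host is reported under hostConnectionPrefix+host.ID and marked
+// "online"; an unhealthy one is marked "offline".
+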
+func TestEvaluateHostAgentsZeroIntervalUsesDefault(t *testing.T) {
+	monitor := &Monitor{
+		state:        models.NewState(),
+		alertManager: alerts.NewManager(),
+		config:       &config.Config{},
+	}
+	t.Cleanup(func() { monitor.alertManager.Stop() })
+
+	hostID := "host-zero-interval"
+	// IntervalSeconds = 0, LastSeen = now, should use default interval (30s)
+	// Default window = 30s * 4 = 120s (well above the 30s minimum)
+	// With LastSeen = now, the host should be healthy
+	monitor.state.UpsertHost(models.Host{
+		ID:              hostID,
+		Hostname:        "zero-interval.local",
+		Status:          "unknown",
+		IntervalSeconds: 0, // Zero interval - should use default
+		LastSeen:        time.Now(),
+	})
+
+	monitor.evaluateHostAgents(time.Now())
+
+	snapshot := monitor.state.GetSnapshot()
+	connKey := hostConnectionPrefix + hostID
+	if healthy, ok := snapshot.ConnectionHealth[connKey]; !ok || !healthy {
+		t.Fatalf("expected connection health true for zero-interval host with recent LastSeen, got %v (exists=%v)", healthy, ok)
+	}
+
+	for _, host := range snapshot.Hosts {
+		if host.ID == hostID && host.Status != "online" {
+			t.Errorf("expected host status online, got %q", host.Status)
+		}
+	}
+}
+
+func TestEvaluateHostAgentsNegativeIntervalUsesDefault(t *testing.T) {
+	monitor := &Monitor{
+		state:        models.NewState(),
+		alertManager: alerts.NewManager(),
+		config:       &config.Config{},
+	}
+	t.Cleanup(func() { monitor.alertManager.Stop() })
+
+	hostID := "host-negative-interval"
+	monitor.state.UpsertHost(models.Host{
+		ID:              hostID,
+		Hostname:        "negative-interval.local",
+		Status:          "unknown",
+		IntervalSeconds: -10, // Negative interval - should use default
+		LastSeen:        time.Now(),
+	})
+
+	monitor.evaluateHostAgents(time.Now())
+
+	snapshot := monitor.state.GetSnapshot()
+	connKey := hostConnectionPrefix + hostID
+	if healthy, ok := snapshot.ConnectionHealth[connKey]; !ok || !healthy {
+		t.Fatalf("expected connection health true for negative-interval host with recent LastSeen, got %v (exists=%v)", healthy, ok)
+	}
+}
+
+func TestEvaluateHostAgentsWindowClampedToMinimum(t *testing.T) {
+	monitor := &Monitor{
+		state:        models.NewState(),
+		alertManager: alerts.NewManager(),
+		config:       &config.Config{},
+	}
+	t.Cleanup(func() { monitor.alertManager.Stop() })
+
+	hostID := "host-min-window"
+	// IntervalSeconds = 1, so window = 1s * 4 = 4s, but minimum is 30s
+	// Host last seen 25s ago should still be healthy (within 30s minimum window)
+	now := time.Now()
+	monitor.state.UpsertHost(models.Host{
+		ID:              hostID,
+		Hostname:        "min-window.local",
+		Status:          "unknown",
+		IntervalSeconds: 1, // Very small interval
+		LastSeen:        now.Add(-25 * time.Second),
+	})
+
+	monitor.evaluateHostAgents(now)
+
+	snapshot := monitor.state.GetSnapshot()
+	connKey := hostConnectionPrefix + hostID
+	if healthy, ok := snapshot.ConnectionHealth[connKey]; !ok || !healthy {
+		t.Fatalf("expected connection health true (window clamped to minimum 30s), got %v (exists=%v)", healthy, ok)
+	}
+
+	for _, host := range snapshot.Hosts {
+		if host.ID == hostID && host.Status != "online" {
+			t.Errorf("expected host status online, got %q", host.Status)
+		}
+	}
+}
+
+func TestEvaluateHostAgentsWindowClampedToMaximum(t *testing.T) {
+	monitor := &Monitor{
+		state:        models.NewState(),
+		alertManager: alerts.NewManager(),
+		config:       &config.Config{},
+	}
+	t.Cleanup(func() { monitor.alertManager.Stop() })
+
+	hostID := "host-max-window"
+	// IntervalSeconds = 300 (5 min), so window = 300s * 4 = 1200s (20 min)
+	// But maximum is 10 min = 600s
+	// Host last seen 11 minutes ago should be unhealthy (outside 10 min max window)
+	now := time.Now()
+	monitor.state.UpsertHost(models.Host{
+		ID:              hostID,
+		Hostname:        "max-window.local",
+		Status:          "online",
+		IntervalSeconds: 300, // 5 minute interval
+		LastSeen:        now.Add(-11 * time.Minute),
+	})
+
+	monitor.evaluateHostAgents(now)
+
+	snapshot := monitor.state.GetSnapshot()
+	connKey := hostConnectionPrefix + hostID
+	if healthy, ok := snapshot.ConnectionHealth[connKey]; !ok || healthy {
+		t.Fatalf("expected connection health false (window clamped to maximum 10m), got %v (exists=%v)", healthy, ok)
+	}
+
+	for _, host := range snapshot.Hosts {
+		if host.ID == hostID && host.Status != "offline" {
+			t.Errorf("expected host status offline, got %q", host.Status)
+		}
+	}
+}
+
+func TestEvaluateHostAgentsRecentLastSeenIsHealthy(t *testing.T) {
+	monitor := &Monitor{
+		state:        models.NewState(),
+		alertManager: alerts.NewManager(),
+		config:       &config.Config{},
+	}
+	t.Cleanup(func() { monitor.alertManager.Stop() })
+
+	hostID := "host-recent"
+	now := time.Now()
+	// IntervalSeconds = 30, window = 30s * 4 = 120s (no clamping needed)
+	// LastSeen = 10s ago, should be healthy
+	monitor.state.UpsertHost(models.Host{
+		ID:              hostID,
+		Hostname:        "recent.local",
+		Status:          "unknown",
+		IntervalSeconds: 30,
+		LastSeen:        now.Add(-10 * time.Second),
+	})
+
+	monitor.evaluateHostAgents(now)
+
+	snapshot := monitor.state.GetSnapshot()
+	connKey := hostConnectionPrefix + hostID
+	if healthy, ok := snapshot.ConnectionHealth[connKey]; !ok || !healthy {
+		t.Fatalf("expected connection health true for recent LastSeen, got %v (exists=%v)", healthy, ok)
+	}
+
+	for _, host := range snapshot.Hosts {
+		if host.ID == hostID && host.Status != "online" {
+			t.Errorf("expected host status online, got %q", host.Status)
+		}
+	}
+}
+
+func TestEvaluateHostAgentsZeroLastSeenIsUnhealthy(t *testing.T) {
+	monitor := &Monitor{
+		state:        models.NewState(),
+		alertManager: alerts.NewManager(),
+		config:       &config.Config{},
+	}
+	t.Cleanup(func() { monitor.alertManager.Stop() })
+
+	hostID := "host-zero-lastseen"
+	monitor.state.UpsertHost(models.Host{
+		ID:              hostID,
+		Hostname:        "zero-lastseen.local",
+		Status:          "online",
+		IntervalSeconds: 30,
+		LastSeen:        time.Time{}, // Zero time
+	})
+
+	monitor.evaluateHostAgents(time.Now())
+
+	snapshot := monitor.state.GetSnapshot()
+	connKey := hostConnectionPrefix + hostID
+	if healthy, ok := snapshot.ConnectionHealth[connKey]; !ok || healthy {
+		t.Fatalf("expected connection health false for zero LastSeen, got %v (exists=%v)", healthy, ok)
+	}
+
+	for _, host := range snapshot.Hosts {
+		if host.ID == hostID && host.Status != "offline" {
+			t.Errorf("expected host status offline for zero LastSeen, got %q", host.Status)
+		}
+	}
+}
+
+func TestEvaluateHostAgentsOldLastSeenIsUnhealthy(t *testing.T) {
+	monitor := &Monitor{
+		state:        models.NewState(),
+		alertManager: alerts.NewManager(),
+		config:       &config.Config{},
+	}
+	t.Cleanup(func() { monitor.alertManager.Stop() })
+
+	hostID := "host-old-lastseen"
+	now := time.Now()
+	// IntervalSeconds = 30, window = 30s * 4 = 120s
+	// LastSeen = 5 minutes ago, should be unhealthy
+	monitor.state.UpsertHost(models.Host{
+		ID:              hostID,
+		Hostname:        "old-lastseen.local",
+		Status:          "online",
+		IntervalSeconds: 30,
+		LastSeen:        now.Add(-5 * time.Minute),
+	})
+
+	monitor.evaluateHostAgents(now)
+
+	snapshot := monitor.state.GetSnapshot()
+	connKey := hostConnectionPrefix + hostID
+	if healthy, ok := snapshot.ConnectionHealth[connKey]; !ok || healthy {
+		t.Fatalf("expected connection health false for old LastSeen, got %v (exists=%v)", healthy, ok)
+	}
+
+	for _, host := range snapshot.Hosts {
+		if host.ID == hostID && host.Status != "offline" {
+			t.Errorf("expected host status offline for old LastSeen, got %q", host.Status)
+		}
+	}
+}
+
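+// The two tests below repeat the healthy/unhealthy checks with alertManager
+// left nil, purely to verify that evaluateHostAgents guards against a nil
+// alert manager rather than panicking.
+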
hostID := "host-old-lastseen" + now := time.Now() + // IntervalSeconds = 30, window = 30s * 4 = 120s + // LastSeen = 5 minutes ago, should be unhealthy + monitor.state.UpsertHost(models.Host{ + ID: hostID, + Hostname: "old-lastseen.local", + Status: "online", + IntervalSeconds: 30, + LastSeen: now.Add(-5 * time.Minute), + }) + + monitor.evaluateHostAgents(now) + + snapshot := monitor.state.GetSnapshot() + connKey := hostConnectionPrefix + hostID + if healthy, ok := snapshot.ConnectionHealth[connKey]; !ok || healthy { + t.Fatalf("expected connection health false for old LastSeen, got %v (exists=%v)", healthy, ok) + } + + for _, host := range snapshot.Hosts { + if host.ID == hostID && host.Status != "offline" { + t.Errorf("expected host status offline for old LastSeen, got %q", host.Status) + } + } +} + +func TestEvaluateHostAgentsNilAlertManagerOnline(t *testing.T) { + monitor := &Monitor{ + state: models.NewState(), + alertManager: nil, // No alert manager + config: &config.Config{}, + } + + hostID := "host-nil-am-online" + monitor.state.UpsertHost(models.Host{ + ID: hostID, + Hostname: "nil-am-online.local", + Status: "unknown", + IntervalSeconds: 30, + LastSeen: time.Now(), + }) + + // Should not panic with nil alertManager + monitor.evaluateHostAgents(time.Now()) + + snapshot := monitor.state.GetSnapshot() + connKey := hostConnectionPrefix + hostID + if healthy, ok := snapshot.ConnectionHealth[connKey]; !ok || !healthy { + t.Fatalf("expected connection health true, got %v (exists=%v)", healthy, ok) + } + + for _, host := range snapshot.Hosts { + if host.ID == hostID && host.Status != "online" { + t.Errorf("expected host status online, got %q", host.Status) + } + } +} + +func TestEvaluateHostAgentsNilAlertManagerOffline(t *testing.T) { + monitor := &Monitor{ + state: models.NewState(), + alertManager: nil, // No alert manager + config: &config.Config{}, + } + + hostID := "host-nil-am-offline" + monitor.state.UpsertHost(models.Host{ + ID: hostID, + Hostname: "nil-am-offline.local", + Status: "online", + IntervalSeconds: 30, + LastSeen: time.Time{}, // Zero time - unhealthy + }) + + // Should not panic with nil alertManager + monitor.evaluateHostAgents(time.Now()) + + snapshot := monitor.state.GetSnapshot() + connKey := hostConnectionPrefix + hostID + if healthy, ok := snapshot.ConnectionHealth[connKey]; !ok || healthy { + t.Fatalf("expected connection health false, got %v (exists=%v)", healthy, ok) + } + + for _, host := range snapshot.Hosts { + if host.ID == hostID && host.Status != "offline" { + t.Errorf("expected host status offline, got %q", host.Status) + } + } +} diff --git a/internal/monitoring/temperature_test.go b/internal/monitoring/temperature_test.go index 9b9bcabb9..1065808ca 100644 --- a/internal/monitoring/temperature_test.go +++ b/internal/monitoring/temperature_test.go @@ -2359,6 +2359,246 @@ func TestHandleProxyFailure_PlainError_TriggersDisablePath(t *testing.T) { } } +// ============================================================================= +// Tests for handleProxyHostFailure +// ============================================================================= + +func TestHandleProxyHostFailure_EmptyHost(t *testing.T) { + tc := &TemperatureCollector{ + proxyHostStates: make(map[string]*proxyHostState), + } + + tc.handleProxyHostFailure("", fmt.Errorf("some error")) + + tc.proxyMu.Lock() + defer tc.proxyMu.Unlock() + + if len(tc.proxyHostStates) != 0 { + t.Errorf("expected no state change for empty host, got %d entries", len(tc.proxyHostStates)) + } +} + +func 
+func TestHandleProxyHostFailure_EmptyHost(t *testing.T) {
+	tc := &TemperatureCollector{
+		proxyHostStates: make(map[string]*proxyHostState),
+	}
+
+	tc.handleProxyHostFailure("", fmt.Errorf("some error"))
+
+	tc.proxyMu.Lock()
+	defer tc.proxyMu.Unlock()
+
+	if len(tc.proxyHostStates) != 0 {
+		t.Errorf("expected no state change for empty host, got %d entries", len(tc.proxyHostStates))
+	}
+}
+
+func TestHandleProxyHostFailure_WhitespaceOnlyHost(t *testing.T) {
+	tc := &TemperatureCollector{
+		proxyHostStates: make(map[string]*proxyHostState),
+	}
+
+	tc.handleProxyHostFailure(" ", fmt.Errorf("some error"))
+
+	tc.proxyMu.Lock()
+	defer tc.proxyMu.Unlock()
+
+	if len(tc.proxyHostStates) != 0 {
+		t.Errorf("expected no state change for whitespace-only host, got %d entries", len(tc.proxyHostStates))
+	}
+}
+
+func TestHandleProxyHostFailure_FirstFailureCreatesState(t *testing.T) {
+	tc := &TemperatureCollector{
+		proxyHostStates: make(map[string]*proxyHostState),
+	}
+
+	tc.handleProxyHostFailure("192.168.1.100", fmt.Errorf("connection refused"))
+
+	tc.proxyMu.Lock()
+	defer tc.proxyMu.Unlock()
+
+	state, exists := tc.proxyHostStates["192.168.1.100"]
+	if !exists {
+		t.Fatal("expected host state to be created")
+	}
+	if state.failures != 1 {
+		t.Errorf("expected failures to be 1, got %d", state.failures)
+	}
+	if state.lastError != "connection refused" {
+		t.Errorf("expected lastError to be 'connection refused', got %q", state.lastError)
+	}
+	if !state.cooldownUntil.IsZero() {
+		t.Errorf("expected cooldownUntil to be zero (threshold not reached), got %s", state.cooldownUntil)
+	}
+}
+
+func TestHandleProxyHostFailure_SubsequentFailuresIncrement(t *testing.T) {
+	tc := &TemperatureCollector{
+		proxyHostStates: map[string]*proxyHostState{
+			"192.168.1.100": {
+				failures:  1,
+				lastError: "first error",
+			},
+		},
+	}
+
+	tc.handleProxyHostFailure("192.168.1.100", fmt.Errorf("second error"))
+
+	tc.proxyMu.Lock()
+	defer tc.proxyMu.Unlock()
+
+	state := tc.proxyHostStates["192.168.1.100"]
+	if state.failures != 2 {
+		t.Errorf("expected failures to be 2, got %d", state.failures)
+	}
+	if state.lastError != "second error" {
+		t.Errorf("expected lastError to be 'second error', got %q", state.lastError)
+	}
+	if !state.cooldownUntil.IsZero() {
+		t.Errorf("expected cooldownUntil to remain zero (threshold not reached), got %s", state.cooldownUntil)
+	}
+}
+
+func TestHandleProxyHostFailure_ReachesThresholdSetsCooldown(t *testing.T) {
+	tc := &TemperatureCollector{
+		proxyHostStates: map[string]*proxyHostState{
+			"192.168.1.100": {
+				failures:  proxyFailureThreshold - 1, // one failure away
+				lastError: "previous error",
+			},
+		},
+	}
+
+	before := time.Now()
+	tc.handleProxyHostFailure("192.168.1.100", fmt.Errorf("final error"))
+
+	tc.proxyMu.Lock()
+	defer tc.proxyMu.Unlock()
+
+	state := tc.proxyHostStates["192.168.1.100"]
+	// failures should be reset to 0 after reaching threshold
+	if state.failures != 0 {
+		t.Errorf("expected failures to be reset to 0 after reaching threshold, got %d", state.failures)
+	}
+	if state.lastError != "final error" {
+		t.Errorf("expected lastError to be 'final error', got %q", state.lastError)
+	}
+	// cooldownUntil should be set in the future
+	if !state.cooldownUntil.After(before) {
+		t.Errorf("expected cooldownUntil to be set in the future, got %s", state.cooldownUntil)
+	}
+	// cooldownUntil should be approximately proxyRetryInterval from now
+	expectedMin := before.Add(proxyRetryInterval - time.Second)
+	if state.cooldownUntil.Before(expectedMin) {
+		t.Errorf("expected cooldownUntil to be at least %s, got %s", expectedMin, state.cooldownUntil)
+	}
+}
+
fmt.Errorf(" timeout waiting for response "), + wantLastErr: "timeout waiting for response", + }, + { + name: "empty error message", + err: fmt.Errorf(""), + wantLastErr: "", + }, + { + name: "multiline error gets first part after trim", + err: fmt.Errorf("network error\ndetails here"), + wantLastErr: "network error\ndetails here", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + tc := &TemperatureCollector{ + proxyHostStates: make(map[string]*proxyHostState), + } + + tc.handleProxyHostFailure("192.168.1.100", tt.err) + + tc.proxyMu.Lock() + state := tc.proxyHostStates["192.168.1.100"] + tc.proxyMu.Unlock() + + if state.lastError != tt.wantLastErr { + t.Errorf("expected lastError to be %q, got %q", tt.wantLastErr, state.lastError) + } + }) + } +} + +func TestHandleProxyHostFailure_NilStateInMap(t *testing.T) { + tc := &TemperatureCollector{ + proxyHostStates: map[string]*proxyHostState{ + "192.168.1.100": nil, // nil state in map + }, + } + + tc.handleProxyHostFailure("192.168.1.100", fmt.Errorf("some error")) + + tc.proxyMu.Lock() + defer tc.proxyMu.Unlock() + + state := tc.proxyHostStates["192.168.1.100"] + if state == nil { + t.Fatal("expected new state to be created for nil entry") + } + if state.failures != 1 { + t.Errorf("expected failures to be 1, got %d", state.failures) + } +} + +func TestHandleProxyHostFailure_TrimsHostWhitespace(t *testing.T) { + tc := &TemperatureCollector{ + proxyHostStates: make(map[string]*proxyHostState), + } + + tc.handleProxyHostFailure(" 192.168.1.100 ", fmt.Errorf("some error")) + + tc.proxyMu.Lock() + defer tc.proxyMu.Unlock() + + // State should be stored under trimmed key + if _, exists := tc.proxyHostStates["192.168.1.100"]; !exists { + t.Error("expected state to be stored under trimmed host key") + } + if _, exists := tc.proxyHostStates[" 192.168.1.100 "]; exists { + t.Error("state should not be stored under untrimmed host key") + } +} + +func TestHandleProxyHostFailure_MultipleHostsIndependent(t *testing.T) { + tc := &TemperatureCollector{ + proxyHostStates: make(map[string]*proxyHostState), + } + + // First host gets multiple failures + tc.handleProxyHostFailure("192.168.1.100", fmt.Errorf("error 1")) + tc.handleProxyHostFailure("192.168.1.100", fmt.Errorf("error 2")) + + // Second host gets one failure + tc.handleProxyHostFailure("192.168.1.101", fmt.Errorf("different error")) + + tc.proxyMu.Lock() + defer tc.proxyMu.Unlock() + + state1 := tc.proxyHostStates["192.168.1.100"] + state2 := tc.proxyHostStates["192.168.1.101"] + + if state1.failures != 2 { + t.Errorf("expected host 1 failures to be 2, got %d", state1.failures) + } + if state2.failures != 1 { + t.Errorf("expected host 2 failures to be 1, got %d", state2.failures) + } + if state1.lastError != "error 2" { + t.Errorf("expected host 1 lastError to be 'error 2', got %q", state1.lastError) + } + if state2.lastError != "different error" { + t.Errorf("expected host 2 lastError to be 'different error', got %q", state2.lastError) + } +} + // Helper functions for test setup func intPtr(i int) *int {