mirror of https://github.com/rcourtman/Pulse.git, synced 2026-02-18 00:17:39 +01:00
test: Add tests for monitor helper and metrics functions
- clearGuestMetadataCache: nil safety, cache clearing, key isolation
- shouldRunBackupPoll: interval-based, cycle-based, config validation
- storeNodeLastSuccess/lastNodeSuccessFor: store/retrieve, overwrite, missing keys

Improves coverage for backup polling logic and node metrics tracking.
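To run only the new tests locally, an invocation along these lines should work (standard go test flags; the package path is taken from the new test file added below):

go test ./internal/monitoring/ -run 'TestClearGuestMetadataCache|TestShouldRunBackupPoll|TestStoreNodeLastSuccess|TestLastNodeSuccessFor' -v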
@@ -1215,6 +1215,114 @@ func newDeterministicRng(seed int64) *rand.Rand {
	return rand.New(rand.NewSource(seed))
}

func TestClearGuestMetadataCache(t *testing.T) {
	t.Parallel()

	t.Run("nil monitor is safe", func(t *testing.T) {
		t.Parallel()

		var m *Monitor
		// Should not panic
		m.clearGuestMetadataCache("instance", "node", 100)
	})

	t.Run("nil cache map is safe", func(t *testing.T) {
		t.Parallel()

		m := &Monitor{guestMetadataCache: nil}
		// Should not panic
		m.clearGuestMetadataCache("instance", "node", 100)
	})

	t.Run("successfully clears existing entry", func(t *testing.T) {
		t.Parallel()

		key := guestMetadataCacheKey("instance", "node", 100)
		m := &Monitor{
			guestMetadataCache: map[string]guestMetadataCacheEntry{
				key: {
					ipAddresses:  []string{"192.168.1.10"},
					osName:       "Linux",
					osVersion:    "5.15",
					agentVersion: "1.0",
					fetchedAt:    time.Now(),
				},
			},
		}

		// Verify entry exists before clearing
		if _, ok := m.guestMetadataCache[key]; !ok {
			t.Fatal("entry should exist before clearing")
		}

		m.clearGuestMetadataCache("instance", "node", 100)

		// Verify entry was removed
		if _, ok := m.guestMetadataCache[key]; ok {
			t.Fatal("entry should not exist after clearing")
		}
	})

	t.Run("non-existent key does not cause error", func(t *testing.T) {
		t.Parallel()

		existingKey := guestMetadataCacheKey("other-instance", "other-node", 200)
		m := &Monitor{
			guestMetadataCache: map[string]guestMetadataCacheEntry{
				existingKey: {
					ipAddresses: []string{"10.0.0.5"},
					fetchedAt:   time.Now(),
				},
			},
		}

		// Clear a key that doesn't exist - should not panic or error
		m.clearGuestMetadataCache("instance", "node", 100)

		// Verify existing entry is still there
		if _, ok := m.guestMetadataCache[existingKey]; !ok {
			t.Fatal("existing entry should not be affected")
		}
	})

	t.Run("only clears specified key, other entries remain", func(t *testing.T) {
		t.Parallel()

		key1 := guestMetadataCacheKey("instance1", "node1", 100)
		key2 := guestMetadataCacheKey("instance2", "node2", 200)
		key3 := guestMetadataCacheKey("instance1", "node1", 300)

		m := &Monitor{
			guestMetadataCache: map[string]guestMetadataCacheEntry{
				key1: {ipAddresses: []string{"192.168.1.10"}, fetchedAt: time.Now()},
				key2: {ipAddresses: []string{"192.168.1.20"}, fetchedAt: time.Now()},
				key3: {ipAddresses: []string{"192.168.1.30"}, fetchedAt: time.Now()},
			},
		}

		// Clear only key2
		m.clearGuestMetadataCache("instance2", "node2", 200)

		// Verify key2 was removed
		if _, ok := m.guestMetadataCache[key2]; ok {
			t.Fatal("key2 should be removed")
		}

		// Verify key1 and key3 still exist
		if _, ok := m.guestMetadataCache[key1]; !ok {
			t.Fatal("key1 should still exist")
		}
		if _, ok := m.guestMetadataCache[key3]; !ok {
			t.Fatal("key3 should still exist")
		}

		// Verify map size
		if len(m.guestMetadataCache) != 2 {
			t.Fatalf("expected 2 entries remaining, got %d", len(m.guestMetadataCache))
		}
	})
}

func TestReleaseGuestMetadataSlot(t *testing.T) {
	t.Parallel()
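For reference, this is roughly all that TestClearGuestMetadataCache above demands of the helper. A minimal sketch consistent with those assertions, not the repository's implementation: it reuses the package's existing Monitor and guestMetadataCacheKey, the type of the numeric vmid parameter is assumed, and any locking the real method performs is omitted.

func (m *Monitor) clearGuestMetadataCache(instance, node string, vmid int) {
	if m == nil {
		return // "nil monitor is safe": a nil receiver must be a no-op
	}
	// "nil cache map is safe": delete on a nil map is already a no-op in Go,
	// so removing the single matching key needs no further guard.
	delete(m.guestMetadataCache, guestMetadataCacheKey(instance, node, vmid))
}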
@@ -1,6 +1,9 @@
 package monitoring
 
-import "testing"
+import (
+	"testing"
+	"time"
+)
 
 func TestNormalizeLabel(t *testing.T) {
 	tests := []struct {
@@ -369,3 +372,117 @@ func TestMakeNodeMetricKey(t *testing.T) {
		})
	}
}

func TestStoreNodeLastSuccess(t *testing.T) {
	t.Run("stores timestamp correctly", func(t *testing.T) {
		pm := &PollMetrics{
			nodeLastSuccessByKey: make(map[nodeMetricKey]time.Time),
		}
		ts := time.Date(2025, 1, 15, 10, 30, 0, 0, time.UTC)

		pm.storeNodeLastSuccess("proxmox", "server1", "node1", ts)

		key := makeNodeMetricKey("proxmox", "server1", "node1")
		got, ok := pm.nodeLastSuccessByKey[key]
		if !ok {
			t.Fatal("expected key to exist in map")
		}
		if !got.Equal(ts) {
			t.Errorf("stored timestamp = %v, want %v", got, ts)
		}
	})

	t.Run("overwrites existing value", func(t *testing.T) {
		pm := &PollMetrics{
			nodeLastSuccessByKey: make(map[nodeMetricKey]time.Time),
		}
		ts1 := time.Date(2025, 1, 15, 10, 30, 0, 0, time.UTC)
		ts2 := time.Date(2025, 1, 15, 11, 45, 0, 0, time.UTC)

		pm.storeNodeLastSuccess("proxmox", "server1", "node1", ts1)
		pm.storeNodeLastSuccess("proxmox", "server1", "node1", ts2)

		key := makeNodeMetricKey("proxmox", "server1", "node1")
		got := pm.nodeLastSuccessByKey[key]
		if !got.Equal(ts2) {
			t.Errorf("stored timestamp = %v, want %v", got, ts2)
		}
	})

	t.Run("multiple distinct keys stored independently", func(t *testing.T) {
		pm := &PollMetrics{
			nodeLastSuccessByKey: make(map[nodeMetricKey]time.Time),
		}
		ts1 := time.Date(2025, 1, 15, 10, 0, 0, 0, time.UTC)
		ts2 := time.Date(2025, 1, 15, 11, 0, 0, 0, time.UTC)
		ts3 := time.Date(2025, 1, 15, 12, 0, 0, 0, time.UTC)

		pm.storeNodeLastSuccess("proxmox", "server1", "node1", ts1)
		pm.storeNodeLastSuccess("proxmox", "server1", "node2", ts2)
		pm.storeNodeLastSuccess("docker", "prod", "host1", ts3)

		key1 := makeNodeMetricKey("proxmox", "server1", "node1")
		key2 := makeNodeMetricKey("proxmox", "server1", "node2")
		key3 := makeNodeMetricKey("docker", "prod", "host1")

		if got := pm.nodeLastSuccessByKey[key1]; !got.Equal(ts1) {
			t.Errorf("key1 timestamp = %v, want %v", got, ts1)
		}
		if got := pm.nodeLastSuccessByKey[key2]; !got.Equal(ts2) {
			t.Errorf("key2 timestamp = %v, want %v", got, ts2)
		}
		if got := pm.nodeLastSuccessByKey[key3]; !got.Equal(ts3) {
			t.Errorf("key3 timestamp = %v, want %v", got, ts3)
		}
	})
}

func TestLastNodeSuccessFor(t *testing.T) {
	t.Run("returns time and true for existing key", func(t *testing.T) {
		pm := &PollMetrics{
			nodeLastSuccessByKey: make(map[nodeMetricKey]time.Time),
		}
		ts := time.Date(2025, 1, 15, 10, 30, 0, 0, time.UTC)
		key := makeNodeMetricKey("proxmox", "server1", "node1")
		pm.nodeLastSuccessByKey[key] = ts

		got, ok := pm.lastNodeSuccessFor("proxmox", "server1", "node1")
		if !ok {
			t.Error("expected ok to be true")
		}
		if !got.Equal(ts) {
			t.Errorf("returned timestamp = %v, want %v", got, ts)
		}
	})

	t.Run("returns zero time and false for non-existent key", func(t *testing.T) {
		pm := &PollMetrics{
			nodeLastSuccessByKey: make(map[nodeMetricKey]time.Time),
		}

		got, ok := pm.lastNodeSuccessFor("proxmox", "server1", "nonexistent")
		if ok {
			t.Error("expected ok to be false")
		}
		if !got.IsZero() {
			t.Errorf("expected zero time, got %v", got)
		}
	})

	t.Run("retrieves correct value after store", func(t *testing.T) {
		pm := &PollMetrics{
			nodeLastSuccessByKey: make(map[nodeMetricKey]time.Time),
		}
		ts := time.Date(2025, 1, 15, 14, 0, 0, 0, time.UTC)

		pm.storeNodeLastSuccess("docker", "prod", "worker1", ts)

		got, ok := pm.lastNodeSuccessFor("docker", "prod", "worker1")
		if !ok {
			t.Error("expected ok to be true")
		}
		if !got.Equal(ts) {
			t.Errorf("returned timestamp = %v, want %v", got, ts)
		}
	})
}
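A minimal sketch of the two PollMetrics helpers exercised above, consistent with the assertions in TestStoreNodeLastSuccess and TestLastNodeSuccessFor; parameter names are assumptions, and any synchronization the real type uses is left out.

func (pm *PollMetrics) storeNodeLastSuccess(instanceType, instance, node string, ts time.Time) {
	key := makeNodeMetricKey(instanceType, instance, node)
	pm.nodeLastSuccessByKey[key] = ts // a later store for the same key overwrites the earlier value
}

func (pm *PollMetrics) lastNodeSuccessFor(instanceType, instance, node string) (time.Time, bool) {
	ts, ok := pm.nodeLastSuccessByKey[makeNodeMetricKey(instanceType, instance, node)]
	return ts, ok // zero time and false when the key was never stored
}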
internal/monitoring/monitor_backup_poll_test.go (new file, 220 lines)
@@ -0,0 +1,220 @@
package monitoring

import (
	"strings"
	"testing"
	"time"

	"github.com/rcourtman/pulse-go-rewrite/internal/config"
)

func TestShouldRunBackupPoll(t *testing.T) {
	now := time.Now()
	last := now.Add(-5 * time.Minute)

	tests := []struct {
		name           string
		monitor        *Monitor
		last           time.Time
		now            time.Time
		wantRun        bool
		wantReasonSub  string // substring to check in reason
		wantReturnLast bool   // true if returned time should equal last, false if should equal now
	}{
		{
			name: "nil monitor returns false",
			monitor: nil,
			last: last,
			now: now,
			wantRun: false,
			wantReasonSub: "configuration unavailable",
			wantReturnLast: true,
		},
		{
			name: "nil config returns false",
			monitor: &Monitor{config: nil},
			last: last,
			now: now,
			wantRun: false,
			wantReasonSub: "configuration unavailable",
			wantReturnLast: true,
		},
		{
			name: "backup polling disabled returns false",
			monitor: &Monitor{
				config: &config.Config{EnableBackupPolling: false},
			},
			last: last,
			now: now,
			wantRun: false,
			wantReasonSub: "backup polling globally disabled",
			wantReturnLast: true,
		},
		{
			name: "interval-based: before interval elapsed returns false",
			monitor: &Monitor{
				config: &config.Config{
					EnableBackupPolling: true,
					BackupPollingInterval: 10 * time.Minute,
				},
			},
			last: last, // 5 min ago, interval is 10 min
			now: now,
			wantRun: false,
			wantReasonSub: "next run scheduled for",
			wantReturnLast: true,
		},
		{
			name: "interval-based: after interval elapsed returns true",
			monitor: &Monitor{
				config: &config.Config{
					EnableBackupPolling: true,
					BackupPollingInterval: 3 * time.Minute,
				},
			},
			last: last, // 5 min ago, interval is 3 min
			now: now,
			wantRun: true,
			wantReasonSub: "",
			wantReturnLast: false,
		},
		{
			name: "interval-based: last is zero (first run) returns true",
			monitor: &Monitor{
				config: &config.Config{
					EnableBackupPolling: true,
					BackupPollingInterval: 10 * time.Minute,
				},
			},
			last: time.Time{}, // zero time
			now: now,
			wantRun: true,
			wantReasonSub: "",
			wantReturnLast: false,
		},
		{
			name: "cycle-based: pollCounter=1 returns true",
			monitor: &Monitor{
				config: &config.Config{
					EnableBackupPolling: true,
					BackupPollingCycles: 10,
					BackupPollingInterval: 0,
				},
				pollCounter: 1,
			},
			last: last,
			now: now,
			wantRun: true,
			wantReasonSub: "",
			wantReturnLast: false,
		},
		{
			name: "cycle-based: pollCounter divisible by cycles returns true",
			monitor: &Monitor{
				config: &config.Config{
					EnableBackupPolling: true,
					BackupPollingCycles: 5,
					BackupPollingInterval: 0,
				},
				pollCounter: 15, // 15 % 5 == 0
			},
			last: last,
			now: now,
			wantRun: true,
			wantReasonSub: "",
			wantReturnLast: false,
		},
		{
			name: "cycle-based: pollCounter not divisible returns false",
			monitor: &Monitor{
				config: &config.Config{
					EnableBackupPolling: true,
					BackupPollingCycles: 5,
					BackupPollingInterval: 0,
				},
				pollCounter: 7, // 7 % 5 == 2, remaining = 3
			},
			last: last,
			now: now,
			wantRun: false,
			wantReasonSub: "next run in 3 polling cycles",
			wantReturnLast: true,
		},
		{
			name: "default cycles (10) when BackupPollingCycles is 0",
			monitor: &Monitor{
				config: &config.Config{
					EnableBackupPolling: true,
					BackupPollingCycles: 0, // should default to 10
					BackupPollingInterval: 0,
				},
				pollCounter: 10, // 10 % 10 == 0
			},
			last: last,
			now: now,
			wantRun: true,
			wantReasonSub: "",
			wantReturnLast: false,
		},
		{
			name: "default cycles (10) when BackupPollingCycles is negative",
			monitor: &Monitor{
				config: &config.Config{
					EnableBackupPolling: true,
					BackupPollingCycles: -5, // should default to 10
					BackupPollingInterval: 0,
				},
				pollCounter: 20, // 20 % 10 == 0
			},
			last: last,
			now: now,
			wantRun: true,
			wantReasonSub: "",
			wantReturnLast: false,
		},
		{
			name: "default cycles (10) not divisible returns false with correct remaining",
			monitor: &Monitor{
				config: &config.Config{
					EnableBackupPolling: true,
					BackupPollingCycles: 0, // defaults to 10
					BackupPollingInterval: 0,
				},
				pollCounter: 3, // 3 % 10 == 3, remaining = 7
			},
			last: last,
			now: now,
			wantRun: false,
			wantReasonSub: "next run in 7 polling cycles",
			wantReturnLast: true,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			gotRun, gotReason, gotTime := tt.monitor.shouldRunBackupPoll(tt.last, tt.now)

			if gotRun != tt.wantRun {
				t.Errorf("shouldRunBackupPoll() run = %v, want %v", gotRun, tt.wantRun)
			}

			if tt.wantReasonSub != "" && !strings.Contains(gotReason, tt.wantReasonSub) {
				t.Errorf("shouldRunBackupPoll() reason = %q, want substring %q", gotReason, tt.wantReasonSub)
			}

			if tt.wantReasonSub == "" && gotReason != "" {
				t.Errorf("shouldRunBackupPoll() reason = %q, want empty", gotReason)
			}

			if tt.wantReturnLast {
				if !gotTime.Equal(tt.last) {
					t.Errorf("shouldRunBackupPoll() time = %v, want last (%v)", gotTime, tt.last)
				}
			} else {
				if !gotTime.Equal(tt.now) {
					t.Errorf("shouldRunBackupPoll() time = %v, want now (%v)", gotTime, tt.now)
				}
			}
		})
	}
}
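For reference, decision logic that would satisfy the table above looks roughly like this. It is a sketch inferred from the test expectations, not the method's actual source: the exact reason wording (the tests only check substrings), the handling of the very first poll (pollCounter <= 1), the field types, and the fmt dependency are all assumptions.

func (m *Monitor) shouldRunBackupPoll(last, now time.Time) (bool, string, time.Time) {
	if m == nil || m.config == nil {
		return false, "configuration unavailable", last
	}
	if !m.config.EnableBackupPolling {
		return false, "backup polling globally disabled", last
	}

	// Interval-based scheduling takes precedence when an interval is configured.
	if interval := m.config.BackupPollingInterval; interval > 0 {
		if last.IsZero() || !now.Before(last.Add(interval)) {
			return true, "", now
		}
		return false, fmt.Sprintf("next run scheduled for %s", last.Add(interval).Format(time.RFC3339)), last
	}

	// Otherwise fall back to cycle counting; non-positive values default to every 10 cycles.
	cycles := m.config.BackupPollingCycles
	if cycles <= 0 {
		cycles = 10
	}
	if m.pollCounter <= 1 || m.pollCounter%cycles == 0 {
		return true, "", now
	}
	return false, fmt.Sprintf("next run in %d polling cycles", cycles-m.pollCounter%cycles), last
}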