mirror of
https://github.com/rcourtman/Pulse.git
synced 2026-02-18 23:41:48 +01:00
feat: make pulse-sensor-proxy rate limits configurable

Add support for configuring rate limits via config.yaml to allow administrators to tune the proxy for different deployment sizes.

Changes:
- Add RateLimitConfig struct to config.go with per_peer_interval_ms and per_peer_burst
- Update newRateLimiter() to accept an optional RateLimitConfig parameter
- Load rate limit config from YAML and apply overrides to the defaults
- Update tests to pass nil for default behavior
- Add comprehensive config.example.yaml with documentation

Configuration examples:
- Small (1-3 nodes): 1000ms interval, burst 5 (default)
- Medium (4-10 nodes): 500ms interval, burst 10
- Large (10+ nodes): 250ms interval, burst 20

Defaults remain conservative (1 req/sec, burst 5) to support most deployments while allowing customization for larger environments.

Related: #46b8b8d08 (rate limit fix for multi-node support)

cmd/pulse-sensor-proxy/config.example.yaml (new file, 39 lines)
@@ -0,0 +1,39 @@
+# Pulse Sensor Proxy Configuration
+# This file is optional. If not provided, the proxy will use sensible defaults.
+
+# Network Configuration
+# Specify which networks are allowed to connect to the proxy
+# If not specified, the proxy will auto-detect host IP addresses
+allowed_source_subnets:
+  - "127.0.0.1/32"   # Localhost
+  - "192.168.0.0/24" # Local network
+
+# Peer Authorization
+# Specify which UIDs/GIDs are allowed to connect
+# Required when running Pulse in a container (use mapped UID/GID from container)
+allowed_peer_uids: [100999] # Container pulse user UID
+allowed_peer_gids: [100996] # Container pulse group GID
+
+# ID-Mapped Root Authentication
+# Allow connections from ID-mapped root users (for LXC containers)
+allow_idmapped_root: true
+allowed_idmap_users:
+  - root
+
+# Metrics Server
+# Address for Prometheus metrics endpoint
+metrics_address: "127.0.0.1:9127"
+
+# Rate Limiting (Optional)
+# Control how frequently peers can make requests to prevent abuse
+# Adjust these values based on your deployment size:
+# - Small (1-3 nodes): Use defaults (1000ms, burst 5)
+# - Medium (4-10 nodes): 500ms, burst 10
+# - Large (10+ nodes): 250ms, burst 20
+rate_limit:
+  per_peer_interval_ms: 1000 # Minimum milliseconds between requests per peer (1000ms = 1 qps)
+  per_peer_burst: 5 # Number of requests allowed in a burst (up to 5 requests back-to-back)
+
+# Default values if not specified:
+# per_peer_interval_ms: 1000 (1 second = 1 qps = 60 requests/min)
+# per_peer_burst: 5

cmd/pulse-sensor-proxy/config.go
@@ -11,6 +11,12 @@ import (
 	"gopkg.in/yaml.v3"
 )
 
+// RateLimitConfig holds rate limiting configuration
+type RateLimitConfig struct {
+	PerPeerIntervalMs int `yaml:"per_peer_interval_ms"` // Milliseconds between requests per peer
+	PerPeerBurst int `yaml:"per_peer_burst"` // Number of requests allowed in a burst
+}
+
 // Config holds proxy configuration
 type Config struct {
 	AllowedSourceSubnets []string `yaml:"allowed_source_subnets"`
@@ -20,6 +26,8 @@ type Config struct {
 	AllowedPeerUIDs []uint32 `yaml:"allowed_peer_uids"`
 	AllowedPeerGIDs []uint32 `yaml:"allowed_peer_gids"`
 	AllowedIDMapUsers []string `yaml:"allowed_idmap_users"`
+
+	RateLimit *RateLimitConfig `yaml:"rate_limit,omitempty"`
 }
 
 // loadConfig loads configuration from file and environment variables
@@ -150,6 +158,14 @@ func loadConfig(configPath string) (*Config, error) {
 		}
 	}
 
+	// Log rate limit configuration if provided
+	if cfg.RateLimit != nil {
+		log.Info().
+			Int("per_peer_interval_ms", cfg.RateLimit.PerPeerIntervalMs).
+			Int("per_peer_burst", cfg.RateLimit.PerPeerBurst).
+			Msg("Rate limit configuration loaded from config file")
+	}
+
 	return cfg, nil
 }
 
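
To illustrate how a rate_limit block in config.yaml maps onto the new structs, here is a small standalone sketch using gopkg.in/yaml.v3 (the same library imported above). The Config and RateLimitConfig definitions are trimmed copies reduced to the fields relevant here, and the values are the documented "medium (4-10 nodes)" tier; this is an illustration, not code from the commit.

package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

// Trimmed copies of the proxy's config types, reduced to the rate limit fields.
type RateLimitConfig struct {
	PerPeerIntervalMs int `yaml:"per_peer_interval_ms"`
	PerPeerBurst      int `yaml:"per_peer_burst"`
}

type Config struct {
	RateLimit *RateLimitConfig `yaml:"rate_limit,omitempty"`
}

func main() {
	// Medium deployment (4-10 nodes): 500ms interval, burst 10.
	raw := []byte("rate_limit:\n  per_peer_interval_ms: 500\n  per_peer_burst: 10\n")

	var cfg Config
	if err := yaml.Unmarshal(raw, &cfg); err != nil {
		panic(err)
	}

	// If the rate_limit block is omitted entirely, cfg.RateLimit stays nil
	// and newRateLimiter falls back to the built-in defaults.
	fmt.Println(cfg.RateLimit.PerPeerIntervalMs, cfg.RateLimit.PerPeerBurst) // 500 10
}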

@@ -351,7 +351,7 @@ func runProxy() {
 		socketPath: socketPath,
 		sshKeyPath: sshKeyPath,
 		knownHosts: knownHostsManager,
-		rateLimiter: newRateLimiter(metrics),
+		rateLimiter: newRateLimiter(metrics, cfg.RateLimit),
 		nodeGate: newNodeGate(),
 		config: cfg,
 		metrics: metrics,

@@ -50,14 +50,30 @@ var (
 )
 
 // newRateLimiter creates a new rate limiter with cleanup loop
-func newRateLimiter(metrics *ProxyMetrics) *rateLimiter {
+// If rateLimitCfg is provided, it overrides the default rate limit settings
+func newRateLimiter(metrics *ProxyMetrics, rateLimitCfg *RateLimitConfig) *rateLimiter {
+	// Use defaults
+	perPeerLimit := defaultPerPeerLimit
+	perPeerBurst := defaultPerPeerBurst
+
+	// Override with config if provided
+	if rateLimitCfg != nil {
+		if rateLimitCfg.PerPeerIntervalMs > 0 {
+			interval := time.Duration(rateLimitCfg.PerPeerIntervalMs) * time.Millisecond
+			perPeerLimit = rate.Every(interval)
+		}
+		if rateLimitCfg.PerPeerBurst > 0 {
+			perPeerBurst = rateLimitCfg.PerPeerBurst
+		}
+	}
+
 	rl := &rateLimiter{
 		entries: make(map[peerID]*limiterEntry),
 		quitChan: make(chan struct{}),
 		globalSem: make(chan struct{}, defaultGlobalConcurrency),
 		policy: limiterPolicy{
-			perPeerLimit: defaultPerPeerLimit,
-			perPeerBurst: defaultPerPeerBurst,
+			perPeerLimit: perPeerLimit,
+			perPeerBurst: perPeerBurst,
 			perPeerConcurrency: defaultPerPeerConcurrency,
 			globalConcurrency: defaultGlobalConcurrency,
 			penaltyDuration: defaultPenaltyDuration,
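
To make the override arithmetic concrete, the standalone sketch below (assuming golang.org/x/time/rate, the package rate.Every above comes from) shows what the "large deployment" values of 250ms / burst 20 mean for a single peer: a fresh limiter admits the full burst back-to-back, then refills at one request per 250ms, roughly 4 requests per second sustained. This is an illustration, not code from the commit.

package main

import (
	"fmt"
	"time"

	"golang.org/x/time/rate"
)

func main() {
	// "Large (10+ nodes)" values from the config guidance.
	interval := 250 * time.Millisecond
	burst := 20

	// Roughly what the per-peer policy translates to for a single peer.
	limiter := rate.NewLimiter(rate.Every(interval), burst)

	// A fresh limiter allows the whole burst immediately...
	allowed := 0
	for i := 0; i < burst+5; i++ {
		if limiter.Allow() {
			allowed++
		}
	}
	fmt.Println("allowed immediately:", allowed) // 20

	// ...then admits roughly one more request per interval.
	time.Sleep(interval)
	fmt.Println("after one interval:", limiter.Allow()) // true
}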

@@ -7,7 +7,7 @@ import (
 
 func TestRateLimiterPenalizeMetrics(t *testing.T) {
 	metrics := NewProxyMetrics("test")
-	rl := newRateLimiter(metrics)
+	rl := newRateLimiter(metrics, nil)
 	rl.policy.penaltyDuration = 10 * time.Millisecond
 
 	start := time.Now()
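
A companion test for the override path could look like the sketch below. It is hypothetical, not part of this commit: it assumes it lives in the same test file (so the unexported policy fields are reachable, as in the existing test) and that the file imports golang.org/x/time/rate alongside time.

func TestRateLimiterConfigOverride(t *testing.T) {
	metrics := NewProxyMetrics("test")

	// Medium-deployment values from config.example.yaml: 500ms interval, burst 10.
	rl := newRateLimiter(metrics, &RateLimitConfig{PerPeerIntervalMs: 500, PerPeerBurst: 10})

	if rl.policy.perPeerBurst != 10 {
		t.Fatalf("expected per-peer burst 10, got %d", rl.policy.perPeerBurst)
	}
	if want := rate.Every(500 * time.Millisecond); rl.policy.perPeerLimit != want {
		t.Fatalf("expected per-peer limit %v, got %v", want, rl.policy.perPeerLimit)
	}
}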