WIP: Save current state - security subsystems, migrations, logging

This commit is contained in:
Fimeg
2025-12-16 14:19:59 -05:00
parent f792ab23c7
commit f7c8d23c5d
89 changed files with 8884 additions and 1394 deletions

View File

@@ -0,0 +1,104 @@
package orchestrator
import (
"crypto/ed25519"
"fmt"
"log"
"github.com/Fimeg/RedFlag/aggregator-agent/internal/client"
"github.com/Fimeg/RedFlag/aggregator-agent/internal/config"
"github.com/Fimeg/RedFlag/aggregator-agent/internal/crypto"
"github.com/Fimeg/RedFlag/aggregator-agent/internal/logging"
"github.com/google/uuid"
)
// CommandHandler handles command processing with signature verification.
type CommandHandler struct {
	verifier        *crypto.CommandVerifier // performs signature checks on incoming commands
	securityLogger  *logging.SecurityLogger // audit trail for verification successes/failures
	serverPublicKey ed25519.PublicKey       // server signing key; only populated when signing is enabled (see NewCommandHandler)
	logger          *log.Logger             // general operational logging
}
// CommandSigningConfig holds configuration for command signing.
//
// EnforcementMode controls what happens on verification failure:
// "strict" rejects, "warning" logs and allows, "disabled" skips verification.
type CommandSigningConfig struct {
	Enabled         bool   `json:"enabled" env:"REDFLAG_AGENT_COMMAND_SIGNING_ENABLED" default:"true"`
	EnforcementMode string `json:"enforcement_mode" env:"REDFLAG_AGENT_COMMAND_ENFORCEMENT_MODE" default:"strict"` // strict, warning, disabled
}
// NewCommandHandler creates a new command handler.
//
// When command signing is enabled in cfg, the server's public key is loaded
// from the local cache, falling back to fetching it from the server; an error
// is returned if neither source yields a key.
func NewCommandHandler(cfg *config.Config, securityLogger *logging.SecurityLogger, logger *log.Logger) (*CommandHandler, error) {
	h := &CommandHandler{
		verifier:       crypto.NewCommandVerifier(),
		securityLogger: securityLogger,
		logger:         logger,
	}

	// Without signing there is no key to load.
	if !cfg.CommandSigning.Enabled {
		return h, nil
	}

	// Prefer the cached key; fall back to fetching it from the server.
	key, err := crypto.LoadCachedPublicKey()
	if err != nil {
		if key, err = crypto.GetPublicKey(cfg.ServerURL); err != nil {
			return nil, fmt.Errorf("failed to load server public key: %w", err)
		}
	}
	h.serverPublicKey = key

	return h, nil
}
// ProcessCommand processes a command with signature verification.
//
// Behavior depends on the configured enforcement mode:
//   - "strict":  an unsigned or badly signed command is rejected with an error.
//   - "warning": verification failures are logged but the command is allowed.
//   - "disabled": verification is skipped entirely.
//
// agentID is currently unused but retained for interface stability.
func (h *CommandHandler) ProcessCommand(cmd client.CommandItem, cfg *config.Config, agentID uuid.UUID) error {
	// Named "signing" (not "config") to avoid shadowing the imported config package.
	signing := cfg.CommandSigning

	if !signing.Enabled {
		if cmd.Signature != "" {
			// Signing is disabled but command has signature - log info
			h.logger.Printf("[INFO] Command has signature but signing is disabled")
		}
		return nil
	}

	switch signing.EnforcementMode {
	case "strict":
		// Strict mode: a valid signature is mandatory.
		if cmd.Signature == "" {
			h.securityLogger.LogCommandVerificationFailure(cmd.ID, "missing signature")
			return fmt.Errorf("command verification failed: strict enforcement enabled but command not signed")
		}
		if err := h.verifier.VerifyCommand(cmd, h.serverPublicKey); err != nil {
			h.securityLogger.LogCommandVerificationFailure(cmd.ID, err.Error())
			return fmt.Errorf("command verification failed: %w", err)
		}
		h.securityLogger.LogCommandVerificationSuccess(cmd.ID)

	case "warning":
		// Warning mode: log failures but allow execution.
		if cmd.Signature == "" {
			h.logger.Printf("[WARNING] Command not signed but allowed in warning mode")
			break
		}
		if err := h.verifier.VerifyCommand(cmd, h.serverPublicKey); err != nil {
			h.logger.Printf("[WARNING] Command verification failed but allowed in warning mode: %v", err)
			h.securityLogger.LogCommandVerificationFailure(cmd.ID, err.Error())
		} else {
			h.securityLogger.LogCommandVerificationSuccess(cmd.ID)
		}

	case "disabled":
		// Verification explicitly skipped.

	default:
		// Previously an unrecognized mode silently skipped verification; a typo
		// in configuration could therefore disable security unnoticed. Surface it.
		h.logger.Printf("[WARNING] Unknown command enforcement mode %q; skipping verification", signing.EnforcementMode)
	}

	return nil
}
// UpdateServerPublicKey updates the cached server public key.
//
// The key is fetched from serverURL (and re-cached by the crypto helper);
// on success it replaces the handler's in-memory copy.
func (h *CommandHandler) UpdateServerPublicKey(serverURL string) error {
	key, err := crypto.FetchAndCacheServerPublicKey(serverURL)
	if err != nil {
		return fmt.Errorf("failed to update server public key: %w", err)
	}

	h.serverPublicKey = key
	h.logger.Printf("Server public key updated successfully")
	return nil
}

View File

@@ -9,6 +9,8 @@ import (
"github.com/Fimeg/RedFlag/aggregator-agent/internal/circuitbreaker"
"github.com/Fimeg/RedFlag/aggregator-agent/internal/client"
"github.com/Fimeg/RedFlag/aggregator-agent/internal/event"
"github.com/Fimeg/RedFlag/aggregator-agent/internal/models"
)
// Scanner represents a generic update scanner
@@ -42,8 +44,9 @@ type ScanResult struct {
// Orchestrator manages and coordinates multiple scanners.
//
// NOTE(review): as rendered, the struct declared the scanners field twice
// (old and new diff lines collapsed together), which is a compile error in Go;
// kept a single declaration alongside the new eventBuffer field.
type Orchestrator struct {
	scanners    map[string]*ScannerConfig // registered scanners keyed by name
	eventBuffer *event.Buffer             // optional scan-lifecycle event buffer; may be nil
	mu          sync.RWMutex              // guards scanners
}
// NewOrchestrator creates a new scanner orchestrator
@@ -53,6 +56,14 @@ func NewOrchestrator() *Orchestrator {
}
}
// NewOrchestratorWithEvents creates a new scanner orchestrator with event buffering.
func NewOrchestratorWithEvents(buffer *event.Buffer) *Orchestrator {
	o := &Orchestrator{scanners: make(map[string]*ScannerConfig)}
	o.eventBuffer = buffer
	return o
}
// RegisterScanner adds a scanner to the orchestrator
func (o *Orchestrator) RegisterScanner(name string, scanner Scanner, cb *circuitbreaker.CircuitBreaker, timeout time.Duration, enabled bool) {
o.mu.Lock()
@@ -135,6 +146,27 @@ func (o *Orchestrator) executeScan(ctx context.Context, name string, cfg *Scanne
if !cfg.Enabled {
result.Status = "disabled"
log.Printf("[%s] Scanner disabled via configuration", name)
// Buffer disabled event if event buffer is available
if o.eventBuffer != nil {
event := &models.SystemEvent{
EventType: "agent_scan",
EventSubtype: "skipped",
Severity: "info",
Component: "scanner",
Message: fmt.Sprintf("Scanner %s is disabled via configuration", name),
Metadata: map[string]interface{}{
"scanner_name": name,
"status": "disabled",
"reason": "configuration",
},
CreatedAt: time.Now(),
}
if err := o.eventBuffer.BufferEvent(event); err != nil {
log.Printf("Warning: Failed to buffer scanner disabled event: %v", err)
}
}
return result
}
@@ -142,6 +174,27 @@ func (o *Orchestrator) executeScan(ctx context.Context, name string, cfg *Scanne
if !cfg.Scanner.IsAvailable() {
result.Status = "unavailable"
log.Printf("[%s] Scanner not available on this system", name)
// Buffer unavailable event if event buffer is available
if o.eventBuffer != nil {
event := &models.SystemEvent{
EventType: "agent_scan",
EventSubtype: "skipped",
Severity: "info",
Component: "scanner",
Message: fmt.Sprintf("Scanner %s is not available on this system", name),
Metadata: map[string]interface{}{
"scanner_name": name,
"status": "unavailable",
"reason": "system_incompatible",
},
CreatedAt: time.Now(),
}
if err := o.eventBuffer.BufferEvent(event); err != nil {
log.Printf("Warning: Failed to buffer scanner unavailable event: %v", err)
}
}
return result
}
@@ -185,12 +238,55 @@ func (o *Orchestrator) executeScan(ctx context.Context, name string, cfg *Scanne
result.Error = err
result.Status = "failed"
log.Printf("[%s] Scan failed: %v", name, err)
// Buffer event if event buffer is available
if o.eventBuffer != nil {
event := &models.SystemEvent{
EventType: "agent_scan",
EventSubtype: "failed",
Severity: "error",
Component: "scanner",
Message: fmt.Sprintf("Scanner %s failed: %v", name, err),
Metadata: map[string]interface{}{
"scanner_name": name,
"error_type": "scan_failed",
"error_details": err.Error(),
"duration_ms": result.Duration.Milliseconds(),
},
CreatedAt: time.Now(),
}
if err := o.eventBuffer.BufferEvent(event); err != nil {
log.Printf("Warning: Failed to buffer scanner failure event: %v", err)
}
}
return result
}
result.Updates = updates
result.Status = "success"
log.Printf("[%s] Scan completed: found %d updates (took %v)", name, len(updates), result.Duration)
// Buffer success event if event buffer is available
if o.eventBuffer != nil {
event := &models.SystemEvent{
EventType: "agent_scan",
EventSubtype: "completed",
Severity: "info",
Component: "scanner",
Message: fmt.Sprintf("Scanner %s completed successfully", name),
Metadata: map[string]interface{}{
"scanner_name": name,
"updates_found": len(updates),
"duration_ms": result.Duration.Milliseconds(),
"status": "success",
},
CreatedAt: time.Now(),
}
if err := o.eventBuffer.BufferEvent(event); err != nil {
log.Printf("Warning: Failed to buffer scanner success event: %v", err)
}
}
return result
}