fix: migration runner and scan logging fixes

- Fix migration conflicts and duplicate key errors
- Remove duplicate scan logging from agents
- Fix AgentHealth UI and Storage page triggers
- Prevent scans from appearing on wrong pages

Fixes duplicate key violations on fresh installs and
storage scans appearing on Updates page.
This commit is contained in:
Fimeg
2025-12-19 20:59:12 -05:00
parent 6b3ab6d6fc
commit 2da93e442e
8 changed files with 424 additions and 109 deletions

View File

@@ -906,11 +906,6 @@ func runAgent(cfg *config.Config) error {
log.Printf("Processing command: %s (%s)\n", cmd.Type, cmd.ID)
switch cmd.Type {
case "scan_updates":
if err := handleScanUpdatesV2(apiClient, cfg, ackTracker, scanOrchestrator, cmd.ID); err != nil {
log.Printf("Error scanning updates: %v\n", err)
}
case "scan_storage":
if err := handleScanStorage(apiClient, cfg, ackTracker, scanOrchestrator, cmd.ID); err != nil {
log.Printf("Error scanning storage: %v\n", err)
@@ -926,6 +921,26 @@ func runAgent(cfg *config.Config) error {
log.Printf("Error scanning Docker: %v\n", err)
}
case "scan_apt":
if err := handleScanAPT(apiClient, cfg, ackTracker, scanOrchestrator, cmd.ID); err != nil {
log.Printf("Error scanning APT: %v\n", err)
}
case "scan_dnf":
if err := handleScanDNF(apiClient, cfg, ackTracker, scanOrchestrator, cmd.ID); err != nil {
log.Printf("Error scanning DNF: %v\n", err)
}
case "scan_windows":
if err := handleScanWindows(apiClient, cfg, ackTracker, scanOrchestrator, cmd.ID); err != nil {
log.Printf("Error scanning Windows Updates: %v\n", err)
}
case "scan_winget":
if err := handleScanWinget(apiClient, cfg, ackTracker, scanOrchestrator, cmd.ID); err != nil {
log.Printf("Error scanning Winget: %v\n", err)
}
case "collect_specs":
log.Println("Spec collection not yet implemented")

View File

@@ -23,48 +23,6 @@ import (
"github.com/Fimeg/RedFlag/aggregator-agent/internal/orchestrator"
)
// handleScanUpdatesV2 scans all update subsystems (APT, DNF, Docker, Windows
// Update, Winget) in parallel. This is the orchestrator-based version
// introduced in v0.1.20.
//
// Collective history logging is intentionally disabled here: each subsystem
// handler records its own log entry, so emitting one here would duplicate
// scan entries in the unified history view.
func handleScanUpdatesV2(apiClient *client.Client, cfg *config.Config, ackTracker *acknowledgment.Tracker, orch *orchestrator.Orchestrator, commandID string) error {
	log.Println("Scanning for updates (parallel execution)...")

	started := time.Now()

	// Run every update scanner concurrently and collect the merged results.
	results, allUpdates := orch.ScanAll(context.Background())

	// Build the human-readable summary and append the elapsed time.
	stdout, _, _ := orchestrator.FormatScanSummary(results)
	stdout += fmt.Sprintf("\nScan completed in %.2f seconds\n", time.Since(started).Seconds())

	if len(allUpdates) == 0 {
		log.Println("No updates found")
		return nil
	}

	// Push the combined update list to the server.
	report := client.UpdateReport{
		CommandID: commandID,
		Timestamp: time.Now(),
		Updates:   allUpdates,
	}
	if err := apiClient.ReportUpdates(cfg.AgentID, report); err != nil {
		return fmt.Errorf("failed to report updates: %w", err)
	}
	log.Printf("✓ Reported %d updates to server\n", len(allUpdates))
	return nil
}
// handleScanStorage scans disk usage metrics only
func handleScanStorage(apiClient *client.Client, cfg *config.Config, ackTracker *acknowledgment.Tracker, orch *orchestrator.Orchestrator, commandID string) error {
log.Println("Scanning storage...")
@@ -80,8 +38,7 @@ func handleScanStorage(apiClient *client.Client, cfg *config.Config, ackTracker
// Format results
results := []orchestrator.ScanResult{result}
stdout, _, _ := orchestrator.FormatScanSummary(results)
// [REMOVED] stderr, exitCode unused after ReportLog removal
stdout, stderr, exitCode := orchestrator.FormatScanSummary(results)
duration := time.Since(startTime)
stdout += fmt.Sprintf("\nStorage scan completed in %.2f seconds\n", duration.Seconds())
@@ -92,8 +49,10 @@ func handleScanStorage(apiClient *client.Client, cfg *config.Config, ackTracker
// Report storage metrics to server using dedicated endpoint
// Use proper StorageMetricReport with clean field names
storageScanner := orchestrator.NewStorageScanner(cfg.AgentVersion)
var metrics []orchestrator.StorageMetric // Declare outside if block for ReportLog access
if storageScanner.IsAvailable() {
metrics, err := storageScanner.ScanStorage()
var err error
metrics, err = storageScanner.ScanStorage()
if err != nil {
return fmt.Errorf("failed to scan storage metrics: %w", err)
}
@@ -134,6 +93,29 @@ func handleScanStorage(apiClient *client.Client, cfg *config.Config, ackTracker
}
}
// Create history entry for unified view with proper formatting
logReport := client.LogReport{
CommandID: commandID,
Action: "scan_storage",
Result: map[bool]string{true: "success", false: "failure"}[exitCode == 0],
Stdout: stdout,
Stderr: stderr,
ExitCode: exitCode,
DurationSeconds: int(duration.Seconds()),
Metadata: map[string]string{
"subsystem_label": "Disk Usage",
"subsystem": "storage",
"metrics_count": fmt.Sprintf("%d", len(metrics)),
},
}
if err := reportLogWithAck(apiClient, cfg, ackTracker, logReport); err != nil {
log.Printf("[ERROR] [agent] [storage] report_log_failed: %v", err)
log.Printf("[HISTORY] [agent] [storage] report_log_failed error=\"%v\" timestamp=%s", err, time.Now().Format(time.RFC3339))
} else {
log.Printf("[INFO] [agent] [storage] history_log_created command_id=%s timestamp=%s", commandID, time.Now().Format(time.RFC3339))
log.Printf("[HISTORY] [agent] [scan_storage] log_created agent_id=%s command_id=%s result=%s timestamp=%s", cfg.AgentID, commandID, map[bool]string{true: "success", false: "failure"}[exitCode == 0], time.Now().Format(time.RFC3339))
}
return nil
}
@@ -152,8 +134,7 @@ func handleScanSystem(apiClient *client.Client, cfg *config.Config, ackTracker *
// Format results
results := []orchestrator.ScanResult{result}
stdout, _, _ := orchestrator.FormatScanSummary(results)
// [REMOVED] stderr, exitCode unused after ReportLog removal
stdout, stderr, exitCode := orchestrator.FormatScanSummary(results)
duration := time.Since(startTime)
stdout += fmt.Sprintf("\nSystem scan completed in %.2f seconds\n", duration.Seconds())
@@ -164,8 +145,10 @@ func handleScanSystem(apiClient *client.Client, cfg *config.Config, ackTracker *
// Report system metrics to server using dedicated endpoint
// Get system scanner and use proper interface
systemScanner := orchestrator.NewSystemScanner("unknown") // TODO: Get actual agent version
var metrics []orchestrator.SystemMetric // Declare outside if block for ReportLog access
if systemScanner.IsAvailable() {
metrics, err := systemScanner.ScanSystem()
var err error
metrics, err = systemScanner.ScanSystem()
if err != nil {
return fmt.Errorf("failed to scan system metrics: %w", err)
}
@@ -200,6 +183,29 @@ func handleScanSystem(apiClient *client.Client, cfg *config.Config, ackTracker *
}
}
// Create history entry for unified view with proper formatting
logReport := client.LogReport{
CommandID: commandID,
Action: "scan_system",
Result: map[bool]string{true: "success", false: "failure"}[exitCode == 0],
Stdout: stdout,
Stderr: stderr,
ExitCode: exitCode,
DurationSeconds: int(duration.Seconds()),
Metadata: map[string]string{
"subsystem_label": "System Metrics",
"subsystem": "system",
"metrics_count": fmt.Sprintf("%d", len(metrics)),
},
}
if err := reportLogWithAck(apiClient, cfg, ackTracker, logReport); err != nil {
log.Printf("[ERROR] [agent] [system] report_log_failed: %v", err)
log.Printf("[HISTORY] [agent] [system] report_log_failed error=\"%v\" timestamp=%s", err, time.Now().Format(time.RFC3339))
} else {
log.Printf("[INFO] [agent] [system] history_log_created command_id=%s timestamp=%s", commandID, time.Now().Format(time.RFC3339))
log.Printf("[HISTORY] [agent] [scan_system] log_created agent_id=%s command_id=%s result=%s timestamp=%s", cfg.AgentID, commandID, map[bool]string{true: "success", false: "failure"}[exitCode == 0], time.Now().Format(time.RFC3339))
}
return nil
}
@@ -218,8 +224,7 @@ func handleScanDocker(apiClient *client.Client, cfg *config.Config, ackTracker *
// Format results
results := []orchestrator.ScanResult{result}
stdout, _, _ := orchestrator.FormatScanSummary(results)
// [REMOVED] stderr, exitCode unused after ReportLog removal
stdout, stderr, exitCode := orchestrator.FormatScanSummary(results)
duration := time.Since(startTime)
stdout += fmt.Sprintf("\nDocker scan completed in %.2f seconds\n", duration.Seconds())
@@ -235,13 +240,16 @@ func handleScanDocker(apiClient *client.Client, cfg *config.Config, ackTracker *
}
defer dockerScanner.Close()
var images []orchestrator.DockerImage // Declare outside if block for ReportLog access
var updateCount int // Declare outside if block for ReportLog access
if dockerScanner.IsAvailable() {
images, err := dockerScanner.ScanDocker()
images, err = dockerScanner.ScanDocker()
if err != nil {
return fmt.Errorf("failed to scan Docker images: %w", err)
}
// Always report all Docker images (not just those with updates)
updateCount = 0 // Reset for counting
if len(images) > 0 {
// Convert DockerImage to DockerReportItem for API call
imageItems := make([]client.DockerReportItem, 0, len(images))
@@ -268,7 +276,6 @@ func handleScanDocker(apiClient *client.Client, cfg *config.Config, ackTracker *
return fmt.Errorf("failed to report Docker images: %w", err)
}
updateCount := 0
for _, image := range images {
if image.HasUpdate {
updateCount++
@@ -282,6 +289,286 @@ func handleScanDocker(apiClient *client.Client, cfg *config.Config, ackTracker *
log.Println("Docker not available on this system")
}
// Create history entry for unified view with proper formatting
logReport := client.LogReport{
CommandID: commandID,
Action: "scan_docker",
Result: map[bool]string{true: "success", false: "failure"}[exitCode == 0],
Stdout: stdout,
Stderr: stderr,
ExitCode: exitCode,
DurationSeconds: int(duration.Seconds()),
Metadata: map[string]string{
"subsystem_label": "Docker Images",
"subsystem": "docker",
"images_count": fmt.Sprintf("%d", len(images)),
"updates_found": fmt.Sprintf("%d", updateCount),
},
}
if err := reportLogWithAck(apiClient, cfg, ackTracker, logReport); err != nil {
log.Printf("[ERROR] [agent] [docker] report_log_failed: %v", err)
log.Printf("[HISTORY] [agent] [docker] report_log_failed error=\"%v\" timestamp=%s", err, time.Now().Format(time.RFC3339))
} else {
log.Printf("[INFO] [agent] [docker] history_log_created command_id=%s timestamp=%s", commandID, time.Now().Format(time.RFC3339))
log.Printf("[HISTORY] [agent] [scan_docker] log_created agent_id=%s command_id=%s result=%s timestamp=%s", cfg.AgentID, commandID, map[bool]string{true: "success", false: "failure"}[exitCode == 0], time.Now().Format(time.RFC3339))
}
return nil
}
// handleScanAPT scans APT package updates only.
//
// It runs the APT scanner through the orchestrator, reports any discovered
// updates to the server, and always records a history log entry (success or
// failure) tagged with the "apt" subsystem so the scan appears on the
// correct page in the UI.
func handleScanAPT(apiClient *client.Client, cfg *config.Config, ackTracker *acknowledgment.Tracker, orch *orchestrator.Orchestrator, commandID string) error {
	log.Println("Scanning APT packages...")
	ctx := context.Background()
	startTime := time.Now()

	// Execute APT scanner.
	result, err := orch.ScanSingle(ctx, "apt")
	if err != nil {
		return fmt.Errorf("failed to scan APT: %w", err)
	}

	// Format results and append timing information.
	results := []orchestrator.ScanResult{result}
	stdout, stderr, exitCode := orchestrator.FormatScanSummary(results)
	duration := time.Since(startTime)
	stdout += fmt.Sprintf("\nAPT scan completed in %.2f seconds\n", duration.Seconds())

	// Report APT updates to server if any were found.
	// Declared at function scope so the log metadata below can use the count.
	var updates []client.UpdateReportItem
	if result.Status == "success" && len(result.Updates) > 0 {
		updates = result.Updates
		report := client.UpdateReport{
			CommandID: commandID,
			Timestamp: time.Now(),
			Updates:   updates,
		}
		if err := apiClient.ReportUpdates(cfg.AgentID, report); err != nil {
			return fmt.Errorf("failed to report APT updates: %w", err)
		}
		log.Printf("[INFO] [agent] [apt] Successfully reported %d APT updates to server\n", len(updates))
	}

	// Derive the result string once instead of building a throwaway
	// map literal at each use site.
	resultStr := "failure"
	if exitCode == 0 {
		resultStr = "success"
	}

	// Create history entry for unified view with proper formatting.
	logReport := client.LogReport{
		CommandID:       commandID,
		Action:          "scan_apt",
		Result:          resultStr,
		Stdout:          stdout,
		Stderr:          stderr,
		ExitCode:        exitCode,
		DurationSeconds: int(duration.Seconds()),
		Metadata: map[string]string{
			"subsystem_label": "APT Packages",
			"subsystem":       "apt",
			"updates_found":   fmt.Sprintf("%d", len(updates)),
		},
	}
	if err := reportLogWithAck(apiClient, cfg, ackTracker, logReport); err != nil {
		log.Printf("[ERROR] [agent] [apt] report_log_failed: %v", err)
		log.Printf("[HISTORY] [agent] [apt] report_log_failed error=\"%v\" timestamp=%s", err, time.Now().Format(time.RFC3339))
	} else {
		log.Printf("[INFO] [agent] [apt] history_log_created command_id=%s timestamp=%s", commandID, time.Now().Format(time.RFC3339))
		log.Printf("[HISTORY] [agent] [scan_apt] log_created agent_id=%s command_id=%s result=%s timestamp=%s", cfg.AgentID, commandID, resultStr, time.Now().Format(time.RFC3339))
	}
	return nil
}
// handleScanDNF scans DNF package updates only.
//
// It runs the DNF scanner through the orchestrator, reports any discovered
// updates to the server, and always records a history log entry (success or
// failure) tagged with the "dnf" subsystem so the scan appears on the
// correct page in the UI.
func handleScanDNF(apiClient *client.Client, cfg *config.Config, ackTracker *acknowledgment.Tracker, orch *orchestrator.Orchestrator, commandID string) error {
	log.Println("Scanning DNF packages...")
	ctx := context.Background()
	startTime := time.Now()

	// Execute DNF scanner.
	result, err := orch.ScanSingle(ctx, "dnf")
	if err != nil {
		return fmt.Errorf("failed to scan DNF: %w", err)
	}

	// Format results and append timing information.
	results := []orchestrator.ScanResult{result}
	stdout, stderr, exitCode := orchestrator.FormatScanSummary(results)
	duration := time.Since(startTime)
	stdout += fmt.Sprintf("\nDNF scan completed in %.2f seconds\n", duration.Seconds())

	// Report DNF updates to server if any were found.
	// Declared at function scope so the log metadata below can use the count.
	var updates []client.UpdateReportItem
	if result.Status == "success" && len(result.Updates) > 0 {
		updates = result.Updates
		report := client.UpdateReport{
			CommandID: commandID,
			Timestamp: time.Now(),
			Updates:   updates,
		}
		if err := apiClient.ReportUpdates(cfg.AgentID, report); err != nil {
			return fmt.Errorf("failed to report DNF updates: %w", err)
		}
		log.Printf("[INFO] [agent] [dnf] Successfully reported %d DNF updates to server\n", len(updates))
	}

	// Derive the result string once instead of building a throwaway
	// map literal at each use site.
	resultStr := "failure"
	if exitCode == 0 {
		resultStr = "success"
	}

	// Create history entry for unified view with proper formatting.
	logReport := client.LogReport{
		CommandID:       commandID,
		Action:          "scan_dnf",
		Result:          resultStr,
		Stdout:          stdout,
		Stderr:          stderr,
		ExitCode:        exitCode,
		DurationSeconds: int(duration.Seconds()),
		Metadata: map[string]string{
			"subsystem_label": "DNF Packages",
			"subsystem":       "dnf",
			"updates_found":   fmt.Sprintf("%d", len(updates)),
		},
	}
	if err := reportLogWithAck(apiClient, cfg, ackTracker, logReport); err != nil {
		log.Printf("[ERROR] [agent] [dnf] report_log_failed: %v", err)
		log.Printf("[HISTORY] [agent] [dnf] report_log_failed error=\"%v\" timestamp=%s", err, time.Now().Format(time.RFC3339))
	} else {
		log.Printf("[INFO] [agent] [dnf] history_log_created command_id=%s timestamp=%s", commandID, time.Now().Format(time.RFC3339))
		log.Printf("[HISTORY] [agent] [scan_dnf] log_created agent_id=%s command_id=%s result=%s timestamp=%s", cfg.AgentID, commandID, resultStr, time.Now().Format(time.RFC3339))
	}
	return nil
}
// handleScanWindows scans Windows Updates only.
//
// It runs the Windows Update scanner through the orchestrator, reports any
// discovered updates to the server, and always records a history log entry
// (success or failure) tagged with the "windows" subsystem so the scan
// appears on the correct page in the UI.
func handleScanWindows(apiClient *client.Client, cfg *config.Config, ackTracker *acknowledgment.Tracker, orch *orchestrator.Orchestrator, commandID string) error {
	log.Println("Scanning Windows Updates...")
	ctx := context.Background()
	startTime := time.Now()

	// Execute Windows Update scanner.
	result, err := orch.ScanSingle(ctx, "windows")
	if err != nil {
		return fmt.Errorf("failed to scan Windows Updates: %w", err)
	}

	// Format results and append timing information.
	results := []orchestrator.ScanResult{result}
	stdout, stderr, exitCode := orchestrator.FormatScanSummary(results)
	duration := time.Since(startTime)
	stdout += fmt.Sprintf("\nWindows Update scan completed in %.2f seconds\n", duration.Seconds())

	// Report Windows updates to server if any were found.
	// Declared at function scope so the log metadata below can use the count.
	var updates []client.UpdateReportItem
	if result.Status == "success" && len(result.Updates) > 0 {
		updates = result.Updates
		report := client.UpdateReport{
			CommandID: commandID,
			Timestamp: time.Now(),
			Updates:   updates,
		}
		if err := apiClient.ReportUpdates(cfg.AgentID, report); err != nil {
			return fmt.Errorf("failed to report Windows updates: %w", err)
		}
		log.Printf("[INFO] [agent] [windows] Successfully reported %d Windows updates to server\n", len(updates))
	}

	// Derive the result string once instead of building a throwaway
	// map literal at each use site.
	resultStr := "failure"
	if exitCode == 0 {
		resultStr = "success"
	}

	// Create history entry for unified view with proper formatting.
	logReport := client.LogReport{
		CommandID:       commandID,
		Action:          "scan_windows",
		Result:          resultStr,
		Stdout:          stdout,
		Stderr:          stderr,
		ExitCode:        exitCode,
		DurationSeconds: int(duration.Seconds()),
		Metadata: map[string]string{
			"subsystem_label": "Windows Updates",
			"subsystem":       "windows",
			"updates_found":   fmt.Sprintf("%d", len(updates)),
		},
	}
	if err := reportLogWithAck(apiClient, cfg, ackTracker, logReport); err != nil {
		log.Printf("[ERROR] [agent] [windows] report_log_failed: %v", err)
		log.Printf("[HISTORY] [agent] [windows] report_log_failed error=\"%v\" timestamp=%s", err, time.Now().Format(time.RFC3339))
	} else {
		log.Printf("[INFO] [agent] [windows] history_log_created command_id=%s timestamp=%s", commandID, time.Now().Format(time.RFC3339))
		log.Printf("[HISTORY] [agent] [scan_windows] log_created agent_id=%s command_id=%s result=%s timestamp=%s", cfg.AgentID, commandID, resultStr, time.Now().Format(time.RFC3339))
	}
	return nil
}
// handleScanWinget scans Winget package updates only.
//
// It runs the Winget scanner through the orchestrator, reports any
// discovered updates to the server, and always records a history log entry
// (success or failure) tagged with the "winget" subsystem so the scan
// appears on the correct page in the UI.
func handleScanWinget(apiClient *client.Client, cfg *config.Config, ackTracker *acknowledgment.Tracker, orch *orchestrator.Orchestrator, commandID string) error {
	log.Println("Scanning Winget packages...")
	ctx := context.Background()
	startTime := time.Now()

	// Execute Winget scanner.
	result, err := orch.ScanSingle(ctx, "winget")
	if err != nil {
		return fmt.Errorf("failed to scan Winget: %w", err)
	}

	// Format results and append timing information.
	results := []orchestrator.ScanResult{result}
	stdout, stderr, exitCode := orchestrator.FormatScanSummary(results)
	duration := time.Since(startTime)
	stdout += fmt.Sprintf("\nWinget scan completed in %.2f seconds\n", duration.Seconds())

	// Report Winget updates to server if any were found.
	// Declared at function scope so the log metadata below can use the count.
	var updates []client.UpdateReportItem
	if result.Status == "success" && len(result.Updates) > 0 {
		updates = result.Updates
		report := client.UpdateReport{
			CommandID: commandID,
			Timestamp: time.Now(),
			Updates:   updates,
		}
		if err := apiClient.ReportUpdates(cfg.AgentID, report); err != nil {
			return fmt.Errorf("failed to report Winget updates: %w", err)
		}
		log.Printf("[INFO] [agent] [winget] Successfully reported %d Winget updates to server\n", len(updates))
	}

	// Derive the result string once instead of building a throwaway
	// map literal at each use site.
	resultStr := "failure"
	if exitCode == 0 {
		resultStr = "success"
	}

	// Create history entry for unified view with proper formatting.
	logReport := client.LogReport{
		CommandID:       commandID,
		Action:          "scan_winget",
		Result:          resultStr,
		Stdout:          stdout,
		Stderr:          stderr,
		ExitCode:        exitCode,
		DurationSeconds: int(duration.Seconds()),
		Metadata: map[string]string{
			"subsystem_label": "Winget Packages",
			"subsystem":       "winget",
			"updates_found":   fmt.Sprintf("%d", len(updates)),
		},
	}
	if err := reportLogWithAck(apiClient, cfg, ackTracker, logReport); err != nil {
		log.Printf("[ERROR] [agent] [winget] report_log_failed: %v", err)
		log.Printf("[HISTORY] [agent] [winget] report_log_failed error=\"%v\" timestamp=%s", err, time.Now().Format(time.RFC3339))
	} else {
		log.Printf("[INFO] [agent] [winget] history_log_created command_id=%s timestamp=%s", commandID, time.Now().Format(time.RFC3339))
		log.Printf("[HISTORY] [agent] [scan_winget] log_created agent_id=%s command_id=%s result=%s timestamp=%s", cfg.AgentID, commandID, resultStr, time.Now().Format(time.RFC3339))
	}
	return nil
}