fix: migration runner and scan logging issues

- Fix migration conflicts and duplicate key errors
- Remove duplicate scan logging from agents
- Fix AgentHealth UI and Storage page triggers
- Prevent scans from appearing on wrong pages

Fixes duplicate key violations on fresh installs and
storage scans appearing on Updates page.
This commit is contained in:
Fimeg
2025-12-19 20:59:12 -05:00
parent 6b3ab6d6fc
commit 2da93e442e
8 changed files with 424 additions and 109 deletions

Binary file not shown.

View File

@@ -906,11 +906,6 @@ func runAgent(cfg *config.Config) error {
log.Printf("Processing command: %s (%s)\n", cmd.Type, cmd.ID)
switch cmd.Type {
case "scan_updates":
if err := handleScanUpdatesV2(apiClient, cfg, ackTracker, scanOrchestrator, cmd.ID); err != nil {
log.Printf("Error scanning updates: %v\n", err)
}
case "scan_storage":
if err := handleScanStorage(apiClient, cfg, ackTracker, scanOrchestrator, cmd.ID); err != nil {
log.Printf("Error scanning storage: %v\n", err)
@@ -926,6 +921,26 @@ func runAgent(cfg *config.Config) error {
log.Printf("Error scanning Docker: %v\n", err)
}
case "scan_apt":
if err := handleScanAPT(apiClient, cfg, ackTracker, scanOrchestrator, cmd.ID); err != nil {
log.Printf("Error scanning APT: %v\n", err)
}
case "scan_dnf":
if err := handleScanDNF(apiClient, cfg, ackTracker, scanOrchestrator, cmd.ID); err != nil {
log.Printf("Error scanning DNF: %v\n", err)
}
case "scan_windows":
if err := handleScanWindows(apiClient, cfg, ackTracker, scanOrchestrator, cmd.ID); err != nil {
log.Printf("Error scanning Windows Updates: %v\n", err)
}
case "scan_winget":
if err := handleScanWinget(apiClient, cfg, ackTracker, scanOrchestrator, cmd.ID); err != nil {
log.Printf("Error scanning Winget: %v\n", err)
}
case "collect_specs":
log.Println("Spec collection not yet implemented")

View File

@@ -23,48 +23,6 @@ import (
"github.com/Fimeg/RedFlag/aggregator-agent/internal/orchestrator"
)
// handleScanUpdatesV2 scans all update subsystems (APT, DNF, Docker, Windows Update, Winget) in parallel
// This is the new orchestrator-based version for v0.1.20
//
// Collective ReportLog was removed (each subsystem handler logs its own history
// entry), so this function only aggregates and reports discovered updates.
// Returns an error if reporting the updates to the server fails.
func handleScanUpdatesV2(apiClient *client.Client, cfg *config.Config, ackTracker *acknowledgment.Tracker, orch *orchestrator.Orchestrator, commandID string) error {
	log.Println("Scanning for updates (parallel execution)...")
	ctx := context.Background()
	// Execute all update scanners in parallel. The per-scanner results are
	// discarded here: the formatted summary/duration text previously built from
	// them was dead code once the collective ReportLog call was removed.
	_, allUpdates := orch.ScanAll(ctx)
	// Report updates to server if any were found
	if len(allUpdates) > 0 {
		report := client.UpdateReport{
			CommandID: commandID,
			Timestamp: time.Now(),
			Updates:   allUpdates,
		}
		if err := apiClient.ReportUpdates(cfg.AgentID, report); err != nil {
			return fmt.Errorf("failed to report updates: %w", err)
		}
		log.Printf("✓ Reported %d updates to server\n", len(allUpdates))
	} else {
		log.Println("No updates found")
	}
	return nil
}
// handleScanStorage scans disk usage metrics only
func handleScanStorage(apiClient *client.Client, cfg *config.Config, ackTracker *acknowledgment.Tracker, orch *orchestrator.Orchestrator, commandID string) error {
log.Println("Scanning storage...")
@@ -80,8 +38,7 @@ func handleScanStorage(apiClient *client.Client, cfg *config.Config, ackTracker
// Format results
results := []orchestrator.ScanResult{result}
stdout, _, _ := orchestrator.FormatScanSummary(results)
// [REMOVED] stderr, exitCode unused after ReportLog removal
stdout, stderr, exitCode := orchestrator.FormatScanSummary(results)
duration := time.Since(startTime)
stdout += fmt.Sprintf("\nStorage scan completed in %.2f seconds\n", duration.Seconds())
@@ -92,8 +49,10 @@ func handleScanStorage(apiClient *client.Client, cfg *config.Config, ackTracker
// Report storage metrics to server using dedicated endpoint
// Use proper StorageMetricReport with clean field names
storageScanner := orchestrator.NewStorageScanner(cfg.AgentVersion)
var metrics []orchestrator.StorageMetric // Declare outside if block for ReportLog access
if storageScanner.IsAvailable() {
metrics, err := storageScanner.ScanStorage()
var err error
metrics, err = storageScanner.ScanStorage()
if err != nil {
return fmt.Errorf("failed to scan storage metrics: %w", err)
}
@@ -134,6 +93,29 @@ func handleScanStorage(apiClient *client.Client, cfg *config.Config, ackTracker
}
}
// Create history entry for unified view with proper formatting
logReport := client.LogReport{
CommandID: commandID,
Action: "scan_storage",
Result: map[bool]string{true: "success", false: "failure"}[exitCode == 0],
Stdout: stdout,
Stderr: stderr,
ExitCode: exitCode,
DurationSeconds: int(duration.Seconds()),
Metadata: map[string]string{
"subsystem_label": "Disk Usage",
"subsystem": "storage",
"metrics_count": fmt.Sprintf("%d", len(metrics)),
},
}
if err := reportLogWithAck(apiClient, cfg, ackTracker, logReport); err != nil {
log.Printf("[ERROR] [agent] [storage] report_log_failed: %v", err)
log.Printf("[HISTORY] [agent] [storage] report_log_failed error=\"%v\" timestamp=%s", err, time.Now().Format(time.RFC3339))
} else {
log.Printf("[INFO] [agent] [storage] history_log_created command_id=%s timestamp=%s", commandID, time.Now().Format(time.RFC3339))
log.Printf("[HISTORY] [agent] [scan_storage] log_created agent_id=%s command_id=%s result=%s timestamp=%s", cfg.AgentID, commandID, map[bool]string{true: "success", false: "failure"}[exitCode == 0], time.Now().Format(time.RFC3339))
}
return nil
}
@@ -152,8 +134,7 @@ func handleScanSystem(apiClient *client.Client, cfg *config.Config, ackTracker *
// Format results
results := []orchestrator.ScanResult{result}
stdout, _, _ := orchestrator.FormatScanSummary(results)
// [REMOVED] stderr, exitCode unused after ReportLog removal
stdout, stderr, exitCode := orchestrator.FormatScanSummary(results)
duration := time.Since(startTime)
stdout += fmt.Sprintf("\nSystem scan completed in %.2f seconds\n", duration.Seconds())
@@ -164,8 +145,10 @@ func handleScanSystem(apiClient *client.Client, cfg *config.Config, ackTracker *
// Report system metrics to server using dedicated endpoint
// Get system scanner and use proper interface
systemScanner := orchestrator.NewSystemScanner("unknown") // TODO: Get actual agent version
var metrics []orchestrator.SystemMetric // Declare outside if block for ReportLog access
if systemScanner.IsAvailable() {
metrics, err := systemScanner.ScanSystem()
var err error
metrics, err = systemScanner.ScanSystem()
if err != nil {
return fmt.Errorf("failed to scan system metrics: %w", err)
}
@@ -200,6 +183,29 @@ func handleScanSystem(apiClient *client.Client, cfg *config.Config, ackTracker *
}
}
// Create history entry for unified view with proper formatting
logReport := client.LogReport{
CommandID: commandID,
Action: "scan_system",
Result: map[bool]string{true: "success", false: "failure"}[exitCode == 0],
Stdout: stdout,
Stderr: stderr,
ExitCode: exitCode,
DurationSeconds: int(duration.Seconds()),
Metadata: map[string]string{
"subsystem_label": "System Metrics",
"subsystem": "system",
"metrics_count": fmt.Sprintf("%d", len(metrics)),
},
}
if err := reportLogWithAck(apiClient, cfg, ackTracker, logReport); err != nil {
log.Printf("[ERROR] [agent] [system] report_log_failed: %v", err)
log.Printf("[HISTORY] [agent] [system] report_log_failed error=\"%v\" timestamp=%s", err, time.Now().Format(time.RFC3339))
} else {
log.Printf("[INFO] [agent] [system] history_log_created command_id=%s timestamp=%s", commandID, time.Now().Format(time.RFC3339))
log.Printf("[HISTORY] [agent] [scan_system] log_created agent_id=%s command_id=%s result=%s timestamp=%s", cfg.AgentID, commandID, map[bool]string{true: "success", false: "failure"}[exitCode == 0], time.Now().Format(time.RFC3339))
}
return nil
}
@@ -218,8 +224,7 @@ func handleScanDocker(apiClient *client.Client, cfg *config.Config, ackTracker *
// Format results
results := []orchestrator.ScanResult{result}
stdout, _, _ := orchestrator.FormatScanSummary(results)
// [REMOVED] stderr, exitCode unused after ReportLog removal
stdout, stderr, exitCode := orchestrator.FormatScanSummary(results)
duration := time.Since(startTime)
stdout += fmt.Sprintf("\nDocker scan completed in %.2f seconds\n", duration.Seconds())
@@ -235,13 +240,16 @@ func handleScanDocker(apiClient *client.Client, cfg *config.Config, ackTracker *
}
defer dockerScanner.Close()
var images []orchestrator.DockerImage // Declare outside if block for ReportLog access
var updateCount int // Declare outside if block for ReportLog access
if dockerScanner.IsAvailable() {
images, err := dockerScanner.ScanDocker()
images, err = dockerScanner.ScanDocker()
if err != nil {
return fmt.Errorf("failed to scan Docker images: %w", err)
}
// Always report all Docker images (not just those with updates)
updateCount = 0 // Reset for counting
if len(images) > 0 {
// Convert DockerImage to DockerReportItem for API call
imageItems := make([]client.DockerReportItem, 0, len(images))
@@ -268,7 +276,6 @@ func handleScanDocker(apiClient *client.Client, cfg *config.Config, ackTracker *
return fmt.Errorf("failed to report Docker images: %w", err)
}
updateCount := 0
for _, image := range images {
if image.HasUpdate {
updateCount++
@@ -282,6 +289,286 @@ func handleScanDocker(apiClient *client.Client, cfg *config.Config, ackTracker *
log.Println("Docker not available on this system")
}
// Create history entry for unified view with proper formatting
logReport := client.LogReport{
CommandID: commandID,
Action: "scan_docker",
Result: map[bool]string{true: "success", false: "failure"}[exitCode == 0],
Stdout: stdout,
Stderr: stderr,
ExitCode: exitCode,
DurationSeconds: int(duration.Seconds()),
Metadata: map[string]string{
"subsystem_label": "Docker Images",
"subsystem": "docker",
"images_count": fmt.Sprintf("%d", len(images)),
"updates_found": fmt.Sprintf("%d", updateCount),
},
}
if err := reportLogWithAck(apiClient, cfg, ackTracker, logReport); err != nil {
log.Printf("[ERROR] [agent] [docker] report_log_failed: %v", err)
log.Printf("[HISTORY] [agent] [docker] report_log_failed error=\"%v\" timestamp=%s", err, time.Now().Format(time.RFC3339))
} else {
log.Printf("[INFO] [agent] [docker] history_log_created command_id=%s timestamp=%s", commandID, time.Now().Format(time.RFC3339))
log.Printf("[HISTORY] [agent] [scan_docker] log_created agent_id=%s command_id=%s result=%s timestamp=%s", cfg.AgentID, commandID, map[bool]string{true: "success", false: "failure"}[exitCode == 0], time.Now().Format(time.RFC3339))
}
return nil
}
// handleScanAPT scans APT package updates only.
//
// It runs the APT scanner via the orchestrator, reports any discovered updates
// to the server, and always records a per-subsystem history log entry (tagged
// "apt") so the scan shows up on the correct page in the UI.
// Returns an error if the scan itself or the update report fails; a failed
// history-log upload is only logged, not returned.
func handleScanAPT(apiClient *client.Client, cfg *config.Config, ackTracker *acknowledgment.Tracker, orch *orchestrator.Orchestrator, commandID string) error {
	log.Println("Scanning APT packages...")
	ctx := context.Background()
	startTime := time.Now()
	// Execute APT scanner
	result, err := orch.ScanSingle(ctx, "apt")
	if err != nil {
		return fmt.Errorf("failed to scan APT: %w", err)
	}
	// Format results
	results := []orchestrator.ScanResult{result}
	stdout, stderr, exitCode := orchestrator.FormatScanSummary(results)
	duration := time.Since(startTime)
	stdout += fmt.Sprintf("\nAPT scan completed in %.2f seconds\n", duration.Seconds())
	// Compute the scan outcome once instead of re-evaluating a
	// map[bool]string literal at every use site.
	outcome := "failure"
	if exitCode == 0 {
		outcome = "success"
	}
	// Report APT updates to server if any were found.
	// Declared at function scope so the count is available for the log metadata.
	var updates []client.UpdateReportItem
	if result.Status == "success" && len(result.Updates) > 0 {
		updates = result.Updates
		report := client.UpdateReport{
			CommandID: commandID,
			Timestamp: time.Now(),
			Updates:   updates,
		}
		if err := apiClient.ReportUpdates(cfg.AgentID, report); err != nil {
			return fmt.Errorf("failed to report APT updates: %w", err)
		}
		log.Printf("[INFO] [agent] [apt] Successfully reported %d APT updates to server\n", len(updates))
	}
	// Create history entry for unified view with proper formatting
	logReport := client.LogReport{
		CommandID:       commandID,
		Action:          "scan_apt",
		Result:          outcome,
		Stdout:          stdout,
		Stderr:          stderr,
		ExitCode:        exitCode,
		DurationSeconds: int(duration.Seconds()),
		Metadata: map[string]string{
			"subsystem_label": "APT Packages",
			"subsystem":       "apt",
			"updates_found":   fmt.Sprintf("%d", len(updates)),
		},
	}
	if err := reportLogWithAck(apiClient, cfg, ackTracker, logReport); err != nil {
		log.Printf("[ERROR] [agent] [apt] report_log_failed: %v", err)
		log.Printf("[HISTORY] [agent] [apt] report_log_failed error=\"%v\" timestamp=%s", err, time.Now().Format(time.RFC3339))
	} else {
		log.Printf("[INFO] [agent] [apt] history_log_created command_id=%s timestamp=%s", commandID, time.Now().Format(time.RFC3339))
		log.Printf("[HISTORY] [agent] [scan_apt] log_created agent_id=%s command_id=%s result=%s timestamp=%s", cfg.AgentID, commandID, outcome, time.Now().Format(time.RFC3339))
	}
	return nil
}
// handleScanDNF scans DNF package updates only.
//
// It runs the DNF scanner via the orchestrator, reports any discovered updates
// to the server, and always records a per-subsystem history log entry (tagged
// "dnf") so the scan shows up on the correct page in the UI.
// Returns an error if the scan itself or the update report fails; a failed
// history-log upload is only logged, not returned.
func handleScanDNF(apiClient *client.Client, cfg *config.Config, ackTracker *acknowledgment.Tracker, orch *orchestrator.Orchestrator, commandID string) error {
	log.Println("Scanning DNF packages...")
	ctx := context.Background()
	startTime := time.Now()
	// Execute DNF scanner
	result, err := orch.ScanSingle(ctx, "dnf")
	if err != nil {
		return fmt.Errorf("failed to scan DNF: %w", err)
	}
	// Format results
	results := []orchestrator.ScanResult{result}
	stdout, stderr, exitCode := orchestrator.FormatScanSummary(results)
	duration := time.Since(startTime)
	stdout += fmt.Sprintf("\nDNF scan completed in %.2f seconds\n", duration.Seconds())
	// Compute the scan outcome once instead of re-evaluating a
	// map[bool]string literal at every use site.
	outcome := "failure"
	if exitCode == 0 {
		outcome = "success"
	}
	// Report DNF updates to server if any were found.
	// Declared at function scope so the count is available for the log metadata.
	var updates []client.UpdateReportItem
	if result.Status == "success" && len(result.Updates) > 0 {
		updates = result.Updates
		report := client.UpdateReport{
			CommandID: commandID,
			Timestamp: time.Now(),
			Updates:   updates,
		}
		if err := apiClient.ReportUpdates(cfg.AgentID, report); err != nil {
			return fmt.Errorf("failed to report DNF updates: %w", err)
		}
		log.Printf("[INFO] [agent] [dnf] Successfully reported %d DNF updates to server\n", len(updates))
	}
	// Create history entry for unified view with proper formatting
	logReport := client.LogReport{
		CommandID:       commandID,
		Action:          "scan_dnf",
		Result:          outcome,
		Stdout:          stdout,
		Stderr:          stderr,
		ExitCode:        exitCode,
		DurationSeconds: int(duration.Seconds()),
		Metadata: map[string]string{
			"subsystem_label": "DNF Packages",
			"subsystem":       "dnf",
			"updates_found":   fmt.Sprintf("%d", len(updates)),
		},
	}
	if err := reportLogWithAck(apiClient, cfg, ackTracker, logReport); err != nil {
		log.Printf("[ERROR] [agent] [dnf] report_log_failed: %v", err)
		log.Printf("[HISTORY] [agent] [dnf] report_log_failed error=\"%v\" timestamp=%s", err, time.Now().Format(time.RFC3339))
	} else {
		log.Printf("[INFO] [agent] [dnf] history_log_created command_id=%s timestamp=%s", commandID, time.Now().Format(time.RFC3339))
		log.Printf("[HISTORY] [agent] [scan_dnf] log_created agent_id=%s command_id=%s result=%s timestamp=%s", cfg.AgentID, commandID, outcome, time.Now().Format(time.RFC3339))
	}
	return nil
}
// handleScanWindows scans Windows Updates only.
//
// It runs the Windows Update scanner via the orchestrator, reports any
// discovered updates to the server, and always records a per-subsystem history
// log entry (tagged "windows") so the scan shows up on the correct page in the UI.
// Returns an error if the scan itself or the update report fails; a failed
// history-log upload is only logged, not returned.
func handleScanWindows(apiClient *client.Client, cfg *config.Config, ackTracker *acknowledgment.Tracker, orch *orchestrator.Orchestrator, commandID string) error {
	log.Println("Scanning Windows Updates...")
	ctx := context.Background()
	startTime := time.Now()
	// Execute Windows Update scanner
	result, err := orch.ScanSingle(ctx, "windows")
	if err != nil {
		return fmt.Errorf("failed to scan Windows Updates: %w", err)
	}
	// Format results
	results := []orchestrator.ScanResult{result}
	stdout, stderr, exitCode := orchestrator.FormatScanSummary(results)
	duration := time.Since(startTime)
	stdout += fmt.Sprintf("\nWindows Update scan completed in %.2f seconds\n", duration.Seconds())
	// Compute the scan outcome once instead of re-evaluating a
	// map[bool]string literal at every use site.
	outcome := "failure"
	if exitCode == 0 {
		outcome = "success"
	}
	// Report Windows updates to server if any were found.
	// Declared at function scope so the count is available for the log metadata.
	var updates []client.UpdateReportItem
	if result.Status == "success" && len(result.Updates) > 0 {
		updates = result.Updates
		report := client.UpdateReport{
			CommandID: commandID,
			Timestamp: time.Now(),
			Updates:   updates,
		}
		if err := apiClient.ReportUpdates(cfg.AgentID, report); err != nil {
			return fmt.Errorf("failed to report Windows updates: %w", err)
		}
		log.Printf("[INFO] [agent] [windows] Successfully reported %d Windows updates to server\n", len(updates))
	}
	// Create history entry for unified view with proper formatting
	logReport := client.LogReport{
		CommandID:       commandID,
		Action:          "scan_windows",
		Result:          outcome,
		Stdout:          stdout,
		Stderr:          stderr,
		ExitCode:        exitCode,
		DurationSeconds: int(duration.Seconds()),
		Metadata: map[string]string{
			"subsystem_label": "Windows Updates",
			"subsystem":       "windows",
			"updates_found":   fmt.Sprintf("%d", len(updates)),
		},
	}
	if err := reportLogWithAck(apiClient, cfg, ackTracker, logReport); err != nil {
		log.Printf("[ERROR] [agent] [windows] report_log_failed: %v", err)
		log.Printf("[HISTORY] [agent] [windows] report_log_failed error=\"%v\" timestamp=%s", err, time.Now().Format(time.RFC3339))
	} else {
		log.Printf("[INFO] [agent] [windows] history_log_created command_id=%s timestamp=%s", commandID, time.Now().Format(time.RFC3339))
		log.Printf("[HISTORY] [agent] [scan_windows] log_created agent_id=%s command_id=%s result=%s timestamp=%s", cfg.AgentID, commandID, outcome, time.Now().Format(time.RFC3339))
	}
	return nil
}
// handleScanWinget scans Winget package updates only.
//
// It runs the Winget scanner via the orchestrator, reports any discovered
// updates to the server, and always records a per-subsystem history log entry
// (tagged "winget") so the scan shows up on the correct page in the UI.
// Returns an error if the scan itself or the update report fails; a failed
// history-log upload is only logged, not returned.
func handleScanWinget(apiClient *client.Client, cfg *config.Config, ackTracker *acknowledgment.Tracker, orch *orchestrator.Orchestrator, commandID string) error {
	log.Println("Scanning Winget packages...")
	ctx := context.Background()
	startTime := time.Now()
	// Execute Winget scanner
	result, err := orch.ScanSingle(ctx, "winget")
	if err != nil {
		return fmt.Errorf("failed to scan Winget: %w", err)
	}
	// Format results
	results := []orchestrator.ScanResult{result}
	stdout, stderr, exitCode := orchestrator.FormatScanSummary(results)
	duration := time.Since(startTime)
	stdout += fmt.Sprintf("\nWinget scan completed in %.2f seconds\n", duration.Seconds())
	// Compute the scan outcome once instead of re-evaluating a
	// map[bool]string literal at every use site.
	outcome := "failure"
	if exitCode == 0 {
		outcome = "success"
	}
	// Report Winget updates to server if any were found.
	// Declared at function scope so the count is available for the log metadata.
	var updates []client.UpdateReportItem
	if result.Status == "success" && len(result.Updates) > 0 {
		updates = result.Updates
		report := client.UpdateReport{
			CommandID: commandID,
			Timestamp: time.Now(),
			Updates:   updates,
		}
		if err := apiClient.ReportUpdates(cfg.AgentID, report); err != nil {
			return fmt.Errorf("failed to report Winget updates: %w", err)
		}
		log.Printf("[INFO] [agent] [winget] Successfully reported %d Winget updates to server\n", len(updates))
	}
	// Create history entry for unified view with proper formatting
	logReport := client.LogReport{
		CommandID:       commandID,
		Action:          "scan_winget",
		Result:          outcome,
		Stdout:          stdout,
		Stderr:          stderr,
		ExitCode:        exitCode,
		DurationSeconds: int(duration.Seconds()),
		Metadata: map[string]string{
			"subsystem_label": "Winget Packages",
			"subsystem":       "winget",
			"updates_found":   fmt.Sprintf("%d", len(updates)),
		},
	}
	if err := reportLogWithAck(apiClient, cfg, ackTracker, logReport); err != nil {
		log.Printf("[ERROR] [agent] [winget] report_log_failed: %v", err)
		log.Printf("[HISTORY] [agent] [winget] report_log_failed error=\"%v\" timestamp=%s", err, time.Now().Format(time.RFC3339))
	} else {
		log.Printf("[INFO] [agent] [winget] history_log_created command_id=%s timestamp=%s", commandID, time.Now().Format(time.RFC3339))
		log.Printf("[HISTORY] [agent] [scan_winget] log_created agent_id=%s command_id=%s result=%s timestamp=%s", cfg.AgentID, commandID, outcome, time.Now().Format(time.RFC3339))
	}
	return nil
}

View File

@@ -91,28 +91,33 @@ func (db *DB) Migrate(migrationsPath string) error {
// Execute the migration SQL
if _, err := tx.Exec(string(content)); err != nil {
// Check if it's a "already exists" error - if so, handle gracefully
// Check if it's an "already exists" error
if strings.Contains(err.Error(), "already exists") ||
strings.Contains(err.Error(), "duplicate key") ||
strings.Contains(err.Error(), "relation") && strings.Contains(err.Error(), "already exists") {
fmt.Printf("⚠ Migration %s failed (objects already exist), marking as applied: %v\n", filename, err)
// Rollback current transaction and start a new one for tracking
// Rollback the failed transaction
tx.Rollback()
// Start new transaction just for migration tracking
if newTx, newTxErr := db.Beginx(); newTxErr == nil {
if _, insertErr := newTx.Exec("INSERT INTO schema_migrations (version) VALUES ($1)", filename); insertErr == nil {
newTx.Commit()
// Check if this migration was already recorded as applied
var count int
checkErr := db.Get(&count, "SELECT COUNT(*) FROM schema_migrations WHERE version = $1", filename)
if checkErr == nil && count > 0 {
// Migration was already applied, just skip it
fmt.Printf("⚠ Migration %s already applied, skipping\n", filename)
} else {
newTx.Rollback()
}
// Migration failed and wasn't applied - this is a real error
return fmt.Errorf("migration %s failed with 'already exists' but migration not recorded: %w", filename, err)
}
continue
}
// For any other error, rollback and fail
tx.Rollback()
return fmt.Errorf("failed to execute migration %s: %w", filename, err)
}
// Record the migration as applied
// Record the migration as applied (normal success path)
if _, err := tx.Exec("INSERT INTO schema_migrations (version) VALUES ($1)", filename); err != nil {
tx.Rollback()
return fmt.Errorf("failed to record migration %s: %w", filename, err)
@@ -123,7 +128,7 @@ func (db *DB) Migrate(migrationsPath string) error {
return fmt.Errorf("failed to commit migration %s: %w", filename, err)
}
fmt.Printf("✓ Executed migration: %s\n", filename)
fmt.Printf("✓ Successfully executed migration: %s\n", filename)
}
return nil

View File

@@ -1,11 +1,12 @@
-- Add machine_id column to agents table for hardware fingerprint binding
-- Ensure proper UNIQUE constraint on machine_id for hardware fingerprint binding
-- This prevents config file copying attacks by validating hardware identity
-- NOTE: Migration 016 already added the machine_id column, this ensures proper unique constraint
ALTER TABLE agents
ADD COLUMN machine_id VARCHAR(64);
-- Drop the old non-unique index if it exists
DROP INDEX IF EXISTS idx_agents_machine_id;
-- Create unique index to prevent duplicate machine IDs
CREATE UNIQUE INDEX idx_agents_machine_id ON agents(machine_id) WHERE machine_id IS NOT NULL;
-- Create unique index to prevent duplicate machine IDs (allows multiple NULLs).
-- NOTE: CONCURRENTLY is not used here because PostgreSQL forbids
-- CREATE INDEX CONCURRENTLY inside a transaction block, and the migration
-- runner executes each migration file within a transaction (tx.Exec), so the
-- concurrent form would make this migration fail unconditionally.
-- IF NOT EXISTS keeps the migration idempotent on re-runs.
CREATE UNIQUE INDEX IF NOT EXISTS idx_agents_machine_id_unique ON agents(machine_id) WHERE machine_id IS NOT NULL;
-- Add comment for documentation
COMMENT ON COLUMN agents.machine_id IS 'SHA-256 hash of hardware fingerprint (prevents agent impersonation via config copying)';

View File

@@ -22,6 +22,3 @@ CREATE INDEX idx_storage_metrics_agent_id ON storage_metrics(agent_id);
CREATE INDEX idx_storage_metrics_created_at ON storage_metrics(created_at DESC);
CREATE INDEX idx_storage_metrics_mountpoint ON storage_metrics(mountpoint);
CREATE INDEX idx_storage_metrics_agent_mount ON storage_metrics(agent_id, mountpoint, created_at DESC);
-- Track migration
INSERT INTO schema_migrations (version, description) VALUES ('021', 'Create storage_metrics table');

View File

@@ -227,31 +227,25 @@ export function AgentHealth({ agentId }: AgentHealthProps) {
};
// Get package manager badges based on OS type
const getPackageManagerBadges = (osType: string) => {
const getPackageManagerStatus = (pm: string, osType: string) => {
const os = osType.toLowerCase();
const badges = [];
if (os.includes('windows')) {
badges.push(
<span key="windows" className="text-[10px] px-1 py-0.5 bg-blue-100 rounded text-blue-700">Windows</span>,
<span key="winget" className="text-[10px] px-1 py-0.5 bg-blue-100 rounded text-blue-700">Winget</span>
);
} else if (os.includes('fedora') || os.includes('rhel') || os.includes('centos')) {
badges.push(
<span key="dnf" className="text-[10px] px-1 py-0.5 bg-green-100 rounded text-green-700">DNF</span>
);
} else if (os.includes('debian') || os.includes('ubuntu') || os.includes('linux')) {
badges.push(
<span key="apt" className="text-[10px] px-1 py-0.5 bg-purple-100 rounded text-purple-700">APT</span>
);
switch (pm) {
case 'apt': return os.includes('debian') || os.includes('ubuntu');
case 'dnf': return os.includes('fedora') || os.includes('rhel') || os.includes('centos');
case 'winget': return os.includes('windows');
case 'windows': return os.includes('windows');
default: return false;
}
};
// Docker is cross-platform
badges.push(
<span key="docker" className="text-[10px] px-1 py-0.5 bg-gray-100 rounded text-gray-600">Docker</span>
);
return badges;
const getPackageManagerBadgeStyle = (pm: string) => {
switch (pm) {
case 'apt': return 'bg-purple-100 text-purple-700';
case 'dnf': return 'bg-green-100 text-green-700';
case 'winget': return 'bg-blue-100 text-blue-700';
case 'windows': return 'bg-blue-100 text-blue-700';
default: return 'bg-gray-100 text-gray-500';
}
};
const frequencyOptions = [
@@ -337,11 +331,27 @@ export function AgentHealth({ agentId }: AgentHealthProps) {
<div className="font-medium">{config.name}</div>
<div className="text-xs text-gray-500">
{subsystem.subsystem === 'updates' ? (
<div className="flex items-center space-x-1">
<span>Scans for available package updates</span>
<div className="flex items-center space-x-1 ml-1">
{getPackageManagerBadges(agent?.os_type || '')}
</div>
<div className="text-xs text-gray-500">
<span>Scans for available package updates (</span>
{['apt', 'dnf', 'winget', 'windows'].map((pm, index) => {
const isEnabled = getPackageManagerStatus(pm, agent?.os_type || '');
const isLast = index === 3;
return (
<span key={pm}>
{index > 0 && ', '}
<span className={cn(
'text-[10px] px-1 py-0.5 rounded',
isEnabled
? getPackageManagerBadgeStyle(pm)
: 'bg-gray-100 text-gray-500'
)}>
{pm === 'windows' ? 'Windows Update' : pm.toUpperCase()}
</span>
{isLast && ')'}
</span>
);
})}
</div>
) : (
config.description

View File

@@ -66,13 +66,13 @@ export function AgentStorage({ agentId }: AgentStorageProps) {
const handleFullStorageScan = async () => {
setIsScanning(true);
try {
// Trigger a system scan to get full disk inventory
await agentApi.scanAgent(agentId);
toast.success('Full storage scan initiated');
// Trigger storage scan only (not full system scan)
await agentApi.triggerSubsystem(agentId, 'storage');
toast.success('Storage scan initiated');
// Refresh data after a short delay
setTimeout(() => {
refetchAgent();
refetchStorage();
setIsScanning(false);
}, 3000);
} catch (error) {