feat: granular subsystem commands with parallel scanner execution

Split monolithic scan_updates into individual subsystems (updates/storage/system/docker).
Scanners now run in parallel via goroutines - cuts scan time roughly in half, maybe more.

Agent changes:
- Orchestrator pattern for scanner management
- New scanners: storage (disk metrics), system (cpu/mem/processes)
- New commands: scan_storage, scan_system, scan_docker
- Wrapped existing scanners (APT/DNF/Docker/Windows/Winget) with common interface
- Version bump to 0.1.20

Server changes:
- Migration 015: agent_subsystems table with trigger for auto-init
- Subsystem CRUD: enable/disable, interval (5min-24hr), auto-run toggle
- API routes: /api/v1/agents/:id/subsystems/* (9 endpoints)
- Stats tracking per subsystem

Web UI changes:
- ChatTimeline shows subsystem-specific labels and icons
- AgentScanners got interactive toggles, interval dropdowns, manual trigger buttons
- TypeScript types added for subsystems

Backward compatible with legacy scan_updates - for now. Bugs probably exist somewhere.
This commit is contained in:
Fimeg
2025-11-01 20:34:00 -04:00
parent bf4d46529f
commit 3690472396
19 changed files with 2151 additions and 253 deletions

View File

@@ -19,6 +19,7 @@ import (
"github.com/Fimeg/RedFlag/aggregator-agent/internal/config"
"github.com/Fimeg/RedFlag/aggregator-agent/internal/display"
"github.com/Fimeg/RedFlag/aggregator-agent/internal/installer"
"github.com/Fimeg/RedFlag/aggregator-agent/internal/orchestrator"
"github.com/Fimeg/RedFlag/aggregator-agent/internal/scanner"
"github.com/Fimeg/RedFlag/aggregator-agent/internal/service"
"github.com/Fimeg/RedFlag/aggregator-agent/internal/system"
@@ -26,7 +27,7 @@ import (
)
const (
AgentVersion = "0.1.19" // Phase 0: Circuit breakers, timeouts, and subsystem resilience
AgentVersion = "0.1.20" // Phase 1: Granular subsystem commands and parallel scanner execution
)
// getConfigPath returns the platform-specific config path
@@ -466,6 +467,37 @@ func runAgent(cfg *config.Config) error {
HalfOpenAttempts: cfg.Subsystems.Winget.CircuitBreaker.HalfOpenAttempts,
})
// Initialize scanner orchestrator for parallel execution and granular subsystem management
scanOrchestrator := orchestrator.NewOrchestrator()
// Register update scanners
scanOrchestrator.RegisterScanner("apt", orchestrator.NewAPTScannerWrapper(aptScanner), aptCB, cfg.Subsystems.APT.Timeout, cfg.Subsystems.APT.Enabled)
scanOrchestrator.RegisterScanner("dnf", orchestrator.NewDNFScannerWrapper(dnfScanner), dnfCB, cfg.Subsystems.DNF.Timeout, cfg.Subsystems.DNF.Enabled)
scanOrchestrator.RegisterScanner("docker", orchestrator.NewDockerScannerWrapper(dockerScanner), dockerCB, cfg.Subsystems.Docker.Timeout, cfg.Subsystems.Docker.Enabled)
scanOrchestrator.RegisterScanner("windows", orchestrator.NewWindowsUpdateScannerWrapper(windowsUpdateScanner), windowsCB, cfg.Subsystems.Windows.Timeout, cfg.Subsystems.Windows.Enabled)
scanOrchestrator.RegisterScanner("winget", orchestrator.NewWingetScannerWrapper(wingetScanner), wingetCB, cfg.Subsystems.Winget.Timeout, cfg.Subsystems.Winget.Enabled)
// Register storage and system scanners
storageScanner := orchestrator.NewStorageScanner(AgentVersion)
systemScanner := orchestrator.NewSystemScanner(AgentVersion)
// Storage and system scanners don't need circuit breakers (always available, fast operations)
storageCB := circuitbreaker.New("Storage", circuitbreaker.Config{
FailureThreshold: 5,
FailureWindow: 10 * time.Minute,
OpenDuration: 5 * time.Minute,
HalfOpenAttempts: 1,
})
systemCB := circuitbreaker.New("System", circuitbreaker.Config{
FailureThreshold: 5,
FailureWindow: 10 * time.Minute,
OpenDuration: 5 * time.Minute,
HalfOpenAttempts: 1,
})
scanOrchestrator.RegisterScanner("storage", storageScanner, storageCB, 30*time.Second, cfg.Subsystems.Storage.Enabled)
scanOrchestrator.RegisterScanner("system", systemScanner, systemCB, 30*time.Second, true) // System scanner always enabled
// Initialize acknowledgment tracker for command result reliability
ackTracker := acknowledgment.NewTracker(getStatePath())
if err := ackTracker.Load(); err != nil {
@@ -610,10 +642,25 @@ func runAgent(cfg *config.Config) error {
switch cmd.Type {
case "scan_updates":
if err := handleScanUpdates(apiClient, cfg, ackTracker, aptScanner, dnfScanner, dockerScanner, windowsUpdateScanner, wingetScanner, aptCB, dnfCB, dockerCB, windowsCB, wingetCB, cmd.ID); err != nil {
if err := handleScanUpdatesV2(apiClient, cfg, ackTracker, scanOrchestrator, cmd.ID); err != nil {
log.Printf("Error scanning updates: %v\n", err)
}
case "scan_storage":
if err := handleScanStorage(apiClient, cfg, ackTracker, scanOrchestrator, cmd.ID); err != nil {
log.Printf("Error scanning storage: %v\n", err)
}
case "scan_system":
if err := handleScanSystem(apiClient, cfg, ackTracker, scanOrchestrator, cmd.ID); err != nil {
log.Printf("Error scanning system: %v\n", err)
}
case "scan_docker":
if err := handleScanDocker(apiClient, cfg, ackTracker, scanOrchestrator, cmd.ID); err != nil {
log.Printf("Error scanning Docker: %v\n", err)
}
case "collect_specs":
log.Println("Spec collection not yet implemented")

View File

@@ -0,0 +1,232 @@
package main
import (
"context"
"fmt"
"log"
"time"
"github.com/Fimeg/RedFlag/aggregator-agent/internal/acknowledgment"
"github.com/Fimeg/RedFlag/aggregator-agent/internal/client"
"github.com/Fimeg/RedFlag/aggregator-agent/internal/config"
"github.com/Fimeg/RedFlag/aggregator-agent/internal/orchestrator"
)
// handleScanUpdatesV2 scans all update subsystems (APT, DNF, Docker, Windows Update, Winget) in parallel
// This is the new orchestrator-based version for v0.1.20
func handleScanUpdatesV2(apiClient *client.Client, cfg *config.Config, ackTracker *acknowledgment.Tracker, orch *orchestrator.Orchestrator, commandID string) error {
log.Println("Scanning for updates (parallel execution)...")
ctx := context.Background()
startTime := time.Now()
// Execute all update scanners in parallel
results, allUpdates := orch.ScanAll(ctx)
// Format results
stdout, stderr, exitCode := orchestrator.FormatScanSummary(results)
// Add timing information
duration := time.Since(startTime)
stdout += fmt.Sprintf("\nScan completed in %.2f seconds\n", duration.Seconds())
// Create scan log entry with subsystem metadata
logReport := client.LogReport{
CommandID: commandID,
Action: "scan_updates",
Result: map[bool]string{true: "success", false: "failure"}[exitCode == 0],
Stdout: stdout,
Stderr: stderr,
ExitCode: exitCode,
DurationSeconds: int(duration.Seconds()),
}
// Report the scan log
if err := reportLogWithAck(apiClient, cfg, ackTracker, logReport); err != nil {
log.Printf("Failed to report scan log: %v\n", err)
// Continue anyway - updates are more important
}
// Report updates to server if any were found
if len(allUpdates) > 0 {
report := client.UpdateReport{
CommandID: commandID,
Timestamp: time.Now(),
Updates: allUpdates,
}
if err := apiClient.ReportUpdates(cfg.AgentID, report); err != nil {
return fmt.Errorf("failed to report updates: %w", err)
}
log.Printf("✓ Reported %d updates to server\n", len(allUpdates))
} else {
log.Println("No updates found")
}
return nil
}
// handleScanStorage scans disk usage metrics only
func handleScanStorage(apiClient *client.Client, cfg *config.Config, ackTracker *acknowledgment.Tracker, orch *orchestrator.Orchestrator, commandID string) error {
log.Println("Scanning storage...")
ctx := context.Background()
startTime := time.Now()
// Execute storage scanner
result, err := orch.ScanSingle(ctx, "storage")
if err != nil {
return fmt.Errorf("failed to scan storage: %w", err)
}
// Format results
results := []orchestrator.ScanResult{result}
stdout, stderr, exitCode := orchestrator.FormatScanSummary(results)
duration := time.Since(startTime)
stdout += fmt.Sprintf("\nStorage scan completed in %.2f seconds\n", duration.Seconds())
// Create scan log entry
logReport := client.LogReport{
CommandID: commandID,
Action: "scan_storage",
Result: map[bool]string{true: "success", false: "failure"}[exitCode == 0],
Stdout: stdout,
Stderr: stderr,
ExitCode: exitCode,
DurationSeconds: int(duration.Seconds()),
}
// Report the scan log
if err := reportLogWithAck(apiClient, cfg, ackTracker, logReport); err != nil {
log.Printf("Failed to report scan log: %v\n", err)
}
// Report "updates" (disk info) to server
if len(result.Updates) > 0 {
report := client.UpdateReport{
CommandID: commandID,
Timestamp: time.Now(),
Updates: result.Updates,
}
if err := apiClient.ReportUpdates(cfg.AgentID, report); err != nil {
return fmt.Errorf("failed to report storage metrics: %w", err)
}
log.Printf("✓ Reported %d disk mount points to server\n", len(result.Updates))
}
return nil
}
// handleScanSystem scans system metrics (CPU, memory, processes, uptime)
func handleScanSystem(apiClient *client.Client, cfg *config.Config, ackTracker *acknowledgment.Tracker, orch *orchestrator.Orchestrator, commandID string) error {
log.Println("Scanning system metrics...")
ctx := context.Background()
startTime := time.Now()
// Execute system scanner
result, err := orch.ScanSingle(ctx, "system")
if err != nil {
return fmt.Errorf("failed to scan system: %w", err)
}
// Format results
results := []orchestrator.ScanResult{result}
stdout, stderr, exitCode := orchestrator.FormatScanSummary(results)
duration := time.Since(startTime)
stdout += fmt.Sprintf("\nSystem scan completed in %.2f seconds\n", duration.Seconds())
// Create scan log entry
logReport := client.LogReport{
CommandID: commandID,
Action: "scan_system",
Result: map[bool]string{true: "success", false: "failure"}[exitCode == 0],
Stdout: stdout,
Stderr: stderr,
ExitCode: exitCode,
DurationSeconds: int(duration.Seconds()),
}
// Report the scan log
if err := reportLogWithAck(apiClient, cfg, ackTracker, logReport); err != nil {
log.Printf("Failed to report scan log: %v\n", err)
}
// Report "updates" (system metrics) to server
if len(result.Updates) > 0 {
report := client.UpdateReport{
CommandID: commandID,
Timestamp: time.Now(),
Updates: result.Updates,
}
if err := apiClient.ReportUpdates(cfg.AgentID, report); err != nil {
return fmt.Errorf("failed to report system metrics: %w", err)
}
log.Printf("✓ Reported system metrics to server\n")
}
return nil
}
// handleScanDocker scans Docker image updates only
func handleScanDocker(apiClient *client.Client, cfg *config.Config, ackTracker *acknowledgment.Tracker, orch *orchestrator.Orchestrator, commandID string) error {
log.Println("Scanning Docker images...")
ctx := context.Background()
startTime := time.Now()
// Execute Docker scanner
result, err := orch.ScanSingle(ctx, "docker")
if err != nil {
return fmt.Errorf("failed to scan Docker: %w", err)
}
// Format results
results := []orchestrator.ScanResult{result}
stdout, stderr, exitCode := orchestrator.FormatScanSummary(results)
duration := time.Since(startTime)
stdout += fmt.Sprintf("\nDocker scan completed in %.2f seconds\n", duration.Seconds())
// Create scan log entry
logReport := client.LogReport{
CommandID: commandID,
Action: "scan_docker",
Result: map[bool]string{true: "success", false: "failure"}[exitCode == 0],
Stdout: stdout,
Stderr: stderr,
ExitCode: exitCode,
DurationSeconds: int(duration.Seconds()),
}
// Report the scan log
if err := reportLogWithAck(apiClient, cfg, ackTracker, logReport); err != nil {
log.Printf("Failed to report scan log: %v\n", err)
}
// Report updates to server if any were found
if len(result.Updates) > 0 {
report := client.UpdateReport{
CommandID: commandID,
Timestamp: time.Now(),
Updates: result.Updates,
}
if err := apiClient.ReportUpdates(cfg.AgentID, report); err != nil {
return fmt.Errorf("failed to report Docker updates: %w", err)
}
log.Printf("✓ Reported %d Docker image updates to server\n", len(result.Updates))
} else {
log.Println("No Docker image updates found")
}
return nil
}

View File

@@ -0,0 +1,261 @@
package orchestrator
import (
"context"
"fmt"
"log"
"sync"
"time"
"github.com/Fimeg/RedFlag/aggregator-agent/internal/circuitbreaker"
"github.com/Fimeg/RedFlag/aggregator-agent/internal/client"
)
// Scanner represents a generic update scanner
type Scanner interface {
// IsAvailable checks if the scanner is available on this system
IsAvailable() bool
// Scan performs the actual scanning and returns update items
Scan() ([]client.UpdateReportItem, error)
// Name returns the scanner name for logging
Name() string
}
// ScannerConfig holds configuration for a single scanner
type ScannerConfig struct {
Scanner Scanner
CircuitBreaker *circuitbreaker.CircuitBreaker
Timeout time.Duration
Enabled bool
}
// ScanResult holds the result of a scanner execution
type ScanResult struct {
ScannerName string
Updates []client.UpdateReportItem
Error error
Duration time.Duration
Status string // "success", "failed", "disabled", "unavailable", "skipped"
}
// Orchestrator manages and coordinates multiple scanners
type Orchestrator struct {
scanners map[string]*ScannerConfig
mu sync.RWMutex
}
// NewOrchestrator creates a new scanner orchestrator
func NewOrchestrator() *Orchestrator {
return &Orchestrator{
scanners: make(map[string]*ScannerConfig),
}
}
// RegisterScanner adds a scanner to the orchestrator
func (o *Orchestrator) RegisterScanner(name string, scanner Scanner, cb *circuitbreaker.CircuitBreaker, timeout time.Duration, enabled bool) {
o.mu.Lock()
defer o.mu.Unlock()
o.scanners[name] = &ScannerConfig{
Scanner: scanner,
CircuitBreaker: cb,
Timeout: timeout,
Enabled: enabled,
}
}
// ScanAll executes all registered scanners in parallel
func (o *Orchestrator) ScanAll(ctx context.Context) ([]ScanResult, []client.UpdateReportItem) {
o.mu.RLock()
defer o.mu.RUnlock()
var wg sync.WaitGroup
resultsChan := make(chan ScanResult, len(o.scanners))
// Launch goroutine for each scanner
for name, scannerConfig := range o.scanners {
wg.Add(1)
go func(name string, cfg *ScannerConfig) {
defer wg.Done()
result := o.executeScan(ctx, name, cfg)
resultsChan <- result
}(name, scannerConfig)
}
// Wait for all scanners to complete
wg.Wait()
close(resultsChan)
// Collect results
var results []ScanResult
var allUpdates []client.UpdateReportItem
for result := range resultsChan {
results = append(results, result)
if result.Error == nil && len(result.Updates) > 0 {
allUpdates = append(allUpdates, result.Updates...)
}
}
return results, allUpdates
}
// ScanSingle executes a single scanner by name
func (o *Orchestrator) ScanSingle(ctx context.Context, scannerName string) (ScanResult, error) {
o.mu.RLock()
defer o.mu.RUnlock()
cfg, exists := o.scanners[scannerName]
if !exists {
return ScanResult{
ScannerName: scannerName,
Status: "failed",
Error: fmt.Errorf("scanner not found: %s", scannerName),
}, fmt.Errorf("scanner not found: %s", scannerName)
}
return o.executeScan(ctx, scannerName, cfg), nil
}
// executeScan runs a single scanner with circuit breaker and timeout protection
func (o *Orchestrator) executeScan(ctx context.Context, name string, cfg *ScannerConfig) ScanResult {
result := ScanResult{
ScannerName: name,
Status: "failed",
}
startTime := time.Now()
defer func() {
result.Duration = time.Since(startTime)
}()
// Check if enabled
if !cfg.Enabled {
result.Status = "disabled"
log.Printf("[%s] Scanner disabled via configuration", name)
return result
}
// Check if available
if !cfg.Scanner.IsAvailable() {
result.Status = "unavailable"
log.Printf("[%s] Scanner not available on this system", name)
return result
}
// Execute with circuit breaker and timeout
log.Printf("[%s] Starting scan...", name)
var updates []client.UpdateReportItem
err := cfg.CircuitBreaker.Call(func() error {
// Create timeout context
timeoutCtx, cancel := context.WithTimeout(ctx, cfg.Timeout)
defer cancel()
// Channel for scan result
type scanResult struct {
updates []client.UpdateReportItem
err error
}
scanChan := make(chan scanResult, 1)
// Run scan in goroutine
go func() {
u, e := cfg.Scanner.Scan()
scanChan <- scanResult{updates: u, err: e}
}()
// Wait for scan or timeout
select {
case <-timeoutCtx.Done():
return fmt.Errorf("scan timeout after %v", cfg.Timeout)
case res := <-scanChan:
if res.err != nil {
return res.err
}
updates = res.updates
return nil
}
})
if err != nil {
result.Error = err
result.Status = "failed"
log.Printf("[%s] Scan failed: %v", name, err)
return result
}
result.Updates = updates
result.Status = "success"
log.Printf("[%s] Scan completed: found %d updates (took %v)", name, len(updates), result.Duration)
return result
}
// GetScannerNames returns a list of all registered scanner names
func (o *Orchestrator) GetScannerNames() []string {
o.mu.RLock()
defer o.mu.RUnlock()
names := make([]string, 0, len(o.scanners))
for name := range o.scanners {
names = append(names, name)
}
return names
}
// FormatScanSummary creates a human-readable summary of scan results
func FormatScanSummary(results []ScanResult) (stdout string, stderr string, exitCode int) {
var successResults []string
var errorMessages []string
totalUpdates := 0
for _, result := range results {
switch result.Status {
case "success":
msg := fmt.Sprintf("%s: Found %d updates (%.2fs)",
result.ScannerName, len(result.Updates), result.Duration.Seconds())
successResults = append(successResults, msg)
totalUpdates += len(result.Updates)
case "failed":
msg := fmt.Sprintf("%s: %v", result.ScannerName, result.Error)
errorMessages = append(errorMessages, msg)
case "disabled":
successResults = append(successResults, fmt.Sprintf("%s: Disabled", result.ScannerName))
case "unavailable":
successResults = append(successResults, fmt.Sprintf("%s: Not available", result.ScannerName))
}
}
// Build stdout
if len(successResults) > 0 {
stdout = "Scan Results:\n"
for _, msg := range successResults {
stdout += fmt.Sprintf(" - %s\n", msg)
}
stdout += fmt.Sprintf("\nTotal Updates Found: %d\n", totalUpdates)
}
// Build stderr
if len(errorMessages) > 0 {
stderr = "Scan Errors:\n"
for _, msg := range errorMessages {
stderr += fmt.Sprintf(" - %s\n", msg)
}
}
// Determine exit code
if len(errorMessages) > 0 {
exitCode = 1
} else {
exitCode = 0
}
return stdout, stderr, exitCode
}

View File

@@ -0,0 +1,114 @@
package orchestrator
import (
"github.com/Fimeg/RedFlag/aggregator-agent/internal/client"
"github.com/Fimeg/RedFlag/aggregator-agent/internal/scanner"
)
// APTScannerWrapper wraps the APT scanner to implement the Scanner interface
type APTScannerWrapper struct {
scanner *scanner.APTScanner
}
func NewAPTScannerWrapper(s *scanner.APTScanner) *APTScannerWrapper {
return &APTScannerWrapper{scanner: s}
}
func (w *APTScannerWrapper) IsAvailable() bool {
return w.scanner.IsAvailable()
}
func (w *APTScannerWrapper) Scan() ([]client.UpdateReportItem, error) {
return w.scanner.Scan()
}
func (w *APTScannerWrapper) Name() string {
return "APT Update Scanner"
}
// DNFScannerWrapper wraps the DNF scanner to implement the Scanner interface
type DNFScannerWrapper struct {
scanner *scanner.DNFScanner
}
func NewDNFScannerWrapper(s *scanner.DNFScanner) *DNFScannerWrapper {
return &DNFScannerWrapper{scanner: s}
}
func (w *DNFScannerWrapper) IsAvailable() bool {
return w.scanner.IsAvailable()
}
func (w *DNFScannerWrapper) Scan() ([]client.UpdateReportItem, error) {
return w.scanner.Scan()
}
func (w *DNFScannerWrapper) Name() string {
return "DNF Update Scanner"
}
// DockerScannerWrapper wraps the Docker scanner to implement the Scanner interface
type DockerScannerWrapper struct {
scanner *scanner.DockerScanner
}
func NewDockerScannerWrapper(s *scanner.DockerScanner) *DockerScannerWrapper {
return &DockerScannerWrapper{scanner: s}
}
func (w *DockerScannerWrapper) IsAvailable() bool {
if w.scanner == nil {
return false
}
return w.scanner.IsAvailable()
}
func (w *DockerScannerWrapper) Scan() ([]client.UpdateReportItem, error) {
return w.scanner.Scan()
}
func (w *DockerScannerWrapper) Name() string {
return "Docker Image Update Scanner"
}
// WindowsUpdateScannerWrapper wraps the Windows Update scanner to implement the Scanner interface
type WindowsUpdateScannerWrapper struct {
scanner *scanner.WindowsUpdateScanner
}
func NewWindowsUpdateScannerWrapper(s *scanner.WindowsUpdateScanner) *WindowsUpdateScannerWrapper {
return &WindowsUpdateScannerWrapper{scanner: s}
}
func (w *WindowsUpdateScannerWrapper) IsAvailable() bool {
return w.scanner.IsAvailable()
}
func (w *WindowsUpdateScannerWrapper) Scan() ([]client.UpdateReportItem, error) {
return w.scanner.Scan()
}
func (w *WindowsUpdateScannerWrapper) Name() string {
return "Windows Update Scanner"
}
// WingetScannerWrapper wraps the Winget scanner to implement the Scanner interface
type WingetScannerWrapper struct {
scanner *scanner.WingetScanner
}
func NewWingetScannerWrapper(s *scanner.WingetScanner) *WingetScannerWrapper {
return &WingetScannerWrapper{scanner: s}
}
func (w *WingetScannerWrapper) IsAvailable() bool {
return w.scanner.IsAvailable()
}
func (w *WingetScannerWrapper) Scan() ([]client.UpdateReportItem, error) {
return w.scanner.Scan()
}
func (w *WingetScannerWrapper) Name() string {
return "Winget Package Update Scanner"
}

View File

@@ -0,0 +1,87 @@
package orchestrator
import (
"fmt"
"github.com/Fimeg/RedFlag/aggregator-agent/internal/client"
"github.com/Fimeg/RedFlag/aggregator-agent/internal/system"
)
// StorageScanner scans disk usage metrics
type StorageScanner struct {
agentVersion string
}
// NewStorageScanner creates a new storage scanner
func NewStorageScanner(agentVersion string) *StorageScanner {
return &StorageScanner{
agentVersion: agentVersion,
}
}
// IsAvailable always returns true since storage scanning is always available
func (s *StorageScanner) IsAvailable() bool {
return true
}
// Scan collects disk usage information and returns it as "updates" for reporting
func (s *StorageScanner) Scan() ([]client.UpdateReportItem, error) {
sysInfo, err := system.GetSystemInfo(s.agentVersion)
if err != nil {
return nil, fmt.Errorf("failed to get system info: %w", err)
}
if len(sysInfo.DiskInfo) == 0 {
return nil, fmt.Errorf("no disk information available")
}
// Convert disk info to UpdateReportItem format for reporting
// This is a bit unconventional but allows us to use the existing reporting infrastructure
var items []client.UpdateReportItem
for _, disk := range sysInfo.DiskInfo {
// Create a pseudo-update item for each disk
item := client.UpdateReportItem{
PackageName: fmt.Sprintf("disk-%s", disk.Mountpoint),
CurrentVersion: fmt.Sprintf("%.1f%% used", disk.UsedPercent),
AvailableVersion: fmt.Sprintf("%d GB available", disk.Available/(1024*1024*1024)),
PackageType: "storage",
Severity: determineDiskSeverity(disk.UsedPercent),
PackageDescription: fmt.Sprintf("Disk: %s (%s) - %s", disk.Mountpoint, disk.Filesystem, disk.Device),
Metadata: map[string]interface{}{
"mountpoint": disk.Mountpoint,
"filesystem": disk.Filesystem,
"device": disk.Device,
"disk_type": disk.DiskType,
"total_bytes": disk.Total,
"used_bytes": disk.Used,
"available_bytes": disk.Available,
"used_percent": disk.UsedPercent,
"is_root": disk.IsRoot,
"is_largest": disk.IsLargest,
},
}
items = append(items, item)
}
return items, nil
}
// Name returns the scanner name
func (s *StorageScanner) Name() string {
return "Disk Usage Reporter"
}
// determineDiskSeverity returns severity based on disk usage percentage
func determineDiskSeverity(usedPercent float64) string {
switch {
case usedPercent >= 95:
return "critical"
case usedPercent >= 90:
return "important"
case usedPercent >= 80:
return "moderate"
default:
return "low"
}
}

View File

@@ -0,0 +1,137 @@
package orchestrator
import (
"fmt"
"github.com/Fimeg/RedFlag/aggregator-agent/internal/client"
"github.com/Fimeg/RedFlag/aggregator-agent/internal/system"
)
// SystemScanner scans system metrics (CPU, memory, processes, uptime)
type SystemScanner struct {
agentVersion string
}
// NewSystemScanner creates a new system scanner
func NewSystemScanner(agentVersion string) *SystemScanner {
return &SystemScanner{
agentVersion: agentVersion,
}
}
// IsAvailable always returns true since system scanning is always available
func (s *SystemScanner) IsAvailable() bool {
return true
}
// Scan collects system information and returns it as "updates" for reporting
func (s *SystemScanner) Scan() ([]client.UpdateReportItem, error) {
sysInfo, err := system.GetSystemInfo(s.agentVersion)
if err != nil {
return nil, fmt.Errorf("failed to get system info: %w", err)
}
// Convert system info to UpdateReportItem format for reporting
var items []client.UpdateReportItem
// CPU info item
cpuItem := client.UpdateReportItem{
PackageName: "system-cpu",
CurrentVersion: fmt.Sprintf("%d cores, %d threads", sysInfo.CPUInfo.Cores, sysInfo.CPUInfo.Threads),
AvailableVersion: sysInfo.CPUInfo.ModelName,
PackageType: "system",
Severity: "low",
PackageDescription: fmt.Sprintf("CPU: %s", sysInfo.CPUInfo.ModelName),
Metadata: map[string]interface{}{
"cpu_model": sysInfo.CPUInfo.ModelName,
"cpu_cores": sysInfo.CPUInfo.Cores,
"cpu_threads": sysInfo.CPUInfo.Threads,
},
}
items = append(items, cpuItem)
// Memory info item
memItem := client.UpdateReportItem{
PackageName: "system-memory",
CurrentVersion: fmt.Sprintf("%.1f%% used", sysInfo.MemoryInfo.UsedPercent),
AvailableVersion: fmt.Sprintf("%d GB total", sysInfo.MemoryInfo.Total/(1024*1024*1024)),
PackageType: "system",
Severity: determineMemorySeverity(sysInfo.MemoryInfo.UsedPercent),
PackageDescription: fmt.Sprintf("Memory: %.1f GB / %.1f GB used",
float64(sysInfo.MemoryInfo.Used)/(1024*1024*1024),
float64(sysInfo.MemoryInfo.Total)/(1024*1024*1024)),
Metadata: map[string]interface{}{
"memory_total": sysInfo.MemoryInfo.Total,
"memory_used": sysInfo.MemoryInfo.Used,
"memory_available": sysInfo.MemoryInfo.Available,
"memory_used_percent": sysInfo.MemoryInfo.UsedPercent,
},
}
items = append(items, memItem)
// Process count item
processItem := client.UpdateReportItem{
PackageName: "system-processes",
CurrentVersion: fmt.Sprintf("%d processes", sysInfo.RunningProcesses),
AvailableVersion: "n/a",
PackageType: "system",
Severity: "low",
PackageDescription: fmt.Sprintf("Running Processes: %d", sysInfo.RunningProcesses),
Metadata: map[string]interface{}{
"process_count": sysInfo.RunningProcesses,
},
}
items = append(items, processItem)
// Uptime item
uptimeItem := client.UpdateReportItem{
PackageName: "system-uptime",
CurrentVersion: sysInfo.Uptime,
AvailableVersion: "n/a",
PackageType: "system",
Severity: "low",
PackageDescription: fmt.Sprintf("System Uptime: %s", sysInfo.Uptime),
Metadata: map[string]interface{}{
"uptime": sysInfo.Uptime,
},
}
items = append(items, uptimeItem)
// Reboot required item (if applicable)
if sysInfo.RebootRequired {
rebootItem := client.UpdateReportItem{
PackageName: "system-reboot",
CurrentVersion: "required",
AvailableVersion: "n/a",
PackageType: "system",
Severity: "important",
PackageDescription: fmt.Sprintf("Reboot Required: %s", sysInfo.RebootReason),
Metadata: map[string]interface{}{
"reboot_required": true,
"reboot_reason": sysInfo.RebootReason,
},
}
items = append(items, rebootItem)
}
return items, nil
}
// Name returns the scanner name
func (s *SystemScanner) Name() string {
return "System Metrics Reporter"
}
// determineMemorySeverity returns severity based on memory usage percentage
func determineMemorySeverity(usedPercent float64) string {
switch {
case usedPercent >= 95:
return "critical"
case usedPercent >= 90:
return "important"
case usedPercent >= 80:
return "moderate"
default:
return "low"
}
}