Files
Redflag/aggregator-agent/internal/orchestrator/storage_scanner.go
Fimeg 3690472396 feat: granular subsystem commands with parallel scanner execution
Split monolithic scan_updates into individual subsystems (updates/storage/system/docker).
Scanners now run in parallel via goroutines, roughly halving total scan time (often more when multiple subsystems are enabled).

Agent changes:
- Orchestrator pattern for scanner management
- New scanners: storage (disk metrics), system (cpu/mem/processes)
- New commands: scan_storage, scan_system, scan_docker
- Wrapped existing scanners (APT/DNF/Docker/Windows/Winget) with common interface
- Version bump to 0.1.20

Server changes:
- Migration 015: agent_subsystems table with trigger for auto-init
- Subsystem CRUD: enable/disable, interval (5min-24hr), auto-run toggle
- API routes: /api/v1/agents/:id/subsystems/* (9 endpoints)
- Stats tracking per subsystem

Web UI changes:
- ChatTimeline shows subsystem-specific labels and icons
- AgentScanners got interactive toggles, interval dropdowns, manual trigger buttons
- TypeScript types added for subsystems

Backward compatible with the legacy scan_updates command for now; edge cases are not yet fully tested, so further bug fixes are expected.
2025-11-01 21:34:26 -04:00

88 lines
2.5 KiB
Go

package orchestrator
import (
"fmt"
"github.com/Fimeg/RedFlag/aggregator-agent/internal/client"
"github.com/Fimeg/RedFlag/aggregator-agent/internal/system"
)
// StorageScanner scans disk usage metrics and reports them through the
// existing update-reporting infrastructure (see Scan).
type StorageScanner struct {
	agentVersion string // agent version string forwarded to system.GetSystemInfo
}
// NewStorageScanner returns a StorageScanner that stamps its system-info
// queries with the supplied agent version string.
func NewStorageScanner(agentVersion string) *StorageScanner {
	scanner := StorageScanner{agentVersion: agentVersion}
	return &scanner
}
// IsAvailable reports whether storage scanning can run on this host.
// Disk metrics are collectable on every supported platform, so this is
// unconditionally true.
func (s *StorageScanner) IsAvailable() bool {
	return true
}
// Scan collects disk usage information and returns it as "updates" for reporting.
//
// Each mounted disk becomes one pseudo-update item (package name
// "disk-<mountpoint>") so storage metrics can ride the existing update
// reporting pipeline. Returns an error if system info cannot be gathered
// or if no disk information is available.
func (s *StorageScanner) Scan() ([]client.UpdateReportItem, error) {
	sysInfo, err := system.GetSystemInfo(s.agentVersion)
	if err != nil {
		return nil, fmt.Errorf("failed to get system info: %w", err)
	}
	if len(sysInfo.DiskInfo) == 0 {
		return nil, fmt.Errorf("no disk information available")
	}

	// Convert disk info to UpdateReportItem format for reporting.
	// This is a bit unconventional but allows us to use the existing
	// reporting infrastructure. Pre-size the slice: exactly one item per disk.
	items := make([]client.UpdateReportItem, 0, len(sysInfo.DiskInfo))
	for _, disk := range sysInfo.DiskInfo {
		// Create a pseudo-update item for each disk. AvailableVersion uses
		// integer division, so the GB figure is truncated, not rounded.
		item := client.UpdateReportItem{
			PackageName:        fmt.Sprintf("disk-%s", disk.Mountpoint),
			CurrentVersion:     fmt.Sprintf("%.1f%% used", disk.UsedPercent),
			AvailableVersion:   fmt.Sprintf("%d GB available", disk.Available/(1024*1024*1024)),
			PackageType:        "storage",
			Severity:           determineDiskSeverity(disk.UsedPercent),
			PackageDescription: fmt.Sprintf("Disk: %s (%s) - %s", disk.Mountpoint, disk.Filesystem, disk.Device),
			Metadata: map[string]interface{}{
				"mountpoint":      disk.Mountpoint,
				"filesystem":      disk.Filesystem,
				"device":          disk.Device,
				"disk_type":       disk.DiskType,
				"total_bytes":     disk.Total,
				"used_bytes":      disk.Used,
				"available_bytes": disk.Available,
				"used_percent":    disk.UsedPercent,
				"is_root":         disk.IsRoot,
				"is_largest":      disk.IsLargest,
			},
		}
		items = append(items, item)
	}
	return items, nil
}
// Name returns the human-readable scanner name used in reports and logs.
func (s *StorageScanner) Name() string {
	const displayName = "Disk Usage Reporter"
	return displayName
}
// determineDiskSeverity maps a disk usage percentage onto a severity label:
// >=95 "critical", >=90 "important", >=80 "moderate", otherwise "low".
func determineDiskSeverity(pct float64) string {
	if pct >= 95 {
		return "critical"
	}
	if pct >= 90 {
		return "important"
	}
	if pct >= 80 {
		return "moderate"
	}
	return "low"
}