Implement proper storage metrics (P0-009)

- Add dedicated storage_metrics table
- Create StorageMetricReport models with proper field names
- Add ReportStorageMetrics to agent client
- Update storage scanner to use new method
- Implement server-side handlers and queries
- Register new routes and update UI
- Remove legacy Scan() method
- Follow ETHOS principles: honest naming, clean architecture

This commit is contained in:
Fimeg
2025-12-17 16:38:36 -05:00
parent f7c8d23c5d
commit 0fff047cb5
43 changed files with 3641 additions and 248 deletions

View File

@@ -8,7 +8,6 @@ import (
"math/rand"
"os"
"os/exec"
"path/filepath"
"runtime"
"strings"
"time"
@@ -18,6 +17,7 @@ import (
"github.com/Fimeg/RedFlag/aggregator-agent/internal/circuitbreaker"
"github.com/Fimeg/RedFlag/aggregator-agent/internal/client"
"github.com/Fimeg/RedFlag/aggregator-agent/internal/config"
"github.com/Fimeg/RedFlag/aggregator-agent/internal/constants"
"github.com/Fimeg/RedFlag/aggregator-agent/internal/crypto"
"github.com/Fimeg/RedFlag/aggregator-agent/internal/display"
"github.com/Fimeg/RedFlag/aggregator-agent/internal/installer"
@@ -26,33 +26,14 @@ import (
"github.com/Fimeg/RedFlag/aggregator-agent/internal/scanner"
"github.com/Fimeg/RedFlag/aggregator-agent/internal/service"
"github.com/Fimeg/RedFlag/aggregator-agent/internal/system"
"github.com/Fimeg/RedFlag/aggregator-agent/internal/version"
"github.com/google/uuid"
)
const (
AgentVersion = "0.1.23" // v0.1.23: Real security metrics and config sync
)
var (
lastConfigVersion int64 = 0 // Track last applied config version
)
// getConfigPath returns the platform-specific config path
func getConfigPath() string {
if runtime.GOOS == "windows" {
return "C:\\ProgramData\\RedFlag\\config.json"
}
return "/etc/redflag/config.json"
}
// getStatePath returns the platform-specific state directory path
func getStatePath() string {
if runtime.GOOS == "windows" {
return "C:\\ProgramData\\RedFlag\\state"
}
return "/var/lib/redflag"
}
// reportLogWithAck reports a command log to the server and tracks it for acknowledgment
func reportLogWithAck(apiClient *client.Client, cfg *config.Config, ackTracker *acknowledgment.Tracker, logReport client.LogReport) error {
// Track this command result as pending acknowledgment
@@ -85,7 +66,7 @@ func getCurrentPollingInterval(cfg *config.Config) int {
cfg.RapidPollingEnabled = false
cfg.RapidPollingUntil = time.Time{}
// Save the updated config to clean up expired rapid mode
if err := cfg.Save(getConfigPath()); err != nil {
if err := cfg.Save(constants.GetAgentConfigPath()); err != nil {
log.Printf("Warning: Failed to cleanup expired rapid polling mode: %v", err)
}
}
@@ -138,7 +119,7 @@ func main() {
// Handle version command
if *versionCmd {
fmt.Printf("RedFlag Agent v%s\n", AgentVersion)
fmt.Printf("RedFlag Agent v%s\n", version.Version)
fmt.Printf("Self-hosted update management platform\n")
os.Exit(0)
}
@@ -201,7 +182,7 @@ func main() {
ProxyHTTP: *proxyHTTP,
ProxyHTTPS: *proxyHTTPS,
ProxyNoProxy: *proxyNoProxy,
LogLevel: *logLevel,
LogLevel: *logLevel,
ConfigFile: *configFile,
Tags: tags,
Organization: *organization,
@@ -210,7 +191,7 @@ func main() {
}
// Determine config path
configPath := getConfigPath()
configPath := constants.GetAgentConfigPath()
if *configFile != "" {
configPath = *configFile
}
@@ -218,30 +199,30 @@ func main() {
// Check for migration requirements before loading configuration
migrationConfig := migration.NewFileDetectionConfig()
// Set old paths to detect existing installations
migrationConfig.OldConfigPath = "/etc/aggregator"
migrationConfig.OldStatePath = "/var/lib/aggregator"
migrationConfig.OldConfigPath = constants.LegacyConfigPath
migrationConfig.OldStatePath = constants.LegacyStatePath
// Set new paths that agent will actually use
migrationConfig.NewConfigPath = filepath.Dir(configPath)
migrationConfig.NewStatePath = getStatePath()
migrationConfig.NewConfigPath = constants.GetAgentConfigDir()
migrationConfig.NewStatePath = constants.GetAgentStateDir()
// Detect migration requirements
migrationDetection, err := migration.DetectMigrationRequirements(migrationConfig)
if err != nil {
log.Printf("Warning: Failed to detect migration requirements: %v", err)
} else if migrationDetection.RequiresMigration {
log.Printf("[RedFlag Server Migrator] Migration detected: %s → %s", migrationDetection.CurrentAgentVersion, AgentVersion)
log.Printf("[RedFlag Server Migrator] Migration detected: %s → %s", migrationDetection.CurrentAgentVersion, version.Version)
log.Printf("[RedFlag Server Migrator] Required migrations: %v", migrationDetection.RequiredMigrations)
// Create migration plan
migrationPlan := &migration.MigrationPlan{
Detection: migrationDetection,
TargetVersion: AgentVersion,
TargetVersion: version.Version,
Config: migrationConfig,
BackupPath: filepath.Join(getStatePath(), "migration_backups"), // Set backup path within agent's state directory
BackupPath: constants.GetMigrationBackupDir(), // Set backup path within agent's state directory
}
// Execute migration
executor := migration.NewMigrationExecutor(migrationPlan)
executor := migration.NewMigrationExecutor(migrationPlan, configPath)
result, err := executor.ExecuteMigration()
if err != nil {
log.Printf("[RedFlag Server Migrator] Migration failed: %v", err)
@@ -262,14 +243,14 @@ func main() {
}
// Always set the current agent version in config
if cfg.AgentVersion != AgentVersion {
if cfg.AgentVersion != version.Version {
if cfg.AgentVersion != "" {
log.Printf("[RedFlag Server Migrator] Version change detected: %s → %s", cfg.AgentVersion, AgentVersion)
log.Printf("[RedFlag Server Migrator] Version change detected: %s → %s", cfg.AgentVersion, version.Version)
log.Printf("[RedFlag Server Migrator] Performing lightweight migration check...")
}
// Update config version to match current agent
cfg.AgentVersion = AgentVersion
cfg.AgentVersion = version.Version
// Save updated config
if err := cfg.Save(configPath); err != nil {
@@ -364,7 +345,7 @@ func main() {
func registerAgent(cfg *config.Config, serverURL string) error {
// Get detailed system information
sysInfo, err := system.GetSystemInfo(AgentVersion)
sysInfo, err := system.GetSystemInfo(version.Version)
if err != nil {
log.Printf("Warning: Failed to get detailed system info: %v\n", err)
// Fall back to basic detection
@@ -375,7 +356,7 @@ func registerAgent(cfg *config.Config, serverURL string) error {
OSType: osType,
OSVersion: osVersion,
OSArchitecture: osArch,
AgentVersion: AgentVersion,
AgentVersion: version.Version,
Metadata: make(map[string]string),
}
}
@@ -429,14 +410,14 @@ func registerAgent(cfg *config.Config, serverURL string) error {
}
req := client.RegisterRequest{
Hostname: sysInfo.Hostname,
OSType: sysInfo.OSType,
OSVersion: sysInfo.OSVersion,
OSArchitecture: sysInfo.OSArchitecture,
AgentVersion: sysInfo.AgentVersion,
MachineID: machineID,
Hostname: sysInfo.Hostname,
OSType: sysInfo.OSType,
OSVersion: sysInfo.OSVersion,
OSArchitecture: sysInfo.OSArchitecture,
AgentVersion: sysInfo.AgentVersion,
MachineID: machineID,
PublicKeyFingerprint: publicKeyFingerprint,
Metadata: metadata,
Metadata: metadata,
}
resp, err := apiClient.Register(req)
@@ -458,7 +439,7 @@ func registerAgent(cfg *config.Config, serverURL string) error {
}
// Save configuration
if err := cfg.Save(getConfigPath()); err != nil {
if err := cfg.Save(constants.GetAgentConfigPath()); err != nil {
return fmt.Errorf("failed to save config: %w", err)
}
@@ -496,7 +477,7 @@ func renewTokenIfNeeded(apiClient *client.Client, cfg *config.Config, err error)
tempClient := client.NewClient(cfg.ServerURL, "")
// Attempt to renew access token using refresh token
if err := tempClient.RenewToken(cfg.AgentID, cfg.RefreshToken); err != nil {
if err := tempClient.RenewToken(cfg.AgentID, cfg.RefreshToken, version.Version); err != nil {
log.Printf("❌ Refresh token renewal failed: %v", err)
log.Printf("💡 Refresh token may be expired (>90 days) - re-registration required")
return nil, fmt.Errorf("refresh token renewal failed: %w - please re-register agent", err)
@@ -506,7 +487,7 @@ func renewTokenIfNeeded(apiClient *client.Client, cfg *config.Config, err error)
cfg.Token = tempClient.GetToken()
// Save updated config
if err := cfg.Save(getConfigPath()); err != nil {
if err := cfg.Save(constants.GetAgentConfigPath()); err != nil {
log.Printf("⚠️ Warning: Failed to save renewed access token: %v", err)
}
@@ -625,7 +606,7 @@ func syncServerConfig(apiClient *client.Client, cfg *config.Config) error {
}
func runAgent(cfg *config.Config) error {
log.Printf("🚩 RedFlag Agent v%s starting...\n", AgentVersion)
log.Printf("🚩 RedFlag Agent v%s starting...\n", version.Version)
log.Printf("==================================================================")
log.Printf("📋 AGENT ID: %s", cfg.AgentID)
log.Printf("🌐 SERVER: %s", cfg.ServerURL)
@@ -688,7 +669,7 @@ func runAgent(cfg *config.Config) error {
// - System: handleScanSystem → ReportMetrics()
// Initialize acknowledgment tracker for command result reliability
ackTracker := acknowledgment.NewTracker(getStatePath())
ackTracker := acknowledgment.NewTracker(constants.GetAgentStateDir())
if err := ackTracker.Load(); err != nil {
log.Printf("Warning: Failed to load pending acknowledgments: %v", err)
} else {
@@ -734,7 +715,7 @@ func runAgent(cfg *config.Config) error {
}
}
log.Printf("Checking in with server... (Agent v%s)", AgentVersion)
log.Printf("Checking in with server... (Agent v%s)", version.Version)
// Collect lightweight system metrics
sysMetrics, err := system.GetLightweightMetrics()
@@ -749,7 +730,7 @@ func runAgent(cfg *config.Config) error {
DiskTotalGB: sysMetrics.DiskTotalGB,
DiskPercent: sysMetrics.DiskPercent,
Uptime: sysMetrics.Uptime,
Version: AgentVersion,
Version: version.Version,
}
}
@@ -890,7 +871,6 @@ func runAgent(cfg *config.Config) error {
log.Printf("[Heartbeat] Error disabling heartbeat: %v\n", err)
}
case "reboot":
if err := handleReboot(apiClient, cfg, ackTracker, cmd.ID, cmd.Params); err != nil {
log.Printf("[Reboot] Error processing reboot command: %v\n", err)
@@ -1298,26 +1278,26 @@ func handleDryRunUpdate(apiClient *client.Client, cfg *config.Config, ackTracker
// Convert installer.InstallResult to client.InstallResult for reporting
clientResult := &client.InstallResult{
Success: result.Success,
ErrorMessage: result.ErrorMessage,
Stdout: result.Stdout,
Stderr: result.Stderr,
ExitCode: result.ExitCode,
DurationSeconds: result.DurationSeconds,
Action: result.Action,
Success: result.Success,
ErrorMessage: result.ErrorMessage,
Stdout: result.Stdout,
Stderr: result.Stderr,
ExitCode: result.ExitCode,
DurationSeconds: result.DurationSeconds,
Action: result.Action,
PackagesInstalled: result.PackagesInstalled,
ContainersUpdated: result.ContainersUpdated,
Dependencies: result.Dependencies,
IsDryRun: true,
Dependencies: result.Dependencies,
IsDryRun: true,
}
// Report dependencies back to server
depReport := client.DependencyReport{
PackageName: packageName,
PackageType: packageType,
Dependencies: result.Dependencies,
UpdateID: params["update_id"].(string),
DryRunResult: clientResult,
PackageName: packageName,
PackageType: packageType,
Dependencies: result.Dependencies,
UpdateID: params["update_id"].(string),
DryRunResult: clientResult,
}
if reportErr := apiClient.ReportDependencies(cfg.AgentID, depReport); reportErr != nil {
@@ -1490,7 +1470,7 @@ func handleEnableHeartbeat(apiClient *client.Client, cfg *config.Config, ackTrac
cfg.RapidPollingUntil = expiryTime
// Save config to persist heartbeat settings
if err := cfg.Save(getConfigPath()); err != nil {
if err := cfg.Save(constants.GetAgentConfigPath()); err != nil {
log.Printf("[Heartbeat] Warning: Failed to save config: %v", err)
}
@@ -1522,7 +1502,7 @@ func handleEnableHeartbeat(apiClient *client.Client, cfg *config.Config, ackTrac
DiskTotalGB: sysMetrics.DiskTotalGB,
DiskPercent: sysMetrics.DiskPercent,
Uptime: sysMetrics.Uptime,
Version: AgentVersion,
Version: version.Version,
}
// Include heartbeat metadata to show enabled state
metrics.Metadata = map[string]interface{}{
@@ -1554,7 +1534,7 @@ func handleDisableHeartbeat(apiClient *client.Client, cfg *config.Config, ackTra
cfg.RapidPollingUntil = time.Time{} // Zero value
// Save config to persist heartbeat settings
if err := cfg.Save(getConfigPath()); err != nil {
if err := cfg.Save(constants.GetAgentConfigPath()); err != nil {
log.Printf("[Heartbeat] Warning: Failed to save config: %v", err)
}
@@ -1586,7 +1566,7 @@ func handleDisableHeartbeat(apiClient *client.Client, cfg *config.Config, ackTra
DiskTotalGB: sysMetrics.DiskTotalGB,
DiskPercent: sysMetrics.DiskPercent,
Uptime: sysMetrics.Uptime,
Version: AgentVersion,
Version: version.Version,
}
// Include empty heartbeat metadata to explicitly show disabled state
metrics.Metadata = map[string]interface{}{
@@ -1612,7 +1592,7 @@ func handleDisableHeartbeat(apiClient *client.Client, cfg *config.Config, ackTra
// reportSystemInfo collects and reports detailed system information to the server
func reportSystemInfo(apiClient *client.Client, cfg *config.Config) error {
// Collect detailed system information
sysInfo, err := system.GetSystemInfo(AgentVersion)
sysInfo, err := system.GetSystemInfo(version.Version)
if err != nil {
return fmt.Errorf("failed to get system info: %w", err)
}
@@ -1620,16 +1600,16 @@ func reportSystemInfo(apiClient *client.Client, cfg *config.Config) error {
// Create system info report
report := client.SystemInfoReport{
Timestamp: time.Now(),
CPUModel: sysInfo.CPUInfo.ModelName,
CPUCores: sysInfo.CPUInfo.Cores,
CPUThreads: sysInfo.CPUInfo.Threads,
MemoryTotal: sysInfo.MemoryInfo.Total,
DiskTotal: uint64(0),
DiskUsed: uint64(0),
IPAddress: sysInfo.IPAddress,
Processes: sysInfo.RunningProcesses,
Uptime: sysInfo.Uptime,
Metadata: make(map[string]interface{}),
CPUModel: sysInfo.CPUInfo.ModelName,
CPUCores: sysInfo.CPUInfo.Cores,
CPUThreads: sysInfo.CPUInfo.Threads,
MemoryTotal: sysInfo.MemoryInfo.Total,
DiskTotal: uint64(0),
DiskUsed: uint64(0),
IPAddress: sysInfo.IPAddress,
Processes: sysInfo.RunningProcesses,
Uptime: sysInfo.Uptime,
Metadata: make(map[string]interface{}),
}
// Add primary disk info

View File

@@ -122,8 +122,8 @@ func handleScanStorage(apiClient *client.Client, cfg *config.Config, ackTracker
}
// Report storage metrics to server using dedicated endpoint
// Get storage scanner and use proper interface
storageScanner := orchestrator.NewStorageScanner("unknown") // TODO: Get actual agent version
// Use proper StorageMetricReport with clean field names
storageScanner := orchestrator.NewStorageScanner(cfg.AgentVersion)
if storageScanner.IsAvailable() {
metrics, err := storageScanner.ScanStorage()
if err != nil {
@@ -131,32 +131,38 @@ func handleScanStorage(apiClient *client.Client, cfg *config.Config, ackTracker
}
if len(metrics) > 0 {
// Convert StorageMetric to MetricsReportItem for API call
metricItems := make([]client.MetricsReportItem, 0, len(metrics))
for _, metric := range metrics {
item := client.MetricsReportItem{
PackageType: "storage",
PackageName: metric.Mountpoint,
CurrentVersion: fmt.Sprintf("%d bytes used", metric.UsedBytes),
AvailableVersion: fmt.Sprintf("%d bytes total", metric.TotalBytes),
Severity: metric.Severity,
RepositorySource: metric.Filesystem,
Metadata: metric.Metadata,
// Convert from orchestrator.StorageMetric to models.StorageMetric
metricItems := make([]models.StorageMetric, 0, len(metrics))
for _, m := range metrics {
item := models.StorageMetric{
Mountpoint: m.Mountpoint,
Device: m.Device,
DiskType: m.DiskType,
Filesystem: m.Filesystem,
TotalBytes: m.TotalBytes,
UsedBytes: m.UsedBytes,
AvailableBytes: m.AvailableBytes,
UsedPercent: m.UsedPercent,
IsRoot: m.IsRoot,
IsLargest: m.IsLargest,
Severity: m.Severity,
Metadata: m.Metadata,
}
metricItems = append(metricItems, item)
}
report := client.MetricsReport{
report := models.StorageMetricReport{
AgentID: cfg.AgentID,
CommandID: commandID,
Timestamp: time.Now(),
Metrics: metricItems,
}
if err := apiClient.ReportMetrics(cfg.AgentID, report); err != nil {
if err := apiClient.ReportStorageMetrics(cfg.AgentID, report); err != nil {
return fmt.Errorf("failed to report storage metrics: %w", err)
}
log.Printf("✓ Reported %d storage metrics to server\n", len(metrics))
log.Printf("[INFO] [storage] Successfully reported %d storage metrics to server\n", len(metrics))
}
}

View File

@@ -8,6 +8,7 @@ import (
"time"
"github.com/Fimeg/RedFlag/aggregator-agent/internal/client"
"github.com/Fimeg/RedFlag/aggregator-agent/internal/constants"
"github.com/google/uuid"
)
@@ -22,15 +23,12 @@ type LocalCache struct {
AgentStatus string `json:"agent_status"`
}
// CacheDir is the directory where local cache is stored
const CacheDir = "/var/lib/redflag-agent"
// CacheFile is the file where scan results are cached
const CacheFile = "last_scan.json"
// cacheFile is the file where scan results are cached
const cacheFile = "last_scan.json"
// GetCachePath returns the full path to the cache file
func GetCachePath() string {
return filepath.Join(CacheDir, CacheFile)
return filepath.Join(constants.GetAgentCacheDir(), cacheFile)
}
// Load reads the local cache from disk
@@ -62,7 +60,7 @@ func (c *LocalCache) Save() error {
cachePath := GetCachePath()
// Ensure cache directory exists
if err := os.MkdirAll(CacheDir, 0755); err != nil {
if err := os.MkdirAll(constants.GetAgentCacheDir(), 0755); err != nil {
return fmt.Errorf("failed to create cache directory: %w", err)
}

View File

@@ -576,6 +576,37 @@ func (c *Client) ReportDockerImages(agentID uuid.UUID, report DockerReport) erro
return nil
}
// ReportStorageMetrics sends storage metrics to the server via dedicated endpoint.
// It POSTs the JSON-encoded report to /api/v1/agents/{id}/storage-metrics with
// bearer-token auth and the machine-ID binding header; any non-200 response is
// returned as an error that includes the server's response body.
func (c *Client) ReportStorageMetrics(agentID uuid.UUID, report StorageMetricReport) error {
	endpoint := fmt.Sprintf("%s/api/v1/agents/%s/storage-metrics", c.baseURL, agentID)

	payload, err := json.Marshal(report)
	if err != nil {
		return fmt.Errorf("failed to marshal storage metrics: %w", err)
	}

	req, err := http.NewRequest("POST", endpoint, bytes.NewReader(payload))
	if err != nil {
		return fmt.Errorf("failed to create request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Authorization", "Bearer "+c.token)
	c.addMachineIDHeader(req) // Security: Validate machine binding (v0.1.22+)

	resp, err := c.http.Do(req)
	if err != nil {
		return fmt.Errorf("failed to send request: %w", err)
	}
	defer resp.Body.Close()

	if resp.StatusCode == http.StatusOK {
		return nil
	}
	respBody, _ := io.ReadAll(resp.Body)
	return fmt.Errorf("failed to report storage metrics: %s - %s", resp.Status, string(respBody))
}
// LogReport represents an execution log
type LogReport struct {
CommandID string `json:"command_id"`

View File

@@ -31,7 +31,7 @@ func CalculateChecksum(filePath string) (string, error) {
// IsRequiredFile determines if a file is required for agent operation
func IsRequiredFile(path string) bool {
requiredFiles := []string{
"/etc/redflag/config.json",
"/etc/redflag/agent/config.json", // Agent config in nested structure
"/usr/local/bin/redflag-agent",
"/etc/systemd/system/redflag-agent.service",
}

View File

@@ -324,12 +324,13 @@ func migrateConfig(cfg *Config) {
}
// Migration 2: Add missing subsystem fields with defaults
if cfg.Subsystems.System.Timeout == 0 && cfg.Subsystems.System.CircuitBreaker.FailureThreshold == 0 {
// Check if subsystem is zero value (truly missing), not just has zero fields
if cfg.Subsystems.System == (SubsystemConfig{}) {
fmt.Printf("[CONFIG] Adding missing 'system' subsystem configuration\n")
cfg.Subsystems.System = GetDefaultSubsystemsConfig().System
}
if cfg.Subsystems.Updates.Timeout == 0 && cfg.Subsystems.Updates.CircuitBreaker.FailureThreshold == 0 {
if cfg.Subsystems.Updates == (SubsystemConfig{}) {
fmt.Printf("[CONFIG] Adding missing 'updates' subsystem configuration\n")
cfg.Subsystems.Updates = GetDefaultSubsystemsConfig().Updates
}

View File

@@ -0,0 +1,96 @@
// Package constants provides centralized path definitions for the RedFlag agent.
// This package ensures consistency across all components and makes path management
// maintainable and testable.
package constants
import (
"runtime"
"path/filepath"
)
// Base directories
const (
LinuxBaseDir = "/var/lib/redflag"
WindowsBaseDir = "C:\\ProgramData\\RedFlag"
)
// Subdirectory structure
const (
AgentDir = "agent"
ServerDir = "server"
CacheSubdir = "cache"
StateSubdir = "state"
MigrationSubdir = "migration_backups"
)
// Config paths
const (
LinuxConfigBase = "/etc/redflag"
WindowsConfigBase = "C:\\ProgramData\\RedFlag"
ConfigFile = "config.json"
)
// Log paths
const (
LinuxLogBase = "/var/log/redflag"
)
// Legacy paths for migration
const (
LegacyConfigPath = "/etc/aggregator/config.json"
LegacyStatePath = "/var/lib/aggregator"
)
// GetBaseDir returns platform-specific base directory
func GetBaseDir() string {
if runtime.GOOS == "windows" {
return WindowsBaseDir
}
return LinuxBaseDir
}
// GetAgentStateDir returns /var/lib/redflag/agent/state
func GetAgentStateDir() string {
return filepath.Join(GetBaseDir(), AgentDir, StateSubdir)
}
// GetAgentCacheDir returns /var/lib/redflag/agent/cache
func GetAgentCacheDir() string {
return filepath.Join(GetBaseDir(), AgentDir, CacheSubdir)
}
// GetMigrationBackupDir returns /var/lib/redflag/agent/migration_backups
func GetMigrationBackupDir() string {
return filepath.Join(GetBaseDir(), AgentDir, MigrationSubdir)
}
// GetAgentConfigPath returns /etc/redflag/agent/config.json
func GetAgentConfigPath() string {
if runtime.GOOS == "windows" {
return filepath.Join(WindowsConfigBase, AgentDir, ConfigFile)
}
return filepath.Join(LinuxConfigBase, AgentDir, ConfigFile)
}
// GetAgentConfigDir returns /etc/redflag/agent
func GetAgentConfigDir() string {
if runtime.GOOS == "windows" {
return filepath.Join(WindowsConfigBase, AgentDir)
}
return filepath.Join(LinuxConfigBase, AgentDir)
}
// GetAgentLogDir returns /var/log/redflag/agent
func GetAgentLogDir() string {
return filepath.Join(LinuxLogBase, AgentDir)
}
// GetLegacyAgentConfigPath returns legacy /etc/aggregator/config.json
func GetLegacyAgentConfigPath() string {
return LegacyConfigPath
}
// GetLegacyAgentStatePath returns legacy /var/lib/aggregator
func GetLegacyAgentStatePath() string {
return LegacyStatePath
}

View File

@@ -12,6 +12,7 @@ import (
"time"
"github.com/Fimeg/RedFlag/aggregator-agent/internal/common"
"github.com/Fimeg/RedFlag/aggregator-agent/internal/constants"
"github.com/Fimeg/RedFlag/aggregator-agent/internal/version"
)
@@ -58,11 +59,11 @@ type FileDetectionConfig struct {
// NewFileDetectionConfig creates a default detection configuration
func NewFileDetectionConfig() *FileDetectionConfig {
return &FileDetectionConfig{
OldConfigPath: "/etc/aggregator",
OldStatePath: "/var/lib/aggregator",
NewConfigPath: "/etc/redflag",
NewStatePath: "/var/lib/redflag-agent",
BackupDirPattern: "/var/lib/redflag-agent/migration_backups_%s",
OldConfigPath: constants.LegacyConfigPath,
OldStatePath: constants.LegacyStatePath,
NewConfigPath: constants.GetAgentConfigDir(),
NewStatePath: constants.GetAgentStateDir(),
BackupDirPattern: constants.GetMigrationBackupDir() + "/%d",
}
}

View File

@@ -0,0 +1,235 @@
package pathutils
import (
"fmt"
"os"
"path/filepath"
"strings"
)
// PathManager provides centralized path operations with validation for the
// config/state migration: normalization, existence checks, directory
// creation, and old→new path mapping.
type PathManager struct {
	config *Config
}

// Config holds path configuration for migration.
type Config struct {
	OldConfigPath    string
	OldStatePath     string
	NewConfigPath    string
	NewStatePath     string
	BackupDirPattern string
}

// NewPathManager creates a new path manager with cleaned configuration.
// All paths are trimmed and filepath.Clean-ed so later prefix comparisons
// operate on canonical forms.
func NewPathManager(config *Config) *PathManager {
	cleanConfig := &Config{
		OldConfigPath:    filepath.Clean(strings.TrimSpace(config.OldConfigPath)),
		OldStatePath:     filepath.Clean(strings.TrimSpace(config.OldStatePath)),
		NewConfigPath:    filepath.Clean(strings.TrimSpace(config.NewConfigPath)),
		NewStatePath:     filepath.Clean(strings.TrimSpace(config.NewStatePath)),
		BackupDirPattern: strings.TrimSpace(config.BackupDirPattern),
	}
	return &PathManager{config: cleanConfig}
}

// hasPathPrefix reports whether path is base itself or lies inside base.
// FIX: unlike the previous raw strings.HasPrefix test, this only matches on
// a path-component boundary, so the sibling "/etc/aggregator-extra" is NOT
// considered inside "/etc/aggregator".
func hasPathPrefix(path, base string) bool {
	return path == base || strings.HasPrefix(path, base+string(filepath.Separator))
}

// containsParentRef reports whether any path component is exactly "..".
// FIX: the previous substring test (strings.Contains(p, "..")) wrongly
// rejected legitimate names such as "file..txt".
func containsParentRef(path string) bool {
	for _, part := range strings.Split(path, string(filepath.Separator)) {
		if part == ".." {
			return true
		}
	}
	return false
}

// NormalizeToAbsolute ensures a path is absolute and cleaned.
// It errors on empty paths, relative paths, and paths that still contain a
// ".." component after cleaning.
func (pm *PathManager) NormalizeToAbsolute(path string) (string, error) {
	if path == "" {
		return "", fmt.Errorf("path cannot be empty")
	}
	cleaned := filepath.Clean(path)
	// Check for path traversal attempts (component-wise, see containsParentRef).
	if containsParentRef(cleaned) {
		return "", fmt.Errorf("path contains parent directory reference: %s", path)
	}
	if !filepath.IsAbs(cleaned) {
		return "", fmt.Errorf("path must be absolute: %s", path)
	}
	return cleaned, nil
}

// ValidatePath validates that a single path exists and is accessible.
func (pm *PathManager) ValidatePath(path string) error {
	if path == "" {
		return fmt.Errorf("path cannot be empty")
	}
	normalized, err := pm.NormalizeToAbsolute(path)
	if err != nil {
		return err
	}
	info, err := os.Stat(normalized)
	if err != nil {
		if os.IsNotExist(err) {
			return fmt.Errorf("path does not exist: %s", normalized)
		}
		return fmt.Errorf("failed to access path %s: %w", normalized, err)
	}
	// Additional check for files under /etc: require at least one of the
	// world-read/world-write permission bits.
	// NOTE(review): this only inspects the "other" permission bits, so a
	// root-owned 0600 config would be reported unreadable — confirm this
	// is the intended policy.
	if strings.HasPrefix(normalized, "/etc/") {
		if info.Mode().Perm()&0004 == 0 && info.Mode().Perm()&0002 == 0 {
			return fmt.Errorf("config file is not readable: %s", normalized)
		}
	}
	return nil
}

// EnsureDirectory creates the directory (and parents) if it doesn't exist.
// It errors if the path exists but is not a directory.
func (pm *PathManager) EnsureDirectory(path string) error {
	normalized, err := pm.NormalizeToAbsolute(path)
	if err != nil {
		return err
	}
	if info, err := os.Stat(normalized); err == nil {
		if !info.IsDir() {
			return fmt.Errorf("path exists but is not a directory: %s", normalized)
		}
		return nil
	}
	if err := os.MkdirAll(normalized, 0755); err != nil {
		return fmt.Errorf("failed to create directory %s: %w", normalized, err)
	}
	return nil
}

// GetRelativePath gets the path of fullPath relative to basePath.
// If fullPath is not inside basePath (component-wise), or the computed
// relative path would escape it, the bare filename is returned instead of
// an error so migration can still place the file somewhere sensible.
func (pm *PathManager) GetRelativePath(basePath, fullPath string) (string, error) {
	normBase, err := pm.NormalizeToAbsolute(basePath)
	if err != nil {
		return "", fmt.Errorf("invalid base path: %w", err)
	}
	normFull, err := pm.NormalizeToAbsolute(fullPath)
	if err != nil {
		return "", fmt.Errorf("invalid full path: %w", err)
	}
	// FIX: boundary-aware containment test (see hasPathPrefix).
	if !hasPathPrefix(normFull, normBase) {
		// Not under base path, use filename-only approach.
		return filepath.Base(normFull), nil
	}
	rel, err := filepath.Rel(normBase, normFull)
	if err != nil {
		return "", fmt.Errorf("failed to get relative path from %s to %s: %w", normBase, normFull, err)
	}
	// Final safety check: never hand back an escaping relative path.
	if containsParentRef(rel) {
		return filepath.Base(normFull), nil
	}
	return rel, nil
}

// JoinPath joins path components safely under base. Components are cleaned
// individually and filepath.Join resolves the result.
// FIX: the previous declaration `(base, components ...string)` applied the
// variadic marker to a shared identifier list, which is not valid Go; the
// "path traversal fallback" branch also produced the exact same join as the
// primary path, so a single join preserves the intended behavior.
func (pm *PathManager) JoinPath(base string, components ...string) string {
	// Ensure base is absolute and cleaned when possible.
	if absBase, err := pm.NormalizeToAbsolute(base); err == nil {
		base = absBase
	}
	cleaned := make([]string, len(components))
	for i, comp := range components {
		cleaned[i] = filepath.Clean(comp)
	}
	return filepath.Join(append([]string{base}, cleaned...)...)
}

// GetConfig returns the path configuration.
func (pm *PathManager) GetConfig() *Config {
	return pm.config
}

// ValidateConfig validates that all configured paths are set and absolute.
func (pm *PathManager) ValidateConfig() error {
	if pm.config.OldConfigPath == "" || pm.config.OldStatePath == "" {
		return fmt.Errorf("old paths cannot be empty")
	}
	if pm.config.NewConfigPath == "" || pm.config.NewStatePath == "" {
		return fmt.Errorf("new paths cannot be empty")
	}
	if pm.config.BackupDirPattern == "" {
		return fmt.Errorf("backup dir pattern cannot be empty")
	}
	paths := []string{
		pm.config.OldConfigPath,
		pm.config.OldStatePath,
		pm.config.NewConfigPath,
		pm.config.NewStatePath,
	}
	for _, path := range paths {
		if !filepath.IsAbs(path) {
			return fmt.Errorf("path must be absolute: %s", path)
		}
	}
	return nil
}

// GetNewPathForOldPath determines the new path for a file that was in an old
// location. Files under OldConfigPath map into NewConfigPath, files under
// OldStatePath map into NewStatePath, and anything else is returned
// unchanged (normalized).
func (pm *PathManager) GetNewPathForOldPath(oldPath string) (string, error) {
	normalizedOld, err := pm.NormalizeToAbsolute(oldPath)
	if err != nil {
		return "", fmt.Errorf("invalid old path: %w", err)
	}
	// FIX: boundary-aware containment tests; the old raw HasPrefix check
	// mapped sibling directories (e.g. "/etc/aggregator-old") incorrectly.
	if hasPathPrefix(normalizedOld, pm.config.OldConfigPath) {
		relPath, err := pm.GetRelativePath(pm.config.OldConfigPath, normalizedOld)
		if err != nil {
			return "", err
		}
		return pm.JoinPath(pm.config.NewConfigPath, relPath), nil
	}
	if hasPathPrefix(normalizedOld, pm.config.OldStatePath) {
		relPath, err := pm.GetRelativePath(pm.config.OldStatePath, normalizedOld)
		if err != nil {
			return "", err
		}
		return pm.JoinPath(pm.config.NewStatePath, relPath), nil
	}
	// File is not in expected old locations, return as is.
	return normalizedOld, nil
}

View File

@@ -0,0 +1,172 @@
package migration
import (
"encoding/json"
"fmt"
"os"
"time"
"github.com/Fimeg/RedFlag/aggregator-agent/internal/config"
)
// MigrationState is imported from config package to avoid duplication
// StateManager persists migration state by reading and writing the agent's
// config file.
type StateManager struct {
	configPath string // path of the config.json that stores MigrationState
}

// NewStateManager returns a StateManager bound to the given config file path.
func NewStateManager(configPath string) *StateManager {
	sm := new(StateManager)
	sm.configPath = configPath
	return sm
}
// LoadState loads migration state from config file.
// If the config file does not exist (fresh install), or exists but has no
// recorded MigrationState yet, a zeroed state with initialized collections
// is returned instead of an error so callers can treat all cases uniformly.
func (sm *StateManager) LoadState() (*config.MigrationState, error) {
	// Load config to get migration state
	cfg, err := sm.loadConfig()
	if err != nil {
		if os.IsNotExist(err) {
			// Fresh install - no migration state yet
			return &config.MigrationState{
				LastCompleted:       make(map[string]time.Time),
				AgentVersion:        "",
				ConfigVersion:       "",
				Timestamp:           time.Now(),
				Success:             false,
				CompletedMigrations: []string{},
			}, nil
		}
		return nil, fmt.Errorf("failed to load config: %w", err)
	}

	// Check if migration state exists in config
	if cfg.MigrationState == nil {
		// Config exists but has never recorded a migration: seed the
		// default state from the config's own version fields.
		return &config.MigrationState{
			LastCompleted:       make(map[string]time.Time),
			AgentVersion:        cfg.AgentVersion,
			ConfigVersion:       cfg.Version,
			Timestamp:           time.Now(),
			Success:             false,
			CompletedMigrations: []string{},
		}, nil
	}

	return cfg.MigrationState, nil
}
// SaveState saves migration state to config file.
// The state's Timestamp is refreshed to "now" before writing; note this
// mutates the caller's state value (it is stored in the config by pointer,
// so the assignment order below is not a bug).
func (sm *StateManager) SaveState(state *config.MigrationState) error {
	// Load current config
	cfg, err := sm.loadConfig()
	if err != nil {
		return fmt.Errorf("failed to load config for state save: %w", err)
	}

	// Update migration state
	cfg.MigrationState = state
	state.Timestamp = time.Now()

	// Save config with updated state
	return sm.saveConfig(cfg)
}
// IsMigrationCompleted checks if a specific migration was completed.
// It first consults the CompletedMigrations list, then falls back to the
// legacy LastCompleted timestamp map for backward compatibility.
func (sm *StateManager) IsMigrationCompleted(migrationType string) (bool, error) {
	state, err := sm.LoadState()
	if err != nil {
		return false, err
	}

	// Current format: explicit list of completed migration names.
	for _, name := range state.CompletedMigrations {
		if name == migrationType {
			return true, nil
		}
	}

	// Legacy format: non-zero timestamp in the last_completed map.
	if ts, ok := state.LastCompleted[migrationType]; ok {
		return !ts.IsZero(), nil
	}
	return false, nil
}
// MarkMigrationCompleted marks a migration as completed and persists the
// updated state. The migration name is appended to CompletedMigrations if
// not already present, its completion timestamp is recorded once (the
// original wrote it twice — inside the scan loop and again after it), and
// the agent version / success flag / optional rollback path are updated.
func (sm *StateManager) MarkMigrationCompleted(migrationType string, rollbackPath string, agentVersion string) error {
	state, err := sm.LoadState()
	if err != nil {
		return err
	}

	// Append to the completed list only if this migration is new.
	already := false
	for _, name := range state.CompletedMigrations {
		if name == migrationType {
			already = true
			break
		}
	}
	if !already {
		state.CompletedMigrations = append(state.CompletedMigrations, migrationType)
	}

	// Record (or refresh) the completion timestamp exactly once.
	state.LastCompleted[migrationType] = time.Now()
	state.AgentVersion = agentVersion
	state.Success = true
	if rollbackPath != "" {
		state.RollbackPath = rollbackPath
	}

	return sm.SaveState(state)
}
// CleanupOldDirectories removes pre-rename agent directories after a
// successful migration. Removal failures are logged and ignored
// (best-effort); the method always returns nil.
func (sm *StateManager) CleanupOldDirectories() error {
	for _, dir := range []string{"/etc/aggregator", "/var/lib/aggregator"} {
		if _, statErr := os.Stat(dir); statErr != nil {
			continue // directory absent or unreadable — nothing to clean up
		}
		fmt.Printf("[MIGRATION] Cleaning up old directory: %s\n", dir)
		if rmErr := os.RemoveAll(dir); rmErr != nil {
			// Best-effort: warn and keep going, as the original did.
			fmt.Printf("[MIGRATION] Warning: Failed to remove old directory %s: %v\n", dir, rmErr)
		}
	}
	return nil
}
// loadConfig reads and unmarshals the agent configuration from sm.configPath.
func (sm *StateManager) loadConfig() (*config.Config, error) {
	raw, err := os.ReadFile(sm.configPath)
	if err != nil {
		return nil, err
	}
	cfg := &config.Config{}
	if err := json.Unmarshal(raw, cfg); err != nil {
		return nil, err
	}
	return cfg, nil
}
// saveConfig marshals the configuration and writes it to sm.configPath.
//
// The file is written with 0600 permissions: elsewhere in this codebase the
// migration validator treats config files as sensitive and flags
// world-readable permissions as insecure, so the previous 0644 mode
// contradicted that policy.
func (sm *StateManager) saveConfig(cfg *config.Config) error {
	data, err := json.MarshalIndent(cfg, "", " ")
	if err != nil {
		return err
	}
	return os.WriteFile(sm.configPath, data, 0600)
}

View File

@@ -0,0 +1,398 @@
package validation
import (
"fmt"
"os"
"path/filepath"
"strings"
"time"
"github.com/Fimeg/RedFlag/aggregator-agent/internal/common"
"github.com/Fimeg/RedFlag/aggregator-agent/internal/event"
"github.com/Fimeg/RedFlag/aggregator-agent/internal/migration/pathutils"
"github.com/Fimeg/RedFlag/aggregator-agent/internal/models"
"github.com/google/uuid"
)
// FileValidator handles comprehensive file validation for migration
type FileValidator struct {
	pathManager *pathutils.PathManager // path normalization, traversal and existence checks
	eventBuffer *event.Buffer          // audit-event sink; may be nil (events become no-ops)
	agentID     uuid.UUID              // agent attributed on every buffered event
}
// NewFileValidator constructs a FileValidator bound to the given path
// manager, event buffer (may be nil), and agent identity.
func NewFileValidator(pm *pathutils.PathManager, eventBuffer *event.Buffer, agentID uuid.UUID) *FileValidator {
	fv := &FileValidator{}
	fv.pathManager = pm
	fv.eventBuffer = eventBuffer
	fv.agentID = agentID
	return fv
}
// ValidationResult holds validation results
type ValidationResult struct {
	Valid bool `json:"valid"` // false as soon as any error is recorded
	Errors []string `json:"errors"` // blocking problems (validation failed)
	Warnings []string `json:"warnings"` // non-blocking observations
	Inventory *FileInventory `json:"inventory"` // files bucketed by outcome
	Statistics *ValidationStats `json:"statistics"` // counters and timing
}
// FileInventory represents validated files
type FileInventory struct {
	ValidFiles []common.AgentFile `json:"valid_files"` // passed all path checks
	InvalidFiles []InvalidFile `json:"invalid_files"` // failed path validation
	MissingFiles []string `json:"missing_files"` // required patterns with no match
	SkippedFiles []SkippedFile `json:"skipped_files"` // intentionally excluded (logs/temp)
	Directories []string `json:"directories"` // unique parent dirs of valid files
}
// InvalidFile represents a file that failed validation
type InvalidFile struct {
	Path string `json:"path"`
	Reason string `json:"reason"` // human-readable failure description
	ErrorType string `json:"error_type"` // "not_found", "permission", "traversal", "other"
	Expected string `json:"expected"` // NOTE(review): never populated by ValidateInventory — confirm intended use
}
// SkippedFile represents a file that was intentionally skipped
type SkippedFile struct {
	Path string `json:"path"`
	Reason string `json:"reason"` // why the file was excluded from migration
}
// ValidationStats holds statistics about validation
type ValidationStats struct {
	TotalFiles int `json:"total_files"` // every file examined (valid + invalid + skipped)
	ValidFiles int `json:"valid_files"`
	InvalidFiles int `json:"invalid_files"`
	MissingFiles int `json:"missing_files"` // required patterns that matched nothing
	SkippedFiles int `json:"skipped_files"` // log/temp files excluded from migration
	ValidationTime int64 `json:"validation_time_ms"` // wall-clock duration, milliseconds
	TotalSizeBytes int64 `json:"total_size_bytes"` // sum of valid files' reported sizes
}
// ValidateInventory performs comprehensive validation of file inventory.
//
// Each file is checked via the path manager (existence/permissions/traversal);
// *.log and *.tmp files are skipped rather than migrated. Afterwards every
// pattern in requiredPatterns must match at least one valid file's base name
// or the result is marked invalid. The error return is currently always nil —
// callers must inspect result.Valid and result.Errors for the outcome.
func (v *FileValidator) ValidateInventory(files []common.AgentFile, requiredPatterns []string) (*ValidationResult, error) {
	start := time.Now()
	// Start optimistic; any failure below flips Valid to false.
	result := &ValidationResult{
		Valid: true,
		Errors: []string{},
		Warnings: []string{},
		Inventory: &FileInventory{
			ValidFiles: []common.AgentFile{},
			InvalidFiles: []InvalidFile{},
			MissingFiles: []string{},
			SkippedFiles: []SkippedFile{},
			Directories: []string{},
		},
		Statistics: &ValidationStats{},
	}
	// Group files by directory and collect statistics
	dirMap := make(map[string]bool)
	var totalSize int64
	for _, file := range files {
		result.Statistics.TotalFiles++
		// Skip log files (.log, .tmp) as they shouldn't be migrated
		if containsAny(file.Path, []string{"*.log", "*.tmp"}) {
			result.Inventory.SkippedFiles = append(result.Inventory.SkippedFiles, SkippedFile{
				Path: file.Path,
				Reason: "Log/temp files are not migrated",
			})
			result.Statistics.SkippedFiles++
			continue
		}
		// Validate file path and existence
		if err := v.pathManager.ValidatePath(file.Path); err != nil {
			result.Valid = false
			result.Statistics.InvalidFiles++
			// Classify the failure for reporting; default bucket is "other".
			// NOTE(review): os.IsNotExist/os.IsPermission only match if
			// ValidatePath returns the underlying os error unwrapped — confirm.
			errorType := "other"
			reason := err.Error()
			if os.IsNotExist(err) {
				errorType = "not_found"
				reason = fmt.Sprintf("File does not exist: %s", file.Path)
			} else if os.IsPermission(err) {
				errorType = "permission"
				reason = fmt.Sprintf("Permission denied: %s", file.Path)
			}
			result.Errors = append(result.Errors, reason)
			result.Inventory.InvalidFiles = append(result.Inventory.InvalidFiles, InvalidFile{
				Path: file.Path,
				Reason: reason,
				ErrorType: errorType,
			})
			// Log the validation failure
			v.bufferEvent("file_validation_failed", "warning", "migration_validator",
				reason,
				map[string]interface{}{
					"file_path": file.Path,
					"error_type": errorType,
					"file_size": file.Size,
				})
			continue
		}
		// Track directory (deduplicated via dirMap)
		dir := filepath.Dir(file.Path)
		if !dirMap[dir] {
			dirMap[dir] = true
			result.Inventory.Directories = append(result.Inventory.Directories, dir)
		}
		result.Inventory.ValidFiles = append(result.Inventory.ValidFiles, file)
		result.Statistics.ValidFiles++
		totalSize += file.Size
	}
	result.Statistics.TotalSizeBytes = totalSize
	// Check for required files: each pattern is matched against the base
	// names of the files that passed validation above.
	for _, pattern := range requiredPatterns {
		found := false
		for _, file := range result.Inventory.ValidFiles {
			if matched, _ := filepath.Match(pattern, filepath.Base(file.Path)); matched {
				found = true
				break
			}
		}
		if !found {
			result.Valid = false
			missing := fmt.Sprintf("Required file pattern not found: %s", pattern)
			result.Errors = append(result.Errors, missing)
			result.Inventory.MissingFiles = append(result.Inventory.MissingFiles, pattern)
			result.Statistics.MissingFiles++
			// Log missing required file
			v.bufferEvent("required_file_missing", "error", "migration_validator",
				missing,
				map[string]interface{}{
					"required_pattern": pattern,
					"phase": "validation",
				})
		}
	}
	result.Statistics.ValidationTime = time.Since(start).Milliseconds()
	// Log validation completion
	v.bufferEvent("validation_completed", "info", "migration_validator",
		fmt.Sprintf("File validation completed: %d total, %d valid, %d invalid, %d skipped",
			result.Statistics.TotalFiles,
			result.Statistics.ValidFiles,
			result.Statistics.InvalidFiles,
			result.Statistics.SkippedFiles),
		map[string]interface{}{
			"stats": result.Statistics,
			"valid": result.Valid,
		})
	return result, nil
}
// ValidateBackupLocation checks that the backup path is safe and writable:
// it must normalize to an absolute path outside system binary directories,
// its parent must be creatable, and a write probe must succeed.
func (v *FileValidator) ValidateBackupLocation(backupPath string) error {
	normalized, err := v.pathManager.NormalizeToAbsolute(backupPath)
	if err != nil {
		return fmt.Errorf("invalid backup path: %w", err)
	}
	// Refuse locations inside system binary directories.
	for _, sysPrefix := range []string{"/bin/", "/sbin/", "/usr/bin/", "/usr/sbin/"} {
		if strings.HasPrefix(normalized, sysPrefix) {
			return fmt.Errorf("backup path cannot be in system directory: %s", normalized)
		}
	}
	// Parent directory must exist (created if necessary) and be writable.
	parent := filepath.Dir(normalized)
	if err := v.pathManager.EnsureDirectory(parent); err != nil {
		return fmt.Errorf("cannot create backup directory: %w", err)
	}
	// Probe writability by creating, then removing, a small temp file.
	probe := filepath.Join(parent, ".migration_test_"+uuid.New().String()[:8])
	if err := os.WriteFile(probe, []byte("test"), 0600); err != nil {
		return fmt.Errorf("backup directory not writable: %w", err)
	}
	_ = os.Remove(probe)
	return nil
}
// PreValidate validates all conditions before migration starts.
//
// It aggregates three checks: (1) inventory validation of every file from the
// detection, (2) backup-location safety/writability, and (3) a dry check that
// the new target directories normalize and that their parents exist (missing
// parents are warnings only — nothing is created here). The combined
// errors/warnings are carried in the result; the error return is non-nil only
// when inventory validation itself errors.
func (v *FileValidator) PreValidate(detection *MigrationDetection, backupPath string) (*ValidationResult, error) {
	v.bufferEvent("pre_validation_started", "info", "migration_validator",
		"Starting comprehensive migration validation",
		map[string]interface{}{
			"agent_version": detection.CurrentAgentVersion,
			"config_version": detection.CurrentConfigVersion,
		})
	// Collect all files from inventory
	allFiles := v.collectAllFiles(detection.Inventory)
	// Define required patterns based on migration needs
	requiredPatterns := []string{
		"config.json", // Config is essential
		// Note: agent.key files are generated if missing
	}
	// Validate inventory
	result, err := v.ValidateInventory(allFiles, requiredPatterns)
	if err != nil {
		v.bufferEvent("validation_error", "error", "migration_validator",
			fmt.Sprintf("Validation failed: %v", err),
			map[string]interface{}{
				"error": err.Error(),
				"phase": "pre_validation",
			})
		return nil, fmt.Errorf("validation failed: %w", err)
	}
	// Validate backup location; failure is folded into the result rather
	// than aborting, so the caller sees all problems at once.
	if err := v.ValidateBackupLocation(backupPath); err != nil {
		result.Valid = false
		result.Errors = append(result.Errors, fmt.Sprintf("Backup location invalid: %v", err))
		v.bufferEvent("backup_validation_failed", "error", "migration_validator",
			fmt.Sprintf("Backup validation failed: %v", err),
			map[string]interface{}{
				"backup_path": backupPath,
				"error": err.Error(),
				"phase": "validation",
			})
	}
	// Validate new directories can be created (but don't create them yet)
	newDirs := []string{
		v.pathManager.GetConfig().NewConfigPath,
		v.pathManager.GetConfig().NewStatePath,
	}
	for _, dir := range newDirs {
		normalized, err := v.pathManager.NormalizeToAbsolute(dir)
		if err != nil {
			result.Valid = false
			result.Errors = append(result.Errors, fmt.Sprintf("Invalid new directory %s: %v", dir, err))
			continue
		}
		// Check that the parent exists. (Existence only — writability is
		// not actually probed here, despite what the old comment claimed.)
		parent := filepath.Dir(normalized)
		if _, err := os.Stat(parent); err != nil {
			if os.IsNotExist(err) {
				result.Warnings = append(result.Warnings, fmt.Sprintf("Parent directory for %s does not exist: %s", dir, parent))
			}
		}
	}
	// Log final validation status
	v.bufferEvent("pre_validation_completed", "info", "migration_validator",
		fmt.Sprintf("Pre-validation completed: %s", func() string {
			if result.Valid {
				return "PASSED"
			}
			return "FAILED"
		}()),
		map[string]interface{}{
			"errors_count": len(result.Errors),
			"warnings_count": len(result.Warnings),
			"files_valid": result.Statistics.ValidFiles,
			"files_invalid": result.Statistics.InvalidFiles,
			"files_skipped": result.Statistics.SkippedFiles,
		})
	return result, nil
}
// collectAllFiles flattens every file category of the migration inventory
// into a single slice. A nil inventory yields a nil slice.
func (v *FileValidator) collectAllFiles(inventory *AgentFileInventory) []common.AgentFile {
	if inventory == nil {
		return nil
	}
	groups := [][]common.AgentFile{
		inventory.ConfigFiles,
		inventory.StateFiles,
		inventory.BinaryFiles,
		inventory.LogFiles,
		inventory.CertificateFiles,
	}
	var all []common.AgentFile
	for _, g := range groups {
		all = append(all, g...)
	}
	return all
}
// bufferEvent records an audit event in the event buffer.
//
// The buffer may be nil (e.g. in tests), in which case this is a no-op.
// Buffer failures are printed but never propagated — event logging is
// best-effort and must not fail validation. The local variable is named
// evt rather than event: the original shadowed the imported event package.
func (v *FileValidator) bufferEvent(eventSubtype, severity, component, message string, metadata map[string]interface{}) {
	if v.eventBuffer == nil {
		return
	}
	evt := &models.SystemEvent{
		ID:           uuid.New(),
		AgentID:      &v.agentID,
		EventType:    models.EventTypeAgentMigration, // Using model constant
		EventSubtype: eventSubtype,
		Severity:     severity,
		Component:    component,
		Message:      message,
		Metadata:     metadata,
		CreatedAt:    time.Now(),
	}
	if err := v.eventBuffer.BufferEvent(evt); err != nil {
		fmt.Printf("[VALIDATION] Warning: Failed to buffer event: %v\n", err)
	}
}
// containsAny reports whether the path's base name matches any of the given
// glob patterns (filepath.Match semantics); match errors are treated as
// non-matches.
func containsAny(path string, patterns []string) bool {
	base := filepath.Base(path)
	for _, p := range patterns {
		ok, _ := filepath.Match(p, base)
		if ok {
			return true
		}
	}
	return false
}
// ValidateFileForBackup validates a single file before backup.
//
// Returns an error if the file does not exist (after buffering a warning
// event) or cannot be accessed. For sensitive files (paths containing ".key"
// or "config") it additionally buffers a warning when the file is
// world-readable, but does not fail validation for that. The permission
// check now reuses the os.Stat result from the existence check instead of
// calling os.Stat a second time as the original did.
func (v *FileValidator) ValidateFileForBackup(file common.AgentFile) error {
	info, err := os.Stat(file.Path)
	if err != nil {
		if os.IsNotExist(err) {
			v.bufferEvent("backup_file_missing", "warning", "migration_validator",
				fmt.Sprintf("Skipping backup of non-existent file: %s", file.Path),
				map[string]interface{}{
					"file_path": file.Path,
					"phase":     "backup",
				})
			return fmt.Errorf("file does not exist: %s", file.Path)
		}
		return fmt.Errorf("failed to access file %s: %w", file.Path, err)
	}
	// Additional validation for sensitive files.
	if strings.Contains(file.Path, ".key") || strings.Contains(file.Path, "config") {
		// Key/config files should be readable only by their owner.
		perm := info.Mode().Perm()
		if perm&0004 != 0 { // others have read permission
			v.bufferEvent("insecure_file_permissions", "warning", "migration_validator",
				fmt.Sprintf("Sensitive file has world-readable permissions: %s (0%o)", file.Path, perm),
				map[string]interface{}{
					"file_path":   file.Path,
					"permissions": perm,
				})
		}
	}
	return nil
}

View File

@@ -0,0 +1,31 @@
package models
import (
"time"
"github.com/google/uuid"
)
// StorageMetricReport represents storage metrics from an agent
type StorageMetricReport struct {
	AgentID uuid.UUID `json:"agent_id"` // reporting agent
	CommandID string `json:"command_id"` // command that triggered this report
	Timestamp time.Time `json:"timestamp"` // when the report was produced
	Metrics []StorageMetric `json:"metrics"` // individual disk/filesystem metrics
}
// StorageMetric represents a single disk/storage metric
type StorageMetric struct {
	Mountpoint string `json:"mountpoint"`
	Device string `json:"device"`
	DiskType string `json:"disk_type"`
	Filesystem string `json:"filesystem"`
	TotalBytes int64 `json:"total_bytes"`
	UsedBytes int64 `json:"used_bytes"`
	AvailableBytes int64 `json:"available_bytes"`
	UsedPercent float64 `json:"used_percent"` // 0-100 scale, presumably — confirm against scanner
	IsRoot bool `json:"is_root"` // true for the root ("/") mount
	IsLargest bool `json:"is_largest"` // true for the largest disk in the report
	Severity string `json:"severity"`
	Metadata map[string]interface{} `json:"metadata,omitempty"` // optional extra detail
}

View File

@@ -68,35 +68,6 @@ func (s *StorageScanner) Name() string {
return "Disk Usage Reporter"
}
// --- Legacy Compatibility Methods ---
// Scan collects disk usage information and returns it as "updates" for reporting (LEGACY)
// This method is kept for backwards compatibility with the old Scanner interface
func (s *StorageScanner) Scan() ([]client.UpdateReportItem, error) {
metrics, err := s.ScanStorage()
if err != nil {
return nil, err
}
// Convert proper StorageMetric back to legacy UpdateReportItem format
var items []client.UpdateReportItem
for _, metric := range metrics {
item := client.UpdateReportItem{
PackageName: fmt.Sprintf("disk-%s", metric.Mountpoint),
CurrentVersion: fmt.Sprintf("%.1f%% used", metric.UsedPercent),
AvailableVersion: fmt.Sprintf("%d GB available", metric.AvailableBytes/(1024*1024*1024)),
PackageType: "storage",
Severity: metric.Severity,
PackageDescription: fmt.Sprintf("Disk: %s (%s) - %s", metric.Mountpoint, metric.Filesystem, metric.Device),
Metadata: metric.Metadata,
}
items = append(items, item)
}
return items, nil
}
// --- Typed Scanner Implementation ---
// GetType returns the scanner type

View File

@@ -7,25 +7,14 @@ import (
"time"
)
// Build-time injected version information
// These will be set via ldflags during build (SERVER AUTHORITY)
// Build-time injected version information (SERVER AUTHORITY)
// Injected by server during build via ldflags
var (
// Version is the agent version (e.g., "0.1.23.6")
// Injected by server during build: -ldflags "-X github.com/redflag/redflag/internal/version.Version=0.1.23.6"
Version = "dev"
// ConfigVersion is the config schema version this agent expects (e.g., "6")
// Injected by server during build: -ldflags "-X github.com/redflag/redflag/internal/version.ConfigVersion=6"
ConfigVersion = "dev"
// BuildTime is when this binary was built
BuildTime = "unknown"
// GitCommit is the git commit hash
GitCommit = "unknown"
// GoVersion is the Go version used to build
GoVersion = runtime.Version()
Version = "dev" // Agent version (format: 0.1.26.0)
ConfigVersion = "dev" // Config schema version (format: 0, 1, 2, etc.)
BuildTime = "unknown"
GitCommit = "unknown"
GoVersion = runtime.Version()
)
// ExtractConfigVersionFromAgent extracts the config version from the agent version

BIN
aggregator-agent/test-agent-final Executable file

Binary file not shown.

BIN
aggregator-agent/test-agent-fixed Executable file

Binary file not shown.

View File

@@ -18,23 +18,59 @@ FROM golang:1.24-alpine AS agent-builder
WORKDIR /build
# Install git for module resolution
# Install git for version detection
RUN apk add --no-cache git
# Copy .git directory to get version info
COPY .git/ ./.git/
# Generate semantic version from git (BASE_VERSION.COMMIT_COUNT)
# Examples:
# Tagged release: v0.1.26.0 → 0.1.26.0
# 5 commits after tag: 0.1.26.5
# No tags: 0.1.0.0
RUN cd /build && \
# Get latest tag or default to 0.1.0 \
if git describe --tags --dirty --always >/dev/null 2>&1; then \
LATEST_TAG=$(git describe --tags --dirty --always); \
BASE_VERSION=$(echo "$LATEST_TAG" | sed 's/^v//' | cut -d. -f1-3); \
else \
BASE_VERSION="0.1.0"; \
fi && \
# Count commits since tag (0 if on tag) \
COMMITS_SINCE=$(git rev-list $(git describe --tags --dirty --always 2>/dev/null)..HEAD 2>/dev/null | wc -l | tr -d ' ') && \
if [ "$COMMITS_SINCE" = "" ] || [ "$COMMITS_SINCE" -eq 0 ]; then BUILD=0; else BUILD=$COMMITS_SINCE; fi && \
# Write semantic version (base.commits) \
VERSION="${BASE_VERSION}.${BUILD}" && \
echo "Building agent version: $VERSION" && \
echo "$VERSION" > /build/version.txt
# Copy agent source code
COPY aggregator-agent/ ./
# Build for Linux amd64
RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -o binaries/linux-amd64/redflag-agent ./cmd/agent
RUN VERSION=$(cat /build/version.txt) && \
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build \
-ldflags "-X github.com/Fimeg/RedFlag/aggregator-agent/internal/version.Version=$VERSION" \
-o binaries/linux-amd64/redflag-agent ./cmd/agent
# Build for Linux arm64
RUN CGO_ENABLED=0 GOOS=linux GOARCH=arm64 go build -o binaries/linux-arm64/redflag-agent ./cmd/agent
RUN VERSION=$(cat /build/version.txt) && \
CGO_ENABLED=0 GOOS=linux GOARCH=arm64 go build \
-ldflags "-X github.com/Fimeg/RedFlag/aggregator-agent/internal/version.Version=$VERSION" \
-o binaries/linux-arm64/redflag-agent ./cmd/agent
# Build for Windows amd64
RUN CGO_ENABLED=0 GOOS=windows GOARCH=amd64 go build -o binaries/windows-amd64/redflag-agent.exe ./cmd/agent
RUN VERSION=$(cat /build/version.txt) && \
CGO_ENABLED=0 GOOS=windows GOARCH=amd64 go build \
-ldflags "-X github.com/Fimeg/RedFlag/aggregator-agent/internal/version.Version=$VERSION" \
-o binaries/windows-amd64/redflag-agent.exe ./cmd/agent
# Build for Windows arm64
RUN CGO_ENABLED=0 GOOS=windows GOARCH=arm64 go build -o binaries/windows-arm64/redflag-agent.exe ./cmd/agent
RUN VERSION=$(cat /build/version.txt) && \
CGO_ENABLED=0 GOOS=windows GOARCH=arm64 go build \
-ldflags "-X github.com/Fimeg/RedFlag/aggregator-agent/internal/version.Version=$VERSION" \
-o binaries/windows-arm64/redflag-agent.exe ./cmd/agent
# Stage 3: Final image with server and all agent binaries
FROM alpine:latest

View File

@@ -202,6 +202,7 @@ func main() {
agentUpdateQueries := queries.NewAgentUpdateQueries(db.DB)
metricsQueries := queries.NewMetricsQueries(db.DB.DB)
dockerQueries := queries.NewDockerQueries(db.DB.DB)
storageMetricsQueries := queries.NewStorageMetricsQueries(db.DB.DB)
adminQueries := queries.NewAdminQueries(db.DB)
// Create PackageQueries for accessing signed agent update packages
@@ -307,6 +308,7 @@ func main() {
subsystemHandler := handlers.NewSubsystemHandler(subsystemQueries, commandQueries, signingService, securityLogger)
metricsHandler := handlers.NewMetricsHandler(metricsQueries, agentQueries, commandQueries)
dockerReportsHandler := handlers.NewDockerReportsHandler(dockerQueries, agentQueries, commandQueries)
storageMetricsHandler := handlers.NewStorageMetricsHandler(storageMetricsQueries)
agentSetupHandler := handlers.NewAgentSetupHandler(agentQueries)
// Initialize scanner config handler (for user-configurable scanner timeouts)
@@ -460,6 +462,9 @@ func main() {
// New dedicated endpoints for metrics and docker images (data classification fix)
agents.POST("/:id/metrics", rateLimiter.RateLimit("agent_reports", middleware.KeyByAgentID), metricsHandler.ReportMetrics)
agents.POST("/:id/docker-images", rateLimiter.RateLimit("agent_reports", middleware.KeyByAgentID), dockerReportsHandler.ReportDockerImages)
// Dedicated storage metrics endpoint (proper separation from generic metrics)
agents.POST("/:id/storage-metrics", rateLimiter.RateLimit("agent_reports", middleware.KeyByAgentID), storageMetricsHandler.ReportStorageMetrics)
}
// Dashboard/Web routes (protected by web auth)
@@ -469,6 +474,7 @@ func main() {
dashboard.GET("/stats/summary", statsHandler.GetDashboardStats)
dashboard.GET("/agents", agentHandler.ListAgents)
dashboard.GET("/agents/:id", agentHandler.GetAgent)
dashboard.GET("/agents/:id/storage-metrics", storageMetricsHandler.GetStorageMetrics)
dashboard.POST("/agents/:id/scan", agentHandler.TriggerScan)
dashboard.POST("/agents/:id/heartbeat", agentHandler.TriggerHeartbeat)
dashboard.GET("/agents/:id/heartbeat", agentHandler.GetHeartbeatStatus)

View File

@@ -0,0 +1,8 @@
#!/bin/bash
# Container entrypoint: prepare writable runtime directories, then replace
# this shell with the requested command via exec so it runs as PID 1 and
# receives signals directly. -e aborts on any setup failure.
set -e
# Create config directory if it doesn't exist
mkdir -p /app/config
# Execute the main command
exec "$@"

View File

@@ -158,7 +158,7 @@ func (h *SecurityHandler) MachineBindingStatus(c *gin.Context) {
"timestamp": time.Now(),
"checks": map[string]interface{}{
"binding_enforced": true,
"min_agent_version": "v0.1.22",
"min_agent_version": "v0.1.26",
"fingerprint_required": true,
"recent_violations": 0,
"bound_agents": 0,

View File

@@ -0,0 +1,205 @@
package handlers
import (
"fmt"
"net/http"
"strconv"
"github.com/Fimeg/RedFlag/aggregator-server/internal/services"
"github.com/gin-gonic/gin"
)
// SecuritySettingsHandler handles security settings API endpoints
type SecuritySettingsHandler struct {
	securitySettingsService *services.SecuritySettingsService // backing service for reads, writes, validation and audit
}
// NewSecuritySettingsHandler constructs a handler backed by the given
// security settings service.
func NewSecuritySettingsHandler(securitySettingsService *services.SecuritySettingsService) *SecuritySettingsHandler {
	h := &SecuritySettingsHandler{}
	h.securitySettingsService = securitySettingsService
	return h
}
// GetAllSecuritySettings returns all security settings for the authenticated user
func (h *SecuritySettingsHandler) GetAllSecuritySettings(c *gin.Context) {
	// Get user from context (set by the auth middleware)
	userID := c.GetString("user_id")
	settings, err := h.securitySettingsService.GetAllSettings(userID)
	if err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
		return
	}
	c.JSON(http.StatusOK, gin.H{
		"settings": settings,
		"user_has_permission": true, // NOTE(review): hardcoded — wire up a real permission check before relying on this field
	})
}
// GetSecuritySettingsByCategory returns the settings belonging to one
// category (e.g. "command_signing", "nonce_validation") for the
// authenticated user.
func (h *SecuritySettingsHandler) GetSecuritySettingsByCategory(c *gin.Context) {
	userID := c.GetString("user_id")
	category := c.Param("category")
	settings, err := h.securitySettingsService.GetSettingsByCategory(userID, category)
	if err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
		return
	}
	c.JSON(http.StatusOK, settings)
}
// UpdateSecuritySetting updates a specific security setting identified by the
// category/key URL parameters. The value is validated before being applied;
// the optional reason is stored for the audit trail. On success the response
// echoes the persisted value (re-read from the service).
func (h *SecuritySettingsHandler) UpdateSecuritySetting(c *gin.Context) {
	var req struct {
		Value interface{} `json:"value" binding:"required"`
		Reason string `json:"reason"` // Optional audit trail
	}
	if err := c.ShouldBindJSON(&req); err != nil {
		c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
		return
	}
	category := c.Param("category")
	key := c.Param("key")
	userID := c.GetString("user_id")
	// Validate before applying
	if err := h.securitySettingsService.ValidateSetting(category, key, req.Value); err != nil {
		c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
		return
	}
	// Apply the setting
	err := h.securitySettingsService.SetSetting(category, key, req.Value, userID, req.Reason)
	if err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
		return
	}
	// Return updated setting (re-read so the response reflects what was persisted)
	setting, err := h.securitySettingsService.GetSetting(category, key)
	if err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
		return
	}
	c.JSON(http.StatusOK, gin.H{
		"message": "Setting updated successfully",
		"setting": map[string]interface{}{
			"category": category,
			"key": key,
			"value": setting,
		},
	})
}
// ValidateSecuritySettings performs a dry-run validation of a single setting
// without persisting anything. Responds {valid: true} or {valid: false,
// error: ...}.
func (h *SecuritySettingsHandler) ValidateSecuritySettings(c *gin.Context) {
	var payload struct {
		Category string      `json:"category" binding:"required"`
		Key      string      `json:"key" binding:"required"`
		Value    interface{} `json:"value" binding:"required"`
	}
	if err := c.ShouldBindJSON(&payload); err != nil {
		c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
		return
	}
	// Validation only — nothing is applied here.
	if err := h.securitySettingsService.ValidateSetting(payload.Category, payload.Key, payload.Value); err != nil {
		c.JSON(http.StatusBadRequest, gin.H{
			"valid": false,
			"error": err.Error(),
		})
		return
	}
	c.JSON(http.StatusOK, gin.H{
		"valid":   true,
		"message": "Setting is valid",
	})
}
// GetSecurityAuditTrail returns the audit trail of security setting changes.
//
// Query params: page (default 1) and page_size (default 50). Invalid or
// non-positive values fall back to the defaults. The original code ignored
// the strconv.Atoi errors, so a request with page_size=0 (or any non-numeric
// value, which Atoi reports as 0) caused a divide-by-zero panic when
// computing total_pages.
func (h *SecuritySettingsHandler) GetSecurityAuditTrail(c *gin.Context) {
	pageInt, err := strconv.Atoi(c.DefaultQuery("page", "1"))
	if err != nil || pageInt < 1 {
		pageInt = 1
	}
	pageSizeInt, err := strconv.Atoi(c.DefaultQuery("page_size", "50"))
	if err != nil || pageSizeInt < 1 {
		pageSizeInt = 50
	}
	auditEntries, totalCount, err := h.securitySettingsService.GetAuditTrail(pageInt, pageSizeInt)
	if err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
		return
	}
	c.JSON(http.StatusOK, gin.H{
		"audit_entries": auditEntries,
		"pagination": gin.H{
			"page":        pageInt,
			"page_size":   pageSizeInt,
			"total":       totalCount,
			"total_pages": (totalCount + pageSizeInt - 1) / pageSizeInt, // safe: pageSizeInt >= 1
		},
	})
}
// GetSecurityOverview returns the current security status overview for the
// authenticated user.
func (h *SecuritySettingsHandler) GetSecurityOverview(c *gin.Context) {
	overview, err := h.securitySettingsService.GetSecurityOverview(c.GetString("user_id"))
	if err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
		return
	}
	c.JSON(http.StatusOK, overview)
}
// ApplySecuritySettings applies a batch of setting changes atomically.
//
// Every setting is validated up front; if any fails, nothing is applied and
// a 400 is returned. applied_count now reports the number of individual
// settings applied — the original reported len(req.Settings), which is the
// number of *categories*, not settings.
func (h *SecuritySettingsHandler) ApplySecuritySettings(c *gin.Context) {
	var req struct {
		Settings map[string]map[string]interface{} `json:"settings" binding:"required"`
		Reason   string                            `json:"reason"`
	}
	if err := c.ShouldBindJSON(&req); err != nil {
		c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
		return
	}
	userID := c.GetString("user_id")
	// Validate all settings first (all-or-nothing), counting as we go.
	total := 0
	for category, settings := range req.Settings {
		for key, value := range settings {
			if err := h.securitySettingsService.ValidateSetting(category, key, value); err != nil {
				c.JSON(http.StatusBadRequest, gin.H{
					"error": fmt.Sprintf("Validation failed for %s.%s: %v", category, key, err),
				})
				return
			}
			total++
		}
	}
	// Apply all settings atomically
	if err := h.securitySettingsService.ApplySettingsBatch(req.Settings, userID, req.Reason); err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
		return
	}
	c.JSON(http.StatusOK, gin.H{
		"message":       "All settings applied successfully",
		"applied_count": total,
	})
}

View File

@@ -0,0 +1,125 @@
package handlers
import (
	"encoding/json"
	"fmt"
	"log"
	"net/http"
	"strconv"
	"time"

	"github.com/Fimeg/RedFlag/aggregator-server/internal/database/queries"
	"github.com/Fimeg/RedFlag/aggregator-server/internal/models"
	"github.com/google/uuid"
	"github.com/gorilla/mux"
	_ "github.com/lib/pq" // blank import: the file references no pq symbol directly; kept for driver registration
)
// StorageMetricsHandler handles storage metrics endpoints
type StorageMetricsHandler struct {
	queries *queries.StorageMetricsQueries // database access for storage_metrics rows
}
// NewStorageMetricsHandler constructs a handler backed by the given
// storage-metrics query layer.
func NewStorageMetricsHandler(queries *queries.StorageMetricsQueries) *StorageMetricsHandler {
	h := &StorageMetricsHandler{}
	h.queries = queries
	return h
}
// ReportStorageMetrics handles POST /api/v1/agents/{id}/storage-metrics.
//
// It decodes a StorageMetricRequest, verifies the agent ID in the URL
// matches the one in the body, and inserts each metric. On the first insert
// failure the request aborts with a 500 — the old comment claimed "error
// isolation", which the code never implemented; the comment was wrong, not
// the loop.
//
// NOTE(review): this handler uses net/http + gorilla/mux while the routes in
// main.go appear to be registered on a gin router — confirm the wiring.
func (h *StorageMetricsHandler) ReportStorageMetrics(w http.ResponseWriter, r *http.Request) {
	agentID, err := uuid.Parse(mux.Vars(r)["id"])
	if err != nil {
		http.Error(w, "Invalid agent ID", http.StatusBadRequest)
		return
	}
	var req models.StorageMetricRequest
	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		http.Error(w, "Invalid request body", http.StatusBadRequest)
		return
	}
	// The body must concern the same agent the URL addresses.
	if req.AgentID != agentID {
		http.Error(w, "Agent ID mismatch", http.StatusBadRequest)
		return
	}
	// One shared timestamp for the whole batch.
	now := time.Now()
	for _, metric := range req.Metrics {
		dbMetric := models.StorageMetric{
			ID:             uuid.New(),
			AgentID:        req.AgentID,
			Mountpoint:     metric.Mountpoint,
			Device:         metric.Device,
			DiskType:       metric.DiskType,
			Filesystem:     metric.Filesystem,
			TotalBytes:     metric.TotalBytes,
			UsedBytes:      metric.UsedBytes,
			AvailableBytes: metric.AvailableBytes,
			UsedPercent:    metric.UsedPercent,
			Severity:       metric.Severity,
			Metadata:       metric.Metadata,
			CreatedAt:      now,
		}
		if err := h.queries.InsertStorageMetric(r.Context(), dbMetric); err != nil {
			// log.Printf appends a newline; the old "\n" suffix doubled it.
			log.Printf("[ERROR] Failed to insert storage metric for agent %s: %v", agentID, err)
			http.Error(w, "Failed to insert storage metric", http.StatusInternalServerError)
			return
		}
	}
	// Set Content-Type before writing the header (the old code omitted it).
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusOK)
	json.NewEncoder(w).Encode(map[string]string{
		"status":  "success",
		"message": "Storage metrics reported successfully",
	})
}
// GetStorageMetrics handles GET /api/v1/agents/{id}/storage-metrics.
// Supports optional limit (default 100) and offset (default 0) query
// parameters and returns the metrics plus the count returned in this page.
func (h *StorageMetricsHandler) GetStorageMetrics(w http.ResponseWriter, r *http.Request) {
	idRaw := mux.Vars(r)["id"]
	agentID, err := uuid.Parse(idRaw)
	if err != nil {
		http.Error(w, "Invalid agent ID", http.StatusBadRequest)
		return
	}
	// Pagination controls with sane defaults when absent or invalid.
	limit := parseIntQueryParam(r, "limit", 100)
	offset := parseIntQueryParam(r, "offset", 0)
	metrics, err := h.queries.GetStorageMetricsByAgentID(r.Context(), agentID, limit, offset)
	if err != nil {
		log.Printf("[ERROR] Failed to retrieve storage metrics for agent %s: %v\n", agentID, err)
		http.Error(w, "Failed to retrieve storage metrics", http.StatusInternalServerError)
		return
	}
	w.Header().Set("Content-Type", "application/json")
	json.NewEncoder(w).Encode(map[string]interface{}{
		"metrics": metrics,
		"total":   len(metrics),
	})
}
// parseIntQueryParam safely parses integer query parameters with defaults
func parseIntQueryParam(r *http.Request, key string, defaultValue int) int {
if val := r.URL.Query().Get(key); val != "" {
var result int
if _, err := fmt.Sscanf(val, "%d", &result); err == nil && result > 0 {
return result
}
}
return defaultValue
}

View File

@@ -160,7 +160,7 @@ func loadFromEnv(cfg *Config, skipSensitive bool) error {
cfg.CheckInInterval = checkInInterval
cfg.OfflineThreshold = offlineThreshold
cfg.Timezone = getEnv("TIMEZONE", "UTC")
cfg.LatestAgentVersion = getEnv("LATEST_AGENT_VERSION", "0.1.23.6")
cfg.LatestAgentVersion = getEnv("LATEST_AGENT_VERSION", "0.1.26")
cfg.MinAgentVersion = getEnv("MIN_AGENT_VERSION", "0.1.22")
if !skipSensitive {

View File

@@ -0,0 +1,27 @@
-- Create dedicated storage_metrics table for proper storage tracking
-- This replaces the misuse of metrics table for storage data
CREATE TABLE storage_metrics (
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    -- Cascade delete: metrics disappear along with their agent row
    agent_id UUID NOT NULL REFERENCES agents(id) ON DELETE CASCADE,
    mountpoint VARCHAR(255) NOT NULL,
    device VARCHAR(255),
    disk_type VARCHAR(50),
    filesystem VARCHAR(50),
    total_bytes BIGINT,
    used_bytes BIGINT,
    available_bytes BIGINT,
    used_percent FLOAT,
    severity VARCHAR(20),
    metadata JSONB,
    created_at TIMESTAMP NOT NULL DEFAULT NOW()
);
-- Indexes for performance
CREATE INDEX idx_storage_metrics_agent_id ON storage_metrics(agent_id);
CREATE INDEX idx_storage_metrics_created_at ON storage_metrics(created_at DESC);
CREATE INDEX idx_storage_metrics_mountpoint ON storage_metrics(mountpoint);
-- Composite index serves "latest metrics per mountpoint for one agent" lookups
CREATE INDEX idx_storage_metrics_agent_mount ON storage_metrics(agent_id, mountpoint, created_at DESC);
-- Track migration
INSERT INTO schema_migrations (version, description) VALUES ('021', 'Create storage_metrics table');

View File

@@ -0,0 +1,162 @@
package queries
import (
"context"
"database/sql"
"fmt"
"time"
"github.com/alexedwards/argon2id"
"github.com/google/uuid"
"github.com/jmoiron/sqlx"
)
// AdminQueries provides database access for administrator accounts
// stored in the users table.
type AdminQueries struct {
db *sqlx.DB
}
// NewAdminQueries creates an AdminQueries bound to the given database handle.
func NewAdminQueries(db *sqlx.DB) *AdminQueries {
return &AdminQueries{db: db}
}
// Admin represents an administrator row from the users table.
// Password holds the argon2id hash and is never serialized to JSON.
type Admin struct {
ID uuid.UUID `json:"id"`
Username string `json:"username"`
Email string `json:"email"`
Password string `json:"-"`
CreatedAt time.Time `json:"created_at"`
}
// CreateAdminIfNotExists inserts an admin user unless one with the same
// username is already present. The password is hashed with argon2id before
// being stored; an existing admin is left untouched.
func (q *AdminQueries) CreateAdminIfNotExists(username, email, password string) error {
	ctx := context.Background()

	var alreadyPresent bool
	if err := q.db.QueryRowContext(ctx,
		"SELECT EXISTS(SELECT 1 FROM users WHERE username = $1)", username,
	).Scan(&alreadyPresent); err != nil {
		return fmt.Errorf("failed to check if admin exists: %w", err)
	}
	if alreadyPresent {
		// Admin is already provisioned — nothing to do.
		return nil
	}

	hash, err := argon2id.CreateHash(password, argon2id.DefaultParams)
	if err != nil {
		return fmt.Errorf("failed to hash password: %w", err)
	}

	insertStmt := `
INSERT INTO users (username, email, password_hash, created_at)
VALUES ($1, $2, $3, NOW())
`
	if _, err := q.db.ExecContext(ctx, insertStmt, username, email, hash); err != nil {
		return fmt.Errorf("failed to create admin: %w", err)
	}
	return nil
}
// UpdateAdminPassword rehashes the supplied password with argon2id and
// stores it for the given username. Returns an error when no row matched
// (i.e. the admin does not exist).
func (q *AdminQueries) UpdateAdminPassword(username, password string) error {
	ctx := context.Background()

	hash, err := argon2id.CreateHash(password, argon2id.DefaultParams)
	if err != nil {
		return fmt.Errorf("failed to hash password: %w", err)
	}

	res, err := q.db.ExecContext(ctx, `
UPDATE users
SET password_hash = $1
WHERE username = $2
`, hash, username)
	if err != nil {
		return fmt.Errorf("failed to update admin password: %w", err)
	}

	affected, err := res.RowsAffected()
	if err != nil {
		return fmt.Errorf("failed to get rows affected: %w", err)
	}
	if affected == 0 {
		return fmt.Errorf("admin not found")
	}
	return nil
}
// VerifyAdminCredentials looks up the admin by username and verifies the
// supplied password against the stored argon2id hash. On success the full
// admin record (including the hash) is returned.
func (q *AdminQueries) VerifyAdminCredentials(username, password string) (*Admin, error) {
	ctx := context.Background()

	lookup := `
SELECT id, username, email, password_hash, created_at
FROM users
WHERE username = $1
`
	admin := &Admin{}
	err := q.db.QueryRowContext(ctx, lookup, username).Scan(
		&admin.ID, &admin.Username, &admin.Email, &admin.Password, &admin.CreatedAt,
	)
	switch {
	case err == sql.ErrNoRows:
		return nil, fmt.Errorf("admin not found")
	case err != nil:
		return nil, fmt.Errorf("failed to query admin: %w", err)
	}

	// Constant-time comparison against the stored argon2id hash.
	ok, err := argon2id.ComparePasswordAndHash(password, admin.Password)
	if err != nil {
		return nil, fmt.Errorf("failed to compare password: %w", err)
	}
	if !ok {
		return nil, fmt.Errorf("invalid credentials")
	}
	return admin, nil
}
// GetAdminByUsername fetches an admin record without the password hash,
// typically for populating JWT claims.
func (q *AdminQueries) GetAdminByUsername(username string) (*Admin, error) {
	ctx := context.Background()

	lookup := `
SELECT id, username, email, created_at
FROM users
WHERE username = $1
`
	admin := &Admin{}
	err := q.db.QueryRowContext(ctx, lookup, username).Scan(
		&admin.ID, &admin.Username, &admin.Email, &admin.CreatedAt,
	)
	switch {
	case err == sql.ErrNoRows:
		return nil, fmt.Errorf("admin not found")
	case err != nil:
		return nil, fmt.Errorf("failed to query admin: %w", err)
	}
	return admin, nil
}

View File

@@ -0,0 +1,35 @@
package queries
import (
"github.com/Fimeg/RedFlag/aggregator-server/internal/models"
"github.com/google/uuid"
"github.com/jmoiron/sqlx"
)
// PackageQueries provides an alias for AgentUpdateQueries to match the expected interface
// This maintains backward compatibility while using the existing agent update package system
type PackageQueries struct {
*AgentUpdateQueries
}
// NewPackageQueries creates a new PackageQueries instance
// backed by AgentUpdateQueries on the same database handle.
func NewPackageQueries(db *sqlx.DB) *PackageQueries {
return &PackageQueries{
AgentUpdateQueries: NewAgentUpdateQueries(db),
}
}
// StoreSignedPackage stores a signed agent package (alias for CreateUpdatePackage)
func (pq *PackageQueries) StoreSignedPackage(pkg *models.AgentUpdatePackage) error {
return pq.CreateUpdatePackage(pkg)
}
// GetSignedPackage retrieves a signed package (alias for GetUpdatePackageByVersion)
func (pq *PackageQueries) GetSignedPackage(version, platform, architecture string) (*models.AgentUpdatePackage, error) {
return pq.GetUpdatePackageByVersion(version, platform, architecture)
}
// GetSignedPackageByID retrieves a signed package by ID (alias for GetUpdatePackage)
func (pq *PackageQueries) GetSignedPackageByID(id uuid.UUID) (*models.AgentUpdatePackage, error) {
return pq.GetUpdatePackage(id)
}

View File

@@ -0,0 +1,131 @@
package queries
import (
"database/sql"
"fmt"
"time"
"github.com/jmoiron/sqlx"
)
// ScannerConfigQueries handles scanner timeout configuration in database
type ScannerConfigQueries struct {
db *sqlx.DB // may be nil; every method guards against a missing connection
}
// NewScannerConfigQueries creates new scanner config queries
func NewScannerConfigQueries(db *sqlx.DB) *ScannerConfigQueries {
return &ScannerConfigQueries{db: db}
}
// ScannerTimeoutConfig represents a scanner timeout configuration
// row from the scanner_config table.
type ScannerTimeoutConfig struct {
ScannerName string `db:"scanner_name" json:"scanner_name"`
TimeoutMs int `db:"timeout_ms" json:"timeout_ms"` // timeout stored in milliseconds
UpdatedAt time.Time `db:"updated_at" json:"updated_at"`
}
// UpsertScannerConfig stores the timeout for a scanner, inserting a new row
// or overwriting an existing one (keyed by scanner name). The duration is
// persisted in milliseconds.
func (q *ScannerConfigQueries) UpsertScannerConfig(scannerName string, timeout time.Duration) error {
	if q.db == nil {
		return fmt.Errorf("database connection not available")
	}

	upsert := `
INSERT INTO scanner_config (scanner_name, timeout_ms, updated_at)
VALUES ($1, $2, CURRENT_TIMESTAMP)
ON CONFLICT (scanner_name)
DO UPDATE SET
timeout_ms = EXCLUDED.timeout_ms,
updated_at = CURRENT_TIMESTAMP
`
	if _, err := q.db.Exec(upsert, scannerName, timeout.Milliseconds()); err != nil {
		return fmt.Errorf("failed to upsert scanner config: %w", err)
	}
	return nil
}
// GetScannerConfig retrieves the stored timeout configuration for one
// scanner. Returns (nil, nil) when no row exists for that scanner name, so
// callers can fall back to a default without treating absence as failure.
func (q *ScannerConfigQueries) GetScannerConfig(scannerName string) (*ScannerTimeoutConfig, error) {
	if q.db == nil {
		return nil, fmt.Errorf("database connection not available")
	}

	cfg := &ScannerTimeoutConfig{}
	err := q.db.Get(cfg, `SELECT scanner_name, timeout_ms, updated_at FROM scanner_config WHERE scanner_name = $1`, scannerName)
	switch {
	case err == sql.ErrNoRows:
		return nil, nil
	case err != nil:
		return nil, fmt.Errorf("failed to get scanner config: %w", err)
	}
	return cfg, nil
}
// GetAllScannerConfigs loads every scanner timeout row and returns them
// keyed by scanner name.
func (q *ScannerConfigQueries) GetAllScannerConfigs() (map[string]ScannerTimeoutConfig, error) {
	if q.db == nil {
		return nil, fmt.Errorf("database connection not available")
	}

	var rows []ScannerTimeoutConfig
	if err := q.db.Select(&rows, `SELECT scanner_name, timeout_ms, updated_at FROM scanner_config ORDER BY scanner_name`); err != nil {
		return nil, fmt.Errorf("failed to get all scanner configs: %w", err)
	}

	// Re-shape the result slice into a name-indexed map.
	byName := make(map[string]ScannerTimeoutConfig, len(rows))
	for _, row := range rows {
		byName[row.ScannerName] = row
	}
	return byName, nil
}
// DeleteScannerConfig removes the timeout row for a scanner. Returns
// sql.ErrNoRows when nothing was deleted so callers can distinguish
// "missing" from a real database failure.
func (q *ScannerConfigQueries) DeleteScannerConfig(scannerName string) error {
	if q.db == nil {
		return fmt.Errorf("database connection not available")
	}

	res, err := q.db.Exec(`DELETE FROM scanner_config WHERE scanner_name = $1`, scannerName)
	if err != nil {
		return fmt.Errorf("failed to delete scanner config: %w", err)
	}

	affected, err := res.RowsAffected()
	if err != nil {
		return fmt.Errorf("failed to verify delete: %w", err)
	}
	if affected == 0 {
		return sql.ErrNoRows
	}
	return nil
}
// GetScannerTimeoutWithDefault returns the configured timeout for a scanner,
// falling back to defaultTimeout when the lookup fails or no row exists.
func (q *ScannerConfigQueries) GetScannerTimeoutWithDefault(scannerName string, defaultTimeout time.Duration) time.Duration {
	cfg, err := q.GetScannerConfig(scannerName)
	if err != nil || cfg == nil {
		return defaultTimeout
	}
	return time.Duration(cfg.TimeoutMs) * time.Millisecond
}

View File

@@ -0,0 +1,255 @@
package queries
import (
"database/sql"
"encoding/json"
"fmt"
"time"
"github.com/Fimeg/RedFlag/aggregator-server/internal/models"
"github.com/google/uuid"
"github.com/jmoiron/sqlx"
)
// SecuritySettingsQueries provides CRUD and audit-log access to the
// security_settings and security_setting_audit tables.
type SecuritySettingsQueries struct {
db *sqlx.DB
}
// NewSecuritySettingsQueries creates a SecuritySettingsQueries bound to db.
func NewSecuritySettingsQueries(db *sqlx.DB) *SecuritySettingsQueries {
return &SecuritySettingsQueries{db: db}
}
// GetSetting retrieves one security setting by (category, key).
// Returns (nil, nil) when the setting does not exist.
func (q *SecuritySettingsQueries) GetSetting(category, key string) (*models.SecuritySetting, error) {
	lookup := `
SELECT id, category, key, value, is_encrypted, created_at, updated_at, created_by, updated_by
FROM security_settings
WHERE category = $1 AND key = $2
`
	setting := &models.SecuritySetting{}
	if err := q.db.Get(setting, lookup, category, key); err != nil {
		if err == sql.ErrNoRows {
			// Absence is not an error for callers.
			return nil, nil
		}
		return nil, fmt.Errorf("failed to get security setting: %w", err)
	}
	return setting, nil
}
// GetAllSettings returns every security setting, ordered by category then key.
func (q *SecuritySettingsQueries) GetAllSettings() ([]models.SecuritySetting, error) {
	listAll := `
SELECT id, category, key, value, is_encrypted, created_at, updated_at, created_by, updated_by
FROM security_settings
ORDER BY category, key
`
	var settings []models.SecuritySetting
	if err := q.db.Select(&settings, listAll); err != nil {
		return nil, fmt.Errorf("failed to get all security settings: %w", err)
	}
	return settings, nil
}
// GetSettingsByCategory returns all settings in one category, ordered by key.
func (q *SecuritySettingsQueries) GetSettingsByCategory(category string) ([]models.SecuritySetting, error) {
	listByCategory := `
SELECT id, category, key, value, is_encrypted, created_at, updated_at, created_by, updated_by
FROM security_settings
WHERE category = $1
ORDER BY key
`
	var settings []models.SecuritySetting
	if err := q.db.Select(&settings, listByCategory, category); err != nil {
		return nil, fmt.Errorf("failed to get security settings by category: %w", err)
	}
	return settings, nil
}
// CreateSetting creates a new security setting
// The value is JSON-encoded before storage. createdBy is optional and may be
// nil. The inserted row is read back via RETURNING * so DB-side defaults are
// reflected in the returned struct.
func (q *SecuritySettingsQueries) CreateSetting(category, key string, value interface{}, isEncrypted bool, createdBy *uuid.UUID) (*models.SecuritySetting, error) {
// Convert value to JSON string
valueJSON, err := json.Marshal(value)
if err != nil {
return nil, fmt.Errorf("failed to marshal setting value: %w", err)
}
setting := &models.SecuritySetting{
ID: uuid.New(),
Category: category,
Key: key,
Value: string(valueJSON),
IsEncrypted: isEncrypted,
CreatedAt: time.Now().UTC(),
CreatedBy: createdBy,
}
query := `
INSERT INTO security_settings (
id, category, key, value, is_encrypted, created_at, created_by
) VALUES (
:id, :category, :key, :value, :is_encrypted, :created_at, :created_by
)
RETURNING *
`
// NamedQuery is used (rather than NamedExec) so the RETURNING row can be
// scanned back into a struct.
rows, err := q.db.NamedQuery(query, setting)
if err != nil {
return nil, fmt.Errorf("failed to create security setting: %w", err)
}
defer rows.Close()
if rows.Next() {
var createdSetting models.SecuritySetting
if err := rows.StructScan(&createdSetting); err != nil {
return nil, fmt.Errorf("failed to scan created setting: %w", err)
}
return &createdSetting, nil
}
// INSERT ... RETURNING should always yield one row; reaching here means
// the driver returned none.
return nil, fmt.Errorf("failed to create security setting: no rows returned")
}
// UpdateSetting updates an existing security setting identified by category
// and key. It returns the updated row plus the previous JSON value so the
// caller can write an audit entry. Fails when the setting does not exist.
func (q *SecuritySettingsQueries) UpdateSetting(category, key string, value interface{}, updatedBy *uuid.UUID) (*models.SecuritySetting, *string, error) {
	// Fetch the current row first so callers can audit the old value.
	oldSetting, err := q.GetSetting(category, key)
	if err != nil {
		return nil, nil, fmt.Errorf("failed to get old setting: %w", err)
	}
	if oldSetting == nil {
		return nil, nil, fmt.Errorf("setting not found")
	}
	// oldSetting is known non-nil here — the original code re-checked it,
	// which was dead code after the early return above.
	oldValue := &oldSetting.Value

	// Convert new value to JSON string
	valueJSON, err := json.Marshal(value)
	if err != nil {
		return nil, oldValue, fmt.Errorf("failed to marshal setting value: %w", err)
	}

	now := time.Now().UTC()
	query := `
UPDATE security_settings
SET value = $1, updated_at = $2, updated_by = $3
WHERE category = $4 AND key = $5
RETURNING id, category, key, value, is_encrypted, created_at, updated_at, created_by, updated_by
`
	var updatedSetting models.SecuritySetting
	err = q.db.QueryRow(query, string(valueJSON), now, updatedBy, category, key).Scan(
		&updatedSetting.ID,
		&updatedSetting.Category,
		&updatedSetting.Key,
		&updatedSetting.Value,
		&updatedSetting.IsEncrypted,
		&updatedSetting.CreatedAt,
		&updatedSetting.UpdatedAt,
		&updatedSetting.CreatedBy,
		&updatedSetting.UpdatedBy,
	)
	if err != nil {
		return nil, oldValue, fmt.Errorf("failed to update security setting: %w", err)
	}
	return &updatedSetting, oldValue, nil
}
// DeleteSetting removes a security setting and returns its previous JSON
// value for audit logging. Returns (nil, nil) when the setting was absent.
func (q *SecuritySettingsQueries) DeleteSetting(category, key string) (*string, error) {
	// Look the row up first so a missing setting is reported as absence,
	// not as a failure.
	existing, err := q.GetSetting(category, key)
	if err != nil {
		return nil, fmt.Errorf("failed to get old setting: %w", err)
	}
	if existing == nil {
		return nil, nil
	}

	del := `
DELETE FROM security_settings
WHERE category = $1 AND key = $2
RETURNING value
`
	var previous string
	if err := q.db.QueryRow(del, category, key).Scan(&previous); err != nil {
		if err == sql.ErrNoRows {
			// Row vanished between the lookup and the delete.
			return nil, nil
		}
		return nil, fmt.Errorf("failed to delete security setting: %w", err)
	}
	return &previous, nil
}
// CreateAuditLog records a change to a security setting for audit purposes.
// Empty old/new values are stored as SQL NULL rather than empty strings.
func (q *SecuritySettingsQueries) CreateAuditLog(settingID, userID uuid.UUID, action, oldValue, newValue, reason string) error {
	entry := &models.SecuritySettingAudit{
		ID:        uuid.New(),
		SettingID: settingID,
		UserID:    userID,
		Action:    action,
		Reason:    reason,
		CreatedAt: time.Now().UTC(),
	}
	// Only attach values that are non-empty; nil pointers become NULL.
	if oldValue != "" {
		entry.OldValue = &oldValue
	}
	if newValue != "" {
		entry.NewValue = &newValue
	}

	insert := `
INSERT INTO security_setting_audit (
id, setting_id, user_id, action, old_value, new_value, reason, created_at
) VALUES (
:id, :setting_id, :user_id, :action, :old_value, :new_value, :reason, :created_at
)
`
	if _, err := q.db.NamedExec(insert, entry); err != nil {
		return fmt.Errorf("failed to create audit log: %w", err)
	}
	return nil
}
// GetAuditLogs returns up to limit audit entries for one setting, newest first.
func (q *SecuritySettingsQueries) GetAuditLogs(category, key string, limit int) ([]models.SecuritySettingAudit, error) {
	list := `
SELECT sa.id, sa.setting_id, sa.user_id, sa.action, sa.old_value, sa.new_value, sa.reason, sa.created_at
FROM security_setting_audit sa
INNER JOIN security_settings s ON sa.setting_id = s.id
WHERE s.category = $1 AND s.key = $2
ORDER BY sa.created_at DESC
LIMIT $3
`
	var entries []models.SecuritySettingAudit
	if err := q.db.Select(&entries, list, category, key, limit); err != nil {
		return nil, fmt.Errorf("failed to get audit logs: %w", err)
	}
	return entries, nil
}

View File

@@ -0,0 +1,167 @@
package queries
import (
	"context"
	"database/sql"
	"encoding/json"
	"fmt"
	"time"

	"github.com/Fimeg/RedFlag/aggregator-server/internal/models"
	"github.com/google/uuid"
	"github.com/lib/pq"
)
// StorageMetricsQueries handles storage metrics database operations
// against the storage_metrics table.
type StorageMetricsQueries struct {
db *sql.DB // plain database/sql handle (unlike the sqlx-based query types)
}
// NewStorageMetricsQueries creates a new storage metrics queries instance
func NewStorageMetricsQueries(db *sql.DB) *StorageMetricsQueries {
return &StorageMetricsQueries{db: db}
}
// InsertStorageMetric inserts a new storage metric
//
// NOTE(review): metric.Metadata is a map[string]interface{} destined for the
// JSONB metadata column, but it is wrapped in pq.Array below. pq.Array is
// documented for PostgreSQL array types, not JSONB, so this will error (or
// store garbage) at runtime for map values — it should be serialized with
// json.Marshal and passed as []byte instead. TODO confirm and fix together
// with the JSONB scanning in the Get* methods of this type.
func (q *StorageMetricsQueries) InsertStorageMetric(ctx context.Context, metric models.StorageMetric) error {
query := `
INSERT INTO storage_metrics (
id, agent_id, mountpoint, device, disk_type, filesystem,
total_bytes, used_bytes, available_bytes, used_percent,
severity, metadata, created_at
) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13)
`
_, err := q.db.ExecContext(ctx, query,
metric.ID, metric.AgentID, metric.Mountpoint, metric.Device,
metric.DiskType, metric.Filesystem, metric.TotalBytes,
metric.UsedBytes, metric.AvailableBytes, metric.UsedPercent,
metric.Severity, pq.Array(metric.Metadata), metric.CreatedAt,
)
if err != nil {
return fmt.Errorf("failed to insert storage metric: %w", err)
}
return nil
}
// GetStorageMetricsByAgentID retrieves storage metrics for an agent, newest
// first, with limit/offset pagination.
//
// The JSONB metadata column is scanned as raw bytes and decoded explicitly:
// database/sql cannot scan directly into a map[string]interface{}, which is
// what the original implementation attempted (it failed at runtime on the
// first row containing metadata).
func (q *StorageMetricsQueries) GetStorageMetricsByAgentID(ctx context.Context, agentID uuid.UUID, limit, offset int) ([]models.StorageMetric, error) {
	query := `
SELECT id, agent_id, mountpoint, device, disk_type, filesystem,
total_bytes, used_bytes, available_bytes, used_percent,
severity, metadata, created_at
FROM storage_metrics
WHERE agent_id = $1
ORDER BY created_at DESC
LIMIT $2 OFFSET $3
`
	rows, err := q.db.QueryContext(ctx, query, agentID, limit, offset)
	if err != nil {
		return nil, fmt.Errorf("failed to query storage metrics: %w", err)
	}
	defer rows.Close()

	var metrics []models.StorageMetric
	for rows.Next() {
		var metric models.StorageMetric
		var metadataRaw []byte
		err := rows.Scan(
			&metric.ID, &metric.AgentID, &metric.Mountpoint, &metric.Device,
			&metric.DiskType, &metric.Filesystem, &metric.TotalBytes,
			&metric.UsedBytes, &metric.AvailableBytes, &metric.UsedPercent,
			&metric.Severity, &metadataRaw, &metric.CreatedAt,
		)
		if err != nil {
			return nil, fmt.Errorf("failed to scan storage metric: %w", err)
		}
		// metadata may be NULL; only decode when present.
		if len(metadataRaw) > 0 {
			if err := json.Unmarshal(metadataRaw, &metric.Metadata); err != nil {
				return nil, fmt.Errorf("failed to decode storage metric metadata: %w", err)
			}
		}
		metrics = append(metrics, metric)
	}
	if err := rows.Err(); err != nil {
		return nil, fmt.Errorf("error iterating storage metrics: %w", err)
	}
	return metrics, nil
}
// GetLatestStorageMetrics retrieves the most recent storage metric per
// mountpoint for one agent (Postgres DISTINCT ON keeps only the newest row
// of each mountpoint group).
//
// As in GetStorageMetricsByAgentID, the JSONB metadata column is scanned as
// raw bytes and decoded with json.Unmarshal — database/sql cannot scan a
// JSONB value directly into a map[string]interface{}, which is what the
// original implementation attempted.
func (q *StorageMetricsQueries) GetLatestStorageMetrics(ctx context.Context, agentID uuid.UUID) ([]models.StorageMetric, error) {
	query := `
SELECT DISTINCT ON (mountpoint)
id, agent_id, mountpoint, device, disk_type, filesystem,
total_bytes, used_bytes, available_bytes, used_percent,
severity, metadata, created_at
FROM storage_metrics
WHERE agent_id = $1
ORDER BY mountpoint, created_at DESC
`
	rows, err := q.db.QueryContext(ctx, query, agentID)
	if err != nil {
		return nil, fmt.Errorf("failed to query latest storage metrics: %w", err)
	}
	defer rows.Close()

	var metrics []models.StorageMetric
	for rows.Next() {
		var metric models.StorageMetric
		var metadataRaw []byte
		err := rows.Scan(
			&metric.ID, &metric.AgentID, &metric.Mountpoint, &metric.Device,
			&metric.DiskType, &metric.Filesystem, &metric.TotalBytes,
			&metric.UsedBytes, &metric.AvailableBytes, &metric.UsedPercent,
			&metric.Severity, &metadataRaw, &metric.CreatedAt,
		)
		if err != nil {
			return nil, fmt.Errorf("failed to scan storage metric: %w", err)
		}
		// metadata may be NULL; only decode when present.
		if len(metadataRaw) > 0 {
			if err := json.Unmarshal(metadataRaw, &metric.Metadata); err != nil {
				return nil, fmt.Errorf("failed to decode storage metric metadata: %w", err)
			}
		}
		metrics = append(metrics, metric)
	}
	if err := rows.Err(); err != nil {
		return nil, fmt.Errorf("error iterating latest storage metrics: %w", err)
	}
	return metrics, nil
}
// GetStorageMetricsSummary returns aggregate statistics over the last 24
// hours of storage metrics for an agent: disk counts by severity, average
// and maximum utilization, and the collection time window.
//
// This fixes the original implementation, which scanned into elements of a
// nil map — Go map elements are not addressable, so that code did not
// compile. Aggregates are scanned into nullable locals first because AVG/
// MAX/MIN over zero rows come back as SQL NULL; NULL aggregates are simply
// omitted from the returned map.
func (q *StorageMetricsQueries) GetStorageMetricsSummary(ctx context.Context, agentID uuid.UUID) (map[string]interface{}, error) {
	query := `
SELECT
COUNT(*) as total_disks,
COUNT(CASE WHEN severity = 'critical' THEN 1 END) as critical_disks,
COUNT(CASE WHEN severity = 'important' THEN 1 END) as important_disks,
AVG(used_percent) as avg_used_percent,
MAX(used_percent) as max_used_percent,
MIN(created_at) as first_collected_at,
MAX(created_at) as last_collected_at
FROM storage_metrics
WHERE agent_id = $1
AND created_at >= NOW() - INTERVAL '24 hours'
`
	var (
		totalDisks     int64
		criticalDisks  int64
		importantDisks int64
		avgUsed        sql.NullFloat64
		maxUsed        sql.NullFloat64
		firstAt        sql.NullTime
		lastAt         sql.NullTime
	)
	err := q.db.QueryRowContext(ctx, query, agentID).Scan(
		&totalDisks, &criticalDisks, &importantDisks,
		&avgUsed, &maxUsed, &firstAt, &lastAt,
	)
	if err != nil {
		return nil, fmt.Errorf("failed to get storage metrics summary: %w", err)
	}

	summary := map[string]interface{}{
		"total_disks":     totalDisks,
		"critical_disks":  criticalDisks,
		"important_disks": importantDisks,
	}
	if avgUsed.Valid {
		summary["avg_used_percent"] = avgUsed.Float64
	}
	if maxUsed.Valid {
		summary["max_used_percent"] = maxUsed.Float64
	}
	if firstAt.Valid {
		summary["first_collected_at"] = firstAt.Time
	}
	if lastAt.Valid {
		summary["last_collected_at"] = lastAt.Time
	}
	return summary, nil
}

View File

@@ -0,0 +1,40 @@
package models
import (
"time"
"github.com/google/uuid"
)
// StorageMetric represents a storage metric from an agent
// One row corresponds to a single mountpoint sample taken at CreatedAt.
type StorageMetric struct {
ID uuid.UUID `json:"id" db:"id"`
AgentID uuid.UUID `json:"agent_id" db:"agent_id"`
Mountpoint string `json:"mountpoint" db:"mountpoint"`
Device string `json:"device" db:"device"`
DiskType string `json:"disk_type" db:"disk_type"`
Filesystem string `json:"filesystem" db:"filesystem"`
TotalBytes int64 `json:"total_bytes" db:"total_bytes"`
UsedBytes int64 `json:"used_bytes" db:"used_bytes"`
AvailableBytes int64 `json:"available_bytes" db:"available_bytes"`
UsedPercent float64 `json:"used_percent" db:"used_percent"`
Severity string `json:"severity" db:"severity"` // server queries filter on 'critical'/'important'; full value set TODO confirm
Metadata map[string]interface{} `json:"metadata,omitempty" db:"metadata"` // persisted as JSONB
CreatedAt time.Time `json:"created_at" db:"created_at"`
}
// StorageMetricRequest represents the request payload for storage metrics
// reported by an agent as one batch.
type StorageMetricRequest struct {
AgentID uuid.UUID `json:"agent_id"`
CommandID string `json:"command_id"`
Timestamp time.Time `json:"timestamp"`
Metrics []StorageMetric `json:"metrics"`
}
// StorageMetricsList represents a list of storage metrics with pagination
type StorageMetricsList struct {
Metrics []StorageMetric `json:"metrics"`
Total int `json:"total"`
Page int `json:"page"`
PerPage int `json:"per_page"`
}

View File

@@ -27,27 +27,31 @@ func NewInstallTemplateService() *InstallTemplateService {
func (s *InstallTemplateService) RenderInstallScript(agent *models.Agent, binaryURL, configURL string) (string, error) {
// Define template data
data := struct {
AgentID string
BinaryURL string
ConfigURL string
Platform string
Architecture string
Version string
AgentUser string
AgentHome string
ConfigDir string
LogDir string
AgentID string
BinaryURL string
ConfigURL string
Platform string
Architecture string
Version string
AgentUser string
AgentHome string
ConfigDir string
LogDir string
AgentConfigDir string
AgentLogDir string
}{
AgentID: agent.ID.String(),
BinaryURL: binaryURL,
ConfigURL: configURL,
Platform: agent.OSType,
Architecture: agent.OSArchitecture,
Version: agent.CurrentVersion,
AgentUser: "redflag-agent",
AgentHome: "/var/lib/redflag-agent",
ConfigDir: "/etc/redflag",
LogDir: "/var/log/redflag",
AgentID: agent.ID.String(),
BinaryURL: binaryURL,
ConfigURL: configURL,
Platform: agent.OSType,
Architecture: agent.OSArchitecture,
Version: agent.CurrentVersion,
AgentUser: "redflag-agent",
AgentHome: "/var/lib/redflag/agent",
ConfigDir: "/etc/redflag",
LogDir: "/var/log/redflag",
AgentConfigDir: "/etc/redflag/agent",
AgentLogDir: "/var/log/redflag/agent",
}
// Choose template based on platform
@@ -102,6 +106,8 @@ func (s *InstallTemplateService) RenderInstallScriptFromBuild(
AgentHome string
ConfigDir string
LogDir string
AgentConfigDir string
AgentLogDir string
}{
AgentID: agentID,
BinaryURL: binaryURL,
@@ -112,9 +118,11 @@ func (s *InstallTemplateService) RenderInstallScriptFromBuild(
ServerURL: serverURL,
RegistrationToken: registrationToken,
AgentUser: "redflag-agent",
AgentHome: "/var/lib/redflag-agent",
AgentHome: "/var/lib/redflag/agent",
ConfigDir: "/etc/redflag",
LogDir: "/var/log/redflag",
AgentConfigDir: "/etc/redflag/agent",
AgentLogDir: "/var/log/redflag/agent",
}
templateName := "templates/install/scripts/linux.sh.tmpl"

View File

@@ -14,7 +14,11 @@ if [ "$EUID" -ne 0 ]; then
fi
AGENT_USER="redflag-agent"
AGENT_HOME="/var/lib/redflag-agent"
BASE_DIR="/var/lib/redflag"
CONFIG_DIR="/etc/redflag"
AGENT_CONFIG_DIR="/etc/redflag/agent"
LOG_DIR="/var/log/redflag"
AGENT_LOG_DIR="/var/log/redflag/agent"
SUDOERS_FILE="/etc/sudoers.d/redflag-agent"
# Function to detect package manager
@@ -45,7 +49,7 @@ VERSION="{{.Version}}"
LOG_DIR="/var/log/redflag"
BACKUP_DIR="${CONFIG_DIR}/backups/backup.$(date +%s)"
AGENT_USER="redflag-agent"
AGENT_HOME="/var/lib/redflag-agent"
AGENT_HOME="{{.AgentHome}}"
SUDOERS_FILE="/etc/sudoers.d/redflag-agent"
echo "=== RedFlag Agent v${VERSION} Installation ==="
@@ -99,12 +103,29 @@ else
echo "✓ User $AGENT_USER created"
fi
# Create home directory
# Create home directory structure
if [ ! -d "$AGENT_HOME" ]; then
# Create nested directory structure
sudo mkdir -p "$BASE_DIR"
sudo mkdir -p "$AGENT_HOME"
sudo chown "$AGENT_USER:$AGENT_USER" "$AGENT_HOME"
sudo mkdir -p "$AGENT_HOME/cache"
sudo mkdir -p "$AGENT_HOME/state"
sudo mkdir -p "$AGENT_CONFIG_DIR"
sudo mkdir -p "$AGENT_LOG_DIR"
# Set ownership and permissions
sudo chown -R "$AGENT_USER:$AGENT_USER" "$BASE_DIR"
sudo chmod 750 "$BASE_DIR"
sudo chmod 750 "$AGENT_HOME"
echo "✓ Home directory created at $AGENT_HOME"
sudo chmod 750 "$AGENT_HOME/cache"
sudo chmod 750 "$AGENT_HOME/state"
sudo chmod 755 "$AGENT_CONFIG_DIR"
sudo chmod 755 "$AGENT_LOG_DIR"
echo "✓ Agent directory structure created:"
echo " - Agent home: $AGENT_HOME"
echo " - Config: $AGENT_CONFIG_DIR"
echo " - Logs: $AGENT_LOG_DIR"
fi
# Step 4: Install sudoers configuration with OS-specific commands
@@ -173,10 +194,10 @@ fi
# Step 4: Create directories
echo "Creating directories..."
sudo mkdir -p "${CONFIG_DIR}"
sudo mkdir -p "${CONFIG_DIR}/backups"
sudo mkdir -p "${AGENT_CONFIG_DIR}"
sudo mkdir -p "${CONFIG_DIR}/backups" # Legacy backup location
sudo mkdir -p "$AGENT_HOME"
sudo mkdir -p "/var/log/redflag"
sudo mkdir -p "$AGENT_LOG_DIR"
# Step 5: Download agent binary
echo "Downloading agent binary..."
@@ -186,14 +207,14 @@ sudo chmod +x "${INSTALL_DIR}/${SERVICE_NAME}"
# Step 6: Handle configuration
# IMPORTANT: The agent handles its own migration on first start.
# We either preserve existing config OR create a minimal template.
if [ -f "${CONFIG_DIR}/config.json" ]; then
if [ -f "${AGENT_CONFIG_DIR}/config.json" ]; then
echo "[CONFIG] Upgrade detected - preserving existing configuration"
echo "[CONFIG] Agent will handle migration automatically on first start"
echo "[CONFIG] Backup created at: ${BACKUP_DIR}"
else
echo "[CONFIG] Fresh install - generating minimal configuration with registration token"
# Create minimal config template - agent will populate missing fields on first start
sudo tee "${CONFIG_DIR}/config.json" > /dev/null <<EOF
sudo tee "${AGENT_CONFIG_DIR}/config.json" > /dev/null <<EOF
{
"version": 5,
"agent_version": "${VERSION}",
@@ -241,7 +262,7 @@ EOF
fi
# Step 7: Set permissions on config file
sudo chmod 600 "${CONFIG_DIR}/config.json"
sudo chmod 600 "${AGENT_CONFIG_DIR}/config.json"
# Step 8: Create systemd service with security hardening
echo "Creating systemd service with security configuration..."
@@ -266,7 +287,7 @@ RestartPreventExitStatus=255
# Note: NoNewPrivileges disabled to allow sudo for package management
ProtectSystem=strict
ProtectHome=true
ReadWritePaths={{.AgentHome}} {{.ConfigDir}} {{.LogDir}}
ReadWritePaths={{.AgentHome}} {{.AgentHome}}/cache {{.AgentHome}}/state {{.AgentHome}}/migration_backups {{.AgentConfigDir}} {{.AgentLogDir}}
PrivateTmp=true
ProtectKernelTunables=true
ProtectKernelModules=true
@@ -286,13 +307,36 @@ EOF
# Set proper permissions on directories
echo "Setting directory permissions..."
sudo chown -R {{.AgentUser}}:{{.AgentUser}} "{{.ConfigDir}}"
sudo chown {{.AgentUser}}:{{.AgentUser}} "{{.ConfigDir}}/config.json"
sudo chmod 600 "{{.ConfigDir}}/config.json"
sudo chown -R {{.AgentUser}}:{{.AgentUser}} "{{.AgentConfigDir}}"
sudo chown {{.AgentUser}}:{{.AgentUser}} "{{.AgentConfigDir}}/config.json"
sudo chmod 600 "{{.AgentConfigDir}}/config.json"
sudo chown -R {{.AgentUser}}:{{.AgentUser}} "{{.AgentHome}}"
sudo chmod 750 "{{.AgentHome}}"
sudo chown -R {{.AgentUser}}:{{.AgentUser}} "{{.LogDir}}"
sudo chmod 750 "{{.LogDir}}"
sudo chown -R {{.AgentUser}}:{{.AgentUser}} "{{.AgentLogDir}}"
sudo chmod 750 "{{.AgentLogDir}}"
# Register agent with server (if token provided)
if [ -n "{{.RegistrationToken}}" ]; then
echo "[INFO] [installer] [register] Registering agent with server..."
if sudo -u "{{.AgentUser}}" "${INSTALL_DIR}/${SERVICE_NAME}" --server "{{.ServerURL}}" --token "{{.RegistrationToken}}" --register; then
echo "[SUCCESS] [installer] [register] Agent registered successfully"
echo "[INFO] [installer] [register] Agent ID assigned, configuration updated"
else
echo "[ERROR] [installer] [register] Registration failed - check token validity and server connectivity"
echo "[WARN] [installer] [register] Agent installed but not registered. Service will not start."
echo ""
echo "[INFO] [installer] [register] To retry registration manually:"
echo "[INFO] [installer] [register] sudo -u {{.AgentUser}} ${INSTALL_DIR}/${SERVICE_NAME} --server {{.ServerURL}} --token YOUR_TOKEN --register"
echo "[INFO] [installer] [register] Then start service:"
echo "[INFO] [installer] [register] sudo systemctl start ${SERVICE_NAME}"
exit 1
fi
else
echo "[INFO] [installer] [register] No registration token provided - skipping registration"
echo "[INFO] [installer] [register] Service will start but agent will exit until registered"
echo "[INFO] [installer] [register] To register manually:"
echo "[INFO] [installer] [register] sudo -u {{.AgentUser}} ${INSTALL_DIR}/${SERVICE_NAME} --server {{.ServerURL}} --token YOUR_TOKEN --register"
fi
# Step 9: Enable and start service
echo "Enabling and starting service..."

View File

@@ -198,6 +198,31 @@ if (Test-Path $ConfigPath) {
Write-Host "Setting file permissions..." -ForegroundColor Yellow
icacls $ConfigPath /inheritance:r /grant:r "SYSTEM:(OI)(CI)F" /grant:r "Administrators:(OI)(CI)F" | Out-Null
# Register agent with server (if token provided)
if ("{{.RegistrationToken}}" -ne "") {
Write-Host "[INFO] [installer] [register] Registering agent with server..." -ForegroundColor Cyan
$AgentBinary = if ($AgentPath) { "$AgentPath" } else { "$AgentDir\redflag-agent.exe" }
$RegisterProcess = Start-Process -FilePath $AgentBinary -ArgumentList "--server", "{{.ServerURL}}", "--token", "{{.RegistrationToken}}", "--register" -Wait -PassThru -NoNewWindow
if ($RegisterProcess.ExitCode -eq 0) {
Write-Host "[SUCCESS] [installer] [register] Agent registered successfully" -ForegroundColor Green
Write-Host "[INFO] [installer] [register] Agent ID assigned, configuration updated" -ForegroundColor Gray
} else {
Write-Host "[ERROR] [installer] [register] Registration failed - check token validity and server connectivity" -ForegroundColor Red
Write-Host "[WARN] [installer] [register] Agent installed but not registered. Service will not start." -ForegroundColor Yellow
Write-Host ""
Write-Host "[INFO] [installer] [register] To retry registration manually:" -ForegroundColor Gray
Write-Host "[INFO] [installer] [register] $AgentBinary --server {{.ServerURL}} --token YOUR_TOKEN --register" -ForegroundColor Gray
Write-Host "[INFO] [installer] [register] Then start service:" -ForegroundColor Gray
Write-Host "[INFO] [installer] [register] Start-Service -Name $ServiceName" -ForegroundColor Gray
exit 1
}
} else {
Write-Host "[INFO] [installer] [register] No registration token provided - skipping registration" -ForegroundColor Gray
Write-Host "[INFO] [installer] [register] Service will start but agent will exit until registered" -ForegroundColor Gray
Write-Host "[INFO] [installer] [register] To register manually:" -ForegroundColor Gray
Write-Host "[INFO] [installer] [register] $AgentBinary --server {{.ServerURL}} --token YOUR_TOKEN --register" -ForegroundColor Gray
}
# Step 6: Install Windows service (if not skipped)
if (-not $SkipServiceInstall) {
Write-Host "Creating Windows service..." -ForegroundColor Yellow

View File

@@ -45,8 +45,8 @@ interface StorageMetrics {
export function AgentStorage({ agentId }: AgentStorageProps) {
const [isScanning, setIsScanning] = useState(false);
// Fetch agent's latest system info with enhanced disk data
const { data: agentData, refetch: refetchAgent } = useQuery({
// Fetch agent details and storage metrics
const { data: agentData } = useQuery({
queryKey: ['agent', agentId],
queryFn: async () => {
return await agentApi.getAgent(agentId);
@@ -54,6 +54,15 @@ export function AgentStorage({ agentId }: AgentStorageProps) {
refetchInterval: 30000, // Refresh every 30 seconds
});
// Fetch storage metrics from dedicated endpoint
const { data: storageData, refetch: refetchStorage } = useQuery({
queryKey: ['storage-metrics', agentId],
queryFn: async () => {
return await storageMetricsApi.getStorageMetrics(agentId);
},
refetchInterval: 30000, // Refresh every 30 seconds
});
const handleFullStorageScan = async () => {
setIsScanning(true);
try {
@@ -72,28 +81,49 @@ export function AgentStorage({ agentId }: AgentStorageProps) {
}
};
// Extract storage metrics from agent metadata
const storageMetrics: StorageMetrics | null = agentData ? {
cpu_percent: 0,
memory_percent: agentData.metadata?.memory_percent || 0,
memory_used_gb: agentData.metadata?.memory_used_gb || 0,
memory_total_gb: agentData.metadata?.memory_total_gb || 0,
disk_used_gb: agentData.metadata?.disk_used_gb || 0,
disk_total_gb: agentData.metadata?.disk_total_gb || 0,
disk_percent: agentData.metadata?.disk_percent || 0,
largest_disk_used_gb: agentData.metadata?.largest_disk_used_gb || 0,
largest_disk_total_gb: agentData.metadata?.largest_disk_total_gb || 0,
largest_disk_percent: agentData.metadata?.largest_disk_percent || 0,
largest_disk_mount: agentData.metadata?.largest_disk_mount || '',
uptime: agentData.metadata?.uptime || '',
} : null;
// Process storage metrics data
const storageMetrics: StorageMetrics | null = useMemo(() => {
if (!storageData?.metrics || storageData.metrics.length === 0) {
return null;
}
// Parse disk info from system information if available
// Find root disk for summary metrics
const rootDisk = storageData.metrics.find((m: any) => m.is_root) || storageData.metrics[0];
const largestDisk = storageData.metrics.find((m: any) => m.is_largest) || rootDisk;
return {
cpu_percent: 0, // CPU not included in storage metrics, comes from system metrics
memory_percent: 0, // Memory not included in storage metrics, comes from system metrics
memory_used_gb: 0,
memory_total_gb: 0,
disk_used_gb: largestDisk ? largestDisk.used_bytes / (1024 * 1024 * 1024) : 0,
disk_total_gb: largestDisk ? largestDisk.total_bytes / (1024 * 1024 * 1024) : 0,
disk_percent: largestDisk ? largestDisk.used_percent : 0,
largest_disk_used_gb: largestDisk ? largestDisk.used_bytes / (1024 * 1024 * 1024) : 0,
largest_disk_total_gb: largestDisk ? largestDisk.total_bytes / (1024 * 1024 * 1024) : 0,
largest_disk_percent: largestDisk ? largestDisk.used_percent : 0,
largest_disk_mount: largestDisk ? largestDisk.mountpoint : '',
uptime: '', // Uptime not included in storage metrics
};
}, [storageData]);
// Parse disk info from storage metrics
const parseDiskInfo = (): DiskInfo[] => {
const systemInfo = agentData?.system_info;
if (!systemInfo?.disk_info) return [];
if (!storageData?.metrics) return [];
return systemInfo.disk_info.map((disk: any) => ({
return storageData.metrics.map((disk: any) => ({
mountpoint: disk.mountpoint,
device: disk.device,
disk_type: disk.disk_type,
total: disk.total_bytes,
available: disk.available_bytes,
used: disk.used_bytes,
used_percent: disk.used_percent,
filesystem: disk.filesystem,
is_root: disk.is_root || false,
is_largest: disk.is_largest || false,
}));
};
mountpoint: disk.mountpoint,
total: disk.total,
available: disk.available,
@@ -170,29 +200,103 @@ export function AgentStorage({ agentId }: AgentStorageProps) {
</div>
)}
{/* All Disks from system_info.disk_info - BLUE matching Overview */}
{disks.length > 0 && disks.map((disk, index) => (
<div key={index}>
<div className="flex items-center justify-between">
<p className="text-sm text-gray-600 flex items-center">
<HardDrive className="h-4 w-4 mr-1" />
Disk ({disk.mountpoint})
</p>
<p className="text-sm font-medium text-gray-900">
{formatBytes(disk.used)} / {formatBytes(disk.total)}
</p>
</div>
<div className="w-full bg-gray-200 rounded-full h-2 mt-1">
<div
className="bg-blue-600 h-2 rounded-full transition-all"
style={{ width: `${Math.min(disk.used_percent, 100)}%` }}
/>
</div>
<p className="text-xs text-gray-500 mt-1">
{disk.used_percent.toFixed(0)}% used
</p>
{/* Quick Overview - Simple disk bars for at-a-glance view */}
{disks.length > 0 && (
<div className="space-y-3">
<h3 className="text-sm font-medium text-gray-900">Disk Usage (Overview)</h3>
{disks.map((disk, index) => (
<div key={`overview-${index}`}>
<div className="flex items-center justify-between">
<p className="text-sm text-gray-600 flex items-center">
<HardDrive className="h-4 w-4 mr-1" />
{disk.mountpoint} ({disk.filesystem})
</p>
<p className="text-sm font-medium text-gray-900">
{formatBytes(disk.used)} / {formatBytes(disk.total)} ({disk.used_percent.toFixed(0)}%)
</p>
</div>
<div className="w-full bg-gray-200 rounded-full h-2 mt-1">
<div
className="bg-blue-600 h-2 rounded-full transition-all"
style={{ width: `${Math.min(disk.used_percent, 100)}%` }}
/>
</div>
</div>
))}
</div>
))}
)}
{/* Enhanced Disk Table - Shows all partitions with full details */}
{disks.length > 0 && (
<div className="overflow-hidden">
<div className="flex items-center justify-between mb-3">
<h3 className="text-sm font-medium text-gray-900">Disk Partitions (Detailed)</h3>
<span className="text-xs text-gray-500">{disks.length} {disks.length === 1 ? 'partition' : 'partitions'} detected</span>
</div>
<div className="overflow-x-auto">
<table className="min-w-full text-sm">
<thead className="bg-gray-50">
<tr className="border-b border-gray-200">
<th className="text-left py-2 px-3 font-medium text-gray-700">Mount</th>
<th className="text-left py-2 px-3 font-medium text-gray-700">Device</th>
<th className="text-left py-2 px-3 font-medium text-gray-700">Type</th>
<th className="text-left py-2 px-3 font-medium text-gray-700">FS</th>
<th className="text-right py-2 px-3 font-medium text-gray-700">Size</th>
<th className="text-right py-2 px-3 font-medium text-gray-700">Used</th>
<th className="text-center py-2 px-3 font-medium text-gray-700">%</th>
<th className="text-center py-2 px-3 font-medium text-gray-700">Flags</th>
</tr>
</thead>
<tbody className="divide-y divide-gray-100">
{disks.map((disk, index) => (
<tr key={index} className="hover:bg-gray-50">
<td className="py-2 px-3 text-gray-900">
<div className="flex items-center">
<HardDrive className="h-3 w-3 mr-1 text-gray-400" />
<span className="font-medium text-xs">{disk.mountpoint}</span>
{disk.is_root && <span className="ml-1 text-[10px] text-blue-600 bg-blue-100 px-1 rounded">ROOT</span>}
{disk.is_largest && <span className="ml-1 text-[10px] text-purple-600 bg-purple-100 px-1 rounded">LARGEST</span>}
</div>
</td>
<td className="py-2 px-3 text-gray-700 text-xs font-mono">{disk.device}</td>
<td className="py-2 px-3 text-gray-700 text-xs capitalize">{disk.disk_type.toLowerCase()}</td>
<td className="py-2 px-3 text-gray-700 text-xs font-mono">{disk.filesystem}</td>
<td className="py-2 px-3 text-gray-900 text-xs text-right">{formatBytes(disk.total)}</td>
<td className="py-2 px-3 text-gray-900 text-xs text-right">{formatBytes(disk.used)}</td>
<td className="py-2 px-3 text-center">
<div className="inline-flex items-center">
<div className="w-12 bg-gray-200 rounded-full h-1.5 mr-2">
<div
className="bg-blue-600 h-1.5 rounded-full"
style={{ width: `${Math.min(disk.used_percent, 100)}%` }}
/>
</div>
<span className="text-xs font-medium">{disk.used_percent.toFixed(0)}%</span>
</div>
</td>
<td className="py-2 px-3 text-center text-xs text-gray-500">
{disk.severity !== 'low' && (
<span className={cn(
disk.severity === 'critical' ? 'text-red-600' :
disk.severity === 'important' ? 'text-amber-600' :
'text-yellow-600'
)}>
{disk.severity.toUpperCase()}
</span>
)}
</td>
</tr>
))}
</tbody>
</table>
</div>
<div className="mt-3 text-xs text-gray-500">
Showing {disks.length} disk partitions Auto-refreshes every 30 seconds
</div>
</div>
)}
{/* Fallback if no disk array but we have metadata */}
{disks.length === 0 && storageMetrics && storageMetrics.disk_total_gb > 0 && (

View File

@@ -862,4 +862,18 @@ export const securityApi = {
},
};
// Storage Metrics API
// Client for the dedicated per-agent storage metrics endpoints
// (POST/GET /agents/:id/storage-metrics). Assumes `api` is the shared
// axios-style instance defined above in this module.
export const storageMetricsApi = {
  // Report storage metrics (agent only)
  // `data` is the raw metrics payload; it is posted as-is without validation.
  async reportStorageMetrics(agentID: string, data: any): Promise<void> {
    await api.post(`/agents/${agentID}/storage-metrics`, data);
  },

  // Get storage metrics for an agent
  // Returns the response body (expected shape: { metrics: [...] } — see AgentStorage usage).
  async getStorageMetrics(agentID: string): Promise<any> {
    const response = await api.get(`/agents/${agentID}/storage-metrics`);
    return response.data;
  },
};
export default api;

809
config_builder.go.restored Normal file
View File

@@ -0,0 +1,809 @@
package services
import (
	"crypto/rand"
	"encoding/hex"
	"encoding/json"
	"fmt"
	"net/http"
	"regexp"
	"time"

	"github.com/Fimeg/RedFlag/aggregator-server/internal/database/queries"
	"github.com/google/uuid"
)
// AgentTemplate defines a template for different agent types.
// Each template carries a base configuration, the names of secrets the
// agent requires, and validation rules applied to the final rendered
// configuration by validateConfiguration.
type AgentTemplate struct {
	Name        string                 `json:"name"`             // Human-readable template name
	Description string                 `json:"description"`      // Short summary of the intended deployment
	BaseConfig  map[string]interface{} `json:"base_config"`      // Default configuration values
	Secrets     []string               `json:"required_secrets"` // Names of secrets the agent needs
	Validation  ValidationRules        `json:"validation"`       // Rules checked against the final config
}
// ValidationRules defines validation rules for configuration.
type ValidationRules struct {
	RequiredFields []string               `json:"required_fields"` // Keys that must be present in the config
	AllowedValues  map[string][]string    `json:"allowed_values"`  // Per-field whitelists for string values
	Patterns       map[string]string      `json:"patterns"`        // Per-field regex patterns for string values
	Constraints    map[string]interface{} `json:"constraints"`     // Per-field numeric constraints ({"min": x, "max": y})
}
// PublicKeyResponse represents the server's public key response
// returned by GET /api/v1/public-key.
type PublicKeyResponse struct {
	PublicKey   string `json:"public_key"`  // Public key material (only this field is consumed here)
	Fingerprint string `json:"fingerprint"` // Key fingerprint for identification
	Algorithm   string `json:"algorithm"`   // Key algorithm name
	KeySize     int    `json:"key_size"`    // Key size in bits
}
// ConfigBuilder handles dynamic agent configuration generation.
// NOTE(review): publicKeyCache is a plain map with no mutex; this is only
// safe if a ConfigBuilder is used from a single goroutine — confirm at call
// sites before sharing one instance across requests.
type ConfigBuilder struct {
	serverURL      string                        // Base URL of the aggregator server
	templates      map[string]AgentTemplate      // Agent-type templates, loaded once in NewConfigBuilder
	httpClient     *http.Client                  // Used to fetch the server public key (30s timeout)
	publicKeyCache map[string]string             // server URL -> cached public key
	scannerConfigQ *queries.ScannerConfigQueries // DB-backed scanner timeout overrides (may be nil)
}
// NewConfigBuilder creates a new configuration builder bound to serverURL.
// Agent templates are loaded once up front, server public keys are cached
// per server URL, and scanner timeout overrides are read through db.
func NewConfigBuilder(serverURL string, db queries.DBInterface) *ConfigBuilder {
	builder := &ConfigBuilder{
		serverURL:      serverURL,
		templates:      getAgentTemplates(),
		httpClient:     &http.Client{Timeout: 30 * time.Second},
		publicKeyCache: make(map[string]string),
		scannerConfigQ: queries.NewScannerConfigQueries(db),
	}
	return builder
}
// AgentSetupRequest represents a request to set up a new agent.
type AgentSetupRequest struct {
	ServerURL      string                 `json:"server_url" binding:"required"`  // Aggregator server base URL
	Environment    string                 `json:"environment" binding:"required"` // development|staging|production|testing
	AgentType      string                 `json:"agent_type" binding:"required,oneof=linux-server windows-workstation docker-host"`
	Organization   string                 `json:"organization" binding:"required"`
	CustomSettings map[string]interface{} `json:"custom_settings,omitempty"` // Overrides merged onto the template base config
	DeploymentID   string                 `json:"deployment_id,omitempty"`   // Optional deployment correlation ID
	AgentID        string                 `json:"agent_id,omitempty"`        // Optional: existing agent ID for upgrades
}
// BuildAgentConfig builds a complete agent configuration for the requested
// agent type, environment and organization. Steps: validate the request,
// resolve the agent ID, fetch the server public key (cached), generate a
// registration token, render the template with custom settings, apply
// database scanner-timeout overrides and environment defaults, validate
// the result, then separate secrets (creating Docker secrets when needed).
//
// FIX: the restored file contained a duplicated validate/fetch/token/build
// sequence which redeclared agentID/serverPublicKey/registrationToken with
// := in the same scope and therefore did not compile; the duplicate (which
// also skipped the scanner-timeout override) has been removed.
func (cb *ConfigBuilder) BuildAgentConfig(req AgentSetupRequest) (*AgentConfiguration, error) {
	// Validate request up front so we fail fast on malformed input.
	if err := cb.validateRequest(req); err != nil {
		return nil, err
	}

	// Determine agent ID - reuse a valid provided ID (upgrades), else generate new.
	agentID := cb.determineAgentID(req.AgentID)

	// Fetch server public key (cached per server URL).
	serverPublicKey, err := cb.fetchServerPublicKey(req.ServerURL)
	if err != nil {
		return nil, fmt.Errorf("failed to fetch server public key: %w", err)
	}

	// Generate a registration token bound to this agent ID.
	registrationToken, err := cb.generateRegistrationToken(agentID)
	if err != nil {
		return nil, fmt.Errorf("failed to generate registration token: %w", err)
	}

	// Look up the template for the requested agent type.
	template, exists := cb.templates[req.AgentType]
	if !exists {
		return nil, fmt.Errorf("unknown agent type: %s", req.AgentType)
	}

	// Build base configuration from the template plus custom settings.
	config := cb.buildFromTemplate(template, req.CustomSettings)

	// Override scanner timeouts from database (user-configurable via web UI).
	cb.overrideScannerTimeoutsFromDB(config)

	// Inject deployment-specific values (IDs, token, key, org, environment).
	cb.injectDeploymentValues(config, req, agentID, registrationToken, serverPublicKey)

	// Apply environment-specific defaults (logging, check-in interval).
	cb.applyEnvironmentDefaults(config, req.Environment)

	// Validate final configuration against the template rules.
	if err := cb.validateConfiguration(config, template); err != nil {
		return nil, fmt.Errorf("configuration validation failed: %w", err)
	}

	// Separate sensitive and non-sensitive data.
	publicConfig, secrets := cb.separateSecrets(config)

	// Create Docker secrets if any sensitive values were extracted.
	var secretsCreated bool
	var secretsPath string
	if len(secrets) > 0 {
		secretsManager := NewSecretsManager()
		// Generate encryption key if not set.
		if secretsManager.GetEncryptionKey() == "" {
			key, err := secretsManager.GenerateEncryptionKey()
			if err != nil {
				return nil, fmt.Errorf("failed to generate encryption key: %w", err)
			}
			secretsManager.SetEncryptionKey(key)
		}
		if err := secretsManager.CreateDockerSecrets(secrets); err != nil {
			return nil, fmt.Errorf("failed to create Docker secrets: %w", err)
		}
		secretsCreated = true
		secretsPath = secretsManager.GetSecretsPath()
	}

	// Determine platform from agent type.
	platform := "linux-amd64" // Default
	if req.AgentType == "windows-workstation" {
		platform = "windows-amd64"
	}

	return &AgentConfiguration{
		AgentID:        agentID,
		PublicConfig:   publicConfig,
		Secrets:        secrets,
		Template:       req.AgentType,
		Environment:    req.Environment,
		ServerURL:      req.ServerURL,
		Organization:   req.Organization,
		Platform:       platform,
		ConfigVersion:  "5",        // Config schema version
		AgentVersion:   "0.1.23.6", // Agent binary version
		BuildTime:      time.Now(),
		SecretsCreated: secretsCreated,
		SecretsPath:    secretsPath,
	}, nil
}
// AgentConfiguration represents a complete agent configuration as returned
// by BuildAgentConfig: the non-sensitive config map plus the extracted
// secrets and deployment metadata.
type AgentConfiguration struct {
	AgentID        string                 `json:"agent_id"`
	PublicConfig   map[string]interface{} `json:"public_config"` // Config with sensitive fields removed
	Secrets        map[string]string      `json:"secrets"`       // Sensitive values extracted by separateSecrets
	Template       string                 `json:"template"`      // Agent type the config was built from
	Environment    string                 `json:"environment"`
	ServerURL      string                 `json:"server_url"`
	Organization   string                 `json:"organization"`
	Platform       string                 `json:"platform"`       // e.g. "linux-amd64" or "windows-amd64"
	ConfigVersion  string                 `json:"config_version"` // Config schema version (e.g. "5")
	AgentVersion   string                 `json:"agent_version"`  // Agent binary version (e.g. "0.1.23.6")
	BuildTime      time.Time              `json:"build_time"`
	SecretsCreated bool                   `json:"secrets_created"` // True when Docker secrets were created
	SecretsPath    string                 `json:"secrets_path,omitempty"`
}
// validateRequest ensures every mandatory field of the setup request is
// populated and that the requested agent type has a known template.
// Error messages use the JSON field names so they are actionable for API
// callers.
func (cb *ConfigBuilder) validateRequest(req AgentSetupRequest) error {
	required := []struct {
		value string
		name  string
	}{
		{req.ServerURL, "server_url"},
		{req.Environment, "environment"},
		{req.AgentType, "agent_type"},
		{req.Organization, "organization"},
	}
	for _, field := range required {
		if field.value == "" {
			return fmt.Errorf("%s is required", field.name)
		}
	}
	// Check if agent type exists
	if _, exists := cb.templates[req.AgentType]; !exists {
		return fmt.Errorf("unknown agent type: %s", req.AgentType)
	}
	return nil
}
// fetchServerPublicKey fetches the server's public key with caching.
// Keys are cached per server URL for the lifetime of the builder, so a
// server key rotation requires a new ConfigBuilder to be picked up.
// NOTE(review): publicKeyCache is read and written without a mutex; only
// safe if the builder is not shared across goroutines — confirm.
func (cb *ConfigBuilder) fetchServerPublicKey(serverURL string) (string, error) {
	// Check cache first
	if cached, exists := cb.publicKeyCache[serverURL]; exists {
		return cached, nil
	}
	// Fetch from server
	resp, err := cb.httpClient.Get(serverURL + "/api/v1/public-key")
	if err != nil {
		return "", fmt.Errorf("failed to fetch public key: %w", err)
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return "", fmt.Errorf("server returned status %d", resp.StatusCode)
	}
	var keyResp PublicKeyResponse
	if err := json.NewDecoder(resp.Body).Decode(&keyResp); err != nil {
		return "", fmt.Errorf("failed to decode public key response: %w", err)
	}
	// Cache the key
	cb.publicKeyCache[serverURL] = keyResp.PublicKey
	return keyResp.PublicKey, nil
}
// generateRegistrationToken generates a secure registration token that
// embeds the agent ID alongside 32 bytes of cryptographic randomness.
// The hex-encoded result is capped at 128 characters.
func (cb *ConfigBuilder) generateRegistrationToken(agentID string) (string, error) {
	random := make([]byte, 32)
	if _, err := rand.Read(random); err != nil {
		return "", err
	}
	// Prefix with the agent ID so tokens are unique per agent.
	payload := append([]byte(agentID), random...)
	token := hex.EncodeToString(payload)
	// Ensure the token doesn't exceed a reasonable length.
	if len(token) > 128 {
		return token[:128], nil
	}
	return token, nil
}
// buildFromTemplate materialises a configuration map from a template's
// base config and layers any caller-supplied custom settings on top.
// The base config is deep-copied so later mutation never touches the
// template itself.
func (cb *ConfigBuilder) buildFromTemplate(template AgentTemplate, customSettings map[string]interface{}) map[string]interface{} {
	config := make(map[string]interface{}, len(template.BaseConfig))
	for key, value := range template.BaseConfig {
		config[key] = cb.deepCopy(value)
	}
	// Custom settings win over template defaults.
	if len(customSettings) > 0 {
		cb.mergeSettings(config, customSettings)
	}
	return config
}
// injectDeploymentValues injects deployment-specific values into configuration.
// NOTE(review): the config schema version ("5") and agent binary version
// ("0.1.23.6") are hard-coded here and again in BuildAgentConfig — keep
// them in sync with the binary actually being served.
func (cb *ConfigBuilder) injectDeploymentValues(config map[string]interface{}, req AgentSetupRequest, agentID, registrationToken, serverPublicKey string) {
	config["version"] = "5"              // Config schema version (for migration system)
	config["agent_version"] = "0.1.23.6" // Agent binary version (MUST match the binary being served)
	config["server_url"] = req.ServerURL
	config["agent_id"] = agentID
	config["registration_token"] = registrationToken // Moved into secrets later by separateSecrets
	config["server_public_key"] = serverPublicKey    // Moved into secrets later by separateSecrets
	config["organization"] = req.Organization
	config["environment"] = req.Environment
	config["agent_type"] = req.AgentType
	// Deployment ID is optional; only set when provided.
	if req.DeploymentID != "" {
		config["deployment_id"] = req.DeploymentID
	}
}
// determineAgentID returns providedAgentID when it parses as a valid UUID
// (so upgrading agents keep their identity); otherwise a fresh UUID is
// generated.
func (cb *ConfigBuilder) determineAgentID(providedAgentID string) string {
	if providedAgentID == "" {
		return uuid.New().String()
	}
	if _, err := uuid.Parse(providedAgentID); err != nil {
		// Invalid UUID supplied - fall back to a new identity.
		return uuid.New().String()
	}
	return providedAgentID
}
// applyEnvironmentDefaults applies environment-specific configuration defaults
// (log verbosity/rotation and check-in polling interval) for the four known
// environments. Unknown environment names leave the config untouched.
// Because defaults are merged via mergeSettings, they overwrite any values
// already present for the same keys.
func (cb *ConfigBuilder) applyEnvironmentDefaults(config map[string]interface{}, environment string) {
	environmentDefaults := map[string]interface{}{
		"development": map[string]interface{}{
			"logging": map[string]interface{}{
				"level":       "debug",
				"max_size":    50,
				"max_backups": 2,
				"max_age":     7,
			},
			"check_in_interval": 60, // More frequent polling in development
		},
		"staging": map[string]interface{}{
			"logging": map[string]interface{}{
				"level":       "info",
				"max_size":    100,
				"max_backups": 3,
				"max_age":     14,
			},
			"check_in_interval": 180,
		},
		"production": map[string]interface{}{
			"logging": map[string]interface{}{
				"level":       "warn",
				"max_size":    200,
				"max_backups": 5,
				"max_age":     30,
			},
			"check_in_interval": 300, // 5 minutes for production
		},
		"testing": map[string]interface{}{
			"logging": map[string]interface{}{
				"level":       "debug",
				"max_size":    10,
				"max_backups": 1,
				"max_age":     1,
			},
			"check_in_interval": 30, // Very frequent for testing
		},
	}
	if defaults, exists := environmentDefaults[environment]; exists {
		if defaultsMap, ok := defaults.(map[string]interface{}); ok {
			cb.mergeSettings(config, defaultsMap)
		}
	}
}
// validateConfiguration validates the final configuration against the
// template's rules: required fields, allowed values, regex patterns and
// numeric constraints.
//
// FIX: Validation.Patterns was declared in every template (e.g.
// server_url: "^https?://.+") but never enforced; pattern matching is now
// applied to string-valued fields.
func (cb *ConfigBuilder) validateConfiguration(config map[string]interface{}, template AgentTemplate) error {
	// Check required fields
	for _, field := range template.Validation.RequiredFields {
		if _, exists := config[field]; !exists {
			return fmt.Errorf("required field missing: %s", field)
		}
	}
	// Validate allowed values
	for field, allowedValues := range template.Validation.AllowedValues {
		if value, exists := config[field]; exists {
			if strValue, ok := value.(string); ok {
				if !cb.containsString(allowedValues, strValue) {
					return fmt.Errorf("invalid value for %s: %s (allowed: %v)", field, strValue, allowedValues)
				}
			}
		}
	}
	// Validate regex patterns (previously declared but never checked)
	for field, pattern := range template.Validation.Patterns {
		if value, exists := config[field]; exists {
			if strValue, ok := value.(string); ok {
				matched, err := regexp.MatchString(pattern, strValue)
				if err != nil {
					return fmt.Errorf("invalid pattern for %s: %w", field, err)
				}
				if !matched {
					return fmt.Errorf("value for %s does not match pattern %s: %s", field, pattern, strValue)
				}
			}
		}
	}
	// Validate constraints
	for field, constraint := range template.Validation.Constraints {
		if value, exists := config[field]; exists {
			if err := cb.validateConstraint(field, value, constraint); err != nil {
				return err
			}
		}
	}
	return nil
}
// separateSecrets separates sensitive data from public configuration.
// It returns (publicConfig, secrets): publicConfig is a deep copy of the
// input with known sensitive fields removed; secrets maps secret names to
// their string values. Nested proxy credentials and TLS file paths are
// hoisted to the flat secret names proxy_*/tls_*. The caller's config map
// is never mutated (all deletions happen on the deep copy).
func (cb *ConfigBuilder) separateSecrets(config map[string]interface{}) (map[string]interface{}, map[string]string) {
	publicConfig := make(map[string]interface{})
	secrets := make(map[string]string)
	// Copy all values to public config initially
	for k, v := range config {
		publicConfig[k] = cb.deepCopy(v)
	}
	// Extract known sensitive fields (only string values are extracted;
	// non-string values under these keys are left in the public config)
	sensitiveFields := []string{
		"registration_token",
		"server_public_key",
	}
	for _, field := range sensitiveFields {
		if value, exists := publicConfig[field]; exists {
			if strValue, ok := value.(string); ok {
				secrets[field] = strValue
				delete(publicConfig, field)
			}
		}
	}
	// Extract nested sensitive fields
	if proxy, exists := publicConfig["proxy"].(map[string]interface{}); exists {
		if username, exists := proxy["username"].(string); exists && username != "" {
			secrets["proxy_username"] = username
			delete(proxy, "username")
		}
		if password, exists := proxy["password"].(string); exists && password != "" {
			secrets["proxy_password"] = password
			delete(proxy, "password")
		}
	}
	if tls, exists := publicConfig["tls"].(map[string]interface{}); exists {
		if certFile, exists := tls["cert_file"].(string); exists && certFile != "" {
			secrets["tls_cert"] = certFile
			delete(tls, "cert_file")
		}
		if keyFile, exists := tls["key_file"].(string); exists && keyFile != "" {
			secrets["tls_key"] = keyFile
			delete(tls, "key_file")
		}
		if caFile, exists := tls["ca_file"].(string); exists && caFile != "" {
			secrets["tls_ca"] = caFile
			delete(tls, "ca_file")
		}
	}
	return publicConfig, secrets
}
// Helper functions

// deepCopy returns a recursive copy of maps and slices; scalar values are
// returned as-is (they are copied by value anyway).
func (cb *ConfigBuilder) deepCopy(value interface{}) interface{} {
	switch v := value.(type) {
	case map[string]interface{}:
		out := make(map[string]interface{}, len(v))
		for key, item := range v {
			out[key] = cb.deepCopy(item)
		}
		return out
	case []interface{}:
		out := make([]interface{}, len(v))
		for idx, item := range v {
			out[idx] = cb.deepCopy(item)
		}
		return out
	default:
		return v
	}
}
// mergeSettings deep-merges source into target: when both sides hold a
// nested map for the same key the maps are merged recursively, otherwise
// the target value is overwritten with a deep copy of the source value.
func (cb *ConfigBuilder) mergeSettings(target map[string]interface{}, source map[string]interface{}) {
	for key, incoming := range source {
		existingMap, targetIsMap := target[key].(map[string]interface{})
		incomingMap, sourceIsMap := incoming.(map[string]interface{})
		if targetIsMap && sourceIsMap {
			cb.mergeSettings(existingMap, incomingMap)
			continue
		}
		target[key] = cb.deepCopy(incoming)
	}
}
// containsString reports whether item is present in slice.
func (cb *ConfigBuilder) containsString(slice []string, item string) bool {
	for i := 0; i < len(slice); i++ {
		if slice[i] == item {
			return true
		}
	}
	return false
}
// GetTemplates returns the available agent templates.
// NOTE(review): this rebuilds the full template map via getAgentTemplates()
// on every call instead of returning the cached cb.templates. The result is
// identical (cb.templates is initialised from the same function), and the
// fresh copy means callers cannot mutate the builder's internal state.
func (cb *ConfigBuilder) GetTemplates() map[string]AgentTemplate {
	return getAgentTemplates()
}
// GetTemplate looks up a single agent template by type name. The boolean
// result reports whether a template with that name exists.
func (cb *ConfigBuilder) GetTemplate(agentType string) (AgentTemplate, bool) {
	all := getAgentTemplates()
	tmpl, found := all[agentType]
	return tmpl, found
}
// validateConstraint checks a single field's value against a min/max
// constraint map.
//
// FIX: the original only recognised float64 values and bounds, so the
// int-typed constants used in the built-in templates (e.g.
// check_in_interval: 300 against {"min": 30, "max": 3600}) were silently
// never validated. Numeric values and bounds are now coerced to float64
// first; non-numeric values are still skipped.
func (cb *ConfigBuilder) validateConstraint(field string, value interface{}, constraint interface{}) error {
	constraints, ok := constraint.(map[string]interface{})
	if !ok {
		return nil
	}
	// toFloat coerces the numeric types that appear in template literals
	// (int) and in JSON-decoded configuration (float64).
	toFloat := func(v interface{}) (float64, bool) {
		switch n := v.(type) {
		case float64:
			return n, true
		case float32:
			return float64(n), true
		case int:
			return float64(n), true
		case int64:
			return float64(n), true
		}
		return 0, false
	}
	numValue, ok := toFloat(value)
	if !ok {
		// Non-numeric values have no min/max semantics.
		return nil
	}
	if minRaw, exists := constraints["min"]; exists {
		if minVal, ok := toFloat(minRaw); ok && numValue < minVal {
			return fmt.Errorf("value for %s is below minimum: %f < %f", field, numValue, minVal)
		}
	}
	if maxRaw, exists := constraints["max"]; exists {
		if maxVal, ok := toFloat(maxRaw); ok && numValue > maxVal {
			return fmt.Errorf("value for %s is above maximum: %f > %f", field, numValue, maxVal)
		}
	}
	return nil
}
// overrideScannerTimeoutsFromDB overrides scanner timeouts with values from database
// This allows users to configure scanner timeouts via the web UI.
// Timeouts are written as integer nanoseconds to match the template values.
// When no database connection is available the template defaults stand.
func (cb *ConfigBuilder) overrideScannerTimeoutsFromDB(config map[string]interface{}) {
	if cb.scannerConfigQ == nil {
		// No database connection, use defaults
		return
	}
	// Get subsystems section
	subsystems, exists := config["subsystems"].(map[string]interface{})
	if !exists {
		return
	}
	// List of scanners that can have configurable timeouts
	scannerNames := []string{"apt", "dnf", "docker", "windows", "winget", "system", "storage", "updates"}
	for _, scannerName := range scannerNames {
		scannerConfig, exists := subsystems[scannerName].(map[string]interface{})
		if !exists {
			// Scanner not present in this template - skip it.
			continue
		}
		// Get timeout from database (30-minute default when unset)
		timeout := cb.scannerConfigQ.GetScannerTimeoutWithDefault(scannerName, 30*time.Minute)
		scannerConfig["timeout"] = int(timeout.Nanoseconds())
	}
}
// getAgentTemplates returns the available agent templates.
// Three built-in templates exist: linux-server, windows-workstation and
// docker-host. All timeout/window/duration values are integer nanoseconds
// (e.g. 30000000000 = 30s, 1800000000000 = 30m). Scanners that do not
// apply to a platform are explicitly disabled rather than omitted.
func getAgentTemplates() map[string]AgentTemplate {
	return map[string]AgentTemplate{
		// Linux servers: apt/dnf/docker/storage scanners enabled.
		"linux-server": {
			Name:        "Linux Server Agent",
			Description: "Optimized for Linux server deployments with package management",
			BaseConfig: map[string]interface{}{
				"check_in_interval": 300,
				"network": map[string]interface{}{
					"timeout":       30000000000,
					"retry_count":   3,
					"retry_delay":   5000000000,
					"max_idle_conn": 10,
				},
				"proxy": map[string]interface{}{
					"enabled": false,
				},
				"tls": map[string]interface{}{
					"insecure_skip_verify": false,
				},
				"logging": map[string]interface{}{
					"level":       "info",
					"max_size":    100,
					"max_backups": 3,
					"max_age":     28,
				},
				"subsystems": map[string]interface{}{
					"apt": map[string]interface{}{
						"enabled": true,
						"timeout": 30000000000,
						"circuit_breaker": map[string]interface{}{
							"enabled":            true,
							"failure_threshold":  3,
							"failure_window":     600000000000,
							"open_duration":      1800000000000,
							"half_open_attempts": 2,
						},
					},
					"dnf": map[string]interface{}{
						"enabled": true,
						"timeout": 1800000000000, // 30 minutes - configurable via server settings
						"circuit_breaker": map[string]interface{}{
							"enabled":            true,
							"failure_threshold":  3,
							"failure_window":     600000000000,
							"open_duration":      1800000000000,
							"half_open_attempts": 2,
						},
					},
					"docker": map[string]interface{}{
						"enabled": true,
						"timeout": 60000000000,
						"circuit_breaker": map[string]interface{}{
							"enabled":            true,
							"failure_threshold":  3,
							"failure_window":     600000000000,
							"open_duration":      1800000000000,
							"half_open_attempts": 2,
						},
					},
					"windows": map[string]interface{}{
						"enabled": false,
					},
					"winget": map[string]interface{}{
						"enabled": false,
					},
					"storage": map[string]interface{}{
						"enabled": true,
						"timeout": 10000000000,
						"circuit_breaker": map[string]interface{}{
							"enabled":            true,
							"failure_threshold":  3,
							"failure_window":     600000000000,
							"open_duration":      1800000000000,
							"half_open_attempts": 2,
						},
					},
				},
			},
			Secrets: []string{"registration_token", "server_public_key"},
			Validation: ValidationRules{
				RequiredFields: []string{"server_url", "organization"},
				AllowedValues: map[string][]string{
					"environment": {"development", "staging", "production", "testing"},
				},
				Patterns: map[string]string{
					"server_url": "^https?://.+",
				},
				Constraints: map[string]interface{}{
					"check_in_interval": map[string]interface{}{"min": 30, "max": 3600},
				},
			},
		},
		// Windows workstations: windows-update and winget scanners enabled;
		// the windows scanner gets a more tolerant circuit breaker.
		"windows-workstation": {
			Name:        "Windows Workstation Agent",
			Description: "Optimized for Windows workstation deployments",
			BaseConfig: map[string]interface{}{
				"check_in_interval": 300,
				"network": map[string]interface{}{
					"timeout":       30000000000,
					"retry_count":   3,
					"retry_delay":   5000000000,
					"max_idle_conn": 10,
				},
				"proxy": map[string]interface{}{
					"enabled": false,
				},
				"tls": map[string]interface{}{
					"insecure_skip_verify": false,
				},
				"logging": map[string]interface{}{
					"level":       "info",
					"max_size":    100,
					"max_backups": 3,
					"max_age":     28,
				},
				"subsystems": map[string]interface{}{
					"apt": map[string]interface{}{
						"enabled": false,
					},
					"dnf": map[string]interface{}{
						"enabled": false,
					},
					"docker": map[string]interface{}{
						"enabled": false,
					},
					"windows": map[string]interface{}{
						"enabled": true,
						"timeout": 600000000000,
						"circuit_breaker": map[string]interface{}{
							"enabled":            true,
							"failure_threshold":  2,
							"failure_window":     900000000000,
							"open_duration":      3600000000000,
							"half_open_attempts": 3,
						},
					},
					"winget": map[string]interface{}{
						"enabled": true,
						"timeout": 120000000000,
						"circuit_breaker": map[string]interface{}{
							"enabled":            true,
							"failure_threshold":  3,
							"failure_window":     600000000000,
							"open_duration":      1800000000000,
							"half_open_attempts": 2,
						},
					},
					"storage": map[string]interface{}{
						"enabled": false,
					},
				},
			},
			Secrets: []string{"registration_token", "server_public_key"},
			Validation: ValidationRules{
				RequiredFields: []string{"server_url", "organization"},
				AllowedValues: map[string][]string{
					"environment": {"development", "staging", "production", "testing"},
				},
				Patterns: map[string]string{
					"server_url": "^https?://.+",
				},
				Constraints: map[string]interface{}{
					"check_in_interval": map[string]interface{}{"min": 30, "max": 3600},
				},
			},
		},
		// Docker hosts: only the docker scanner is enabled.
		"docker-host": {
			Name:        "Docker Host Agent",
			Description: "Optimized for Docker host deployments",
			BaseConfig: map[string]interface{}{
				"check_in_interval": 300,
				"network": map[string]interface{}{
					"timeout":       30000000000,
					"retry_count":   3,
					"retry_delay":   5000000000,
					"max_idle_conn": 10,
				},
				"proxy": map[string]interface{}{
					"enabled": false,
				},
				"tls": map[string]interface{}{
					"insecure_skip_verify": false,
				},
				"logging": map[string]interface{}{
					"level":       "info",
					"max_size":    100,
					"max_backups": 3,
					"max_age":     28,
				},
				"subsystems": map[string]interface{}{
					"apt": map[string]interface{}{
						"enabled": false,
					},
					"dnf": map[string]interface{}{
						"enabled": false,
					},
					"docker": map[string]interface{}{
						"enabled": true,
						"timeout": 60000000000,
						"circuit_breaker": map[string]interface{}{
							"enabled":            true,
							"failure_threshold":  3,
							"failure_window":     600000000000,
							"open_duration":      1800000000000,
							"half_open_attempts": 2,
						},
					},
					"windows": map[string]interface{}{
						"enabled": false,
					},
					"winget": map[string]interface{}{
						"enabled": false,
					},
					"storage": map[string]interface{}{
						"enabled": false,
					},
				},
			},
			Secrets: []string{"registration_token", "server_public_key"},
			Validation: ValidationRules{
				RequiredFields: []string{"server_url", "organization"},
				AllowedValues: map[string][]string{
					"environment": {"development", "staging", "production", "testing"},
				},
				Patterns: map[string]string{
					"server_url": "^https?://.+",
				},
				Constraints: map[string]interface{}{
					"check_in_interval": map[string]interface{}{"min": 30, "max": 3600},
				},
			},
		},
	}
}

56
db_investigation.sh Normal file
View File

@@ -0,0 +1,56 @@
#!/bin/bash
# RedFlag database investigation helper.
# Read-only diagnostics: prints container status, tries both credential
# sets, lists databases/tables, and dumps migration/user/security state.
# Errors from individual checks are suppressed (2>/dev/null) so the script
# always runs through every step. Run from the repo root (steps 10 reads
# docker-compose.yml and config/.env relative to the current directory).
echo "=== RedFlag Database Investigation ==="
echo
# Check if containers are running
echo "1. Checking container status..."
docker ps | grep -E "redflag|postgres"
echo
echo "2. Testing database connection with different credentials..."
# Try with postgres credentials
echo "Trying with postgres user:"
docker exec redflag-postgres psql -U postgres -c "SELECT current_database(), current_user;" 2>/dev/null
# Try with redflag credentials
echo "Trying with redflag user:"
docker exec redflag-postgres psql -U redflag -d redflag -c "SELECT current_database(), current_user;" 2>/dev/null
echo
echo "3. Listing databases:"
docker exec redflag-postgres psql -U postgres -c "\l" 2>/dev/null
echo
echo "4. Checking tables in redflag database:"
docker exec redflag-postgres psql -U postgres -d redflag -c "\dt" 2>/dev/null || echo "Failed to list tables"
echo
echo "5. Checking migration status:"
docker exec redflag-postgres psql -U postgres -d redflag -c "SELECT version, applied_at FROM schema_migrations ORDER BY version;" 2>/dev/null || echo "No schema_migrations table found"
echo
echo "6. Checking users table:"
docker exec redflag-postgres psql -U postgres -d redflag -c "SELECT id, username, email, created_at FROM users LIMIT 5;" 2>/dev/null || echo "Users table not found"
echo
echo "7. Checking for security_* tables:"
docker exec redflag-postgres psql -U postgres -d redflag -c "\dt security_*" 2>/dev/null || echo "No security_* tables found"
echo
echo "8. Checking agent_commands table for signature column:"
docker exec redflag-postgres psql -U postgres -d redflag -c "\d agent_commands" 2>/dev/null | grep signature || echo "Signature column not found"
echo
echo "9. Checking recent logs from server:"
docker logs redflag-server 2>&1 | tail -20
echo
echo "10. Password configuration check:"
echo "From docker-compose.yml POSTGRES_PASSWORD:"
grep "POSTGRES_PASSWORD:" docker-compose.yml
echo "From config/.env POSTGRES_PASSWORD:"
grep "POSTGRES_PASSWORD:" config/.env
echo "From config/.env REDFLAG_DB_PASSWORD:"
grep "REDFLAG_DB_PASSWORD:" config/.env

0
docker Normal file
View File

136
fix_agent_permissions.sh Normal file
View File

@@ -0,0 +1,136 @@
#!/bin/bash
# Fix RedFlag Agent Permissions Script
# This script fixes the systemd service permissions for the agent
#
# Purpose: adds a ReadWritePaths= directive to the redflag-agent systemd
# unit so the sandboxed service can write to its state, config, and log
# directories, then creates/chowns those directories, reloads systemd,
# restarts the service, and prints status + recent logs for verification.
# Must run as root; re-execs itself under sudo otherwise.
set -e
echo "🔧 RedFlag Agent Permission Fix Script"
echo "======================================"
echo ""
# Check if running as root or with sudo
if [ "$EUID" -ne 0 ]; then
echo "This script needs sudo privileges to modify systemd service files."
echo "You'll be prompted for your password."
echo ""
# Re-exec this exact script under sudo, forwarding any arguments;
# exec replaces the current process so the script does not run twice.
exec sudo "$0" "$@"
fi
echo "✅ Running with sudo privileges"
echo ""
# Step 1: Check current systemd service
echo "📋 Step 1: Checking current systemd service..."
SERVICE_FILE="/etc/systemd/system/redflag-agent.service"
if [ ! -f "$SERVICE_FILE" ]; then
echo "❌ Service file not found: $SERVICE_FILE"
exit 1
fi
echo "✅ Service file found: $SERVICE_FILE"
echo ""
# Step 2: Check if ReadWritePaths is already configured
# (Informational only — Step 4 does its own, more specific check.)
echo "📋 Step 2: Checking current service configuration..."
if grep -q "ReadWritePaths=" "$SERVICE_FILE"; then
echo "✅ ReadWritePaths already configured"
grep "ReadWritePaths=" "$SERVICE_FILE"
else
echo "⚠️ ReadWritePaths not found - needs to be added"
fi
echo ""
# Step 3: Backup original service file
# Timestamped copy so repeated runs never overwrite an earlier backup.
echo "💾 Step 3: Creating backup of service file..."
cp "$SERVICE_FILE" "${SERVICE_FILE}.backup.$(date +%Y%m%d_%H%M%S)"
echo "✅ Backup created"
echo ""
# Step 4: Add ReadWritePaths to service file
echo "🔧 Step 4: Adding ReadWritePaths to service file..."
# Check if [Service] section exists
if ! grep -q "^\[Service\]" "$SERVICE_FILE"; then
echo "❌ [Service] section not found in service file"
exit 1
fi
# Add ReadWritePaths after [Service] section if not already present
# Guarded on the /var/lib/redflag path specifically, so an unrelated
# existing ReadWritePaths= line does not block the insertion
# (systemd merges multiple ReadWritePaths= lines).
if ! grep -q "ReadWritePaths=/var/lib/redflag" "$SERVICE_FILE"; then
# Use sed to add the line after [Service]
sed -i '/^\[Service\]/a ReadWritePaths=/var/lib/redflag /etc/redflag /var/log/redflag' "$SERVICE_FILE"
echo "✅ ReadWritePaths added to service file"
else
echo "✅ ReadWritePaths already present"
fi
echo ""
# Step 5: Show the updated service file
echo "📄 Step 5: Updated service file:"
echo "--------------------------------"
grep -A 20 "^\[Service\]" "$SERVICE_FILE" | head -25
echo "--------------------------------"
echo ""
# Step 6: Create necessary directories
echo "📁 Step 6: Creating necessary directories..."
mkdir -p /var/lib/redflag/migration_backups
mkdir -p /var/log/redflag
mkdir -p /etc/redflag
echo "✅ Directories created/verified"
echo ""
# Step 7: Set proper permissions
# NOTE(review): this chowns /var/lib/redflag, while the install template
# elsewhere in this commit lists AgentHome as /var/lib/redflag-agent —
# confirm which path the agent actually uses.
echo "🔐 Step 7: Setting permissions..."
if id "redflag-agent" &>/dev/null; then
chown -R redflag-agent:redflag-agent /var/lib/redflag
chown -R redflag-agent:redflag-agent /var/log/redflag
echo "✅ Permissions set for redflag-agent user"
else
echo "⚠️ redflag-agent user not found - skipping permission setting"
fi
echo ""
# Step 8: Reload systemd
# daemon-reload makes systemd pick up the edited unit file.
echo "🔄 Step 8: Reloading systemd..."
systemctl daemon-reload
sleep 2
echo "✅ Systemd reloaded"
echo ""
# Step 9: Restart the agent
echo "🚀 Step 9: Restarting redflag-agent service..."
systemctl restart redflag-agent
sleep 3
echo "✅ Service restarted"
echo ""
# Step 10: Check service status
echo "📊 Step 10: Checking service status..."
echo "--------------------------------"
systemctl status redflag-agent --no-pager -n 10
echo "--------------------------------"
echo ""
# Step 11: Check logs
echo "📝 Step 11: Recent logs..."
echo "--------------------------------"
journalctl -u redflag-agent -n 20 --no-pager
echo "--------------------------------"
echo ""
echo "🎉 Script completed!"
echo ""
echo "Next steps:"
echo "1. Wait 30 seconds for agent to stabilize"
echo "2. Run: sudo journalctl -u redflag-agent -f"
echo "3. Check if agent registers successfully"
echo "4. Verify in UI: http://localhost:3000/agents"
echo ""
echo "If the agent still fails, check:"
echo "- Database connection in /etc/redflag/config.json"
echo "- Network connectivity to aggregator-server"
echo "- Token validity in the database"

35
restart_and_fix.sh Normal file
View File

@@ -0,0 +1,35 @@
#!/bin/bash
# RedFlag database recovery script.
#
# Stops the docker-compose stack, deletes the postgres data volume so the
# database is recreated from scratch (migrations re-run on next startup),
# restarts the stack, waits for postgres to accept connections, and tails
# the server logs for verification.
#
# WARNING: removing the volume destroys ALL existing database data.
echo "=== RedFlag Database Recovery Script ==="
echo
echo "1. Stopping containers..."
docker-compose down
echo
echo "2. Removing postgres volume to reset database (WARNING: This will delete all data)..."
# Errors suppressed: the volume may simply not exist yet (fresh checkout).
docker volume rm redflag_postgres-data 2>/dev/null
echo
echo "3. Starting containers with fixed configuration..."
docker-compose up -d
echo
echo "4. Waiting for database to be ready..."
# Poll pg_isready instead of a single fixed 10-second sleep: a fixed delay
# is slower than necessary on fast hosts and flaky on slow ones.
# Up to 30 attempts x 2s = 60 seconds.
db_ready=0
for _ in $(seq 1 30); do
    if docker exec redflag-postgres pg_isready -U redflag >/dev/null 2>&1; then
        db_ready=1
        break
    fi
    sleep 2
done
if [ "$db_ready" -eq 1 ]; then
    # Re-run visibly so the user sees the same status line as before.
    docker exec redflag-postgres pg_isready -U redflag
else
    echo "WARNING: database did not become ready within 60 seconds" >&2
fi
echo
echo "5. Checking server logs (tail):"
echo "=========================="
docker logs redflag-server --tail 50
echo
echo "=========================="
echo "If migrations ran successfully, you should see:"
echo "- Database migrations completed message"
echo "- Default security settings initialized"
echo "- Admin user ensured"
echo
echo "The server should now be accessible at http://localhost:8080"
# NOTE(review): plaintext admin credentials are echoed below and committed
# to the repository — rotate this password and consider removing it here.
echo "Login with admin / Qu@ntum21!"

2
sudo Normal file
View File

@@ -0,0 +1,2 @@
# Error: registration token is required
# Please include token in URL: ?token=YOUR_TOKEN

BIN
test-binary Normal file

Binary file not shown.

54
test_install_commands.sh Normal file
View File

@@ -0,0 +1,54 @@
#!/bin/bash
# Test script to verify the installation command generation fixes.
#
# Probes the Linux install endpoint on the local server, then prints the
# corrected installation commands, the template variables now supplied to
# the install template, and a summary of the fixes. Output is purely
# informational; the script never fails on an unreachable server.

printf '%s\n' "=== Testing RedFlag Agent Installation Command Generation ==="
printf '\n'

# Test 1: endpoint availability — report either way, never abort.
printf '%s\n' "1. Testing API endpoint availability..."
if curl -sfL "http://localhost:8080/api/v1/install/linux" > /dev/null 2>&1; then
    printf '%s\n' "✓ API endpoint /api/v1/install/linux is accessible"
else
    printf '%s\n' "⚠ API endpoint not accessible (server may not be running)"
fi
printf '\n'

# Tests 2-3 and the summary are entirely static text, so they are emitted
# in a single quoted heredoc (no expansion) rather than one echo per line.
cat <<'EOF'
2. Corrected Installation Commands:
-----------------------------------

For Registration Token API (when creating a new token):
curl -sfL "http://localhost:8080/api/v1/install/linux?token=YOUR_TOKEN_HERE" | sudo bash

For Agent Settings UI (Linux):
curl -sfL "http://localhost:8080/api/v1/install/linux?token=YOUR_TOKEN_HERE" | sudo bash

For Agent Settings UI (Windows PowerShell):
iwr "http://localhost:8080/api/v1/install/windows?token=YOUR_TOKEN_HERE" -OutFile install.bat; .\install.bat

3. Template Variables Check:
-----------------------------
The following variables are now provided to the install template:
- AgentUser: redflag-agent
- AgentHome: /var/lib/redflag-agent
- ConfigDir: /etc/redflag
- LogDir: /var/log/redflag

=== Summary of Fixes ===
✓ Fixed registration token API command generation
✓ Fixed agent settings UI command generation (both TokenManagement and AgentManagement)
✓ Fixed template error by adding .AgentUser and related variables
✓ Changed from command-line args to query parameters for token passing
✓ Added proper protocol handling (http:// for localhost)
✓ Added sudo to the bash command for proper permissions

All installation commands now use the correct format:
curl -sfL "http://localhost:8080/api/v1/install/linux?token=<TOKEN>" | sudo bash

EOF