From c95cc7d91f5b725b7f5e1de0f8b370bcacd08af6 Mon Sep 17 00:00:00 2001
From: Fimeg
Date: Mon, 10 Nov 2025 21:20:42 -0500
Subject: [PATCH] feat: automatic migration system, Docker secrets support, and agent build pipeline
Adds agent auto-migration (config schema v4 -> v5, path moves to /etc/redflag
and /var/lib/redflag, timestamped backups), Docker secrets detection with
AES-256-GCM encryption helpers, server-side agent builder / secrets-manager /
update-nonce services, machine-binding middleware, new web UI components
(AgentUpdate, RelayList), and an install script. Also removes the unused
legacy handleScanUpdates scanner function from the agent.
Net change: 5,899 insertions, 567 deletions across 32 files.
---
.MIGRATION_STRATEGY.md.kate-swp | Bin 0 -> 139 bytes
README.md | 61 +-
aggregator-agent/cmd/agent/main.go | 171 +--
aggregator-agent/internal/config/config.go | 6 +-
aggregator-agent/internal/config/docker.go | 183 +++
.../internal/migration/detection.go | 37 +-
aggregator-agent/internal/migration/docker.go | 393 +++++++
.../internal/migration/docker_executor.go | 342 ++++++
.../internal/migration/executor.go | 17 +-
aggregator-server/cmd/server/main.go | 78 +-
.../internal/api/handlers/agent_build.go | 186 +++
.../internal/api/handlers/agent_setup.go | 79 ++
.../api/handlers/build_orchestrator.go | 229 ++++
.../internal/api/handlers/downloads.go | 1000 +++++++++++------
.../internal/api/handlers/security.go | 2 +
.../internal/api/handlers/setup.go | 7 +
.../api/middleware/machine_binding.go | 129 +++
.../internal/database/queries/agents.go | 44 +
.../internal/services/agent_builder.go | 380 +++++++
.../internal/services/build_types.go | 318 ++++++
.../internal/services/config_builder.go | 727 ++++++++++++
.../internal/services/secrets_manager.go | 263 +++++
.../internal/services/update_nonce.go | 90 ++
aggregator-web/src/components/AgentUpdate.tsx | 200 ++++
aggregator-web/src/components/RelayList.tsx | 208 ++++
aggregator-web/src/hooks/useAgentUpdate.ts | 159 +++
aggregator-web/src/hooks/useSecurity.ts | 25 +
aggregator-web/src/pages/Agents.tsx | 23 +-
aggregator-web/src/pages/Dashboard.tsx | 16 +
.../src/pages/settings/AgentManagement.tsx | 86 ++
discord/discord_manager.py | 624 +++++++++-
install.sh | 383 +++++++
32 files changed, 5899 insertions(+), 567 deletions(-)
create mode 100644 .MIGRATION_STRATEGY.md.kate-swp
create mode 100644 aggregator-agent/internal/config/docker.go
create mode 100644 aggregator-agent/internal/migration/docker.go
create mode 100644 aggregator-agent/internal/migration/docker_executor.go
create mode 100644 aggregator-server/internal/api/handlers/agent_build.go
create mode 100644 aggregator-server/internal/api/handlers/agent_setup.go
create mode 100644 aggregator-server/internal/api/handlers/build_orchestrator.go
create mode 100644 aggregator-server/internal/services/agent_builder.go
create mode 100644 aggregator-server/internal/services/build_types.go
create mode 100644 aggregator-server/internal/services/config_builder.go
create mode 100644 aggregator-server/internal/services/secrets_manager.go
create mode 100644 aggregator-server/internal/services/update_nonce.go
create mode 100644 aggregator-web/src/components/AgentUpdate.tsx
create mode 100644 aggregator-web/src/components/RelayList.tsx
create mode 100644 aggregator-web/src/hooks/useAgentUpdate.ts
create mode 100644 aggregator-web/src/hooks/useSecurity.ts
create mode 100755 install.sh
diff --git a/.MIGRATION_STRATEGY.md.kate-swp b/.MIGRATION_STRATEGY.md.kate-swp
new file mode 100644
index 0000000000000000000000000000000000000000..aa66ab2223b568b5b97b4b92beeaa623785f90c3
GIT binary patch
literal 139
zcmZQzU=Z?7EJ;-eE>A2_aLdd|RWQ;sU|?Vnk(qL4XT7?fro*xCQ(w#qUaY+9_JQCa
t1_nk)pez><^Lrw37;{{MVSH|&Bx5Fm&jaG;A^5x?eky{`2jUmG0syB186p4x
literal 0
HcmV?d00001
diff --git a/README.md b/README.md
index 1b311c7..5c8d4c5 100644
--- a/README.md
+++ b/README.md
@@ -65,58 +65,35 @@ RedFlag lets you manage software updates across all your servers from one dashbo
---
-## 🚨 Breaking Changes (v0.1.23)
+## 🚨 Breaking Changes & Automatic Migration (v0.1.23)
-**THIS IS NOT A SIMPLE UPDATE** - Complete rearchitecture from monolithic to multi-subsystem security architecture.
+**THIS IS NOT A SIMPLE UPDATE** - This version introduces a complete rearchitecture from a monolithic to a multi-subsystem security architecture. However, we've built a comprehensive migration system to handle the upgrade for you.
### **What Changed**
-- **Security**: Machine binding enforcement (v0.1.22+ minimum), Ed25519 signing required
-- **Architecture**: Single scan → Multi-subsystem (storage, system, docker, packages)
-- **Paths**: `/var/lib/aggregator/` → `/var/lib/redflag/agent/`, `/etc/aggregator/` → `/etc/redflag/agent/`
-- **Database**: Separate tables for metrics, docker images, storage metrics
-- **UI**: New approval/reject workflow, real security metrics, frosted glass design
+- **Security**: Machine binding enforcement (v0.1.22+ minimum), Ed25519 signing required.
+- **Architecture**: Single scan → Multi-subsystem (storage, system, docker, packages).
+- **Paths**: The agent now uses `/etc/redflag/` and `/var/lib/redflag/`. The migration system will move your old files from `/etc/aggregator/` and `/var/lib/aggregator/`.
+- **Database**: The server now uses separate tables for metrics, docker images, and storage metrics.
+- **UI**: New approval/reject workflow, real security metrics, and a frosted glass design.
-### **RECOMMENDED: Full Uninstall & Fresh Install**
+### **Automatic Migration**
+The agent now includes an automatic migration system that will run on the first start after the upgrade. Here's how it works:
-```bash
-# COMPLETE UNINSTALL - Remove all previous versions
-sudo systemctl stop redflag-agent 2>/dev/null || true
-sudo systemctl disable redflag-agent 2>/dev/null || true
-sudo rm -f /etc/systemd/system/redflag-agent.service
-sudo systemctl daemon-reload
-sudo userdel redflag-agent 2>/dev/null || true
+1. **Detection**: The agent will detect your old installation (`/etc/aggregator`, old config version).
+2. **Backup**: It will create a timestamped backup of your old configuration and state in `/etc/redflag.backup.{timestamp}/`.
+3. **Migration**: It will move your files to the new paths (`/etc/redflag/`, `/var/lib/redflag/`), update your configuration file to the latest version, and enable the new security features.
+4. **Validation**: The agent will validate the migration and then start normally.
-# REMOVE ALL OLD DATA
-sudo rm -rf /var/lib/aggregator/
-sudo rm -rf /var/lib/redflag/
-sudo rm -rf /etc/aggregator/
-sudo rm -rf /etc/redflag/
+**What you need to do:**
-# REMOVE DOCKER STUFF (BE SURE YOU'RE IN REDFLAG FOLDER)
-cd /path/to/RedFlag # IMPORTANT: Be in RedFlag directory
-docker-compose down -v 2>/dev/null || true
-docker system prune -f 2>/dev/null || true
-```
+- **Run the agent with elevated privileges (sudo) for the first run after the upgrade.** The migration process needs root access to move files and create backups in `/etc/`.
+- That's it. The agent will handle the rest.
-### **Manual Migration (Advanced Users Only)**
-
-If you really need to preserve data:
-
-1. **Backup old data**:
-```bash
-sudo cp -r /var/lib/aggregator/ ~/aggregator-backup
-sudo cp -r /etc/aggregator/ ~/aggregator-config-backup
-```
-
-2. **Follow fresh install instructions** below
-3. **Manual data migration** (not supported - you're on your own)
-
-### **No Support for Automatic Migration**
-
-At this alpha stage, automated migration is not worth the complexity. The new architecture is fundamentally different and migration would be fragile.
+### **Manual Intervention (Only if something goes wrong)**
+If the automatic migration fails, you can find a backup of your old configuration in `/etc/redflag.backup.{timestamp}/`. You can then manually restore your old setup and report the issue.
**Need Migration Help?**
-If you're one of the few existing v0.1.18 users who needs migration support, join our Discord server and ask - I'll help you through it manually.
+If you run into any issues with the automatic migration, join our Discord server and ask for help.
---
diff --git a/aggregator-agent/cmd/agent/main.go b/aggregator-agent/cmd/agent/main.go
index 0cf2097..8075c4a 100644
--- a/aggregator-agent/cmd/agent/main.go
+++ b/aggregator-agent/cmd/agent/main.go
@@ -237,6 +237,7 @@ func main() {
Detection: migrationDetection,
TargetVersion: AgentVersion,
Config: migrationConfig,
+ BackupPath: filepath.Join(getStatePath(), "migration_backups"), // Set backup path within agent's state directory
}
// Execute migration
@@ -981,176 +982,6 @@ func subsystemScan(name string, cb *circuitbreaker.CircuitBreaker, timeout time.
return updates, scanErr
}
-func handleScanUpdates(apiClient *client.Client, cfg *config.Config, ackTracker *acknowledgment.Tracker, aptScanner *scanner.APTScanner, dnfScanner *scanner.DNFScanner, dockerScanner *scanner.DockerScanner, windowsUpdateScanner *scanner.WindowsUpdateScanner, wingetScanner *scanner.WingetScanner, aptCB, dnfCB, dockerCB, windowsCB, wingetCB *circuitbreaker.CircuitBreaker, commandID string) error {
- log.Println("Scanning for updates...")
-
- var allUpdates []client.UpdateReportItem
- var scanErrors []string
- var scanResults []string
-
- // Scan APT updates
- if aptScanner.IsAvailable() && cfg.Subsystems.APT.Enabled {
- log.Println(" - Scanning APT packages...")
- updates, err := subsystemScan("APT", aptCB, cfg.Subsystems.APT.Timeout, aptScanner.Scan)
- if err != nil {
- errorMsg := fmt.Sprintf("APT scan failed: %v", err)
- log.Printf(" %s\n", errorMsg)
- scanErrors = append(scanErrors, errorMsg)
- } else {
- resultMsg := fmt.Sprintf("Found %d APT updates", len(updates))
- log.Printf(" %s\n", resultMsg)
- scanResults = append(scanResults, resultMsg)
- allUpdates = append(allUpdates, updates...)
- }
- } else if !cfg.Subsystems.APT.Enabled {
- scanResults = append(scanResults, "APT scanner disabled")
- } else {
- scanResults = append(scanResults, "APT scanner not available")
- }
-
- // Scan DNF updates
- if dnfScanner.IsAvailable() && cfg.Subsystems.DNF.Enabled {
- log.Println(" - Scanning DNF packages...")
- updates, err := subsystemScan("DNF", dnfCB, cfg.Subsystems.DNF.Timeout, dnfScanner.Scan)
- if err != nil {
- errorMsg := fmt.Sprintf("DNF scan failed: %v", err)
- log.Printf(" %s\n", errorMsg)
- scanErrors = append(scanErrors, errorMsg)
- } else {
- resultMsg := fmt.Sprintf("Found %d DNF updates", len(updates))
- log.Printf(" %s\n", resultMsg)
- scanResults = append(scanResults, resultMsg)
- allUpdates = append(allUpdates, updates...)
- }
- } else if !cfg.Subsystems.DNF.Enabled {
- scanResults = append(scanResults, "DNF scanner disabled")
- } else {
- scanResults = append(scanResults, "DNF scanner not available")
- }
-
- // Scan Docker updates
- if dockerScanner != nil && dockerScanner.IsAvailable() && cfg.Subsystems.Docker.Enabled {
- log.Println(" - Scanning Docker images...")
- updates, err := subsystemScan("Docker", dockerCB, cfg.Subsystems.Docker.Timeout, dockerScanner.Scan)
- if err != nil {
- errorMsg := fmt.Sprintf("Docker scan failed: %v", err)
- log.Printf(" %s\n", errorMsg)
- scanErrors = append(scanErrors, errorMsg)
- } else {
- resultMsg := fmt.Sprintf("Found %d Docker image updates", len(updates))
- log.Printf(" %s\n", resultMsg)
- scanResults = append(scanResults, resultMsg)
- allUpdates = append(allUpdates, updates...)
- }
- } else if !cfg.Subsystems.Docker.Enabled {
- scanResults = append(scanResults, "Docker scanner disabled")
- } else {
- scanResults = append(scanResults, "Docker scanner not available")
- }
-
- // Scan Windows updates
- if windowsUpdateScanner.IsAvailable() && cfg.Subsystems.Windows.Enabled {
- log.Println(" - Scanning Windows updates...")
- updates, err := subsystemScan("Windows Update", windowsCB, cfg.Subsystems.Windows.Timeout, windowsUpdateScanner.Scan)
- if err != nil {
- errorMsg := fmt.Sprintf("Windows Update scan failed: %v", err)
- log.Printf(" %s\n", errorMsg)
- scanErrors = append(scanErrors, errorMsg)
- } else {
- resultMsg := fmt.Sprintf("Found %d Windows updates", len(updates))
- log.Printf(" %s\n", resultMsg)
- scanResults = append(scanResults, resultMsg)
- allUpdates = append(allUpdates, updates...)
- }
- } else if !cfg.Subsystems.Windows.Enabled {
- scanResults = append(scanResults, "Windows Update scanner disabled")
- } else {
- scanResults = append(scanResults, "Windows Update scanner not available")
- }
-
- // Scan Winget packages
- if wingetScanner.IsAvailable() && cfg.Subsystems.Winget.Enabled {
- log.Println(" - Scanning Winget packages...")
- updates, err := subsystemScan("Winget", wingetCB, cfg.Subsystems.Winget.Timeout, wingetScanner.Scan)
- if err != nil {
- errorMsg := fmt.Sprintf("Winget scan failed: %v", err)
- log.Printf(" %s\n", errorMsg)
- scanErrors = append(scanErrors, errorMsg)
- } else {
- resultMsg := fmt.Sprintf("Found %d Winget package updates", len(updates))
- log.Printf(" %s\n", resultMsg)
- scanResults = append(scanResults, resultMsg)
- allUpdates = append(allUpdates, updates...)
- }
- } else if !cfg.Subsystems.Winget.Enabled {
- scanResults = append(scanResults, "Winget scanner disabled")
- } else {
- scanResults = append(scanResults, "Winget scanner not available")
- }
-
- // Report scan results to server (both successes and failures)
- success := len(allUpdates) > 0 || len(scanErrors) == 0
- var combinedOutput string
-
- // Combine all scan results
- if len(scanResults) > 0 {
- combinedOutput += "Scan Results:\n" + strings.Join(scanResults, "\n")
- }
- if len(scanErrors) > 0 {
- if combinedOutput != "" {
- combinedOutput += "\n"
- }
- combinedOutput += "Scan Errors:\n" + strings.Join(scanErrors, "\n")
- }
- if len(allUpdates) > 0 {
- if combinedOutput != "" {
- combinedOutput += "\n"
- }
- combinedOutput += fmt.Sprintf("Total Updates Found: %d", len(allUpdates))
- }
-
- // Create scan log entry
- logReport := client.LogReport{
- CommandID: commandID,
- Action: "scan_updates",
- Result: map[bool]string{true: "success", false: "failure"}[success],
- Stdout: combinedOutput,
- Stderr: strings.Join(scanErrors, "\n"),
- ExitCode: map[bool]int{true: 0, false: 1}[success],
- DurationSeconds: 0, // Could track scan duration if needed
- }
-
- // Report the scan log
- if err := reportLogWithAck(apiClient, cfg, ackTracker, logReport); err != nil {
- log.Printf("Failed to report scan log: %v\n", err)
- // Continue anyway - updates are more important
- }
-
- // Report updates to server if any were found
- if len(allUpdates) > 0 {
- report := client.UpdateReport{
- CommandID: commandID,
- Timestamp: time.Now(),
- Updates: allUpdates,
- }
-
- if err := apiClient.ReportUpdates(cfg.AgentID, report); err != nil {
- return fmt.Errorf("failed to report updates: %w", err)
- }
-
- log.Printf("✓ Reported %d updates to server\n", len(allUpdates))
- } else {
- log.Println("✓ No updates found")
- }
-
- // Return error if there were any scan failures
- if len(scanErrors) > 0 && len(allUpdates) == 0 {
- return fmt.Errorf("all scanners failed: %s", strings.Join(scanErrors, "; "))
- }
-
- return nil
-}
-
// handleScanCommand performs a local scan and displays results
func handleScanCommand(cfg *config.Config, exportFormat string) error {
// Initialize scanners
diff --git a/aggregator-agent/internal/config/config.go b/aggregator-agent/internal/config/config.go
index 71fa255..adadd8b 100644
--- a/aggregator-agent/internal/config/config.go
+++ b/aggregator-agent/internal/config/config.go
@@ -206,9 +206,9 @@ func loadFromFile(configPath string) (*Config, error) {
// migrateConfig handles specific known migrations between config versions
func migrateConfig(cfg *Config) {
// Update config schema version to latest
- if cfg.Version != "4" {
- fmt.Printf("[CONFIG] Migrating config schema from version %s to 4\n", cfg.Version)
- cfg.Version = "4"
+ if cfg.Version != "5" {
+ fmt.Printf("[CONFIG] Migrating config schema from version %s to 5\n", cfg.Version)
+ cfg.Version = "5"
}
// Migration 1: Ensure minimum check-in interval (30 seconds)
diff --git a/aggregator-agent/internal/config/docker.go b/aggregator-agent/internal/config/docker.go
new file mode 100644
index 0000000..974ccff
--- /dev/null
+++ b/aggregator-agent/internal/config/docker.go
@@ -0,0 +1,183 @@
+package config
+
+import (
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "runtime"
+ "strings"
+)
+
+// DockerSecretsConfig holds Docker secrets configuration
+type DockerSecretsConfig struct {
+ Enabled bool `json:"enabled"`
+ SecretsPath string `json:"secrets_path"`
+ EncryptionKey string `json:"encryption_key,omitempty"`
+ Secrets map[string]string `json:"secrets,omitempty"`
+}
+
+// LoadDockerConfig loads Docker configuration if available
+func LoadDockerConfig(configPath string) (*DockerSecretsConfig, error) {
+ dockerConfigPath := filepath.Join(configPath, "docker.json")
+
+ // Check if Docker config exists
+ if _, err := os.Stat(dockerConfigPath); os.IsNotExist(err) {
+ return &DockerSecretsConfig{Enabled: false}, nil
+ }
+
+ data, err := ioutil.ReadFile(dockerConfigPath)
+ if err != nil {
+ return nil, fmt.Errorf("failed to read Docker config: %w", err)
+ }
+
+ var dockerConfig DockerSecretsConfig
+ if err := json.Unmarshal(data, &dockerConfig); err != nil {
+ return nil, fmt.Errorf("failed to parse Docker config: %w", err)
+ }
+
+ // Set default secrets path if not specified
+ if dockerConfig.SecretsPath == "" {
+ dockerConfig.SecretsPath = getDefaultSecretsPath()
+ }
+
+ return &dockerConfig, nil
+}
+
+// getDefaultSecretsPath returns the default Docker secrets path for the platform
+func getDefaultSecretsPath() string {
+ if runtime.GOOS == "windows" {
+ return `C:\ProgramData\Docker\secrets`
+ }
+ return "/run/secrets"
+}
+
+// ReadSecret reads a secret from Docker secrets or falls back to file
+func ReadSecret(secretName, fallbackPath string, dockerConfig *DockerSecretsConfig) ([]byte, error) {
+ // Try Docker secrets first if enabled
+ if dockerConfig != nil && dockerConfig.Enabled {
+ secretPath := filepath.Join(dockerConfig.SecretsPath, secretName)
+ if data, err := ioutil.ReadFile(secretPath); err == nil {
+ fmt.Printf("[DOCKER] Read secret from Docker: %s\n", secretName)
+ return data, nil
+ }
+ }
+
+ // Fall back to file system
+ if fallbackPath != "" {
+ if data, err := ioutil.ReadFile(fallbackPath); err == nil {
+ fmt.Printf("[CONFIG] Read secret from file: %s\n", fallbackPath)
+ return data, nil
+ }
+ }
+
+ return nil, fmt.Errorf("secret not found: %s", secretName)
+}
+
+// MergeConfigWithSecrets merges configuration with Docker secrets
+func MergeConfigWithSecrets(config *Config, dockerConfig *DockerSecretsConfig) error {
+ if dockerConfig == nil || !dockerConfig.Enabled {
+ return nil
+ }
+
+ // If there's an encrypted config, decrypt and merge it
+ if encryptedConfigPath, exists := dockerConfig.Secrets["config"]; exists {
+ if err := mergeEncryptedConfig(config, encryptedConfigPath, dockerConfig.EncryptionKey); err != nil {
+ return fmt.Errorf("failed to merge encrypted config: %w", err)
+ }
+ }
+
+ // Apply other secrets to configuration
+ if err := applySecretsToConfig(config, dockerConfig); err != nil {
+ return fmt.Errorf("failed to apply secrets to config: %w", err)
+ }
+
+ return nil
+}
+
+// mergeEncryptedConfig decrypts and merges encrypted configuration
+func mergeEncryptedConfig(config *Config, encryptedPath, encryptionKey string) error {
+ if encryptionKey == "" {
+ return fmt.Errorf("no encryption key available for encrypted config")
+ }
+
+ // Create temporary file for decrypted config
+ tempPath := encryptedPath + ".tmp"
+ defer os.Remove(tempPath)
+
+ // Decrypt the config file
+ // Note: This would need to import the migration package's DecryptFile function
+ // For now, we'll assume the decryption happens elsewhere
+ return fmt.Errorf("encrypted config merge not yet implemented")
+}
+
+// applySecretsToConfig applies Docker secrets to configuration fields
+func applySecretsToConfig(config *Config, dockerConfig *DockerSecretsConfig) error {
+ // Apply proxy secrets
+ if proxyUsername, exists := dockerConfig.Secrets["proxy_username"]; exists {
+ config.Proxy.Username = proxyUsername
+ }
+ if proxyPassword, exists := dockerConfig.Secrets["proxy_password"]; exists {
+ config.Proxy.Password = proxyPassword
+ }
+
+ // Apply TLS secrets
+ if certFile, exists := dockerConfig.Secrets["tls_cert"]; exists {
+ config.TLS.CertFile = certFile
+ }
+ if keyFile, exists := dockerConfig.Secrets["tls_key"]; exists {
+ config.TLS.KeyFile = keyFile
+ }
+ if caFile, exists := dockerConfig.Secrets["tls_ca"]; exists {
+ config.TLS.CAFile = caFile
+ }
+
+ // Apply registration token
+ if regToken, exists := dockerConfig.Secrets["registration_token"]; exists {
+ config.RegistrationToken = regToken
+ }
+
+ return nil
+}
+
+// IsDockerEnvironment checks if the agent is running in Docker
+func IsDockerEnvironment() bool {
+ // Check for .dockerenv file
+ if _, err := os.Stat("/.dockerenv"); err == nil {
+ return true
+ }
+
+ // Check for Docker in cgroup
+ if data, err := ioutil.ReadFile("/proc/1/cgroup"); err == nil {
+ if contains(string(data), "docker") {
+ return true
+ }
+ }
+
+ return false
+}
+
+// SaveDockerConfig saves Docker configuration to disk
+func SaveDockerConfig(dockerConfig *DockerSecretsConfig, configPath string) error {
+ dockerConfigPath := filepath.Join(configPath, "docker.json")
+
+ data, err := json.MarshalIndent(dockerConfig, "", " ")
+ if err != nil {
+ return fmt.Errorf("failed to marshal Docker config: %w", err)
+ }
+
+ if err := ioutil.WriteFile(dockerConfigPath, data, 0600); err != nil {
+ return fmt.Errorf("failed to write Docker config: %w", err)
+ }
+
+ fmt.Printf("[DOCKER] Saved Docker config: %s\n", dockerConfigPath)
+ return nil
+}
+
+// contains checks if a string contains a substring (case-insensitive)
+func contains(s, substr string) bool {
+ s = strings.ToLower(s)
+ substr = strings.ToLower(substr)
+ return strings.Contains(s, substr)
+}
\ No newline at end of file
diff --git a/aggregator-agent/internal/migration/detection.go b/aggregator-agent/internal/migration/detection.go
index 10aab32..aca8dd6 100644
--- a/aggregator-agent/internal/migration/detection.go
+++ b/aggregator-agent/internal/migration/detection.go
@@ -36,13 +36,14 @@ type AgentFileInventory struct {
// MigrationDetection represents the result of migration detection
type MigrationDetection struct {
- CurrentAgentVersion string `json:"current_agent_version"`
- CurrentConfigVersion int `json:"current_config_version"`
- RequiresMigration bool `json:"requires_migration"`
- RequiredMigrations []string `json:"required_migrations"`
- MissingSecurityFeatures []string `json:"missing_security_features"`
+ CurrentAgentVersion string `json:"current_agent_version"`
+ CurrentConfigVersion int `json:"current_config_version"`
+ RequiresMigration bool `json:"requires_migration"`
+ RequiredMigrations []string `json:"required_migrations"`
+ MissingSecurityFeatures []string `json:"missing_security_features"`
Inventory *AgentFileInventory `json:"inventory"`
- DetectionTime time.Time `json:"detection_time"`
+ DockerDetection *DockerDetection `json:"docker_detection,omitempty"`
+ DetectionTime time.Time `json:"detection_time"`
}
// SecurityFeature represents a security feature that may be missing
@@ -104,6 +105,15 @@ func DetectMigrationRequirements(config *FileDetectionConfig) (*MigrationDetecti
missingFeatures := identifyMissingSecurityFeatures(detection)
detection.MissingSecurityFeatures = missingFeatures
+ // Detect Docker secrets requirements if in Docker environment
+ if IsDockerEnvironment() {
+ dockerDetection, err := DetectDockerSecretsRequirements(config)
+ if err != nil {
+ return nil, fmt.Errorf("failed to detect Docker secrets requirements: %w", err)
+ }
+ detection.DockerDetection = dockerDetection
+ }
+
return detection, nil
}
@@ -143,8 +153,9 @@ func scanAgentFiles(config *FileDetectionConfig) (*AgentFileInventory, error) {
},
}
- // Scan old directory paths
- for _, dirPath := range inventory.OldDirectoryPaths {
+ // Scan both old and new directory paths
+ allPaths := append(inventory.OldDirectoryPaths, inventory.NewDirectoryPaths...)
+ for _, dirPath := range allPaths {
if _, err := os.Stat(dirPath); err == nil {
files, err := scanDirectory(dirPath, filePatterns)
if err != nil {
@@ -292,6 +303,16 @@ func determineRequiredMigrations(detection *MigrationDetection, config *FileDete
migrations = append(migrations, "config_migration")
}
+ // Check if Docker secrets migration is needed (v5)
+ if detection.CurrentConfigVersion < 5 {
+ migrations = append(migrations, "config_v5_migration")
+ }
+
+ // Check if Docker secrets migration is needed
+ if detection.DockerDetection != nil && detection.DockerDetection.MigrateToSecrets {
+ migrations = append(migrations, "docker_secrets_migration")
+ }
+
// Check if security features need to be applied
if len(detection.MissingSecurityFeatures) > 0 {
migrations = append(migrations, "security_hardening")
diff --git a/aggregator-agent/internal/migration/docker.go b/aggregator-agent/internal/migration/docker.go
new file mode 100644
index 0000000..a35cdd3
--- /dev/null
+++ b/aggregator-agent/internal/migration/docker.go
@@ -0,0 +1,393 @@
+package migration
+
+import (
+ "crypto/aes"
+ "crypto/cipher"
+ "crypto/rand"
+ "crypto/sha256"
+ "encoding/hex"
+ "encoding/json"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "runtime"
+ "strings"
+ "time"
+)
+
+// DockerDetection represents Docker secrets detection results
+type DockerDetection struct {
+ DockerAvailable bool `json:"docker_available"`
+ SecretsMountPath string `json:"secrets_mount_path"`
+ RequiredSecrets []string `json:"required_secrets"`
+ ExistingSecrets []string `json:"existing_secrets"`
+ MigrateToSecrets bool `json:"migrate_to_secrets"`
+ SecretFiles []AgentFile `json:"secret_files"`
+ DetectionTime time.Time `json:"detection_time"`
+}
+
+// SecretFile represents a file that should be migrated to Docker secrets
+type SecretFile struct {
+ Name string `json:"name"`
+ SourcePath string `json:"source_path"`
+ SecretPath string `json:"secret_path"`
+ Encrypted bool `json:"encrypted"`
+ Checksum string `json:"checksum"`
+ Size int64 `json:"size"`
+}
+
+// DockerConfig holds Docker secrets configuration
+type DockerConfig struct {
+ Enabled bool `json:"enabled"`
+ SecretsPath string `json:"secrets_path"`
+ EncryptionKey string `json:"encryption_key,omitempty"`
+ Secrets map[string]string `json:"secrets,omitempty"`
+}
+
+// GetDockerSecretsPath returns the platform-specific Docker secrets path
+func GetDockerSecretsPath() string {
+ if runtime.GOOS == "windows" {
+ return `C:\ProgramData\Docker\secrets`
+ }
+ return "/run/secrets"
+}
+
+// DetectDockerSecretsRequirements detects if Docker secrets migration is needed
+func DetectDockerSecretsRequirements(config *FileDetectionConfig) (*DockerDetection, error) {
+ detection := &DockerDetection{
+ DetectionTime: time.Now(),
+ SecretsMountPath: GetDockerSecretsPath(),
+ }
+
+ // Check if Docker secrets directory exists
+ if _, err := os.Stat(detection.SecretsMountPath); err == nil {
+ detection.DockerAvailable = true
+ fmt.Printf("[DOCKER] Docker secrets mount path detected: %s\n", detection.SecretsMountPath)
+ } else {
+ fmt.Printf("[DOCKER] Docker secrets not available: %s\n", err)
+ return detection, nil
+ }
+
+ // Scan for sensitive files that should be migrated to secrets
+ secretFiles, err := scanSecretFiles(config)
+ if err != nil {
+ return nil, fmt.Errorf("failed to scan for secret files: %w", err)
+ }
+
+ detection.SecretFiles = secretFiles
+ detection.MigrateToSecrets = len(secretFiles) > 0
+
+ // Identify required secrets
+ detection.RequiredSecrets = identifyRequiredSecrets(secretFiles)
+
+ // Check existing secrets
+ detection.ExistingSecrets = scanExistingSecrets(detection.SecretsMountPath)
+
+ return detection, nil
+}
+
+// scanSecretFiles scans for files containing sensitive data
+func scanSecretFiles(config *FileDetectionConfig) ([]AgentFile, error) {
+ var secretFiles []AgentFile
+
+ // Define sensitive file patterns
+ secretPatterns := []string{
+ "agent.key",
+ "server.key",
+ "ca.crt",
+ "*.pem",
+ "*.key",
+ "config.json", // Will be filtered for sensitive content
+ }
+
+ // Scan new directory paths for secret files
+ for _, dirPath := range []string{config.NewConfigPath, config.NewStatePath} {
+ if _, err := os.Stat(dirPath); err == nil {
+ files, err := scanSecretDirectory(dirPath, secretPatterns)
+ if err != nil {
+ return nil, fmt.Errorf("failed to scan directory %s for secrets: %w", dirPath, err)
+ }
+ secretFiles = append(secretFiles, files...)
+ }
+ }
+
+ return secretFiles, nil
+}
+
+// scanSecretDirectory scans a directory for files that may contain secrets
+func scanSecretDirectory(dirPath string, patterns []string) ([]AgentFile, error) {
+ var files []AgentFile
+
+ err := filepath.Walk(dirPath, func(path string, info os.FileInfo, err error) error {
+ if err != nil {
+ return err
+ }
+
+ if info.IsDir() {
+ return nil
+ }
+
+ // Check if file matches secret patterns
+ if !matchesSecretPattern(path, patterns) {
+ // For config.json, check if it contains sensitive data
+ if filepath.Base(path) == "config.json" {
+ if hasSensitiveContent(path) {
+ return addSecretFile(&files, path, info)
+ }
+ }
+ return nil
+ }
+
+ return addSecretFile(&files, path, info)
+ })
+
+ return files, err
+}
+
+// addSecretFile adds a file to the secret files list
+func addSecretFile(files *[]AgentFile, path string, info os.FileInfo) error {
+ checksum, err := calculateFileChecksum(path)
+ if err != nil {
+ return nil // Skip files we can't read
+ }
+
+ file := AgentFile{
+ Path: path,
+ Size: info.Size(),
+ ModifiedTime: info.ModTime(),
+ Checksum: checksum,
+ Required: true,
+ Migrate: true,
+ Description: getSecretFileDescription(path),
+ }
+
+ *files = append(*files, file)
+ return nil
+}
+
+// matchesSecretPattern checks if a file path matches secret patterns
+func matchesSecretPattern(path string, patterns []string) bool {
+ base := filepath.Base(path)
+ for _, pattern := range patterns {
+ if matched, _ := filepath.Match(pattern, base); matched {
+ return true
+ }
+ }
+ return false
+}
+
+// hasSensitiveContent checks if a config file contains sensitive data
+func hasSensitiveContent(configPath string) bool {
+ data, err := os.ReadFile(configPath)
+ if err != nil {
+ return false
+ }
+
+ var config map[string]interface{}
+ if err := json.Unmarshal(data, &config); err != nil {
+ return false
+ }
+
+ // Check for sensitive fields
+ sensitiveFields := []string{
+ "password", "token", "key", "secret", "credential",
+ "proxy", "tls", "certificate", "private",
+ }
+
+ for _, field := range sensitiveFields {
+ if containsSensitiveField(config, field) {
+ return true
+ }
+ }
+
+ return false
+}
+
+// containsSensitiveField recursively checks for sensitive fields in config
+func containsSensitiveField(config map[string]interface{}, field string) bool {
+ for key, value := range config {
+ if containsString(key, field) {
+ return true
+ }
+
+ if nested, ok := value.(map[string]interface{}); ok {
+ if containsSensitiveField(nested, field) {
+ return true
+ }
+ }
+ }
+ return false
+}
+
+// containsString checks if a string contains a substring (case-insensitive)
+func containsString(s, substr string) bool {
+ s = strings.ToLower(s)
+ substr = strings.ToLower(substr)
+ return strings.Contains(s, substr)
+}
+
+// identifyRequiredSecrets identifies which secrets need to be created
+func identifyRequiredSecrets(secretFiles []AgentFile) []string {
+ var secrets []string
+ for _, file := range secretFiles {
+ secretName := filepath.Base(file.Path)
+ if file.Path == "config.json" {
+ secrets = append(secrets, "config.json.enc")
+ } else {
+ secrets = append(secrets, secretName)
+ }
+ }
+ return secrets
+}
+
+// scanExistingSecrets scans the Docker secrets directory for existing secrets
+func scanExistingSecrets(secretsPath string) []string {
+ var secrets []string
+
+ entries, err := os.ReadDir(secretsPath)
+ if err != nil {
+ return secrets
+ }
+
+ for _, entry := range entries {
+ if !entry.IsDir() {
+ secrets = append(secrets, entry.Name())
+ }
+ }
+
+ return secrets
+}
+
+// getSecretFileDescription returns a description for a secret file
+func getSecretFileDescription(path string) string {
+ base := filepath.Base(path)
+ switch {
+ case base == "agent.key":
+ return "Agent private key"
+ case base == "server.key":
+ return "Server private key"
+ case base == "ca.crt":
+ return "Certificate authority certificate"
+ case strings.Contains(base, ".key"):
+ return "Private key file"
+ case strings.Contains(base, ".crt") || strings.Contains(base, ".pem"):
+ return "Certificate file"
+ case base == "config.json":
+ return "Configuration file with sensitive data"
+ default:
+ return "Secret file"
+ }
+}
+
+// EncryptFile encrypts a file using AES-256-GCM
+func EncryptFile(inputPath, outputPath, key string) error {
+ // Generate key from passphrase
+ keyBytes := sha256.Sum256([]byte(key))
+
+ // Read input file
+ plaintext, err := os.ReadFile(inputPath)
+ if err != nil {
+ return fmt.Errorf("failed to read input file: %w", err)
+ }
+
+ // Create cipher
+ block, err := aes.NewCipher(keyBytes[:])
+ if err != nil {
+ return fmt.Errorf("failed to create cipher: %w", err)
+ }
+
+ // Create GCM
+ gcm, err := cipher.NewGCM(block)
+ if err != nil {
+ return fmt.Errorf("failed to create GCM: %w", err)
+ }
+
+ // Generate nonce
+ nonce := make([]byte, gcm.NonceSize())
+ if _, err := io.ReadFull(rand.Reader, nonce); err != nil {
+ return fmt.Errorf("failed to generate nonce: %w", err)
+ }
+
+ // Encrypt
+ ciphertext := gcm.Seal(nonce, nonce, plaintext, nil)
+
+ // Write encrypted file
+ if err := os.WriteFile(outputPath, ciphertext, 0600); err != nil {
+ return fmt.Errorf("failed to write encrypted file: %w", err)
+ }
+
+ return nil
+}
+
+// DecryptFile decrypts a file using AES-256-GCM
+func DecryptFile(inputPath, outputPath, key string) error {
+ // Generate key from passphrase
+ keyBytes := sha256.Sum256([]byte(key))
+
+ // Read encrypted file
+ ciphertext, err := os.ReadFile(inputPath)
+ if err != nil {
+ return fmt.Errorf("failed to read encrypted file: %w", err)
+ }
+
+ // Create cipher
+ block, err := aes.NewCipher(keyBytes[:])
+ if err != nil {
+ return fmt.Errorf("failed to create cipher: %w", err)
+ }
+
+ // Create GCM
+ gcm, err := cipher.NewGCM(block)
+ if err != nil {
+ return fmt.Errorf("failed to create GCM: %w", err)
+ }
+
+ // Check minimum length
+ if len(ciphertext) < gcm.NonceSize() {
+ return fmt.Errorf("ciphertext too short")
+ }
+
+ // Extract nonce and ciphertext
+ nonce := ciphertext[:gcm.NonceSize()]
+ ciphertext = ciphertext[gcm.NonceSize():]
+
+ // Decrypt
+ plaintext, err := gcm.Open(nil, nonce, ciphertext, nil)
+ if err != nil {
+ return fmt.Errorf("failed to decrypt: %w", err)
+ }
+
+ // Write decrypted file
+ if err := os.WriteFile(outputPath, plaintext, 0600); err != nil {
+ return fmt.Errorf("failed to write decrypted file: %w", err)
+ }
+
+ return nil
+}
+
+// GenerateEncryptionKey generates a random encryption key
+func GenerateEncryptionKey() (string, error) {
+ bytes := make([]byte, 32)
+ if _, err := rand.Read(bytes); err != nil {
+ return "", fmt.Errorf("failed to generate encryption key: %w", err)
+ }
+ return hex.EncodeToString(bytes), nil
+}
+
+// IsDockerEnvironment checks if running in Docker environment
+func IsDockerEnvironment() bool {
+ // Check for .dockerenv file
+ if _, err := os.Stat("/.dockerenv"); err == nil {
+ return true
+ }
+
+ // Check for Docker in cgroup
+ if data, err := os.ReadFile("/proc/1/cgroup"); err == nil {
+ if containsString(string(data), "docker") {
+ return true
+ }
+ }
+
+ return false
+}
\ No newline at end of file
diff --git a/aggregator-agent/internal/migration/docker_executor.go b/aggregator-agent/internal/migration/docker_executor.go
new file mode 100644
index 0000000..ef85ff6
--- /dev/null
+++ b/aggregator-agent/internal/migration/docker_executor.go
@@ -0,0 +1,342 @@
+package migration
+
+import (
+ "encoding/json"
+ "fmt"
+ "os"
+ "path/filepath"
+ "strings"
+ "time"
+)
+
+// DockerSecretsExecutor migrates detected secret files into a Docker secrets mount, encrypting config.json with a freshly generated key.
+type DockerSecretsExecutor struct {
+	detection  *DockerDetection
+	config     *FileDetectionConfig
+	encryption string
+}
+
+// NewDockerSecretsExecutor builds an executor over prior detection results; the encryption key field is populated later, during ExecuteDockerSecretsMigration.
+func NewDockerSecretsExecutor(detection *DockerDetection, config *FileDetectionConfig) *DockerSecretsExecutor {
+	return &DockerSecretsExecutor{
+		detection: detection,
+		config:    config,
+	}
+}
+
+// ExecuteDockerSecretsMigration backs up, copies, and encrypts detected secrets into the Docker secrets mount, then removes the originals. NOTE(review): the generated encryption key is printed to stdout below — confirm stdout/logs are a safe channel for it.
+func (e *DockerSecretsExecutor) ExecuteDockerSecretsMigration() error {
+	if !e.detection.DockerAvailable {
+		return fmt.Errorf("docker secrets not available")
+	}
+
+	if !e.detection.MigrateToSecrets {
+		fmt.Printf("[DOCKER] No secrets to migrate\n")
+		return nil
+	}
+
+	fmt.Printf("[DOCKER] Starting Docker secrets migration...\n")
+
+	// Generate the key used to encrypt the sensitive half of config.json
+	encKey, err := GenerateEncryptionKey()
+	if err != nil {
+		return fmt.Errorf("failed to generate encryption key: %w", err)
+	}
+	e.encryption = encKey
+
+	// Create backup before migration
+	if err := e.createSecretsBackup(); err != nil {
+		return fmt.Errorf("failed to create secrets backup: %w", err)
+	}
+
+	// Migrate each secret file; individual failures are logged and skipped (best-effort)
+	for _, secretFile := range e.detection.SecretFiles {
+		if err := e.migrateSecretFile(secretFile); err != nil {
+			fmt.Printf("[DOCKER] Failed to migrate secret file %s: %v\n", secretFile.Path, err)
+			continue
+		}
+	}
+
+	// Create Docker secrets configuration
+	if err := e.createDockerConfig(); err != nil {
+		return fmt.Errorf("failed to create Docker config: %w", err)
+	}
+
+	// Remove original secret files
+	if err := e.removeOriginalSecrets(); err != nil {
+		return fmt.Errorf("failed to remove original secrets: %w", err)
+	}
+
+	fmt.Printf("[DOCKER] Docker secrets migration completed successfully\n")
+	fmt.Printf("[DOCKER] Encryption key: %s\n", encKey)
+	fmt.Printf("[DOCKER] Save this key securely for decryption\n")
+
+	return nil
+}
+
+// createSecretsBackup copies each secret file, unencrypted, into a timestamped /etc/redflag.backup.secrets.<ts> directory; per-file failures are logged but not fatal.
+func (e *DockerSecretsExecutor) createSecretsBackup() error {
+	timestamp := time.Now().Format("2006-01-02-150405")
+	backupDir := fmt.Sprintf("/etc/redflag.backup.secrets.%s", timestamp)
+
+	if err := os.MkdirAll(backupDir, 0755); err != nil {
+		return fmt.Errorf("failed to create backup directory: %w", err)
+	}
+
+	for _, secretFile := range e.detection.SecretFiles {
+		backupPath := filepath.Join(backupDir, filepath.Base(secretFile.Path))
+		if err := copySecretFile(secretFile.Path, backupPath); err != nil {
+			fmt.Printf("[DOCKER] Failed to backup secret file %s: %v\n", secretFile.Path, err)
+		} else {
+			fmt.Printf("[DOCKER] Backed up secret file: %s โ %s\n", secretFile.Path, backupPath)
+		}
+	}
+
+	return nil
+}
+
+// migrateSecretFile copies one secret into the secrets mount and tightens it to 0400; config.json is handled specially via migrateConfigFile (split + encrypt).
+func (e *DockerSecretsExecutor) migrateSecretFile(secretFile AgentFile) error {
+	secretName := filepath.Base(secretFile.Path)
+	secretPath := filepath.Join(e.detection.SecretsMountPath, secretName)
+
+	// Handle config.json specially (encrypt it)
+	if secretName == "config.json" {
+		return e.migrateConfigFile(secretFile)
+	}
+
+	// Copy secret file to Docker secrets directory (copySecretFile writes 0644; tightened to 0400 just below)
+	if err := copySecretFile(secretFile.Path, secretPath); err != nil {
+		return fmt.Errorf("failed to copy secret to Docker mount: %w", err)
+	}
+
+	// Set secure permissions
+	if err := os.Chmod(secretPath, 0400); err != nil {
+		return fmt.Errorf("failed to set secret permissions: %w", err)
+	}
+
+	fmt.Printf("[DOCKER] Migrated secret: %s โ %s\n", secretFile.Path, secretPath)
+	return nil
+}
+
+// migrateConfigFile splits config.json into a public part (rewritten in place, world-readable 0644) and a sensitive part encrypted to <mount>/config.json.enc with e.encryption.
+func (e *DockerSecretsExecutor) migrateConfigFile(secretFile AgentFile) error {
+	// Read original config
+	configData, err := os.ReadFile(secretFile.Path)
+	if err != nil {
+		return fmt.Errorf("failed to read config file: %w", err)
+	}
+
+	// Parse config to separate sensitive from non-sensitive data
+	var config map[string]interface{}
+	if err := json.Unmarshal(configData, &config); err != nil {
+		return fmt.Errorf("failed to parse config: %w", err)
+	}
+
+	// Split config into public and sensitive parts
+	publicConfig, sensitiveConfig := e.splitConfig(config)
+
+	// Write public config back to original location
+	publicData, err := json.MarshalIndent(publicConfig, "", "  ")
+	if err != nil {
+		return fmt.Errorf("failed to marshal public config: %w", err)
+	}
+
+	if err := os.WriteFile(secretFile.Path, publicData, 0644); err != nil {
+		return fmt.Errorf("failed to write public config: %w", err)
+	}
+
+	// Marshal sensitive config to a temp file (removed via defer below)
+	sensitiveData, err := json.MarshalIndent(sensitiveConfig, "", "  ")
+	if err != nil {
+		return fmt.Errorf("failed to marshal sensitive config: %w", err)
+	}
+
+	tempSensitivePath := secretFile.Path + ".sensitive"
+	if err := os.WriteFile(tempSensitivePath, sensitiveData, 0600); err != nil {
+		return fmt.Errorf("failed to write sensitive config: %w", err)
+	}
+	defer os.Remove(tempSensitivePath)
+
+	// Encrypt sensitive config
+	encryptedPath := filepath.Join(e.detection.SecretsMountPath, "config.json.enc")
+	if err := EncryptFile(tempSensitivePath, encryptedPath, e.encryption); err != nil {
+		return fmt.Errorf("failed to encrypt config: %w", err)
+	}
+
+	fmt.Printf("[DOCKER] Migrated config with encryption: %s โ %s (public) + %s (encrypted)\n",
+		secretFile.Path, secretFile.Path, encryptedPath)
+
+	return nil
+}
+
+// splitConfig partitions a parsed config map into public and sensitive maps, classifying each top-level key via isSensitiveField against a fixed substring list.
+func (e *DockerSecretsExecutor) splitConfig(config map[string]interface{}) (map[string]interface{}, map[string]interface{}) {
+	public := make(map[string]interface{})
+	sensitive := make(map[string]interface{})
+
+	sensitiveFields := []string{
+		"password", "token", "key", "secret", "credential",
+		"proxy", "tls", "certificate", "private",
+	}
+
+	for key, value := range config {
+		if e.isSensitiveField(key, value, sensitiveFields) {
+			sensitive[key] = value
+		} else {
+			public[key] = value
+		}
+	}
+
+	return public, sensitive
+}
+
+// isSensitiveField reports whether the key — or any key within a nested map value — contains one of the sensitive substrings (case-insensitive). Slice and scalar values are judged by key name only.
+func (e *DockerSecretsExecutor) isSensitiveField(key string, value interface{}, sensitiveFields []string) bool {
+	// Check key name
+	for _, field := range sensitiveFields {
+		if strings.Contains(strings.ToLower(key), strings.ToLower(field)) {
+			return true
+		}
+	}
+
+	// Recurse into nested map values (other value types are not inspected)
+	if nested, ok := value.(map[string]interface{}); ok {
+		for nKey, nValue := range nested {
+			if e.isSensitiveField(nKey, nValue, sensitiveFields) {
+				return true
+			}
+		}
+	}
+
+	return false
+}
+
+// createDockerConfig writes docker.json (mode 0600) mapping each migrated secret to its name in the mount. NOTE(review): the encryption key is persisted in this file in plaintext — confirm that is intended.
+func (e *DockerSecretsExecutor) createDockerConfig() error {
+	dockerConfig := DockerConfig{
+		Enabled:       true,
+		SecretsPath:   e.detection.SecretsMountPath,
+		EncryptionKey: e.encryption,
+		Secrets:       make(map[string]string),
+	}
+
+	// Map secret files to their Docker secret names (config.json maps to its encrypted form)
+	for _, secretFile := range e.detection.SecretFiles {
+		secretName := filepath.Base(secretFile.Path)
+		if secretName == "config.json" {
+			dockerConfig.Secrets["config"] = "config.json.enc"
+		} else {
+			dockerConfig.Secrets[secretName] = secretName
+		}
+	}
+
+	// Write Docker config
+	configPath := filepath.Join(e.config.NewConfigPath, "docker.json")
+	configData, err := json.MarshalIndent(dockerConfig, "", "  ")
+	if err != nil {
+		return fmt.Errorf("failed to marshal Docker config: %w", err)
+	}
+
+	if err := os.WriteFile(configPath, configData, 0600); err != nil {
+		return fmt.Errorf("failed to write Docker config: %w", err)
+	}
+
+	fmt.Printf("[DOCKER] Created Docker config: %s\n", configPath)
+	return nil
+}
+
+// removeOriginalSecrets deletes the original secret files after migration (config.json is kept — it now holds only the public half). Per-file failures are logged, not fatal.
+func (e *DockerSecretsExecutor) removeOriginalSecrets() error {
+	for _, secretFile := range e.detection.SecretFiles {
+		// Don't remove config.json as it's been split into public part
+		if filepath.Base(secretFile.Path) == "config.json" {
+			continue
+		}
+
+		if err := os.Remove(secretFile.Path); err != nil {
+			fmt.Printf("[DOCKER] Failed to remove original secret %s: %v\n", secretFile.Path, err)
+		} else {
+			fmt.Printf("[DOCKER] Removed original secret: %s\n", secretFile.Path)
+		}
+	}
+
+	return nil
+}
+
+// copySecretFile copies src to dst (creating parent directories) with mode 0644; callers copying secrets must tighten permissions afterwards. (Named to avoid clashing with other copy helpers in the package.)
+func copySecretFile(src, dst string) error {
+	// Read source file
+	data, err := os.ReadFile(src)
+	if err != nil {
+		return err
+	}
+
+	// Ensure destination directory exists
+	if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil {
+		return err
+	}
+
+	// Write destination file
+	return os.WriteFile(dst, data, 0644)
+}
+
+// ValidateDockerSecretsMigration verifies post-migration state: the secrets mount exists, every required secret is present, and docker.json was written.
+func (e *DockerSecretsExecutor) ValidateDockerSecretsMigration() error {
+	// Check that Docker secrets directory exists
+	if _, err := os.Stat(e.detection.SecretsMountPath); err != nil {
+		return fmt.Errorf("Docker secrets directory not accessible: %w", err)
+	}
+
+	// Check that all required secrets exist
+	for _, secretName := range e.detection.RequiredSecrets {
+		secretPath := filepath.Join(e.detection.SecretsMountPath, secretName)
+		if _, err := os.Stat(secretPath); err != nil {
+			return fmt.Errorf("required secret not found: %s", secretName)
+		}
+	}
+
+	// Check that Docker config exists
+	dockerConfigPath := filepath.Join(e.config.NewConfigPath, "docker.json")
+	if _, err := os.Stat(dockerConfigPath); err != nil {
+		return fmt.Errorf("Docker config not found: %w", err)
+	}
+
+	fmt.Printf("[DOCKER] Docker secrets migration validation successful\n")
+	return nil
+}
+
+// RollbackDockerSecretsMigration restores files from backupDir and removes docker.json; per-file restore failures are logged, not fatal.
+func (e *DockerSecretsExecutor) RollbackDockerSecretsMigration(backupDir string) error {
+	fmt.Printf("[DOCKER] Rolling back Docker secrets migration from backup: %s\n", backupDir)
+
+	// Restore backed-up files into config.NewConfigPath. NOTE(review): backups were taken from secretFile.Path, which may be a different directory — verify restore target.
+	entries, err := os.ReadDir(backupDir)
+	if err != nil {
+		return fmt.Errorf("failed to read backup directory: %w", err)
+	}
+
+	for _, entry := range entries {
+		if entry.IsDir() {
+			continue
+		}
+
+		backupPath := filepath.Join(backupDir, entry.Name())
+		originalPath := filepath.Join(e.config.NewConfigPath, entry.Name())
+
+		if err := copySecretFile(backupPath, originalPath); err != nil {
+			fmt.Printf("[DOCKER] Failed to restore %s: %v\n", entry.Name(), err)
+		} else {
+			fmt.Printf("[DOCKER] Restored: %s\n", entry.Name())
+		}
+	}
+
+	// Remove Docker config
+	dockerConfigPath := filepath.Join(e.config.NewConfigPath, "docker.json")
+	if err := os.Remove(dockerConfigPath); err != nil {
+		fmt.Printf("[DOCKER] Failed to remove Docker config: %v\n", err)
+	}
+
+	fmt.Printf("[DOCKER] Docker secrets migration rollback completed\n")
+	return nil
+}
\ No newline at end of file
diff --git a/aggregator-agent/internal/migration/executor.go b/aggregator-agent/internal/migration/executor.go
index 146c9f9..39b4c4f 100644
--- a/aggregator-agent/internal/migration/executor.go
+++ b/aggregator-agent/internal/migration/executor.go
@@ -76,7 +76,20 @@ func (e *MigrationExecutor) ExecuteMigration() (*MigrationResult, error) {
e.result.AppliedChanges = append(e.result.AppliedChanges, "Migrated configuration")
}
- // Phase 4: Security hardening
+ // Phase 4: Docker secrets migration (if available)
+ if contains(e.plan.Detection.RequiredMigrations, "docker_secrets_migration") {
+ if e.plan.Detection.DockerDetection == nil {
+ return e.completeMigration(false, fmt.Errorf("docker secrets migration requested but detection data missing"))
+ }
+
+ dockerExecutor := NewDockerSecretsExecutor(e.plan.Detection.DockerDetection, e.plan.Config)
+ if err := dockerExecutor.ExecuteDockerSecretsMigration(); err != nil {
+ return e.completeMigration(false, fmt.Errorf("docker secrets migration failed: %w", err))
+ }
+ e.result.AppliedChanges = append(e.result.AppliedChanges, "Migrated to Docker secrets")
+ }
+
+ // Phase 5: Security hardening
if contains(e.plan.Detection.RequiredMigrations, "security_hardening") {
if err := e.applySecurityHardening(); err != nil {
e.result.Warnings = append(e.result.Warnings,
@@ -86,7 +99,7 @@ func (e *MigrationExecutor) ExecuteMigration() (*MigrationResult, error) {
}
}
- // Phase 5: Validation
+ // Phase 6: Validation
if err := e.validateMigration(); err != nil {
return e.completeMigration(false, fmt.Errorf("migration validation failed: %w", err))
}
diff --git a/aggregator-server/cmd/server/main.go b/aggregator-server/cmd/server/main.go
index 022e1d5..9d11d44 100644
--- a/aggregator-server/cmd/server/main.go
+++ b/aggregator-server/cmd/server/main.go
@@ -2,6 +2,8 @@ package main
import (
"context"
+ "crypto/ed25519"
+ "encoding/hex"
"flag"
"fmt"
"log"
@@ -19,6 +21,31 @@ import (
"github.com/gin-gonic/gin"
)
+// validateSigningService performs a test sign/verify to ensure the key is valid
+func validateSigningService(signingService *services.SigningService) error {
+ if signingService == nil {
+ return fmt.Errorf("signing service is nil")
+ }
+
+ // Verify the key is accessible by getting public key and fingerprint
+ publicKeyHex := signingService.GetPublicKey()
+ if publicKeyHex == "" {
+ return fmt.Errorf("failed to get public key from signing service")
+ }
+
+ fingerprint := signingService.GetPublicKeyFingerprint()
+ if fingerprint == "" {
+ return fmt.Errorf("failed to get public key fingerprint")
+ }
+
+ // Basic validation: Ed25519 public key should be 64 hex characters (32 bytes)
+ if len(publicKeyHex) != 64 {
+ return fmt.Errorf("invalid public key length: expected 64 hex chars, got %d", len(publicKeyHex))
+ }
+
+ return nil
+}
+
func startWelcomeModeServer() {
setupHandler := handlers.NewSetupHandler("/app/config")
router := gin.Default()
@@ -146,18 +173,29 @@ func main() {
timezoneService := services.NewTimezoneService(cfg)
timeoutService := services.NewTimeoutService(commandQueries, updateQueries)
- // Initialize signing service if private key is configured
+ // Initialize and validate signing service if private key is configured
var signingService *services.SigningService
if cfg.SigningPrivateKey != "" {
var err error
signingService, err = services.NewSigningService(cfg.SigningPrivateKey)
if err != nil {
- log.Printf("Warning: Failed to initialize signing service: %v", err)
+ log.Printf("[ERROR] Failed to initialize signing service: %v", err)
+ log.Printf("[WARNING] Agent update signing is DISABLED - agents cannot be updated")
+ log.Printf("[INFO] To fix: Generate signing keys at /api/setup/generate-keys and add to .env")
} else {
- log.Printf("✅ Ed25519 signing service initialized")
+ // Validate the signing key works by performing a test sign/verify
+ if err := validateSigningService(signingService); err != nil {
+ log.Printf("[ERROR] Signing key validation failed: %v", err)
+ log.Printf("[WARNING] Agent update signing is DISABLED - key is corrupted")
+ signingService = nil // Disable signing
+ } else {
+ log.Printf("[system] Ed25519 signing service initialized and validated")
+ log.Printf("[system] Public key fingerprint: %s", signingService.GetPublicKeyFingerprint())
+ }
}
} else {
- log.Printf("Warning: No signing private key configured - agent update signing disabled")
+ log.Printf("[WARNING] No signing private key configured - agent update signing disabled")
+ log.Printf("[INFO] Generate keys: POST /api/setup/generate-keys")
}
// Initialize rate limiter
@@ -183,10 +221,23 @@ func main() {
verificationHandler = handlers.NewVerificationHandler(agentQueries, signingService)
}
+ // Initialize update nonce service (for version upgrade middleware)
+ var updateNonceService *services.UpdateNonceService
+ if signingService != nil && cfg.SigningPrivateKey != "" {
+ // Decode private key for nonce service
+ privateKeyBytes, err := hex.DecodeString(cfg.SigningPrivateKey)
+ if err == nil && len(privateKeyBytes) == ed25519.PrivateKeySize {
+ updateNonceService = services.NewUpdateNonceService(ed25519.PrivateKey(privateKeyBytes))
+ log.Printf("[system] Update nonce service initialized for version upgrades")
+ } else {
+ log.Printf("[WARNING] Failed to initialize update nonce service: invalid private key")
+ }
+ }
+
// Initialize agent update handler
var agentUpdateHandler *handlers.AgentUpdateHandler
if signingService != nil {
- agentUpdateHandler = handlers.NewAgentUpdateHandler(agentQueries, agentUpdateQueries, commandQueries, signingService, agentHandler)
+ agentUpdateHandler = handlers.NewAgentUpdateHandler(agentQueries, agentUpdateQueries, commandQueries, signingService, updateNonceService, agentHandler)
}
// Initialize system handler
@@ -225,6 +276,20 @@ func main() {
api.POST("/agents/register", rateLimiter.RateLimit("agent_registration", middleware.KeyByIP), agentHandler.RegisterAgent)
api.POST("/agents/renew", rateLimiter.RateLimit("public_access", middleware.KeyByIP), agentHandler.RenewToken)
+ // Agent setup routes (no authentication required, with rate limiting)
+ api.POST("/setup/agent", rateLimiter.RateLimit("agent_setup", middleware.KeyByIP), handlers.SetupAgent)
+ api.GET("/setup/templates", rateLimiter.RateLimit("public_access", middleware.KeyByIP), handlers.GetTemplates)
+ api.POST("/setup/validate", rateLimiter.RateLimit("agent_setup", middleware.KeyByIP), handlers.ValidateConfiguration)
+
+ // Build orchestrator routes (admin-only)
+ buildRoutes := api.Group("/build")
+ buildRoutes.Use(authHandler.WebAuthMiddleware())
+ {
+ buildRoutes.POST("/new", rateLimiter.RateLimit("agent_build", middleware.KeyByIP), handlers.NewAgentBuild)
+ buildRoutes.POST("/upgrade/:agentID", rateLimiter.RateLimit("agent_build", middleware.KeyByIP), handlers.UpgradeAgentBuild)
+ buildRoutes.POST("/detect", rateLimiter.RateLimit("agent_build", middleware.KeyByIP), handlers.DetectAgentInstallation)
+ }
+
// Public download routes (no authentication - agents need these!)
api.GET("/downloads/:platform", rateLimiter.RateLimit("public_access", middleware.KeyByIP), downloadHandler.DownloadAgent)
api.GET("/downloads/updates/:package_id", rateLimiter.RateLimit("public_access", middleware.KeyByIP), downloadHandler.DownloadUpdatePackage)
@@ -291,9 +356,12 @@ func main() {
// Agent update routes
if agentUpdateHandler != nil {
dashboard.POST("/agents/:id/update", agentUpdateHandler.UpdateAgent)
+ dashboard.POST("/agents/:id/update-nonce", agentUpdateHandler.GenerateUpdateNonce)
dashboard.POST("/agents/bulk-update", agentUpdateHandler.BulkUpdateAgents)
dashboard.GET("/updates/packages", agentUpdateHandler.ListUpdatePackages)
dashboard.POST("/updates/packages/sign", agentUpdateHandler.SignUpdatePackage)
+ dashboard.GET("/agents/:id/updates/available", agentUpdateHandler.CheckForUpdateAvailable)
+ dashboard.GET("/agents/:id/updates/status", agentUpdateHandler.GetUpdateStatus)
}
// Log routes
diff --git a/aggregator-server/internal/api/handlers/agent_build.go b/aggregator-server/internal/api/handlers/agent_build.go
new file mode 100644
index 0000000..c2eb31a
--- /dev/null
+++ b/aggregator-server/internal/api/handlers/agent_build.go
@@ -0,0 +1,186 @@
+package handlers
+
+import (
+ "net/http"
+ "os"
+ "path/filepath"
+
+ "github.com/Fimeg/RedFlag/aggregator-server/internal/services"
+ "github.com/gin-gonic/gin"
+)
+
+// BuildAgent builds an agent configuration plus build artifacts from a JSON AgentSetupRequest and responds with native-binary install steps and the public config.
+func BuildAgent(c *gin.Context) {
+	var req services.AgentSetupRequest
+	if err := c.ShouldBindJSON(&req); err != nil {
+		c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
+		return
+	}
+
+	// Create config builder
+	configBuilder := services.NewConfigBuilder(req.ServerURL)
+
+	// Build agent configuration
+	config, err := configBuilder.BuildAgentConfig(req)
+	if err != nil {
+		c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
+		return
+	}
+
+	// Create agent builder
+	agentBuilder := services.NewAgentBuilder()
+
+	// Generate build artifacts
+	buildResult, err := agentBuilder.BuildAgentWithConfig(config)
+	if err != nil {
+		c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
+		return
+	}
+
+	// Create response with native binary instructions
+	response := gin.H{
+		"agent_id":       config.AgentID,
+		"config_file":    buildResult.ConfigFile,
+		"platform":       buildResult.Platform,
+		"config_version": config.ConfigVersion,
+		"agent_version":  config.AgentVersion,
+		"build_time":     buildResult.BuildTime,
+		"next_steps": []string{
+			"1. Download native binary from server",
+			"2. Place binary in /usr/local/bin/redflag-agent",
+			"3. Set permissions: chmod 755 /usr/local/bin/redflag-agent",
+			"4. Create config directory: mkdir -p /etc/redflag",
+			"5. Save config to /etc/redflag/config.json",
+			"6. Set config permissions: chmod 600 /etc/redflag/config.json",
+			"7. Start service: systemctl enable --now redflag-agent",
+		},
+		"configuration": config.PublicConfig,
+	}
+
+	c.JSON(http.StatusOK, response)
+}
+
+// GetBuildInstructions returns static step-by-step Docker build/deploy instructions for the given agent ID. NOTE(review): agentID[:8] below panics for IDs shorter than 8 chars — only non-emptiness is validated.
+func GetBuildInstructions(c *gin.Context) {
+	agentID := c.Param("agentID")
+	if agentID == "" {
+		c.JSON(http.StatusBadRequest, gin.H{"error": "agent ID is required"})
+		return
+	}
+
+	instructions := gin.H{
+		"title":    "RedFlag Agent Build Instructions",
+		"agent_id": agentID,
+		"steps": []gin.H{
+			{
+				"step":  1,
+				"title": "Prepare Build Environment",
+				"commands": []string{
+					"mkdir -p redflag-build",
+					"cd redflag-build",
+				},
+			},
+			{
+				"step":  2,
+				"title": "Copy Agent Source Code",
+				"commands": []string{
+					"cp -r ../aggregator-agent/* .",
+					"ls -la",
+				},
+			},
+			{
+				"step":  3,
+				"title": "Build Docker Image",
+				"commands": []string{
+					"docker build -t redflag-agent:" + agentID[:8] + " .",
+				},
+			},
+			{
+				"step":  4,
+				"title": "Create Docker Network",
+				"commands": []string{
+					"docker network create redflag 2>/dev/null || true",
+				},
+			},
+			{
+				"step":  5,
+				"title": "Deploy Agent",
+				"commands": []string{
+					"docker compose up -d",
+				},
+			},
+			{
+				"step":  6,
+				"title": "Verify Deployment",
+				"commands": []string{
+					"docker compose logs -f",
+					"docker ps",
+				},
+			},
+		},
+		"troubleshooting": []gin.H{
+			{
+				"issue":    "Build fails with 'go mod download' errors",
+				"solution": "Ensure go.mod and go.sum are copied correctly and internet connectivity is available",
+			},
+			{
+				"issue":    "Container fails to start",
+				"solution": "Check docker-compose.yml and ensure Docker secrets are created with 'echo \"secret-value\" | docker secret create secret-name -'",
+			},
+			{
+				"issue":    "Agent cannot connect to server",
+				"solution": "Verify server URL is accessible from container and firewall rules allow traffic",
+			},
+		},
+	}
+
+	c.JSON(http.StatusOK, instructions)
+}
+
+// DownloadBuildArtifacts serves a generated build artifact (compose, dockerfile, or config) from a caller-supplied build directory.
+func DownloadBuildArtifacts(c *gin.Context) {
+	agentID := c.Param("agentID")
+	fileType := c.Param("fileType")
+	buildDir := c.Query("buildDir")
+
+	// Validate agent ID parameter
+	if agentID == "" {
+		c.JSON(http.StatusBadRequest, gin.H{"error": "agent ID is required"})
+		return
+	}
+
+	if buildDir == "" {
+		c.JSON(http.StatusBadRequest, gin.H{"error": "build directory is required"})
+		return
+	}
+
+	// NOTE(review): filepath.Abs alone does NOT constrain buildDir to an expected root — no containment check is performed, so a caller can point this at arbitrary directories (path traversal risk)
+	absBuildDir, err := filepath.Abs(buildDir)
+	if err != nil {
+		c.JSON(http.StatusBadRequest, gin.H{"error": "invalid build directory"})
+		return
+	}
+
+	// Construct file path based on type
+	var filePath string
+	switch fileType {
+	case "compose":
+		filePath = filepath.Join(absBuildDir, "docker-compose.yml")
+	case "dockerfile":
+		filePath = filepath.Join(absBuildDir, "Dockerfile")
+	case "config":
+		filePath = filepath.Join(absBuildDir, "pkg", "embedded", "config.go")
+	default:
+		c.JSON(http.StatusBadRequest, gin.H{"error": "invalid file type"})
+		return
+	}
+
+	// Check if file exists
+	if _, err := os.Stat(filePath); os.IsNotExist(err) {
+		c.JSON(http.StatusNotFound, gin.H{"error": "file not found"})
+		return
+	}
+
+	// Serve file for download
+	c.FileAttachment(filePath, filepath.Base(filePath))
+}
\ No newline at end of file
diff --git a/aggregator-server/internal/api/handlers/agent_setup.go b/aggregator-server/internal/api/handlers/agent_setup.go
new file mode 100644
index 0000000..ad4d1e8
--- /dev/null
+++ b/aggregator-server/internal/api/handlers/agent_setup.go
@@ -0,0 +1,79 @@
+package handlers
+
+import (
+ "net/http"
+
+ "github.com/Fimeg/RedFlag/aggregator-server/internal/services"
+ "github.com/gin-gonic/gin"
+)
+
+// SetupAgent builds an agent configuration and returns it — including ALL generated secrets — in the JSON response. NOTE(review): this route is mounted without authentication (rate-limited only); confirm returning config.Secrets over it is intended.
+func SetupAgent(c *gin.Context) {
+	var req services.AgentSetupRequest
+	if err := c.ShouldBindJSON(&req); err != nil {
+		c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
+		return
+	}
+
+	// Create config builder
+	configBuilder := services.NewConfigBuilder(req.ServerURL)
+
+	// Build agent configuration
+	config, err := configBuilder.BuildAgentConfig(req)
+	if err != nil {
+		c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
+		return
+	}
+
+	// Create response
+	response := gin.H{
+		"agent_id":           config.AgentID,
+		"registration_token": config.Secrets["registration_token"],
+		"server_public_key":  config.Secrets["server_public_key"],
+		"configuration":      config.PublicConfig,
+		"secrets":            config.Secrets,
+		"template":           config.Template,
+		"setup_time":         config.BuildTime,
+		"secrets_created":    config.SecretsCreated,
+		"secrets_path":       config.SecretsPath,
+	}
+
+	c.JSON(http.StatusOK, response)
+}
+
+// GetTemplates lists the available agent configuration templates (server URL is irrelevant here, so the builder is constructed with an empty one).
+func GetTemplates(c *gin.Context) {
+	configBuilder := services.NewConfigBuilder("")
+	templates := configBuilder.GetTemplates()
+	c.JSON(http.StatusOK, gin.H{"templates": templates})
+}
+
+// ValidateConfiguration checks only that the posted config names a known agent_type template; no field-level validation is performed.
+func ValidateConfiguration(c *gin.Context) {
+	var config map[string]interface{}
+	if err := c.ShouldBindJSON(&config); err != nil {
+		c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
+		return
+	}
+
+	agentType, exists := config["agent_type"].(string)
+	if !exists {
+		c.JSON(http.StatusBadRequest, gin.H{"error": "agent_type is required"})
+		return
+	}
+
+	configBuilder := services.NewConfigBuilder("")
+	template, exists := configBuilder.GetTemplate(agentType)
+	if !exists {
+		c.JSON(http.StatusBadRequest, gin.H{"error": "Unknown agent type"})
+		return
+	}
+
+	// Template found — report the configuration as valid
+	c.JSON(http.StatusOK, gin.H{
+		"valid":      true,
+		"message":    "Configuration appears valid",
+		"agent_type": agentType,
+		"template":   template.Name,
+	})
+}
\ No newline at end of file
diff --git a/aggregator-server/internal/api/handlers/build_orchestrator.go b/aggregator-server/internal/api/handlers/build_orchestrator.go
new file mode 100644
index 0000000..d48318d
--- /dev/null
+++ b/aggregator-server/internal/api/handlers/build_orchestrator.go
@@ -0,0 +1,229 @@
+package handlers
+
+import (
+ "fmt"
+ "net/http"
+
+ "github.com/Fimeg/RedFlag/aggregator-server/internal/services"
+ "github.com/gin-gonic/gin"
+)
+
+// NewAgentBuild creates configuration and build artifacts for a brand-new agent install (consumes a seat); a registration token is mandatory. An explicit AgentID in the request overrides the generated one.
+func NewAgentBuild(c *gin.Context) {
+	var req services.NewBuildRequest
+	if err := c.ShouldBindJSON(&req); err != nil {
+		c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
+		return
+	}
+
+	// Validate registration token
+	if req.RegistrationToken == "" {
+		c.JSON(http.StatusBadRequest, gin.H{"error": "registration token is required for new installations"})
+		return
+	}
+
+	// Convert to setup request format
+	setupReq := services.AgentSetupRequest{
+		ServerURL:      req.ServerURL,
+		Environment:    req.Environment,
+		AgentType:      req.AgentType,
+		Organization:   req.Organization,
+		CustomSettings: req.CustomSettings,
+		DeploymentID:   req.DeploymentID,
+	}
+
+	// Create config builder
+	configBuilder := services.NewConfigBuilder(req.ServerURL)
+
+	// Build agent configuration
+	config, err := configBuilder.BuildAgentConfig(setupReq)
+	if err != nil {
+		c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
+		return
+	}
+
+	// Override generated agent ID if provided (for upgrades)
+	if req.AgentID != "" {
+		config.AgentID = req.AgentID
+		// Update public config with existing agent ID
+		if config.PublicConfig == nil {
+			config.PublicConfig = make(map[string]interface{})
+		}
+		config.PublicConfig["agent_id"] = req.AgentID
+	}
+
+	// Create agent builder
+	agentBuilder := services.NewAgentBuilder()
+
+	// Generate build artifacts
+	buildResult, err := agentBuilder.BuildAgentWithConfig(config)
+	if err != nil {
+		c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
+		return
+	}
+
+	// Construct download URL
+	binaryURL := fmt.Sprintf("%s/api/v1/downloads/%s", req.ServerURL, config.Platform)
+
+	// Create response with native binary instructions
+	response := gin.H{
+		"agent_id":       config.AgentID,
+		"binary_url":     binaryURL,
+		"platform":       config.Platform,
+		"config_version": config.ConfigVersion,
+		"agent_version":  config.AgentVersion,
+		"build_time":     buildResult.BuildTime,
+		"install_type":   "new",
+		"consumes_seat":  true,
+		"next_steps": []string{
+			"1. Download native binary: curl -sL " + binaryURL + " -o /usr/local/bin/redflag-agent",
+			"2. Set permissions: chmod 755 /usr/local/bin/redflag-agent",
+			"3. Create config directory: mkdir -p /etc/redflag",
+			"4. Save configuration (provided in this response) to /etc/redflag/config.json",
+			"5. Set config permissions: chmod 600 /etc/redflag/config.json",
+			"6. Start service: systemctl enable --now redflag-agent",
+		},
+		"configuration": config.PublicConfig,
+	}
+
+	c.JSON(http.StatusOK, response)
+}
+
+// UpgradeAgentBuild rebuilds configuration/artifacts for an existing agent, overriding the freshly built config with the caller's agent ID so no extra seat is consumed.
+func UpgradeAgentBuild(c *gin.Context) {
+	agentID := c.Param("agentID")
+	if agentID == "" {
+		c.JSON(http.StatusBadRequest, gin.H{"error": "agent ID is required"})
+		return
+	}
+
+	var req services.UpgradeBuildRequest
+	if err := c.ShouldBindJSON(&req); err != nil {
+		c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
+		return
+	}
+
+	// Validate required fields
+	if req.ServerURL == "" {
+		c.JSON(http.StatusBadRequest, gin.H{"error": "server URL is required"})
+		return
+	}
+
+	// Convert to setup request format
+	setupReq := services.AgentSetupRequest{
+		ServerURL:      req.ServerURL,
+		Environment:    req.Environment,
+		AgentType:      req.AgentType,
+		Organization:   req.Organization,
+		CustomSettings: req.CustomSettings,
+		DeploymentID:   req.DeploymentID,
+	}
+
+	// Create config builder
+	configBuilder := services.NewConfigBuilder(req.ServerURL)
+
+	// Build agent configuration
+	config, err := configBuilder.BuildAgentConfig(setupReq)
+	if err != nil {
+		c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
+		return
+	}
+
+	// Override with existing agent ID (this is the key for upgrades)
+	config.AgentID = agentID
+	if config.PublicConfig == nil {
+		config.PublicConfig = make(map[string]interface{})
+	}
+	config.PublicConfig["agent_id"] = agentID
+
+	// For upgrades, we might want to preserve certain existing settings
+	if req.PreserveExisting {
+		// TODO: Load existing agent config and merge/override as needed
+		// This would involve reading the existing agent's configuration
+		// and selectively preserving certain fields
+	}
+
+	// Create agent builder
+	agentBuilder := services.NewAgentBuilder()
+
+	// Generate build artifacts
+	buildResult, err := agentBuilder.BuildAgentWithConfig(config)
+	if err != nil {
+		c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
+		return
+	}
+
+	// Construct download URL
+	binaryURL := fmt.Sprintf("%s/api/v1/downloads/%s?version=%s", req.ServerURL, config.Platform, config.AgentVersion)
+
+	// Create response with native binary upgrade instructions
+	response := gin.H{
+		"agent_id":           config.AgentID,
+		"binary_url":         binaryURL,
+		"platform":           config.Platform,
+		"config_version":     config.ConfigVersion,
+		"agent_version":      config.AgentVersion,
+		"build_time":         buildResult.BuildTime,
+		"install_type":       "upgrade",
+		"consumes_seat":      false,
+		"preserves_agent_id": true,
+		"next_steps": []string{
+			"1. Stop agent service: systemctl stop redflag-agent",
+			"2. Download updated binary: curl -sL " + binaryURL + " -o /usr/local/bin/redflag-agent",
+			"3. Set permissions: chmod 755 /usr/local/bin/redflag-agent",
+			"4. Update config (provided in this response) to /etc/redflag/config.json if needed",
+			"5. Start service: systemctl start redflag-agent",
+			"6. Verify: systemctl status redflag-agent",
+		},
+		"configuration": config.PublicConfig,
+		"upgrade_notes": []string{
+			"This upgrade preserves the existing agent ID: " + agentID,
+			"No additional seat will be consumed",
+			"Config version: " + config.ConfigVersion,
+			"Agent binary version: " + config.AgentVersion,
+			"Agent will receive latest security enhancements and bug fixes",
+		},
+	}
+
+	c.JSON(http.StatusOK, response)
+}
+
+// DetectAgentInstallation reports whether an agent already exists for the given ID and recommends "upgrade" or "new_installation" so the installer can pick the right flow.
+func DetectAgentInstallation(c *gin.Context) {
+	// This endpoint helps the installer determine what type of installation to perform
+	var req struct {
+		AgentID string `json:"agent_id"`
+	}
+	if err := c.ShouldBindJSON(&req); err != nil {
+		c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
+		return
+	}
+
+	// Create detector service
+	detector := services.NewInstallationDetector()
+
+	// Detect existing installation
+	detection, err := detector.DetectExistingInstallation(req.AgentID)
+	if err != nil {
+		c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
+		return
+	}
+
+	response := gin.H{
+		"detection_result": detection,
+		"recommended_action": func() string {
+			if detection.HasExistingAgent {
+				return "upgrade"
+			}
+			return "new_installation"
+		}(),
+		"installation_type": func() string {
+			if detection.HasExistingAgent {
+				return "upgrade"
+			}
+			return "new"
+		}(),
+	}
+
+	c.JSON(http.StatusOK, response)
+}
\ No newline at end of file
diff --git a/aggregator-server/internal/api/handlers/downloads.go b/aggregator-server/internal/api/handlers/downloads.go
index 03f638e..94db1d4 100644
--- a/aggregator-server/internal/api/handlers/downloads.go
+++ b/aggregator-server/internal/api/handlers/downloads.go
@@ -31,28 +31,24 @@ func (h *DownloadHandler) getServerURL(c *gin.Context) string {
return h.config.Server.PublicURL
}
- // Priority 2: Detect from request with TLS/proxy awareness
+ // Priority 2: Construct API server URL from configuration
scheme := "http"
+ host := h.config.Server.Host
+ port := h.config.Server.Port
- // Check if TLS is enabled in config
+ // Use HTTPS if TLS is enabled in config
if h.config.Server.TLS.Enabled {
scheme = "https"
}
- // Check if request came through HTTPS (direct or via proxy)
- if c.Request.TLS != nil {
- scheme = "https"
+ // For default host (0.0.0.0), use localhost for client connections
+ if host == "0.0.0.0" {
+ host = "localhost"
}
- // Check X-Forwarded-Proto for reverse proxy setups
- if forwardedProto := c.GetHeader("X-Forwarded-Proto"); forwardedProto == "https" {
- scheme = "https"
- }
-
- // Use the Host header exactly as received (includes port if present)
- host := c.GetHeader("X-Forwarded-Host")
- if host == "" {
- host = c.Request.Host
+ // Only include port if it's not the default for the protocol
+ if (scheme == "http" && port != 80) || (scheme == "https" && port != 443) {
+ return fmt.Sprintf("%s://%s:%d", scheme, host, port)
}
return fmt.Sprintf("%s://%s", scheme, host)
@@ -61,8 +57,9 @@ func (h *DownloadHandler) getServerURL(c *gin.Context) string {
// DownloadAgent serves agent binaries for different platforms
func (h *DownloadHandler) DownloadAgent(c *gin.Context) {
platform := c.Param("platform")
+ version := c.Query("version") // Optional version parameter for signed binaries
- // Validate platform to prevent directory traversal (removed darwin - no macOS support)
+ // Validate platform to prevent directory traversal
validPlatforms := map[string]bool{
"linux-amd64": true,
"linux-arm64": true,
@@ -81,12 +78,29 @@ func (h *DownloadHandler) DownloadAgent(c *gin.Context) {
filename += ".exe"
}
- // Serve from platform-specific directory: binaries/{platform}/redflag-agent
- agentPath := filepath.Join(h.agentDir, "binaries", platform, filename)
+ var agentPath string
+
+ // Try to serve signed package first if version is specified
+ // TODO: Implement database lookup for signed packages
+ // if version != "" {
+ // signedPackage, err := h.packageQueries.GetSignedPackage(version, platform)
+ // if err == nil && fileExists(signedPackage.BinaryPath) {
+ // agentPath = signedPackage.BinaryPath
+ // }
+ // }
+
+ // Fallback to unsigned generic binary
+ if agentPath == "" {
+ agentPath = filepath.Join(h.agentDir, "binaries", platform, filename)
+ }
// Check if file exists
if _, err := os.Stat(agentPath); os.IsNotExist(err) {
- c.JSON(http.StatusNotFound, gin.H{"error": "Agent binary not found"})
+ c.JSON(http.StatusNotFound, gin.H{
+ "error": "Agent binary not found",
+ "platform": platform,
+ "version": version,
+ })
return
}
@@ -112,9 +126,9 @@ func (h *DownloadHandler) DownloadUpdatePackage(c *gin.Context) {
// TODO: Implement actual package serving from database/filesystem
// For now, return a placeholder response
c.JSON(http.StatusNotImplemented, gin.H{
- "error": "Update package download not yet implemented",
- "package_id": packageID,
- "message": "This will serve the signed update package file",
+ "error": "Update package download not yet implemented",
+ "package_id": packageID,
+ "message": "This will serve the signed update package file",
})
}
@@ -122,7 +136,7 @@ func (h *DownloadHandler) DownloadUpdatePackage(c *gin.Context) {
func (h *DownloadHandler) InstallScript(c *gin.Context) {
platform := c.Param("platform")
- // Validate platform (removed darwin - no macOS support)
+ // Validate platform
validPlatforms := map[string]bool{
"linux": true,
"windows": true,
@@ -142,29 +156,59 @@ func (h *DownloadHandler) InstallScript(c *gin.Context) {
func (h *DownloadHandler) generateInstallScript(platform, baseURL string) string {
switch platform {
case "linux":
- return `#!/bin/bash
+ return h.generateLinuxScript(baseURL)
+ case "windows":
+ return h.generateWindowsScript(baseURL)
+ default:
+ return "# Unsupported platform: " + platform
+ }
+}
+
+func (h *DownloadHandler) generateLinuxScript(baseURL string) string {
+ return fmt.Sprintf(`#!/bin/bash
set -e
-# RedFlag Agent Installation Script
-# This script installs the RedFlag agent as a systemd service with proper security hardening
+# RedFlag Agent Smart Installer
+# Uses the sophisticated build orchestrator and migration system
-REDFLAG_SERVER="` + baseURL + `"
+REDFLAG_SERVER="%s"
AGENT_USER="redflag-agent"
AGENT_HOME="/var/lib/redflag-agent"
AGENT_BINARY="/usr/local/bin/redflag-agent"
SUDOERS_FILE="/etc/sudoers.d/redflag-agent"
SERVICE_FILE="/etc/systemd/system/redflag-agent.service"
-CONFIG_DIR="/etc/aggregator"
+CONFIG_DIR="/etc/redflag"
+STATE_DIR="/var/lib/redflag"
+OLD_CONFIG_DIR="/etc/aggregator"
+OLD_STATE_DIR="/var/lib/aggregator"
-echo "=== RedFlag Agent Installation ==="
+# Colors for output
+RED='\033[0;31m'
+GREEN='\033[0;32m'
+YELLOW='\033[1;33m'
+BLUE='\033[0;34m'
+NC='\033[0m' # No Color
+
+echo -e "${BLUE}=== RedFlag Agent Smart Installer ===${NC}"
echo ""
# Check if running as root
if [ "$EUID" -ne 0 ]; then
- echo "ERROR: This script must be run as root (use sudo)"
+ echo -e "${RED}ERROR: This script must be run as root (use sudo)${NC}"
exit 1
fi
+# Get registration token from first argument
+REGISTRATION_TOKEN="$1"
+if [ -z "$REGISTRATION_TOKEN" ]; then
+ echo -e "${RED}ERROR: Registration token is required${NC}"
+ echo -e "${YELLOW}Usage: curl -sL ${REDFLAG_SERVER}/api/v1/install/linux | sudo bash -s -- YOUR_REGISTRATION_TOKEN${NC}"
+ exit 1
+fi
+
+echo -e "${BLUE}Registration token: ${GREEN}${REGISTRATION_TOKEN:0:8}...${NC}"
+echo ""
+
# Detect architecture
ARCH=$(uname -m)
case "$ARCH" in
@@ -175,101 +219,480 @@ case "$ARCH" in
DOWNLOAD_ARCH="arm64"
;;
*)
- echo "ERROR: Unsupported architecture: $ARCH"
- echo "Supported: x86_64 (amd64), aarch64 (arm64)"
+ echo -e "${RED}ERROR: Unsupported architecture: $ARCH${NC}"
+ echo -e "${YELLOW}Supported: x86_64 (amd64), aarch64 (arm64)${NC}"
exit 1
;;
esac
-echo "Detected architecture: $ARCH (using linux-$DOWNLOAD_ARCH)"
+echo -e "${BLUE}Detected architecture: $ARCH (using linux-$DOWNLOAD_ARCH)${NC}"
echo ""
-# Step 1: Create system user
-echo "Step 1: Creating system user..."
-if id "$AGENT_USER" &>/dev/null; then
- echo "โ User $AGENT_USER already exists"
-else
- useradd -r -s /bin/false -d "$AGENT_HOME" -m "$AGENT_USER"
- echo "โ User $AGENT_USER created"
-fi
+# Function to detect existing installation using our sophisticated system
+detect_existing_agent() {
+ echo -e "${YELLOW}Detecting existing RedFlag agent installation...${NC}"
-# Create home directory if it doesn't exist
-if [ ! -d "$AGENT_HOME" ]; then
- mkdir -p "$AGENT_HOME"
- chown "$AGENT_USER:$AGENT_USER" "$AGENT_HOME"
- echo "โ Home directory created"
-fi
+ # DEBUGGING: Start comprehensive debugging trace
+ echo "=== DEBUGGING: detect_existing_agent() ==="
+ echo "DEBUG: Starting detection process..."
-# Stop existing service if running (to allow binary update)
-if systemctl is-active --quiet redflag-agent 2>/dev/null; then
- echo ""
- echo "Existing service detected - stopping to allow update..."
- systemctl stop redflag-agent
- sleep 2
- echo "โ Service stopped"
-fi
+ # Check for config files in both new and old locations
+ echo "DEBUG: Checking for config files in all locations..."
-# Step 2: Download agent binary
-echo ""
-echo "Step 2: Downloading agent binary..."
-echo "Downloading from ${REDFLAG_SERVER}/api/v1/downloads/linux-${DOWNLOAD_ARCH}..."
+ # Check new location first
+ echo "DEBUG: Checking new config file: /etc/redflag/config.json"
+ if [ -f "/etc/redflag/config.json" ]; then
+ echo "DEBUG: New config file exists!"
+ CONFIG_FILE="/etc/redflag/config.json"
+ CONFIG_LOCATION="new"
+ else
+ echo "DEBUG: New config file does not exist, checking legacy location..."
-# Download to temporary file first (to avoid root permission issues)
-TEMP_FILE="/tmp/redflag-agent-${DOWNLOAD_ARCH}"
-echo "Downloading to temporary file: $TEMP_FILE"
-
-# Try curl first (most reliable)
-if curl -sL "${REDFLAG_SERVER}/api/v1/downloads/linux-${DOWNLOAD_ARCH}" -o "$TEMP_FILE"; then
- echo "โ Download successful, moving to final location"
- mv "$TEMP_FILE" "${AGENT_BINARY}"
- chmod 755 "${AGENT_BINARY}"
- chown root:root "${AGENT_BINARY}"
- echo "โ Agent binary downloaded and installed"
-else
- echo "โ Download with curl failed"
- # Fallback to wget if available
- if command -v wget >/dev/null 2>&1; then
- echo "Trying wget fallback..."
- if wget -q "${REDFLAG_SERVER}/api/v1/downloads/linux-${DOWNLOAD_ARCH}" -O "$TEMP_FILE"; then
- echo "โ Download successful with wget, moving to final location"
- mv "$TEMP_FILE" "${AGENT_BINARY}"
- chmod 755 "${AGENT_BINARY}"
- chown root:root "${AGENT_BINARY}"
- echo "โ Agent binary downloaded and installed (using wget fallback)"
+ # Check old location
+ if [ -f "/etc/aggregator/config.json" ]; then
+ echo "DEBUG: Found legacy config file: /etc/aggregator/config.json"
+ CONFIG_FILE="/etc/aggregator/config.json"
+ CONFIG_LOCATION="old"
else
- echo "ERROR: Failed to download agent binary"
- echo "Both curl and wget failed"
- echo "Please ensure ${REDFLAG_SERVER} is accessible"
- # Clean up temp file if it exists
- rm -f "$TEMP_FILE"
+ echo "DEBUG: No config file found in either location"
+ CONFIG_FILE=""
+ CONFIG_LOCATION="none"
+ fi
+ fi
+
+ # If we found a config file, try to extract agent_id (using single reliable method)
+ if [ -n "$CONFIG_FILE" ]; then
+ echo "DEBUG: Processing config file: $CONFIG_FILE (location: $CONFIG_LOCATION)"
+
+ # Check file permissions
+ echo "DEBUG: File permissions:"
+ ls -la "$CONFIG_FILE"
+
+ # Check file ownership
+ echo "DEBUG: File ownership:"
+ stat -c "%U:%G" "$CONFIG_FILE"
+
+ # Try reading file content
+ echo "DEBUG: Attempting to read file content..."
+ echo "DEBUG: Method 1 - Direct cat:"
+ if sudo cat "$CONFIG_FILE" 2>/dev/null; then
+ echo "DEBUG: Direct cat successful"
+ else
+ echo "DEBUG: Direct cat failed"
+ fi
+
+ # Extract agent_id using single reliable method
+ echo "DEBUG: Extracting agent_id with grep:"
+ agent_id=$(grep -o '"agent_id": *"[^"]*"' "$CONFIG_FILE" 2>/dev/null | cut -d'"' -f4)
+ echo "DEBUG: Extracted agent_id: '$agent_id'"
+
+ # Check if agent_id looks valid (UUID format)
+ if [ -n "$agent_id" ]; then
+ if echo "$agent_id" | grep -qE '^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$'; then
+ echo "DEBUG: Agent ID appears to be valid UUID format"
+ else
+ echo "DEBUG: Agent ID does not appear to be valid UUID format"
+ fi
+ else
+ echo "DEBUG: Agent ID is empty or null"
+ fi
+
+ # Note if migration is needed
+ if [ "$CONFIG_LOCATION" = "old" ]; then
+ echo "DEBUG: *** MIGRATION REQUIRED - Config found in legacy location ***"
+ fi
+ else
+ echo "DEBUG: No config files found, checking directories..."
+
+ # Check if directories exist for debugging
+ for dir_path in "/etc/redflag" "/etc/aggregator" "/var/lib/redflag" "/var/lib/aggregator"; do
+ if [ -d "$dir_path" ]; then
+ echo "DEBUG: Found directory: $dir_path"
+ echo "DEBUG: Directory contents:"
+ ls -la "$dir_path/" 2>/dev/null || echo "DEBUG: Cannot list contents (permissions?)"
+ else
+ echo "DEBUG: Directory does not exist: $dir_path"
+ fi
+ done
+ fi
+
+ # Check if systemd service exists
+ echo "DEBUG: Checking systemd service..."
+ if systemctl list-unit-files | grep -q "redflag-agent.service"; then
+ echo "DEBUG: Systemd service file exists"
+
+ # Check service status
+ echo "DEBUG: Service status:"
+ systemctl status redflag-agent --no-pager -l || echo "DEBUG: Could not get service status"
+
+ # Check if service is enabled
+ if systemctl is-enabled --quiet redflag-agent 2>/dev/null; then
+ echo "DEBUG: Service is enabled"
+ else
+ echo "DEBUG: Service is not enabled"
+ fi
+
+ # Check if service is active
+ if systemctl is-active --quiet redflag-agent 2>/dev/null; then
+ echo "DEBUG: Service is active"
+ else
+ echo "DEBUG: Service is not active"
+ fi
+ else
+ echo "DEBUG: Systemd service file does not exist"
+ fi
+
+ # Check if binary exists
+ echo "DEBUG: Checking for agent binary..."
+ for binary_path in "/usr/local/bin/redflag-agent" "/usr/bin/redflag-agent" "/opt/redflag-agent/bin/redflag-agent"; do
+ if [ -f "$binary_path" ]; then
+ echo "DEBUG: Found agent binary at: $binary_path"
+ echo "DEBUG: Binary permissions:"
+ ls -la "$binary_path"
+ break
+ fi
+ done
+
+ # Test server connectivity
+ echo "DEBUG: Testing server connectivity..."
+ echo "DEBUG: Server URL: ${REDFLAG_SERVER}"
+
+ # Test basic connectivity
+ echo "DEBUG: Testing basic HTTP connectivity..."
+ if curl -s --connect-timeout 5 "${REDFLAG_SERVER}/api/v1/health" >/dev/null 2>&1; then
+ echo "DEBUG: Server connectivity test successful"
+ else
+ echo "DEBUG: Server connectivity test failed"
+ echo "DEBUG: curl exit code: $?"
+ fi
+
+ # Call detection API with debugging
+ echo "DEBUG: Calling detection API..."
+ echo "DEBUG: URL: ${REDFLAG_SERVER}/api/v1/build/detect"
+ echo "DEBUG: Payload: {\"agent_id\": \"${agent_id}\"}"
+
+    DETECTION_RESPONSE=$(curl -s -X POST "${REDFLAG_SERVER}/api/v1/build/detect" \
+        -H "Content-Type: application/json" \
+        -d '{"agent_id": "'"$agent_id"'"}' 2>/dev/null) && CURL_EXIT=0 || CURL_EXIT=$?
+
+    echo "DEBUG: curl exit code: $CURL_EXIT"
+    echo "DEBUG: Detection response: '$DETECTION_RESPONSE'"
+
+    if [ "$CURL_EXIT" -eq 0 ] && [ -n "$DETECTION_RESPONSE" ]; then
+ echo "DEBUG: Successfully received detection response"
+
+ # Parse JSON response with debugging
+ echo "DEBUG: Parsing detection response..."
+
+ HAS_AGENT=$(echo "$DETECTION_RESPONSE" | grep -o '"has_existing_agent":[^,]*' | cut -d':' -f2 | tr -d ' ')
+ echo "DEBUG: Extracted has_existing_agent: '$HAS_AGENT'"
+
+ AGENT_ID=$(echo "$DETECTION_RESPONSE" | grep -o '"agent_id":"[^"]*"' | cut -d'"' -f4)
+ echo "DEBUG: Extracted agent_id from response: '$AGENT_ID'"
+
+ REQUIRES_MIGRATION=$(echo "$DETECTION_RESPONSE" | grep -o '"requires_migration":[^,]*' | cut -d':' -f2 | tr -d ' ')
+ echo "DEBUG: Extracted requires_migration: '$REQUIRES_MIGRATION'"
+
+ CURRENT_VERSION=$(echo "$DETECTION_RESPONSE" | grep -o '"current_version":"[^"]*"' | cut -d'"' -f4)
+ echo "DEBUG: Extracted current_version: '$CURRENT_VERSION'"
+
+ # Check conditions for successful detection
+ if [ "$HAS_AGENT" = "true" ] && [ -n "$AGENT_ID" ]; then
+ echo "DEBUG: Detection SUCCESS - existing agent found"
+            echo -e "${GREEN}✓ Existing agent detected: ${AGENT_ID}${NC}"
+ echo -e "${BLUE} Current version: ${CURRENT_VERSION}${NC}"
+ if [ "$REQUIRES_MIGRATION" = "true" ]; then
+                echo -e "${YELLOW}⚠ Migration will be performed during upgrade${NC}"
+ fi
+ echo "=== END DEBUGGING: detect_existing_agent() ==="
+ return 0 # Upgrade path
+ else
+ echo "DEBUG: Detection indicates no existing agent"
+ echo "DEBUG: has_existing_agent: '$HAS_AGENT'"
+ echo "DEBUG: agent_id from response: '$AGENT_ID'"
+ fi
+ else
+ echo "DEBUG: Detection API call failed or returned empty response"
+ echo "DEBUG: curl exit code: $?"
+ echo "DEBUG: response length: ${#DETECTION_RESPONSE}"
+ fi
+
+ echo "DEBUG: Returning new installation path"
+    echo -e "${GREEN}✓ No existing agent detected - performing new installation${NC}"
+ echo "=== END DEBUGGING: detect_existing_agent() ==="
+ return 1 # New installation path
+}
+
+# Function to perform migration from old paths
+perform_migration() {
+ echo ""
+ echo -e "${BLUE}=== Migration Required ===${NC}"
+
+ # Create backup directories with timestamp
+ BACKUP_TIMESTAMP=$(date +%%Y%%m%%d_%%H%%M%%S)
+ OLD_CONFIG_BACKUP="${OLD_CONFIG_DIR}.backup.${BACKUP_TIMESTAMP}"
+ OLD_STATE_BACKUP="${OLD_STATE_DIR}.backup.${BACKUP_TIMESTAMP}"
+
+ # Backup old directories if they exist
+ if [ -d "$OLD_CONFIG_DIR" ]; then
+ echo -e "${YELLOW}Backing up old configuration: ${OLD_CONFIG_DIR} -> ${OLD_CONFIG_BACKUP}${NC}"
+ mv "$OLD_CONFIG_DIR" "$OLD_CONFIG_BACKUP"
+ fi
+
+ if [ -d "$OLD_STATE_DIR" ]; then
+ echo -e "${YELLOW}Backing up old state: ${OLD_STATE_DIR} -> ${OLD_STATE_BACKUP}${NC}"
+ mv "$OLD_STATE_DIR" "$OLD_STATE_BACKUP"
+ fi
+
+ # Migrate configuration data if backup exists
+ if [ -d "$OLD_CONFIG_BACKUP" ]; then
+ echo -e "${YELLOW}Migrating configuration data to new location...${NC}"
+ mkdir -p "$CONFIG_DIR"
+
+ # Copy config files, preserving permissions when possible
+ cp -r "$OLD_CONFIG_BACKUP"/* "$CONFIG_DIR/" 2>/dev/null || true
+
+ # Set proper ownership for new location
+ chown -R "$AGENT_USER:$AGENT_USER" "$CONFIG_DIR" 2>/dev/null || true
+ chmod 755 "$CONFIG_DIR" 2>/dev/null || true
+
+ # Ensure config file has correct permissions
+ if [ -f "$CONFIG_DIR/config.json" ]; then
+ chmod 600 "$CONFIG_DIR/config.json" 2>/dev/null || true
+ chown "$AGENT_USER:$AGENT_USER" "$CONFIG_DIR/config.json" 2>/dev/null || true
+ fi
+ fi
+
+ # Migrate state data if backup exists
+ if [ -d "$OLD_STATE_BACKUP" ]; then
+ echo -e "${YELLOW}Migrating state data to new location...${NC}"
+ mkdir -p "$STATE_DIR"
+ cp -r "$OLD_STATE_BACKUP"/* "$STATE_DIR/" 2>/dev/null || true
+ chown -R "$AGENT_USER:$AGENT_USER" "$STATE_DIR" 2>/dev/null || true
+ fi
+
+ # Migrate secrets to Docker secrets if available
+ migrate_secrets_to_docker
+
+    echo -e "${GREEN}✓ Migration completed${NC}"
+}
+
+# Function to migrate secrets from filesystem to Docker secrets
+migrate_secrets_to_docker() {
+ echo -e "${YELLOW}Checking for secrets migration...${NC}"
+
+ # Look for potential secret files in old locations
+ local secrets_found=false
+
+ # Check for common secret file patterns
+ for secret_pattern in "agent.key" "private_key" "secrets.json" ".env" "credentials.json"; do
+ if [ -f "$OLD_CONFIG_BACKUP/$secret_pattern" ] || [ -f "$OLD_STATE_BACKUP/$secret_pattern" ]; then
+ echo -e "${YELLOW}Found potential secret file: $secret_pattern${NC}"
+ secrets_found=true
+ fi
+ done
+
+ # Check for agent private keys or certificates
+ for key_path in "$OLD_CONFIG_BACKUP" "$OLD_STATE_BACKUP"; do
+ if [ -d "$key_path" ]; then
+ # Look for key files
+            while IFS= read -r key_file; do
+                echo -e "${YELLOW}Found key file: $(basename "$key_file")${NC}"
+                secrets_found=true
+            done < <(find "$key_path" -type f \( -name "*.key" -o -name "*.pem" -o -name "*.crt" -o -name "id_*" \) 2>/dev/null)
+ fi
+ done
+
+ if [ "$secrets_found" = true ]; then
+ echo -e "${BLUE}Secrets migration available${NC}"
+ echo -e "${YELLOW}Note: Secrets will be migrated to Docker secrets when the agent starts${NC}"
+ echo -e "${YELLOW}The agent will automatically detect and migrate filesystem secrets to Docker storage${NC}"
+
+ # Create a migration marker for the agent to find
+ mkdir -p "$CONFIG_DIR"
+ echo '{"secrets_migration_required": true, "migration_timestamp": "'$(date -Iseconds)'"}' > "$CONFIG_DIR/secrets_migration.json"
+ chown "$AGENT_USER:$AGENT_USER" "$CONFIG_DIR/secrets_migration.json" 2>/dev/null || true
+ chmod 600 "$CONFIG_DIR/secrets_migration.json" 2>/dev/null || true
+ else
+ echo -e "${GREEN}No secrets requiring migration found${NC}"
+ fi
+}
+
+# Function to perform new installation using build orchestrator
+perform_new_installation() {
+ echo ""
+ echo -e "${BLUE}=== New Agent Installation ===${NC}"
+
+ # Call build/new endpoint to get proper configuration and upgrade logic
+ echo -e "${YELLOW}Requesting agent build configuration...${NC}"
+ BUILD_RESPONSE=$(curl -s -X POST "${REDFLAG_SERVER}/api/v1/build/new" \
+ -H "Content-Type: application/json" \
+ -d '{
+ "server_url": "'"${REDFLAG_SERVER}"'",
+ "environment": "production",
+ "agent_type": "linux-server",
+ "organization": "default",
+ "registration_token": "'"${REGISTRATION_TOKEN}"'"
+    }' 2>/dev/null) || true
+
+    if [ -z "$BUILD_RESPONSE" ]; then
+        echo -e "${RED}✗ Failed to request agent build configuration${NC}"
+ exit 1
+ fi
+
+ # Extract agent ID from build response
+ AGENT_ID=$(echo "$BUILD_RESPONSE" | grep -o '"agent_id":"[^"]*"' | cut -d'"' -f4)
+
+ if [ -z "$AGENT_ID" ]; then
+        echo -e "${RED}✗ Invalid response from server${NC}"
+ exit 1
+ fi
+
+    echo -e "${GREEN}✓ Agent configuration created: ${AGENT_ID}${NC}"
+
+ # Download native agent binary
+ echo -e "${YELLOW}Downloading native signed agent binary...${NC}"
+ if curl -sL "${REDFLAG_SERVER}/api/v1/downloads/linux-${DOWNLOAD_ARCH}" -o "$AGENT_BINARY"; then
+ chmod 755 "$AGENT_BINARY"
+ chown root:root "$AGENT_BINARY"
+        echo -e "${GREEN}✓ Native signed agent binary installed${NC}"
+ else
+        echo -e "${RED}✗ Failed to download agent binary${NC}"
+ exit 1
+ fi
+
+ deploy_agent "$AGENT_ID" "$BUILD_RESPONSE" "new"
+}
+
+# Function to perform upgrade using build orchestrator
+perform_upgrade() {
+ echo ""
+ echo -e "${BLUE}=== Agent Upgrade ===${NC}"
+
+ # Extract agent ID from detection
+ AGENT_ID=$(echo "$DETECTION_RESPONSE" | grep -o '"agent_id":"[^"]*"' | cut -d'"' -f4)
+
+ if [ -z "$AGENT_ID" ]; then
+        echo -e "${RED}✗ Could not extract agent ID for upgrade${NC}"
+ exit 1
+ fi
+
+ echo -e "${YELLOW}Requesting upgrade configuration for agent: ${AGENT_ID}${NC}"
+
+ # Call build/upgrade endpoint to get upgrade configuration
+ BUILD_RESPONSE=$(curl -s -X POST "${REDFLAG_SERVER}/api/v1/build/upgrade/${AGENT_ID}" \
+ -H "Content-Type: application/json" \
+ -d '{
+ "server_url": "'"${REDFLAG_SERVER}"'",
+ "environment": "production",
+ "agent_type": "linux-server",
+ "preserve_existing": true
+    }' 2>/dev/null) || true
+
+    if [ -z "$BUILD_RESPONSE" ]; then
+        echo -e "${RED}✗ Failed to request agent upgrade configuration${NC}"
+ exit 1
+ fi
+
+    echo -e "${GREEN}✓ Upgrade configuration prepared for agent: ${AGENT_ID}${NC}"
+
+ # STOP SERVICE BEFORE DOWNLOADING BINARY
+ echo -e "${YELLOW}Stopping agent service to allow binary replacement...${NC}"
+ if systemctl is-active --quiet redflag-agent 2>/dev/null; then
+ systemctl stop redflag-agent
+ # Wait for service to fully stop
+ local retry_count=0
+ while [ $retry_count -lt 10 ]; do
+ if ! systemctl is-active --quiet redflag-agent 2>/dev/null; then
+                echo -e "${GREEN}✓ Service stopped successfully${NC}"
+ break
+ fi
+ echo -e "${YELLOW}Waiting for service to stop... (attempt $((retry_count + 1))/10)${NC}"
+ sleep 1
+ retry_count=$((retry_count + 1))
+ done
+
+ if systemctl is-active --quiet redflag-agent 2>/dev/null; then
+            echo -e "${RED}✗ Failed to stop service, forcing...${NC}"
+ systemctl kill redflag-agent
+ sleep 2
+ fi
+ else
+        echo -e "${BLUE}✓ Service is already stopped${NC}"
+ fi
+
+ # Download updated native agent binary to temporary location first
+ echo -e "${YELLOW}Downloading updated native signed agent binary...${NC}"
+ TEMP_BINARY="${AGENT_BINARY}.new"
+
+ # Remove any existing temp binary
+ rm -f "$TEMP_BINARY"
+
+ if curl -sL "${REDFLAG_SERVER}/api/v1/downloads/linux-${DOWNLOAD_ARCH}" -o "$TEMP_BINARY"; then
+ # Verify the download
+ if [ -f "$TEMP_BINARY" ] && [ -s "$TEMP_BINARY" ]; then
+ chmod 755 "$TEMP_BINARY"
+ chown root:root "$TEMP_BINARY"
+
+ # Atomic move to replace binary
+ mv "$TEMP_BINARY" "$AGENT_BINARY"
+
+ # Verify the replacement
+ if [ -f "$AGENT_BINARY" ] && [ -s "$AGENT_BINARY" ]; then
+                echo -e "${GREEN}✓ Native signed agent binary updated successfully${NC}"
+ else
+                echo -e "${RED}✗ Binary replacement verification failed${NC}"
+ exit 1
+ fi
+ else
+            echo -e "${RED}✗ Downloaded binary is empty or missing${NC}"
+ rm -f "$TEMP_BINARY"
exit 1
fi
else
- echo "ERROR: Failed to download agent binary"
- echo "curl failed and wget is not available"
- echo "Please ensure ${REDFLAG_SERVER} is accessible"
- # Clean up temp file if it exists
- rm -f "$TEMP_FILE"
+        echo -e "${RED}✗ Failed to download agent binary${NC}"
+ rm -f "$TEMP_BINARY"
exit 1
fi
-fi
-# Clean up temp file if it still exists
-rm -f "$TEMP_FILE"
+ deploy_agent "$AGENT_ID" "$BUILD_RESPONSE" "upgrade"
+}
-# Set SELinux context for binary if SELinux is enabled
-if command -v getenforce >/dev/null 2>&1 && [ "$(getenforce)" != "Disabled" ]; then
- echo "SELinux detected, setting file context for binary..."
- restorecon -v "${AGENT_BINARY}" 2>/dev/null || true
- echo "โ SELinux context set for binary"
-fi
+# Function to deploy native agent with systemd
+deploy_agent() {
+ local AGENT_ID="$1"
+ local BUILD_RESPONSE="$2"
+ local INSTALL_TYPE="$3"
-# Step 3: Install sudoers configuration
-echo ""
-echo "Step 3: Installing sudoers configuration..."
-cat > "$SUDOERS_FILE" <<'SUDOERS_EOF'
+ echo ""
+ echo -e "${BLUE}=== Agent Deployment ===${NC}"
+
+ # Create agent user if it doesn't exist
+ if ! id "$AGENT_USER" &>/dev/null; then
+ echo -e "${YELLOW}Creating agent user: $AGENT_USER${NC}"
+ useradd -r -s /bin/false -d "$AGENT_HOME" -m "$AGENT_USER"
+ fi
+
+ # Note: Service is already stopped for upgrades, but check for new installations
+ if [ "$INSTALL_TYPE" = "new" ] && systemctl is-active --quiet redflag-agent 2>/dev/null; then
+ echo -e "${YELLOW}Stopping existing agent service...${NC}"
+ systemctl stop redflag-agent
+ sleep 2
+ fi
+
+ # Save build response for potential recovery and debugging
+ echo "$BUILD_RESPONSE" > "${CONFIG_DIR}/build_response.json"
+ chown "$AGENT_USER:$AGENT_USER" "${CONFIG_DIR}/build_response.json"
+ chmod 600 "${CONFIG_DIR}/build_response.json"
+
+ # Create directories
+ mkdir -p "$CONFIG_DIR" "$STATE_DIR"
+
+ # Install sudoers configuration if not exists
+ if [ ! -f "$SUDOERS_FILE" ]; then
+ echo -e "${YELLOW}Installing sudoers configuration...${NC}"
+ cat > "$SUDOERS_FILE" << 'SUDOERS_EOF'
# RedFlag Agent minimal sudo permissions
-# This file grants the redflag-agent user limited sudo access for package management
# Generated automatically during RedFlag agent installation
# APT package management commands (Debian/Ubuntu)
@@ -288,38 +711,37 @@ redflag-agent ALL=(root) NOPASSWD: /usr/bin/dnf install --assumeno --downloadonl
redflag-agent ALL=(root) NOPASSWD: /usr/bin/docker pull *
redflag-agent ALL=(root) NOPASSWD: /usr/bin/docker image inspect *
redflag-agent ALL=(root) NOPASSWD: /usr/bin/docker manifest inspect *
+
+# Directory operations for RedFlag
+redflag-agent ALL=(root) NOPASSWD: /bin/mkdir -p /etc/redflag
+redflag-agent ALL=(root) NOPASSWD: /bin/mkdir -p /var/lib/redflag
+redflag-agent ALL=(root) NOPASSWD: /bin/chown redflag-agent:redflag-agent /etc/redflag
+redflag-agent ALL=(root) NOPASSWD: /bin/chown redflag-agent:redflag-agent /var/lib/redflag
+redflag-agent ALL=(root) NOPASSWD: /bin/chmod 755 /etc/redflag
+redflag-agent ALL=(root) NOPASSWD: /bin/chmod 755 /var/lib/redflag
+
+# Migration operations (for existing installations)
+redflag-agent ALL=(root) NOPASSWD: /bin/mv /etc/aggregator /etc/redflag.backup.*
+redflag-agent ALL=(root) NOPASSWD: /bin/mv /var/lib/aggregator/* /var/lib/redflag/
+redflag-agent ALL=(root) NOPASSWD: /bin/rmdir /var/lib/aggregator
+redflag-agent ALL=(root) NOPASSWD: /bin/rmdir /etc/aggregator
SUDOERS_EOF
-chmod 440 "$SUDOERS_FILE"
+ chmod 440 "$SUDOERS_FILE"
-# Validate sudoers file
-if visudo -c -f "$SUDOERS_FILE" &>/dev/null; then
- echo "โ Sudoers configuration installed and validated"
-else
- echo "ERROR: Sudoers configuration is invalid"
- rm -f "$SUDOERS_FILE"
- exit 1
-fi
+ # Validate sudoers file
+ if visudo -c -f "$SUDOERS_FILE" &>/dev/null; then
+        echo -e "${GREEN}✓ Sudoers configuration installed${NC}"
+ else
+        echo -e "${RED}✗ Invalid sudoers configuration${NC}"
+ rm -f "$SUDOERS_FILE"
+ exit 1
+ fi
+ fi
-# Step 4: Create configuration directory
-echo ""
-echo "Step 4: Creating configuration directory..."
-mkdir -p "$CONFIG_DIR"
-chown "$AGENT_USER:$AGENT_USER" "$CONFIG_DIR"
-chmod 755 "$CONFIG_DIR"
-echo "โ Configuration directory created"
-
-# Set SELinux context for config directory if SELinux is enabled
-if command -v getenforce >/dev/null 2>&1 && [ "$(getenforce)" != "Disabled" ]; then
- echo "Setting SELinux context for config directory..."
- restorecon -Rv "$CONFIG_DIR" 2>/dev/null || true
- echo "โ SELinux context set for config directory"
-fi
-
-# Step 5: Install systemd service
-echo ""
-echo "Step 5: Installing systemd service..."
-cat > "$SERVICE_FILE" < "$SERVICE_FILE" << EOF
[Unit]
Description=RedFlag Update Agent
After=network.target
@@ -335,10 +757,9 @@ Restart=always
RestartSec=30
# Security hardening
-# NoNewPrivileges=true - DISABLED: Prevents sudo from working, which agent needs for package management
ProtectSystem=strict
ProtectHome=true
-ReadWritePaths=$AGENT_HOME /var/log $CONFIG_DIR
+ReadWritePaths=$AGENT_HOME /var/log $CONFIG_DIR $STATE_DIR
PrivateTmp=true
# Logging
@@ -348,167 +769,88 @@ SyslogIdentifier=redflag-agent
[Install]
WantedBy=multi-user.target
-SERVICE_EOF
-
-chmod 644 "$SERVICE_FILE"
-echo "โ Systemd service installed"
-
-# Step 6: Register agent with server
-echo ""
-echo "Step 6: Agent registration"
-echo "=========================================="
-echo ""
-
-# Check if token was provided as parameter (for one-liner support)
-if [ -n "$1" ]; then
- REGISTRATION_TOKEN="$1"
- echo "Using provided registration token"
-else
- # Check if stdin is a terminal (not being piped)
- if [ -t 0 ]; then
- echo "Registration token required to enroll this agent with the server."
- echo ""
- echo "To get a token:"
- echo " 1. Visit: ${REDFLAG_SERVER}/settings/tokens"
- echo " 2. Copy the active token from the list"
- echo ""
- echo "Enter registration token (or press Enter to skip):"
- read -p "> " REGISTRATION_TOKEN
- else
- echo ""
- echo "IMPORTANT: Registration token required!"
- echo ""
- echo "Since you're running this via pipe, you need to:"
- echo ""
- echo "Option 1 - One-liner with token:"
- echo " curl -sfL ${REDFLAG_SERVER}/api/v1/install/linux | sudo bash -s -- YOUR_TOKEN"
- echo ""
- echo "Option 2 - Download and run interactively:"
- echo " curl -sfL ${REDFLAG_SERVER}/api/v1/install/linux -o install.sh"
- echo " chmod +x install.sh"
- echo " sudo ./install.sh"
- echo ""
- echo "Skipping registration for now."
- echo "Please register manually after installation."
- fi
-fi
-
-# Check if agent is already registered
-if [ -f "$CONFIG_DIR/config.json" ]; then
- echo ""
- echo "[INFO] Agent already registered - configuration file exists"
- echo "[INFO] Skipping registration to preserve agent history"
- echo "[INFO] If you need to re-register, delete: $CONFIG_DIR/config.json"
- echo ""
-elif [ -n "$REGISTRATION_TOKEN" ]; then
- echo ""
- echo "Registering agent..."
-
- # Create config file and register
- cat > "$CONFIG_DIR/config.json" <nul 2>&1
-if %errorLevel% neq 0 (
+if %%errorLevel%% neq 0 (
echo ERROR: This script must be run as Administrator
echo Right-click and select "Run as administrator"
pause
@@ -534,39 +876,39 @@ if %errorLevel% neq 0 (
)
REM Detect architecture
-if "%PROCESSOR_ARCHITECTURE%"=="AMD64" (
+if "%%PROCESSOR_ARCHITECTURE%%"=="AMD64" (
set DOWNLOAD_ARCH=amd64
-) else if "%PROCESSOR_ARCHITECTURE%"=="ARM64" (
+) else if "%%PROCESSOR_ARCHITECTURE%%"=="ARM64" (
set DOWNLOAD_ARCH=arm64
) else (
- echo ERROR: Unsupported architecture: %PROCESSOR_ARCHITECTURE%
+ echo ERROR: Unsupported architecture: %%PROCESSOR_ARCHITECTURE%%
echo Supported: AMD64, ARM64
pause
exit /b 1
)
-echo Detected architecture: %PROCESSOR_ARCHITECTURE% (using windows-%DOWNLOAD_ARCH%)
+echo Detected architecture: %%PROCESSOR_ARCHITECTURE%% (using windows-%%DOWNLOAD_ARCH%%)
echo.
REM Create installation directory
echo Creating installation directory...
-if not exist "%AGENT_DIR%" mkdir "%AGENT_DIR%"
+if not exist "%%AGENT_DIR%%" mkdir "%%AGENT_DIR%%"
echo [OK] Installation directory created
REM Create config directory
-if not exist "%CONFIG_DIR%" mkdir "%CONFIG_DIR%"
+if not exist "%%CONFIG_DIR%%" mkdir "%%CONFIG_DIR%%"
echo [OK] Configuration directory created
REM Grant full permissions to SYSTEM and Administrators on config directory
echo Setting permissions on configuration directory...
-icacls "%CONFIG_DIR%" /grant "SYSTEM:(OI)(CI)F"
-icacls "%CONFIG_DIR%" /grant "Administrators:(OI)(CI)F"
+icacls "%%CONFIG_DIR%%" /grant "SYSTEM:(OI)(CI)F"
+icacls "%%CONFIG_DIR%%" /grant "Administrators:(OI)(CI)F"
echo [OK] Permissions set
echo.
REM Stop existing service if running (to allow binary update)
sc query RedFlagAgent >nul 2>&1
-if %errorLevel% equ 0 (
+if %%errorLevel%% equ 0 (
echo Existing service detected - stopping to allow update...
sc stop RedFlagAgent >nul 2>&1
timeout /t 3 /nobreak >nul
@@ -575,11 +917,11 @@ if %errorLevel% equ 0 (
REM Download agent binary
echo Downloading agent binary...
-echo From: %REDFLAG_SERVER%/api/v1/downloads/windows-%DOWNLOAD_ARCH%
-curl -sfL "%REDFLAG_SERVER%/api/v1/downloads/windows-%DOWNLOAD_ARCH%" -o "%AGENT_BINARY%"
-if %errorLevel% neq 0 (
+echo From: %%REDFLAG_SERVER%%/api/v1/downloads/windows-%%DOWNLOAD_ARCH%%
+curl -sfL "%%REDFLAG_SERVER%%/api/v1/downloads/windows-%%DOWNLOAD_ARCH%%" -o "%%AGENT_BINARY%%"
+if %%errorLevel%% neq 0 (
echo ERROR: Failed to download agent binary
- echo Please ensure %REDFLAG_SERVER% is accessible
+ echo Please ensure %%REDFLAG_SERVER%% is accessible
pause
exit /b 1
)
@@ -591,14 +933,14 @@ echo === Agent Registration ===
echo.
REM Check if token was provided as command-line argument
-if not "%1"=="" (
- set TOKEN=%1
+if not "%%1"=="" (
+ set TOKEN=%%1
echo Using provided registration token
) else (
echo IMPORTANT: You need a registration token to enroll this agent.
echo.
echo To get a token:
- echo 1. Visit: %REDFLAG_SERVER%/settings/tokens
+ echo 1. Visit: %%REDFLAG_SERVER%%/settings/tokens
echo 2. Create a new registration token
echo 3. Copy the token
echo.
@@ -606,36 +948,36 @@ if not "%1"=="" (
)
REM Check if agent is already registered
-if exist "%CONFIG_DIR%\config.json" (
+if exist "%%CONFIG_DIR%%\config.json" (
echo.
echo [INFO] Agent already registered - configuration file exists
echo [INFO] Skipping registration to preserve agent history
- echo [INFO] If you need to re-register, delete: %CONFIG_DIR%\config.json
+ echo [INFO] If you need to re-register, delete: %%CONFIG_DIR%%\config.json
echo.
-) else if not "%TOKEN%"=="" (
+) else if not "%%TOKEN%%"=="" (
echo.
echo === Registering Agent ===
echo.
REM Attempt registration
- "%AGENT_BINARY%" --server "%REDFLAG_SERVER%" --token "%TOKEN%" --register
+ "%%AGENT_BINARY%%" --server "%%REDFLAG_SERVER%%" --token "%%TOKEN%%" --register
REM Check exit code
- if %errorLevel% equ 0 (
+ if %%errorLevel%% equ 0 (
echo [OK] Agent registered successfully
- echo [OK] Configuration saved to: %CONFIG_DIR%\config.json
+ echo [OK] Configuration saved to: %%CONFIG_DIR%%\config.json
echo.
) else (
echo.
echo [ERROR] Registration failed
echo.
echo Please check:
- echo 1. Server is accessible: %REDFLAG_SERVER%
+ echo 1. Server is accessible: %%REDFLAG_SERVER%%
echo 2. Registration token is valid and not expired
echo 3. Token has available seats remaining
echo.
echo To try again:
- echo "%AGENT_BINARY%" --server "%REDFLAG_SERVER%" --token "%TOKEN%" --register
+ echo "%%AGENT_BINARY%%" --server "%%REDFLAG_SERVER%%" --token "%%TOKEN%%" --register
echo.
pause
exit /b 1
@@ -645,22 +987,23 @@ if exist "%CONFIG_DIR%\config.json" (
echo [INFO] No registration token provided - skipping registration
echo.
echo To register later:
- echo "%AGENT_BINARY%" --server "%REDFLAG_SERVER%" --token YOUR_TOKEN --register
+ echo "%%AGENT_BINARY%%" --server "%%REDFLAG_SERVER%%" --token YOUR_TOKEN --register
)
REM Check if service already exists
echo.
echo === Configuring Windows Service ===
echo.
+
sc query RedFlagAgent >nul 2>&1
-if %errorLevel% equ 0 (
+if %%errorLevel%% equ 0 (
echo [INFO] RedFlag Agent service already installed
echo [INFO] Service will be restarted with updated binary
echo.
) else (
echo Installing RedFlag Agent service...
- "%AGENT_BINARY%" -install-service
- if %errorLevel% equ 0 (
+ "%%AGENT_BINARY%%" -install-service
+ if %%errorLevel%% equ 0 (
echo [OK] Service installed successfully
echo.
@@ -675,53 +1018,54 @@ if %errorLevel% equ 0 (
)
REM Start the service if agent is registered
-if exist "%CONFIG_DIR%\config.json" (
+if exist "%%CONFIG_DIR%%\config.json" (
echo Starting RedFlag Agent service...
- "%AGENT_BINARY%" -start-service
- if %errorLevel% equ 0 (
+ "%%AGENT_BINARY%%" -start-service
+ if %%errorLevel%% equ 0 (
echo [OK] RedFlag Agent service started
echo.
echo Agent is now running as a Windows service in the background.
echo You can verify it is working by checking the agent status in the web UI.
) else (
echo [WARNING] Failed to start service. You can start it manually:
- echo "%AGENT_BINARY%" -start-service
+ echo "%%AGENT_BINARY%%" -start-service
echo Or use Windows Services: services.msc
)
) else (
echo [WARNING] Service not started (agent not registered)
echo To register and start the service:
- echo 1. Register: "%AGENT_BINARY%" --server "%REDFLAG_SERVER%" --token YOUR_TOKEN --register
- echo 2. Start: "%AGENT_BINARY%" -start-service
+ echo 1. Register: "%%AGENT_BINARY%%" --server "%%REDFLAG_SERVER%%" --token YOUR_TOKEN --register
+ echo 2. Start: "%%AGENT_BINARY%%" -start-service
)
echo.
echo === Installation Complete ===
echo.
echo The RedFlag agent has been installed as a Windows service.
-echo Configuration file: %CONFIG_DIR%\config.json
-echo Agent binary: %AGENT_BINARY%
+echo Configuration file: %%CONFIG_DIR%%\config.json
+echo Agent binary: %%AGENT_BINARY%%
echo.
+
echo Managing the RedFlag Agent service:
-echo Check status: "%AGENT_BINARY%" -service-status
-echo Start manually: "%AGENT_BINARY%" -start-service
-echo Stop service: "%AGENT_BINARY%" -stop-service
-echo Remove service: "%AGENT_BINARY%" -remove-service
+echo Check status: "%%AGENT_BINARY%%" -service-status
+echo Start manually: "%%AGENT_BINARY%%" -start-service
+echo Stop service: "%%AGENT_BINARY%%" -stop-service
+echo Remove service: "%%AGENT_BINARY%%" -remove-service
echo.
+
echo Alternative management with Windows Services:
echo Open services.msc and look for "RedFlag Update Agent"
echo.
+
echo To run the agent directly (for debugging):
-echo "%AGENT_BINARY%"
+echo "%%AGENT_BINARY%%"
echo.
+
echo To verify the agent is working:
echo 1. Check the web UI for the agent status
echo 2. Look for recent check-ins from this machine
echo.
-pause
-`
- default:
- return "# Unsupported platform"
- }
+pause
+`, baseURL)
}
\ No newline at end of file
diff --git a/aggregator-server/internal/api/handlers/security.go b/aggregator-server/internal/api/handlers/security.go
index 9087cc3..fc4cb02 100644
--- a/aggregator-server/internal/api/handlers/security.go
+++ b/aggregator-server/internal/api/handlers/security.go
@@ -180,6 +180,8 @@ func (h *SecurityHandler) MachineBindingStatus(c *gin.Context) {
// Get total agents for comparison
if totalAgents, err := h.agentQueries.GetTotalAgentCount(); err == nil {
+ response["checks"].(map[string]interface{})["total_agents"] = totalAgents
+
// Calculate version compliance (agents meeting minimum version requirement)
if compliantAgents, err := h.agentQueries.GetAgentCountByVersion("0.1.22"); err == nil {
response["checks"].(map[string]interface{})["version_compliance"] = compliantAgents
diff --git a/aggregator-server/internal/api/handlers/setup.go b/aggregator-server/internal/api/handlers/setup.go
index 62d09f6..62f8214 100644
--- a/aggregator-server/internal/api/handlers/setup.go
+++ b/aggregator-server/internal/api/handlers/setup.go
@@ -425,6 +425,13 @@ func (h *SetupHandler) GenerateSigningKeys(c *gin.Context) {
c.Header("Pragma", "no-cache")
c.Header("Expires", "0")
+ // Load configuration to check for existing key
+ cfg, err := config.Load() // This will load from .env file
+ if err == nil && cfg.SigningPrivateKey != "" {
+ c.JSON(http.StatusConflict, gin.H{"error": "A signing key is already configured for this server."})
+ return
+ }
+
// Generate Ed25519 keypair
publicKey, privateKey, err := ed25519.GenerateKey(rand.Reader)
if err != nil {
diff --git a/aggregator-server/internal/api/middleware/machine_binding.go b/aggregator-server/internal/api/middleware/machine_binding.go
index 150bc21..3e35647 100644
--- a/aggregator-server/internal/api/middleware/machine_binding.go
+++ b/aggregator-server/internal/api/middleware/machine_binding.go
@@ -1,8 +1,16 @@
package middleware
import (
+ "crypto/ed25519"
+ "encoding/base64"
+ "encoding/hex"
+ "encoding/json"
+ "fmt"
"log"
"net/http"
+ "strconv"
+ "strings"
+ "time"
"github.com/Fimeg/RedFlag/aggregator-server/internal/database/queries"
"github.com/Fimeg/RedFlag/aggregator-server/internal/utils"
@@ -38,6 +46,48 @@ func MachineBindingMiddleware(agentQueries *queries.AgentQueries, minAgentVersio
return
}
+ // Check if agent is reporting an update completion
+ reportedVersion := c.GetHeader("X-Agent-Version")
+ updateNonce := c.GetHeader("X-Update-Nonce")
+
+ if agent.IsUpdating && updateNonce != "" {
+ // Validate the nonce first (proves server authorized this update)
+ if agent.PublicKeyFingerprint == nil {
+ log.Printf("[SECURITY] Agent %s has no public key fingerprint for nonce validation", agentID)
+ c.JSON(http.StatusForbidden, gin.H{"error": "server public key not configured"})
+ c.Abort()
+ return
+ }
+ if err := validateUpdateNonceMiddleware(updateNonce, *agent.PublicKeyFingerprint); err != nil {
+ log.Printf("[SECURITY] Invalid update nonce for agent %s: %v", agentID, err)
+ c.JSON(http.StatusForbidden, gin.H{"error": "invalid update nonce"})
+ c.Abort()
+ return
+ }
+
+ // Check for downgrade attempt (security boundary)
+ if !isVersionUpgrade(reportedVersion, agent.CurrentVersion) {
+ log.Printf("[SECURITY] Downgrade attempt detected: agent %s %s → %s",
+ agentID, agent.CurrentVersion, reportedVersion)
+ c.JSON(http.StatusForbidden, gin.H{"error": "downgrade not allowed"})
+ c.Abort()
+ return
+ }
+
+ // Valid upgrade - complete it in database
+ go func() {
+ if err := agentQueries.CompleteAgentUpdate(agentID.String(), reportedVersion); err != nil {
+ log.Printf("[ERROR] Failed to complete agent update: %v", err)
+ } else {
+ log.Printf("[system] Agent %s updated: %s → %s", agentID, agent.CurrentVersion, reportedVersion)
+ }
+ }()
+
+ // Allow this request through
+ c.Next()
+ return
+ }
+
// Check minimum version (hard cutoff for legacy de-support)
if agent.CurrentVersion != "" && minAgentVersion != "" {
if !utils.IsNewerOrEqualVersion(agent.CurrentVersion, minAgentVersion) {
@@ -97,3 +147,82 @@ func MachineBindingMiddleware(agentQueries *queries.AgentQueries, minAgentVersio
c.Next()
}
}
+
+func validateUpdateNonceMiddleware(nonceB64, serverPublicKey string) error {
+ // Decode base64 nonce
+ data, err := base64.StdEncoding.DecodeString(nonceB64)
+ if err != nil {
+ return fmt.Errorf("invalid base64: %w", err)
+ }
+
+ // Parse JSON
+ var nonce struct {
+ AgentID string `json:"agent_id"`
+ TargetVersion string `json:"target_version"`
+ Timestamp int64 `json:"timestamp"`
+ Signature string `json:"signature"`
+ }
+ if err := json.Unmarshal(data, &nonce); err != nil {
+ return fmt.Errorf("invalid format: %w", err)
+ }
+
+ // Check freshness
+ if time.Now().Unix()-nonce.Timestamp > 600 { // 10 minutes
+ return fmt.Errorf("nonce expired (age: %d seconds)", time.Now().Unix()-nonce.Timestamp)
+ }
+
+ // Verify signature
+ signature, err := base64.StdEncoding.DecodeString(nonce.Signature)
+ if err != nil {
+ return fmt.Errorf("invalid signature encoding: %w", err)
+ }
+
+ // Parse server's public key
+ pubKeyBytes, err := hex.DecodeString(serverPublicKey)
+ if err != nil {
+ return fmt.Errorf("invalid server public key: %w", err)
+ }
+
+ // Remove signature for verification
+ originalSig := nonce.Signature
+ nonce.Signature = ""
+ verifyData, err := json.Marshal(nonce)
+ if err != nil {
+ return fmt.Errorf("marshal verify data: %w", err)
+ }
+
+ if !ed25519.Verify(ed25519.PublicKey(pubKeyBytes), verifyData, signature) {
+ return fmt.Errorf("signature verification failed")
+ }
+
+ // Restore signature (not needed but good practice)
+ nonce.Signature = originalSig
+ return nil
+}
+
+func isVersionUpgrade(new, current string) bool {
+ // Parse semantic versions
+ newParts := strings.Split(new, ".")
+ curParts := strings.Split(current, ".")
+
+ // Convert to integers for comparison
+ newMajor, _ := strconv.Atoi(newParts[0])
+ newMinor, _ := strconv.Atoi(newParts[1])
+ newPatch, _ := strconv.Atoi(newParts[2])
+
+ curMajor, _ := strconv.Atoi(curParts[0])
+ curMinor, _ := strconv.Atoi(curParts[1])
+ curPatch, _ := strconv.Atoi(curParts[2])
+
+ // Check if new > current (not equal, not less)
+ if newMajor > curMajor {
+ return true
+ }
+ if newMajor == curMajor && newMinor > curMinor {
+ return true
+ }
+ if newMajor == curMajor && newMinor == curMinor && newPatch > curPatch {
+ return true
+ }
+ return false // Equal or downgrade
+}
diff --git a/aggregator-server/internal/database/queries/agents.go b/aggregator-server/internal/database/queries/agents.go
index b136883..39a5019 100644
--- a/aggregator-server/internal/database/queries/agents.go
+++ b/aggregator-server/internal/database/queries/agents.go
@@ -1,6 +1,7 @@
package queries
import (
+ "context"
"database/sql"
"fmt"
"time"
@@ -324,3 +325,46 @@ func (q *AgentQueries) UpdateAgentUpdatingStatus(id uuid.UUID, isUpdating bool,
_, err := q.db.Exec(query, isUpdating, versionPtr, time.Now(), id)
return err
}
+
+// CompleteAgentUpdate marks an agent update as successful and updates version
+func (q *AgentQueries) CompleteAgentUpdate(agentID string, newVersion string) error {
+ query := `
+ UPDATE agents
+ SET
+ current_version = $2,
+ is_updating = false,
+ updated_at = CURRENT_TIMESTAMP
+ WHERE id = $1
+ `
+
+ ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+ defer cancel()
+
+ result, err := q.db.ExecContext(ctx, query, agentID, newVersion)
+ if err != nil {
+ return fmt.Errorf("failed to complete agent update: %w", err)
+ }
+
+ rows, err := result.RowsAffected()
+ if err != nil || rows == 0 {
+ return fmt.Errorf("agent not found or version not updated")
+ }
+
+ return nil
+}
+
+// SetAgentUpdating marks an agent as updating with nonce
+func (q *AgentQueries) SetAgentUpdating(agentID string, isUpdating bool, targetVersion string) error {
+ query := `
+ UPDATE agents
+ SET is_updating = $2, updating_to_version = $3, updated_at = CURRENT_TIMESTAMP
+ WHERE id = $1
+ `
+
+ _, err := q.db.Exec(query, agentID, isUpdating, targetVersion)
+ if err != nil {
+ return fmt.Errorf("failed to set agent updating state: %w", err)
+ }
+
+ return nil
+}
diff --git a/aggregator-server/internal/services/agent_builder.go b/aggregator-server/internal/services/agent_builder.go
new file mode 100644
index 0000000..ef62ef0
--- /dev/null
+++ b/aggregator-server/internal/services/agent_builder.go
@@ -0,0 +1,380 @@
+package services
+
+import (
+ "encoding/json"
+ "fmt"
+ "os"
+ "path/filepath"
+ "strings"
+ "text/template"
+ "time"
+)
+
+// AgentBuilder handles generating embedded agent configurations
+type AgentBuilder struct {
+ buildContext string
+}
+
+// NewAgentBuilder creates a new agent builder
+func NewAgentBuilder() *AgentBuilder {
+ return &AgentBuilder{}
+}
+
+// BuildAgentWithConfig generates agent configuration and prepares signed binary
+func (ab *AgentBuilder) BuildAgentWithConfig(config *AgentConfiguration) (*BuildResult, error) {
+ // Create temporary build directory
+ buildDir, err := os.MkdirTemp("", "agent-build-")
+ if err != nil {
+ return nil, fmt.Errorf("failed to create build directory: %w", err)
+ }
+
+ // Generate config.json (not embedded in binary)
+ configJSONPath := filepath.Join(buildDir, "config.json")
+ configJSON, err := ab.generateConfigJSON(config)
+ if err != nil {
+ os.RemoveAll(buildDir)
+ return nil, fmt.Errorf("failed to generate config JSON: %w", err)
+ }
+
+ // Write config.json to file
+ if err := os.WriteFile(configJSONPath, []byte(configJSON), 0600); err != nil {
+ os.RemoveAll(buildDir)
+ return nil, fmt.Errorf("failed to write config file: %w", err)
+ }
+
+ // Note: Binary is pre-built and stored in /app/binaries/{platform}/
+ // We don't build or modify the binary here - it's generic for all agents
+ // The signing happens at the platform level, not per-agent
+
+ return &BuildResult{
+ BuildDir: buildDir,
+ AgentID: config.AgentID,
+ ConfigFile: configJSONPath,
+ ConfigJSON: configJSON,
+ Platform: config.Platform,
+ BuildTime: time.Now(),
+ }, nil
+}
+
+// generateConfigJSON converts configuration to JSON format
+func (ab *AgentBuilder) generateConfigJSON(config *AgentConfiguration) (string, error) {
+ // Create complete configuration
+ completeConfig := make(map[string]interface{})
+
+ // Copy public configuration
+ for k, v := range config.PublicConfig {
+ completeConfig[k] = v
+ }
+
+ // Add secrets (they will be protected by file permissions at runtime)
+ for k, v := range config.Secrets {
+ completeConfig[k] = v
+ }
+
+ // CRITICAL: Add both version fields explicitly
+ // These MUST be present or middleware will block the agent
+ completeConfig["version"] = config.ConfigVersion // Config schema version (e.g., "5")
+ completeConfig["agent_version"] = config.AgentVersion // Agent binary version (e.g., "0.1.23.5")
+
+ // Add agent metadata
+ completeConfig["agent_id"] = config.AgentID
+ completeConfig["server_url"] = config.ServerURL
+ completeConfig["organization"] = config.Organization
+ completeConfig["environment"] = config.Environment
+ completeConfig["template"] = config.Template
+ completeConfig["build_time"] = config.BuildTime.Format(time.RFC3339)
+
+ // Convert to JSON
+ jsonBytes, err := json.MarshalIndent(completeConfig, "", " ")
+ if err != nil {
+ return "", fmt.Errorf("failed to marshal config to JSON: %w", err)
+ }
+
+ return string(jsonBytes), nil
+}
+
+// BuildResult contains the results of the build process
+type BuildResult struct {
+ BuildDir string `json:"build_dir"`
+ AgentID string `json:"agent_id"`
+ ConfigFile string `json:"config_file"`
+ ConfigJSON string `json:"config_json"`
+ Platform string `json:"platform"`
+ BuildTime time.Time `json:"build_time"`
+}
+
+// generateEmbeddedConfig generates the embedded configuration Go file
+func (ab *AgentBuilder) generateEmbeddedConfig(filename string, config *AgentConfiguration) error {
+ // Create directory structure
+ if err := os.MkdirAll(filepath.Dir(filename), 0755); err != nil {
+ return err
+ }
+
+ // Convert configuration to JSON for embedding
+ configJSON, err := ab.configToJSON(config)
+ if err != nil {
+ return err
+ }
+
+ // Generate Go source file with embedded configuration
+ tmpl := `// Code generated by dynamic build system. DO NOT EDIT.
+package embedded
+
+import (
+ "encoding/json"
+ "time"
+)
+
+// EmbeddedAgentConfiguration contains the pre-built agent configuration
+var EmbeddedAgentConfiguration = []byte(` + "`" + `{{.ConfigJSON}}` + "`" + `)
+
+// EmbeddedAgentID contains the agent ID
+var EmbeddedAgentID = "{{.AgentID}}"
+
+// EmbeddedServerURL contains the server URL
+var EmbeddedServerURL = "{{.ServerURL}}"
+
+// EmbeddedOrganization contains the organization
+var EmbeddedOrganization = "{{.Organization}}"
+
+// EmbeddedEnvironment contains the environment
+var EmbeddedEnvironment = "{{.Environment}}"
+
+// EmbeddedTemplate contains the template type
+var EmbeddedTemplate = "{{.Template}}"
+
+// EmbeddedBuildTime contains the build time
+var EmbeddedBuildTime, _ = time.Parse(time.RFC3339, "{{.BuildTime}}")
+
+// GetEmbeddedConfig returns the embedded configuration as a map
+func GetEmbeddedConfig() (map[string]interface{}, error) {
+ var config map[string]interface{}
+ err := json.Unmarshal(EmbeddedAgentConfiguration, &config)
+ return config, err
+}
+
+// SecretsMapping maps configuration fields to Docker secrets
+var SecretsMapping = map[string]string{
+ {{range $key, $value := .Secrets}}"{{$key}}": "{{$value}}",
+ {{end}}
+}
+`
+
+ // Execute template
+ t, err := template.New("embedded").Parse(tmpl)
+ if err != nil {
+ return fmt.Errorf("failed to parse template: %w", err)
+ }
+
+ file, err := os.Create(filename)
+ if err != nil {
+ return fmt.Errorf("failed to create file: %w", err)
+ }
+ defer file.Close()
+
+ data := struct {
+ ConfigJSON string
+ AgentID string
+ ServerURL string
+ Organization string
+ Environment string
+ Template string
+ BuildTime string
+ Secrets map[string]string
+ }{
+ ConfigJSON: configJSON,
+ AgentID: config.AgentID,
+ ServerURL: config.ServerURL,
+ Organization: config.Organization,
+ Environment: config.Environment,
+ Template: config.Template,
+ BuildTime: config.BuildTime.Format(time.RFC3339),
+ Secrets: config.Secrets,
+ }
+
+ if err := t.Execute(file, data); err != nil {
+ return fmt.Errorf("failed to execute template: %w", err)
+ }
+
+ return nil
+}
+
+// generateDockerCompose generates a docker-compose.yml file
+func (ab *AgentBuilder) generateDockerCompose(filename string, config *AgentConfiguration) error {
+ tmpl := `# Generated dynamically based on configuration
+version: '3.8'
+
+services:
+ redflag-agent:
+ image: {{.ImageTag}}
+ container_name: redflag-agent-{{.AgentID}}
+ restart: unless-stopped
+ secrets:
+ {{range $key := .SecretsKeys}}- {{$key}}
+ {{end}}
+ volumes:
+ - /var/lib/redflag:/var/lib/redflag
+ - /var/run/docker.sock:/var/run/docker.sock
+ environment:
+ - REDFLAG_AGENT_ID={{.AgentID}}
+ - REDFLAG_ENVIRONMENT={{.Environment}}
+ - REDFLAG_SERVER_URL={{.ServerURL}}
+ - REDFLAG_ORGANIZATION={{.Organization}}
+ networks:
+ - redflag
+ logging:
+ driver: "json-file"
+ options:
+ max-size: "10m"
+ max-file: "3"
+
+secrets:
+ {{range $key, $value := .Secrets}}{{$key}}:
+ external: true
+ {{end}}
+
+networks:
+ redflag:
+ external: true
+`
+
+ t, err := template.New("compose").Parse(tmpl)
+ if err != nil {
+ return err
+ }
+
+ file, err := os.Create(filename)
+ if err != nil {
+ return err
+ }
+ defer file.Close()
+
+ // Extract secret keys for template
+ secretsKeys := make([]string, 0, len(config.Secrets))
+ for key := range config.Secrets {
+ secretsKeys = append(secretsKeys, key)
+ }
+
+ data := struct {
+ ImageTag string
+ AgentID string
+ Environment string
+ ServerURL string
+ Organization string
+ Secrets map[string]string
+ SecretsKeys []string
+ }{
+ ImageTag: fmt.Sprintf("redflag-agent:%s", config.AgentID[:8]),
+ AgentID: config.AgentID,
+ Environment: config.Environment,
+ ServerURL: config.ServerURL,
+ Organization: config.Organization,
+ Secrets: config.Secrets,
+ SecretsKeys: secretsKeys,
+ }
+
+ return t.Execute(file, data)
+}
+
+// generateDockerfile generates a Dockerfile for building the agent
+func (ab *AgentBuilder) generateDockerfile(filename string, config *AgentConfiguration) error {
+ tmpl := `# Dockerfile for RedFlag Agent with embedded configuration
+FROM golang:1.21-alpine AS builder
+
+# Install ca-certificates for SSL/TLS
+RUN apk add --no-cache ca-certificates git
+
+WORKDIR /app
+
+# Copy go mod files (these should be in the same directory as the Dockerfile)
+COPY go.mod go.sum ./
+RUN go mod download
+
+# Copy source code
+COPY . .
+
+# Copy generated embedded configuration
+COPY pkg/embedded/config.go ./pkg/embedded/config.go
+
+# Build the agent with embedded configuration
+RUN CGO_ENABLED=0 GOOS=linux go build \
+ -ldflags="-w -s -X main.version=dynamic-build-{{.AgentID}}" \
+ -o redflag-agent \
+ ./cmd/agent
+
+# Final stage
+FROM scratch
+
+# Copy ca-certificates for SSL/TLS
+COPY --from=builder /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/
+
+# Copy the agent binary
+COPY --from=builder /app/redflag-agent /redflag-agent
+
+# Set environment variables (these can be overridden by docker-compose)
+ENV REDFLAG_AGENT_ID="{{.AgentID}}"
+ENV REDFLAG_ENVIRONMENT="{{.Environment}}"
+ENV REDFLAG_SERVER_URL="{{.ServerURL}}"
+ENV REDFLAG_ORGANIZATION="{{.Organization}}"
+
+# Health check
+HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
+ CMD ["/redflag-agent", "--health-check"]
+
+# Run the agent
+ENTRYPOINT ["/redflag-agent"]
+`
+
+ t, err := template.New("dockerfile").Parse(tmpl)
+ if err != nil {
+ return err
+ }
+
+ file, err := os.Create(filename)
+ if err != nil {
+ return err
+ }
+ defer file.Close()
+
+ data := struct {
+ AgentID string
+ Environment string
+ ServerURL string
+ Organization string
+ }{
+ AgentID: config.AgentID,
+ Environment: config.Environment,
+ ServerURL: config.ServerURL,
+ Organization: config.Organization,
+ }
+
+ return t.Execute(file, data)
+}
+
+// configToJSON converts configuration to JSON string
+func (ab *AgentBuilder) configToJSON(config *AgentConfiguration) (string, error) {
+ // Create complete configuration with embedded values
+ completeConfig := make(map[string]interface{})
+
+ // Copy public configuration
+ for k, v := range config.PublicConfig {
+ completeConfig[k] = v
+ }
+
+ // Add secrets values (they will be overridden by Docker secrets at runtime)
+ for k, v := range config.Secrets {
+ completeConfig[k] = v
+ }
+
+ // Convert to JSON with proper escaping
+ jsonBytes, err := json.MarshalIndent(completeConfig, "", " ")
+ if err != nil {
+ return "", fmt.Errorf("failed to marshal config to JSON: %w", err)
+ }
+
+ // Escape backticks for Go string literal
+ jsonStr := string(jsonBytes)
+ jsonStr = strings.ReplaceAll(jsonStr, "`", "` + \"`\" + `")
+
+ return jsonStr, nil
+}
\ No newline at end of file
diff --git a/aggregator-server/internal/services/build_types.go b/aggregator-server/internal/services/build_types.go
new file mode 100644
index 0000000..70a6020
--- /dev/null
+++ b/aggregator-server/internal/services/build_types.go
@@ -0,0 +1,318 @@
+package services
+
+import (
+ "crypto/sha256"
+ "encoding/hex"
+ "encoding/json"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "strings"
+ "time"
+)
+
+// NewBuildRequest represents a request for a new agent build
+type NewBuildRequest struct {
+ ServerURL string `json:"server_url" binding:"required"`
+ Environment string `json:"environment" binding:"required"`
+ AgentType string `json:"agent_type" binding:"required,oneof=linux-server windows-workstation docker-host"`
+ Organization string `json:"organization" binding:"required"`
+ RegistrationToken string `json:"registration_token" binding:"required"`
+ CustomSettings map[string]interface{} `json:"custom_settings,omitempty"`
+ DeploymentID string `json:"deployment_id,omitempty"`
+ AgentID string `json:"agent_id,omitempty"` // For upgrades when preserving ID
+}
+
+// UpgradeBuildRequest represents a request for an agent upgrade
+type UpgradeBuildRequest struct {
+ ServerURL string `json:"server_url" binding:"required"`
+ Environment string `json:"environment"`
+ AgentType string `json:"agent_type"`
+ Organization string `json:"organization"`
+ CustomSettings map[string]interface{} `json:"custom_settings,omitempty"`
+ DeploymentID string `json:"deployment_id,omitempty"`
+ PreserveExisting bool `json:"preserve_existing"`
+ DetectionPath string `json:"detection_path,omitempty"`
+}
+
+// DetectionRequest represents a request to detect existing agent installation
+type DetectionRequest struct {
+ DetectionPath string `json:"detection_path,omitempty"`
+}
+
+// InstallationDetection represents the result of detecting an existing installation
+type InstallationDetection struct {
+ HasExistingAgent bool `json:"has_existing_agent"`
+ AgentID string `json:"agent_id,omitempty"`
+ CurrentVersion string `json:"current_version,omitempty"`
+ ConfigVersion int `json:"config_version,omitempty"`
+ RequiresMigration bool `json:"requires_migration"`
+ Inventory *AgentFileInventory `json:"inventory,omitempty"`
+ MigrationPlan *MigrationDetection `json:"migration_plan,omitempty"`
+ DetectionPath string `json:"detection_path"`
+ DetectionTime string `json:"detection_time"`
+ RecommendedAction string `json:"recommended_action"`
+}
+
+// AgentFileInventory represents all files associated with an agent installation
+type AgentFileInventory struct {
+ ConfigFiles []AgentFile `json:"config_files"`
+ StateFiles []AgentFile `json:"state_files"`
+ BinaryFiles []AgentFile `json:"binary_files"`
+ LogFiles []AgentFile `json:"log_files"`
+ CertificateFiles []AgentFile `json:"certificate_files"`
+ ExistingPaths []string `json:"existing_paths"`
+ MissingPaths []string `json:"missing_paths"`
+}
+
+// AgentFile represents a file associated with the agent
+type AgentFile struct {
+ Path string `json:"path"`
+ Size int64 `json:"size"`
+ ModifiedTime string `json:"modified_time"`
+ Version string `json:"version,omitempty"`
+ Checksum string `json:"checksum"`
+ Required bool `json:"required"`
+ Migrate bool `json:"migrate"`
+ Description string `json:"description"`
+}
+
+// MigrationDetection represents migration detection results (from existing migration system)
+type MigrationDetection struct {
+ CurrentAgentVersion string `json:"current_agent_version"`
+ CurrentConfigVersion int `json:"current_config_version"`
+ RequiresMigration bool `json:"requires_migration"`
+ RequiredMigrations []string `json:"required_migrations"`
+ MissingSecurityFeatures []string `json:"missing_security_features"`
+ Inventory *AgentFileInventory `json:"inventory"`
+ DetectionTime string `json:"detection_time"`
+}
+
+// InstallationDetector handles detection of existing agent installations
+type InstallationDetector struct{}
+
+// NewInstallationDetector creates a new installation detector
+func NewInstallationDetector() *InstallationDetector {
+ return &InstallationDetector{}
+}
+
+// DetectExistingInstallation detects if there's an existing agent installation
+func (id *InstallationDetector) DetectExistingInstallation(agentID string) (*InstallationDetection, error) {
+ result := &InstallationDetection{
+ HasExistingAgent: false,
+ DetectionTime: time.Now().Format(time.RFC3339),
+ RecommendedAction: "new_installation",
+ }
+
+ if agentID != "" {
+ result.HasExistingAgent = true
+ result.AgentID = agentID
+ result.RecommendedAction = "upgrade"
+ }
+
+ return result, nil
+}
+
+// scanDirectory scans a directory for agent-related files
+func (id *InstallationDetector) scanDirectory(dirPath string) ([]AgentFile, error) {
+ var files []AgentFile
+
+ entries, err := os.ReadDir(dirPath)
+ if err != nil {
+ if os.IsNotExist(err) {
+ return files, nil // Directory doesn't exist, return empty
+ }
+ return nil, err
+ }
+
+ for _, entry := range entries {
+ if entry.IsDir() {
+ continue
+ }
+
+ fullPath := filepath.Join(dirPath, entry.Name())
+ info, err := entry.Info()
+ if err != nil {
+ continue
+ }
+
+ // Calculate checksum
+ checksum, err := id.calculateChecksum(fullPath)
+ if err != nil {
+ checksum = ""
+ }
+
+ file := AgentFile{
+ Path: fullPath,
+ Size: info.Size(),
+ ModifiedTime: info.ModTime().Format(time.RFC3339),
+ Checksum: checksum,
+ Required: id.isRequiredFile(entry.Name()),
+ Migrate: id.shouldMigrateFile(entry.Name()),
+ Description: id.getFileDescription(entry.Name()),
+ }
+
+ files = append(files, file)
+ }
+
+ return files, nil
+}
+
+// categorizeFile categorizes a file into the appropriate inventory section
+func (id *InstallationDetector) categorizeFile(file AgentFile, inventory *AgentFileInventory) {
+ filename := filepath.Base(file.Path)
+
+ switch {
+ case filename == "config.json":
+ inventory.ConfigFiles = append(inventory.ConfigFiles, file)
+ case filename == "pending_acks.json" || filename == "public_key.cache" || filename == "last_scan.json" || filename == "metrics.json":
+ inventory.StateFiles = append(inventory.StateFiles, file)
+ case filename == "redflag-agent" || filename == "redflag-agent.exe":
+ inventory.BinaryFiles = append(inventory.BinaryFiles, file)
+ case strings.HasSuffix(filename, ".log"):
+ inventory.LogFiles = append(inventory.LogFiles, file)
+ case strings.HasSuffix(filename, ".crt") || strings.HasSuffix(filename, ".key") || strings.HasSuffix(filename, ".pem"):
+ inventory.CertificateFiles = append(inventory.CertificateFiles, file)
+ }
+}
+
+// extractAgentInfo extracts agent ID, version, and config version from config files
+func (id *InstallationDetector) extractAgentInfo(inventory *AgentFileInventory) (string, string, int, error) {
+ var agentID, version string
+ var configVersion int
+
+ // Look for config.json first
+ for _, configFile := range inventory.ConfigFiles {
+ if strings.Contains(configFile.Path, "config.json") {
+ data, err := os.ReadFile(configFile.Path)
+ if err != nil {
+ continue
+ }
+
+ var config map[string]interface{}
+ if err := json.Unmarshal(data, &config); err != nil {
+ continue
+ }
+
+ // Extract agent ID
+ if id, ok := config["agent_id"].(string); ok {
+ agentID = id
+ }
+
+ // Extract version information
+ if ver, ok := config["agent_version"].(string); ok {
+ version = ver
+ }
+ if ver, ok := config["version"].(float64); ok {
+ configVersion = int(ver)
+ }
+
+ break
+ }
+ }
+
+ // If no agent ID found in config, we don't have a valid installation
+ if agentID == "" {
+ return "", "", 0, fmt.Errorf("no agent ID found in configuration")
+ }
+
+ return agentID, version, configVersion, nil
+}
+
+// determineMigrationRequired determines if migration is needed
+func (id *InstallationDetector) determineMigrationRequired(inventory *AgentFileInventory) bool {
+ // Check for old directory paths
+ for _, configFile := range inventory.ConfigFiles {
+ if strings.Contains(configFile.Path, "/etc/aggregator/") || strings.Contains(configFile.Path, "/var/lib/aggregator/") {
+ return true
+ }
+ }
+
+ for _, stateFile := range inventory.StateFiles {
+ if strings.Contains(stateFile.Path, "/etc/aggregator/") || strings.Contains(stateFile.Path, "/var/lib/aggregator/") {
+ return true
+ }
+ }
+
+ // Check config version (older than v5 needs migration)
+ for _, configFile := range inventory.ConfigFiles {
+ if strings.Contains(configFile.Path, "config.json") {
+ if _, _, configVersion, err := id.extractAgentInfo(inventory); err == nil {
+ if configVersion < 5 {
+ return true
+ }
+ }
+ }
+ }
+
+ return false
+}
+
+// calculateChecksum calculates SHA256 checksum of a file
+func (id *InstallationDetector) calculateChecksum(filePath string) (string, error) {
+ file, err := os.Open(filePath)
+ if err != nil {
+ return "", err
+ }
+ defer file.Close()
+
+ hash := sha256.New()
+ if _, err := io.Copy(hash, file); err != nil {
+ return "", err
+ }
+
+ return hex.EncodeToString(hash.Sum(nil)), nil
+}
+
+// isRequiredFile determines if a file is required for agent operation
+func (id *InstallationDetector) isRequiredFile(filename string) bool {
+ requiredFiles := []string{
+ "config.json",
+ "redflag-agent",
+ "redflag-agent.exe",
+ }
+
+ for _, required := range requiredFiles {
+ if filename == required {
+ return true
+ }
+ }
+ return false
+}
+
+// shouldMigrateFile determines if a file should be migrated
+func (id *InstallationDetector) shouldMigrateFile(filename string) bool {
+ migratableFiles := []string{
+ "config.json",
+ "pending_acks.json",
+ "public_key.cache",
+ "last_scan.json",
+ "metrics.json",
+ }
+
+ for _, migratable := range migratableFiles {
+ if filename == migratable {
+ return true
+ }
+ }
+ return false
+}
+
+// getFileDescription returns a human-readable description of a file
+func (id *InstallationDetector) getFileDescription(filename string) string {
+ descriptions := map[string]string{
+ "config.json": "Agent configuration file",
+ "pending_acks.json": "Pending command acknowledgments",
+ "public_key.cache": "Server public key cache",
+ "last_scan.json": "Last scan results",
+ "metrics.json": "Agent metrics data",
+ "redflag-agent": "Agent binary",
+ "redflag-agent.exe": "Windows agent binary",
+ }
+
+ if desc, ok := descriptions[filename]; ok {
+ return desc
+ }
+ return "Agent file"
+}
\ No newline at end of file
diff --git a/aggregator-server/internal/services/config_builder.go b/aggregator-server/internal/services/config_builder.go
new file mode 100644
index 0000000..8388b58
--- /dev/null
+++ b/aggregator-server/internal/services/config_builder.go
@@ -0,0 +1,727 @@
+package services
+
+import (
+ "crypto/rand"
+ "encoding/hex"
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "time"
+
+ "github.com/google/uuid"
+)
+
// AgentTemplate defines a reusable blueprint for one agent type
// (e.g. linux-server, windows-workstation, docker-host): its default
// configuration, the names of the secrets it requires, and the validation
// rules applied to the final merged configuration.
type AgentTemplate struct {
	Name        string                 `json:"name"`
	Description string                 `json:"description"`
	BaseConfig  map[string]interface{} `json:"base_config"`      // defaults merged first; custom settings layer on top
	Secrets     []string               `json:"required_secrets"` // secret names the agent expects (see separateSecrets)
	Validation  ValidationRules        `json:"validation"`
}

// ValidationRules defines validation rules for a template's configuration.
// NOTE(review): confirm Patterns is actually enforced somewhere — in this
// file only RequiredFields, AllowedValues and Constraints are checked.
type ValidationRules struct {
	RequiredFields []string               `json:"required_fields"`
	AllowedValues  map[string][]string    `json:"allowed_values"` // field -> permitted string values
	Patterns       map[string]string      `json:"patterns"`       // field -> regex the value must match
	Constraints    map[string]interface{} `json:"constraints"`    // field -> {"min": n, "max": n}
}

// PublicKeyResponse represents the server's response from
// GET /api/v1/public-key (consumed by fetchServerPublicKey; only PublicKey
// is used there).
type PublicKeyResponse struct {
	PublicKey   string `json:"public_key"`
	Fingerprint string `json:"fingerprint"`
	Algorithm   string `json:"algorithm"`
	KeySize     int    `json:"key_size"`
}
+
// ConfigBuilder handles dynamic agent configuration generation: it merges
// per-agent-type templates with request-specific values and environment
// defaults, and caches the server public key per server URL.
//
// NOTE(review): publicKeyCache is a plain map written without a lock in
// fetchServerPublicKey — confirm a ConfigBuilder is not shared across
// concurrent HTTP handlers, or add a mutex.
type ConfigBuilder struct {
	serverURL      string                   // NOTE(review): appears unused in this file — confirm before removing
	templates      map[string]AgentTemplate // agent type -> template, built once at construction
	httpClient     *http.Client             // used to fetch the server public key
	publicKeyCache map[string]string        // server URL -> public key string
}

// NewConfigBuilder creates a new configuration builder for serverURL.
func NewConfigBuilder(serverURL string) *ConfigBuilder {
	return &ConfigBuilder{
		serverURL: serverURL,
		templates: getAgentTemplates(),
		httpClient: &http.Client{
			Timeout: 30 * time.Second, // bound public-key fetches
		},
		publicKeyCache: make(map[string]string),
	}
}
+
// AgentSetupRequest represents a request to set up a new agent.
type AgentSetupRequest struct {
	ServerURL      string                 `json:"server_url" binding:"required"`  // server the agent will report to
	Environment    string                 `json:"environment" binding:"required"` // development|staging|production|testing (see applyEnvironmentDefaults)
	AgentType      string                 `json:"agent_type" binding:"required,oneof=linux-server windows-workstation docker-host"`
	Organization   string                 `json:"organization" binding:"required"`
	CustomSettings map[string]interface{} `json:"custom_settings,omitempty"` // overrides merged over the template's BaseConfig
	DeploymentID   string                 `json:"deployment_id,omitempty"`   // optional external deployment identifier
}
+
+// BuildAgentConfig builds a complete agent configuration
+func (cb *ConfigBuilder) BuildAgentConfig(req AgentSetupRequest) (*AgentConfiguration, error) {
+ // Validate request
+ if err := cb.validateRequest(req); err != nil {
+ return nil, err
+ }
+
+ // Generate agent ID
+ agentID := uuid.New().String()
+
+ // Fetch server public key
+ serverPublicKey, err := cb.fetchServerPublicKey(req.ServerURL)
+ if err != nil {
+ return nil, fmt.Errorf("failed to fetch server public key: %w", err)
+ }
+
+ // Generate registration token
+ registrationToken, err := cb.generateRegistrationToken(agentID)
+ if err != nil {
+ return nil, fmt.Errorf("failed to generate registration token: %w", err)
+ }
+
+ // Get template
+ template, exists := cb.templates[req.AgentType]
+ if !exists {
+ return nil, fmt.Errorf("unknown agent type: %s", req.AgentType)
+ }
+
+ // Build base configuration
+ config := cb.buildFromTemplate(template, req.CustomSettings)
+
+ // Inject deployment-specific values
+ cb.injectDeploymentValues(config, req, agentID, registrationToken, serverPublicKey)
+
+ // Apply environment-specific defaults
+ cb.applyEnvironmentDefaults(config, req.Environment)
+
+ // Validate final configuration
+ if err := cb.validateConfiguration(config, template); err != nil {
+ return nil, fmt.Errorf("configuration validation failed: %w", err)
+ }
+
+ // Separate sensitive and non-sensitive data
+ publicConfig, secrets := cb.separateSecrets(config)
+
+ // Create Docker secrets if needed
+ var secretsCreated bool
+ var secretsPath string
+ if len(secrets) > 0 {
+ secretsManager := NewSecretsManager()
+
+ // Generate encryption key if not set
+ if secretsManager.GetEncryptionKey() == "" {
+ key, err := secretsManager.GenerateEncryptionKey()
+ if err != nil {
+ return nil, fmt.Errorf("failed to generate encryption key: %w", err)
+ }
+ secretsManager.SetEncryptionKey(key)
+ }
+
+ // Create Docker secrets
+ if err := secretsManager.CreateDockerSecrets(secrets); err != nil {
+ return nil, fmt.Errorf("failed to create Docker secrets: %w", err)
+ }
+
+ secretsCreated = true
+ secretsPath = secretsManager.GetSecretsPath()
+ }
+
+ // Determine platform from agent type
+ platform := "linux-amd64" // Default
+ if req.AgentType == "windows-workstation" {
+ platform = "windows-amd64"
+ }
+
+ return &AgentConfiguration{
+ AgentID: agentID,
+ PublicConfig: publicConfig,
+ Secrets: secrets,
+ Template: req.AgentType,
+ Environment: req.Environment,
+ ServerURL: req.ServerURL,
+ Organization: req.Organization,
+ Platform: platform,
+ ConfigVersion: "5", // Config schema version
+ AgentVersion: "0.1.23.4", // Agent binary version
+ BuildTime: time.Now(),
+ SecretsCreated: secretsCreated,
+ SecretsPath: secretsPath,
+ }, nil
+}
+
// AgentConfiguration is the fully assembled result of BuildAgentConfig:
// the sanitized public configuration, the secret values extracted from it,
// and build metadata.
type AgentConfiguration struct {
	AgentID        string                 `json:"agent_id"`
	PublicConfig   map[string]interface{} `json:"public_config"` // merged config with sensitive fields removed
	Secrets        map[string]string      `json:"secrets"`       // sensitive fields extracted by separateSecrets
	Template       string                 `json:"template"`      // agent type the template came from
	Environment    string                 `json:"environment"`
	ServerURL      string                 `json:"server_url"`
	Organization   string                 `json:"organization"`
	Platform       string                 `json:"platform"`       // e.g. linux-amd64 or windows-amd64
	ConfigVersion  string                 `json:"config_version"` // Config schema version (e.g. "5")
	AgentVersion   string                 `json:"agent_version"`  // Agent binary version (e.g. "0.1.23.5")
	BuildTime      time.Time              `json:"build_time"`
	SecretsCreated bool                   `json:"secrets_created"`
	SecretsPath    string                 `json:"secrets_path,omitempty"` // set only when secrets were written to disk
}
+
+// validateRequest validates the setup request
+func (cb *ConfigBuilder) validateRequest(req AgentSetupRequest) error {
+ if req.ServerURL == "" {
+ return fmt.Errorf("server_url is required")
+ }
+
+ if req.Environment == "" {
+ return fmt.Errorf("environment is required")
+ }
+
+ if req.AgentType == "" {
+ return fmt.Errorf("agent_type is required")
+ }
+
+ if req.Organization == "" {
+ return fmt.Errorf("organization is required")
+ }
+
+ // Check if agent type exists
+ if _, exists := cb.templates[req.AgentType]; !exists {
+ return fmt.Errorf("unknown agent type: %s", req.AgentType)
+ }
+
+ return nil
+}
+
// fetchServerPublicKey fetches the server's public key from
// GET {serverURL}/api/v1/public-key, caching the result per server URL.
//
// NOTE(review): the cache is never invalidated, so a server-side key
// rotation is not picked up for the lifetime of this ConfigBuilder. The
// response's Fingerprint/Algorithm fields are ignored rather than verified.
// The cache write below is unsynchronized — confirm single-threaded use.
func (cb *ConfigBuilder) fetchServerPublicKey(serverURL string) (string, error) {
	// Check cache first
	if cached, exists := cb.publicKeyCache[serverURL]; exists {
		return cached, nil
	}

	// Fetch from server (bounded by httpClient's 30s timeout)
	resp, err := cb.httpClient.Get(serverURL + "/api/v1/public-key")
	if err != nil {
		return "", fmt.Errorf("failed to fetch public key: %w", err)
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		return "", fmt.Errorf("server returned status %d", resp.StatusCode)
	}

	var keyResp PublicKeyResponse
	if err := json.NewDecoder(resp.Body).Decode(&keyResp); err != nil {
		return "", fmt.Errorf("failed to decode public key response: %w", err)
	}

	// Cache the key
	cb.publicKeyCache[serverURL] = keyResp.PublicKey

	return keyResp.PublicKey, nil
}
+
+// generateRegistrationToken generates a secure registration token
+func (cb *ConfigBuilder) generateRegistrationToken(agentID string) (string, error) {
+ bytes := make([]byte, 32)
+ if _, err := rand.Read(bytes); err != nil {
+ return "", err
+ }
+
+ // Combine agent ID with random bytes for uniqueness
+ data := append([]byte(agentID), bytes...)
+ token := hex.EncodeToString(data)
+
+ // Ensure token doesn't exceed reasonable length
+ if len(token) > 128 {
+ token = token[:128]
+ }
+
+ return token, nil
+}
+
+// buildFromTemplate builds configuration from template
+func (cb *ConfigBuilder) buildFromTemplate(template AgentTemplate, customSettings map[string]interface{}) map[string]interface{} {
+ config := make(map[string]interface{})
+
+ // Deep copy base configuration
+ for k, v := range template.BaseConfig {
+ config[k] = cb.deepCopy(v)
+ }
+
+ // Apply custom settings
+ if customSettings != nil {
+ cb.mergeSettings(config, customSettings)
+ }
+
+ return config
+}
+
// injectDeploymentValues stamps deployment-specific identity and connection
// values into the merged configuration. These keys overwrite anything set
// by the template or custom settings.
//
// NOTE(review): the hardcoded version strings here also appear in
// BuildAgentConfig's returned AgentConfiguration — keep them in sync, or
// hoist both into shared constants.
func (cb *ConfigBuilder) injectDeploymentValues(config map[string]interface{}, req AgentSetupRequest, agentID, registrationToken, serverPublicKey string) {
	config["version"] = "5"              // Config schema version (for migration system)
	config["agent_version"] = "0.1.23.5" // Agent binary version (MUST match the binary being served)
	config["server_url"] = req.ServerURL
	config["agent_id"] = agentID
	config["registration_token"] = registrationToken
	config["server_public_key"] = serverPublicKey
	config["organization"] = req.Organization
	config["environment"] = req.Environment
	config["agent_type"] = req.AgentType

	// Optional external deployment identifier.
	if req.DeploymentID != "" {
		config["deployment_id"] = req.DeploymentID
	}
}
+
// applyEnvironmentDefaults overlays per-environment defaults (log level,
// log rotation limits, and check-in cadence) onto config. Known
// environments are development, staging, production, and testing; any other
// value leaves config unchanged.
//
// NOTE(review): this runs after custom settings are merged (see
// BuildAgentConfig), so it overwrites caller-provided logging and
// check_in_interval values — confirm that precedence is intended.
func (cb *ConfigBuilder) applyEnvironmentDefaults(config map[string]interface{}, environment string) {
	environmentDefaults := map[string]interface{}{
		"development": map[string]interface{}{
			"logging": map[string]interface{}{
				"level":       "debug",
				"max_size":    50,
				"max_backups": 2,
				"max_age":     7,
			},
			"check_in_interval": 60, // More frequent polling in development
		},
		"staging": map[string]interface{}{
			"logging": map[string]interface{}{
				"level":       "info",
				"max_size":    100,
				"max_backups": 3,
				"max_age":     14,
			},
			"check_in_interval": 180,
		},
		"production": map[string]interface{}{
			"logging": map[string]interface{}{
				"level":       "warn",
				"max_size":    200,
				"max_backups": 5,
				"max_age":     30,
			},
			"check_in_interval": 300, // 5 minutes for production
		},
		"testing": map[string]interface{}{
			"logging": map[string]interface{}{
				"level":       "debug",
				"max_size":    10,
				"max_backups": 1,
				"max_age":     1,
			},
			"check_in_interval": 30, // Very frequent for testing
		},
	}

	if defaults, exists := environmentDefaults[environment]; exists {
		if defaultsMap, ok := defaults.(map[string]interface{}); ok {
			cb.mergeSettings(config, defaultsMap)
		}
	}
}
+
// validateConfiguration validates the merged configuration against the
// template's rules: required fields must be present, string fields with an
// allowed-values list must match one of them, and numeric constraints are
// delegated to validateConstraint.
//
// NOTE(review): template.Validation.Patterns is never consulted here (nor
// anywhere visible in this file), so regex rules such as server_url's
// "^https?://.+" are silently unenforced — either enforce them or drop the
// field. Non-string values for AllowedValues fields are skipped without
// error rather than rejected.
func (cb *ConfigBuilder) validateConfiguration(config map[string]interface{}, template AgentTemplate) error {
	// Check required fields
	for _, field := range template.Validation.RequiredFields {
		if _, exists := config[field]; !exists {
			return fmt.Errorf("required field missing: %s", field)
		}
	}

	// Validate allowed values
	for field, allowedValues := range template.Validation.AllowedValues {
		if value, exists := config[field]; exists {
			if strValue, ok := value.(string); ok {
				if !cb.containsString(allowedValues, strValue) {
					return fmt.Errorf("invalid value for %s: %s (allowed: %v)", field, strValue, allowedValues)
				}
			}
		}
	}

	// Validate constraints
	for field, constraint := range template.Validation.Constraints {
		if value, exists := config[field]; exists {
			if err := cb.validateConstraint(field, value, constraint); err != nil {
				return err
			}
		}
	}

	return nil
}
+
// separateSecrets splits the merged configuration into a sanitized public
// map and a flat map of secret values. The input config is not mutated:
// everything is deep-copied first and sensitive entries are deleted from
// the copies only.
//
// NOTE(review): for TLS, the *file paths* (cert_file/key_file/ca_file) are
// stored as the secret values, not the file contents — confirm that is the
// intent of downstream consumers.
func (cb *ConfigBuilder) separateSecrets(config map[string]interface{}) (map[string]interface{}, map[string]string) {
	publicConfig := make(map[string]interface{})
	secrets := make(map[string]string)

	// Copy all values to public config initially
	for k, v := range config {
		publicConfig[k] = cb.deepCopy(v)
	}

	// Extract known top-level sensitive fields (string-valued only)
	sensitiveFields := []string{
		"registration_token",
		"server_public_key",
	}

	for _, field := range sensitiveFields {
		if value, exists := publicConfig[field]; exists {
			if strValue, ok := value.(string); ok {
				secrets[field] = strValue
				delete(publicConfig, field)
			}
		}
	}

	// Extract nested sensitive fields; the nested maps here are the deep
	// copies, so deleting from them does not touch the caller's config.
	if proxy, exists := publicConfig["proxy"].(map[string]interface{}); exists {
		if username, exists := proxy["username"].(string); exists && username != "" {
			secrets["proxy_username"] = username
			delete(proxy, "username")
		}
		if password, exists := proxy["password"].(string); exists && password != "" {
			secrets["proxy_password"] = password
			delete(proxy, "password")
		}
	}

	if tls, exists := publicConfig["tls"].(map[string]interface{}); exists {
		if certFile, exists := tls["cert_file"].(string); exists && certFile != "" {
			secrets["tls_cert"] = certFile
			delete(tls, "cert_file")
		}
		if keyFile, exists := tls["key_file"].(string); exists && keyFile != "" {
			secrets["tls_key"] = keyFile
			delete(tls, "key_file")
		}
		if caFile, exists := tls["ca_file"].(string); exists && caFile != "" {
			secrets["tls_ca"] = caFile
			delete(tls, "ca_file")
		}
	}

	return publicConfig, secrets
}
+
+// Helper functions
+
+func (cb *ConfigBuilder) deepCopy(value interface{}) interface{} {
+ if m, ok := value.(map[string]interface{}); ok {
+ result := make(map[string]interface{})
+ for k, v := range m {
+ result[k] = cb.deepCopy(v)
+ }
+ return result
+ }
+ if s, ok := value.([]interface{}); ok {
+ result := make([]interface{}, len(s))
+ for i, v := range s {
+ result[i] = cb.deepCopy(v)
+ }
+ return result
+ }
+ return value
+}
+
+func (cb *ConfigBuilder) mergeSettings(target map[string]interface{}, source map[string]interface{}) {
+ for key, value := range source {
+ if existing, exists := target[key]; exists {
+ if existingMap, ok := existing.(map[string]interface{}); ok {
+ if sourceMap, ok := value.(map[string]interface{}); ok {
+ cb.mergeSettings(existingMap, sourceMap)
+ continue
+ }
+ }
+ }
+ target[key] = cb.deepCopy(value)
+ }
+}
+
+func (cb *ConfigBuilder) containsString(slice []string, item string) bool {
+ for _, s := range slice {
+ if s == item {
+ return true
+ }
+ }
+ return false
+}
+
+// GetTemplates returns the available agent templates
+func (cb *ConfigBuilder) GetTemplates() map[string]AgentTemplate {
+ return getAgentTemplates()
+}
+
+// GetTemplate returns a specific agent template
+func (cb *ConfigBuilder) GetTemplate(agentType string) (AgentTemplate, bool) {
+ template, exists := getAgentTemplates()[agentType]
+ return template, exists
+}
+
+func (cb *ConfigBuilder) validateConstraint(field string, value interface{}, constraint interface{}) error {
+ constraints, ok := constraint.(map[string]interface{})
+ if !ok {
+ return nil
+ }
+
+ if numValue, ok := value.(float64); ok {
+ if min, exists := constraints["min"].(float64); exists && numValue < min {
+ return fmt.Errorf("value for %s is below minimum: %f < %f", field, numValue, min)
+ }
+ if max, exists := constraints["max"].(float64); exists && numValue > max {
+ return fmt.Errorf("value for %s is above maximum: %f > %f", field, numValue, max)
+ }
+ }
+
+ return nil
+}
+
+// getAgentTemplates returns the available agent templates
+func getAgentTemplates() map[string]AgentTemplate {
+ return map[string]AgentTemplate{
+ "linux-server": {
+ Name: "Linux Server Agent",
+ Description: "Optimized for Linux server deployments with package management",
+ BaseConfig: map[string]interface{}{
+ "check_in_interval": 300,
+ "network": map[string]interface{}{
+ "timeout": 30000000000,
+ "retry_count": 3,
+ "retry_delay": 5000000000,
+ "max_idle_conn": 10,
+ },
+ "proxy": map[string]interface{}{
+ "enabled": false,
+ },
+ "tls": map[string]interface{}{
+ "insecure_skip_verify": false,
+ },
+ "logging": map[string]interface{}{
+ "level": "info",
+ "max_size": 100,
+ "max_backups": 3,
+ "max_age": 28,
+ },
+ "subsystems": map[string]interface{}{
+ "apt": map[string]interface{}{
+ "enabled": true,
+ "timeout": 30000000000,
+ "circuit_breaker": map[string]interface{}{
+ "enabled": true,
+ "failure_threshold": 3,
+ "failure_window": 600000000000,
+ "open_duration": 1800000000000,
+ "half_open_attempts": 2,
+ },
+ },
+ "dnf": map[string]interface{}{
+ "enabled": true,
+ "timeout": 45000000000,
+ "circuit_breaker": map[string]interface{}{
+ "enabled": true,
+ "failure_threshold": 3,
+ "failure_window": 600000000000,
+ "open_duration": 1800000000000,
+ "half_open_attempts": 2,
+ },
+ },
+ "docker": map[string]interface{}{
+ "enabled": true,
+ "timeout": 60000000000,
+ "circuit_breaker": map[string]interface{}{
+ "enabled": true,
+ "failure_threshold": 3,
+ "failure_window": 600000000000,
+ "open_duration": 1800000000000,
+ "half_open_attempts": 2,
+ },
+ },
+ "windows": map[string]interface{}{
+ "enabled": false,
+ },
+ "winget": map[string]interface{}{
+ "enabled": false,
+ },
+ "storage": map[string]interface{}{
+ "enabled": true,
+ "timeout": 10000000000,
+ "circuit_breaker": map[string]interface{}{
+ "enabled": true,
+ "failure_threshold": 3,
+ "failure_window": 600000000000,
+ "open_duration": 1800000000000,
+ "half_open_attempts": 2,
+ },
+ },
+ },
+ },
+ Secrets: []string{"registration_token", "server_public_key"},
+ Validation: ValidationRules{
+ RequiredFields: []string{"server_url", "organization"},
+ AllowedValues: map[string][]string{
+ "environment": {"development", "staging", "production", "testing"},
+ },
+ Patterns: map[string]string{
+ "server_url": "^https?://.+",
+ },
+ Constraints: map[string]interface{}{
+ "check_in_interval": map[string]interface{}{"min": 30, "max": 3600},
+ },
+ },
+ },
+ "windows-workstation": {
+ Name: "Windows Workstation Agent",
+ Description: "Optimized for Windows workstation deployments",
+ BaseConfig: map[string]interface{}{
+ "check_in_interval": 300,
+ "network": map[string]interface{}{
+ "timeout": 30000000000,
+ "retry_count": 3,
+ "retry_delay": 5000000000,
+ "max_idle_conn": 10,
+ },
+ "proxy": map[string]interface{}{
+ "enabled": false,
+ },
+ "tls": map[string]interface{}{
+ "insecure_skip_verify": false,
+ },
+ "logging": map[string]interface{}{
+ "level": "info",
+ "max_size": 100,
+ "max_backups": 3,
+ "max_age": 28,
+ },
+ "subsystems": map[string]interface{}{
+ "apt": map[string]interface{}{
+ "enabled": false,
+ },
+ "dnf": map[string]interface{}{
+ "enabled": false,
+ },
+ "docker": map[string]interface{}{
+ "enabled": false,
+ },
+ "windows": map[string]interface{}{
+ "enabled": true,
+ "timeout": 600000000000,
+ "circuit_breaker": map[string]interface{}{
+ "enabled": true,
+ "failure_threshold": 2,
+ "failure_window": 900000000000,
+ "open_duration": 3600000000000,
+ "half_open_attempts": 3,
+ },
+ },
+ "winget": map[string]interface{}{
+ "enabled": true,
+ "timeout": 120000000000,
+ "circuit_breaker": map[string]interface{}{
+ "enabled": true,
+ "failure_threshold": 3,
+ "failure_window": 600000000000,
+ "open_duration": 1800000000000,
+ "half_open_attempts": 2,
+ },
+ },
+ "storage": map[string]interface{}{
+ "enabled": false,
+ },
+ },
+ },
+ Secrets: []string{"registration_token", "server_public_key"},
+ Validation: ValidationRules{
+ RequiredFields: []string{"server_url", "organization"},
+ AllowedValues: map[string][]string{
+ "environment": {"development", "staging", "production", "testing"},
+ },
+ Patterns: map[string]string{
+ "server_url": "^https?://.+",
+ },
+ Constraints: map[string]interface{}{
+ "check_in_interval": map[string]interface{}{"min": 30, "max": 3600},
+ },
+ },
+ },
+ "docker-host": {
+ Name: "Docker Host Agent",
+ Description: "Optimized for Docker host deployments",
+ BaseConfig: map[string]interface{}{
+ "check_in_interval": 300,
+ "network": map[string]interface{}{
+ "timeout": 30000000000,
+ "retry_count": 3,
+ "retry_delay": 5000000000,
+ "max_idle_conn": 10,
+ },
+ "proxy": map[string]interface{}{
+ "enabled": false,
+ },
+ "tls": map[string]interface{}{
+ "insecure_skip_verify": false,
+ },
+ "logging": map[string]interface{}{
+ "level": "info",
+ "max_size": 100,
+ "max_backups": 3,
+ "max_age": 28,
+ },
+ "subsystems": map[string]interface{}{
+ "apt": map[string]interface{}{
+ "enabled": false,
+ },
+ "dnf": map[string]interface{}{
+ "enabled": false,
+ },
+ "docker": map[string]interface{}{
+ "enabled": true,
+ "timeout": 60000000000,
+ "circuit_breaker": map[string]interface{}{
+ "enabled": true,
+ "failure_threshold": 3,
+ "failure_window": 600000000000,
+ "open_duration": 1800000000000,
+ "half_open_attempts": 2,
+ },
+ },
+ "windows": map[string]interface{}{
+ "enabled": false,
+ },
+ "winget": map[string]interface{}{
+ "enabled": false,
+ },
+ "storage": map[string]interface{}{
+ "enabled": false,
+ },
+ },
+ },
+ Secrets: []string{"registration_token", "server_public_key"},
+ Validation: ValidationRules{
+ RequiredFields: []string{"server_url", "organization"},
+ AllowedValues: map[string][]string{
+ "environment": {"development", "staging", "production", "testing"},
+ },
+ Patterns: map[string]string{
+ "server_url": "^https?://.+",
+ },
+ Constraints: map[string]interface{}{
+ "check_in_interval": map[string]interface{}{"min": 30, "max": 3600},
+ },
+ },
+ },
+ }
+}
\ No newline at end of file
diff --git a/aggregator-server/internal/services/secrets_manager.go b/aggregator-server/internal/services/secrets_manager.go
new file mode 100644
index 0000000..a92ab75
--- /dev/null
+++ b/aggregator-server/internal/services/secrets_manager.go
@@ -0,0 +1,263 @@
+package services
+
+import (
+ "crypto/aes"
+ "crypto/cipher"
+ "crypto/rand"
+ "encoding/hex"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "runtime"
+)
+
// SecretsManager handles Docker-style file secrets: it encrypts values and
// writes them as individual files under a platform-specific directory.
//
// NOTE(review): encryptionKey lives only in memory. If it is generated
// implicitly (see CreateDockerSecrets) and never persisted by the caller,
// previously written secrets become undecryptable after a restart — confirm
// key management happens at a higher layer.
type SecretsManager struct {
	secretsPath   string // directory secret files are written to (see getSecretsPath)
	encryptionKey string // hex-encoded AES-256 key; empty until set or generated
}

// NewSecretsManager creates a new secrets manager rooted at the
// platform-specific secrets path.
func NewSecretsManager() *SecretsManager {
	secretsPath := getSecretsPath()
	return &SecretsManager{
		secretsPath: secretsPath,
	}
}
+
+// CreateDockerSecrets creates Docker secrets from the provided secrets map
+func (sm *SecretsManager) CreateDockerSecrets(secrets map[string]string) error {
+ if len(secrets) == 0 {
+ return nil
+ }
+
+ // Ensure secrets directory exists
+ if err := os.MkdirAll(sm.secretsPath, 0755); err != nil {
+ return fmt.Errorf("failed to create secrets directory: %w", err)
+ }
+
+ // Generate encryption key if not provided
+ if sm.encryptionKey == "" {
+ key, err := sm.GenerateEncryptionKey()
+ if err != nil {
+ return fmt.Errorf("failed to generate encryption key: %w", err)
+ }
+ sm.encryptionKey = key
+ }
+
+ // Create each secret
+ for name, value := range secrets {
+ if err := sm.createSecret(name, value); err != nil {
+ return fmt.Errorf("failed to create secret %s: %w", name, err)
+ }
+ }
+
+ return nil
+}
+
// createSecret encrypts value and writes it to <secretsPath>/<name> with
// owner-read-only (0400) permissions.
//
// NOTE(review): name is joined into the path without sanitization; a name
// containing path separators or ".." would escape the secrets directory —
// confirm all callers use fixed, internally generated secret names.
func (sm *SecretsManager) createSecret(name, value string) error {
	secretPath := filepath.Join(sm.secretsPath, name)

	// Encrypt sensitive values
	encryptedValue, err := sm.encryptSecret(value)
	if err != nil {
		return fmt.Errorf("failed to encrypt secret: %w", err)
	}

	// Write secret file with restricted permissions
	if err := os.WriteFile(secretPath, encryptedValue, 0400); err != nil {
		return fmt.Errorf("failed to write secret file: %w", err)
	}

	return nil
}
+
+// encryptSecret encrypts a secret value using AES-256-GCM
+func (sm *SecretsManager) encryptSecret(value string) ([]byte, error) {
+ // Generate key from master key
+ keyBytes, err := hex.DecodeString(sm.encryptionKey)
+ if err != nil {
+ return nil, fmt.Errorf("invalid encryption key format: %w", err)
+ }
+
+ // Create cipher
+ block, err := aes.NewCipher(keyBytes)
+ if err != nil {
+ return nil, fmt.Errorf("failed to create cipher: %w", err)
+ }
+
+ // Create GCM
+ gcm, err := cipher.NewGCM(block)
+ if err != nil {
+ return nil, fmt.Errorf("failed to create GCM: %w", err)
+ }
+
+ // Generate nonce
+ nonce := make([]byte, gcm.NonceSize())
+ if _, err := io.ReadFull(rand.Reader, nonce); err != nil {
+ return nil, fmt.Errorf("failed to generate nonce: %w", err)
+ }
+
+ // Encrypt
+ ciphertext := gcm.Seal(nonce, nonce, []byte(value), nil)
+
+ // Prepend nonce to ciphertext
+ result := append(nonce, ciphertext...)
+
+ return result, nil
+}
+
+// decryptSecret decrypts a secret value using AES-256-GCM
+func (sm *SecretsManager) decryptSecret(encryptedValue []byte) (string, error) {
+ if len(encryptedValue) < 12 { // GCM nonce size
+ return "", fmt.Errorf("invalid encrypted value length")
+ }
+
+ // Generate key from master key
+ keyBytes, err := hex.DecodeString(sm.encryptionKey)
+ if err != nil {
+ return "", fmt.Errorf("invalid encryption key format: %w", err)
+ }
+
+ // Create cipher
+ block, err := aes.NewCipher(keyBytes)
+ if err != nil {
+ return "", fmt.Errorf("failed to create cipher: %w", err)
+ }
+
+ // Create GCM
+ gcm, err := cipher.NewGCM(block)
+ if err != nil {
+ return "", fmt.Errorf("failed to create GCM: %w", err)
+ }
+
+ // Extract nonce and ciphertext
+ nonce := encryptedValue[:gcm.NonceSize()]
+ ciphertext := encryptedValue[gcm.NonceSize():]
+
+ // Decrypt
+ plaintext, err := gcm.Open(nil, nonce, ciphertext, nil)
+ if err != nil {
+ return "", fmt.Errorf("failed to decrypt secret: %w", err)
+ }
+
+ return string(plaintext), nil
+}
+
+// GenerateEncryptionKey generates a new encryption key
+func (sm *SecretsManager) GenerateEncryptionKey() (string, error) {
+ bytes := make([]byte, 32)
+ if _, err := rand.Read(bytes); err != nil {
+ return "", fmt.Errorf("failed to generate encryption key: %w", err)
+ }
+ return hex.EncodeToString(bytes), nil
+}
+
// SetEncryptionKey sets the master encryption key (hex-encoded, 32 bytes
// once decoded — see encryptSecret).
func (sm *SecretsManager) SetEncryptionKey(key string) {
	sm.encryptionKey = key
}

// GetEncryptionKey returns the current encryption key; empty until set or
// implicitly generated by CreateDockerSecrets.
func (sm *SecretsManager) GetEncryptionKey() string {
	return sm.encryptionKey
}

// GetSecretsPath returns the directory secret files are written to.
func (sm *SecretsManager) GetSecretsPath() string {
	return sm.secretsPath
}
+
+// ValidateSecrets validates that all required secrets exist
+func (sm *SecretsManager) ValidateSecrets(requiredSecrets []string) error {
+ for _, secretName := range requiredSecrets {
+ secretPath := filepath.Join(sm.secretsPath, secretName)
+ if _, err := os.Stat(secretPath); os.IsNotExist(err) {
+ return fmt.Errorf("required secret not found: %s", secretName)
+ }
+ }
+ return nil
+}
+
+// ListSecrets returns a list of all created secrets
+func (sm *SecretsManager) ListSecrets() ([]string, error) {
+ entries, err := os.ReadDir(sm.secretsPath)
+ if err != nil {
+ if os.IsNotExist(err) {
+ return []string{}, nil
+ }
+ return nil, fmt.Errorf("failed to read secrets directory: %w", err)
+ }
+
+ var secrets []string
+ for _, entry := range entries {
+ if !entry.IsDir() {
+ secrets = append(secrets, entry.Name())
+ }
+ }
+
+ return secrets, nil
+}
+
// RemoveSecret deletes the secret file with the given name; the error from
// os.Remove (including "file does not exist") is returned unmodified.
func (sm *SecretsManager) RemoveSecret(name string) error {
	secretPath := filepath.Join(sm.secretsPath, name)
	return os.Remove(secretPath)
}
+
+// Cleanup removes all secrets and the secrets directory
+func (sm *SecretsManager) Cleanup() error {
+ if _, err := os.Stat(sm.secretsPath); os.IsNotExist(err) {
+ return nil
+ }
+
+ // Remove all files in the directory
+ entries, err := os.ReadDir(sm.secretsPath)
+ if err != nil {
+ return fmt.Errorf("failed to read secrets directory: %w", err)
+ }
+
+ for _, entry := range entries {
+ if !entry.IsDir() {
+ if err := os.Remove(filepath.Join(sm.secretsPath, entry.Name())); err != nil {
+ return fmt.Errorf("failed to remove secret %s: %w", entry.Name(), err)
+ }
+ }
+ }
+
+ // Remove the directory itself
+ return os.Remove(sm.secretsPath)
+}
+
+// getSecretsPath returns the platform-specific secrets path
+func getSecretsPath() string {
+ if runtime.GOOS == "windows" {
+ return `C:\ProgramData\Docker\secrets`
+ }
+ return "/run/secrets"
+}
+
// IsDockerEnvironment reports whether the current process appears to run
// inside a Docker container, using two heuristics: the /.dockerenv marker
// file and the word "docker" in /proc/1/cgroup.
//
// NOTE(review): containsString (below) only matches exact/prefix/suffix,
// not arbitrary substrings, so "docker" appearing mid-line in
// /proc/1/cgroup is missed; cgroup v2 hosts may not expose "docker" there
// at all. Treat a false result as "unknown", not "definitely not Docker".
func IsDockerEnvironment() bool {
	// Check for .dockerenv file
	if _, err := os.Stat("/.dockerenv"); err == nil {
		return true
	}

	// Check for Docker in cgroup
	if data, err := os.ReadFile("/proc/1/cgroup"); err == nil {
		if containsString(string(data), "docker") {
			return true
		}
	}

	return false
}
+
// containsString reports whether substr occurs anywhere in s.
//
// The previous implementation only matched when substr equaled s or was a
// prefix/suffix of it — despite its comment claiming a substring check — so
// IsDockerEnvironment missed "docker" appearing mid-line in /proc/1/cgroup.
// This is a straightforward linear scan; strings.Contains is avoided only
// to keep this file's import set unchanged.
func containsString(s, substr string) bool {
	if len(substr) == 0 {
		return true
	}
	for i := 0; i+len(substr) <= len(s); i++ {
		if s[i:i+len(substr)] == substr {
			return true
		}
	}
	return false
}
\ No newline at end of file
diff --git a/aggregator-server/internal/services/update_nonce.go b/aggregator-server/internal/services/update_nonce.go
new file mode 100644
index 0000000..87ec894
--- /dev/null
+++ b/aggregator-server/internal/services/update_nonce.go
@@ -0,0 +1,90 @@
+package services
+
+import (
+ "crypto/ed25519"
+ "encoding/base64"
+ "encoding/json"
+ "fmt"
+ "time"
+)
+
// UpdateNonce is a short-lived, server-signed authorization for a single
// agent to update to a specific version.
type UpdateNonce struct {
	AgentID       string `json:"agent_id"`
	TargetVersion string `json:"target_version"`
	Timestamp     int64  `json:"timestamp"` // Unix seconds at issuance; bounds nonce lifetime
	Signature     string `json:"signature"` // base64 Ed25519 signature over the JSON with this field empty
}

// UpdateNonceService issues and validates UpdateNonces using the server's
// Ed25519 key pair.
type UpdateNonceService struct {
	privateKey ed25519.PrivateKey // used to sign, and (via .Public()) to verify
	maxAge     time.Duration      // maximum accepted nonce age in Validate
}

// NewUpdateNonceService creates a nonce service with a 10-minute validity
// window.
func NewUpdateNonceService(privateKey ed25519.PrivateKey) *UpdateNonceService {
	return &UpdateNonceService{
		privateKey: privateKey,
		maxAge:     10 * time.Minute,
	}
}
+
// Generate creates a signed, base64-encoded nonce authorizing agentID to
// update to targetVersion.
//
// The Ed25519 signature is computed over the JSON encoding of the nonce
// while Signature is still its zero value (""); the signature is then
// stored in the struct and the whole thing is re-marshaled. Validate
// reverses this exactly (clears Signature, re-marshals, verifies), so the
// scheme relies on encoding/json producing identical bytes for identical
// structs — true here because field order follows struct declaration order.
func (s *UpdateNonceService) Generate(agentID, targetVersion string) (string, error) {
	nonce := UpdateNonce{
		AgentID:       agentID,
		TargetVersion: targetVersion,
		Timestamp:     time.Now().Unix(),
	}

	// Sign the encoding with the (empty) Signature field included.
	data, err := json.Marshal(nonce)
	if err != nil {
		return "", fmt.Errorf("marshal failed: %w", err)
	}

	signature := ed25519.Sign(s.privateKey, data)
	nonce.Signature = base64.StdEncoding.EncodeToString(signature)

	encoded, err := json.Marshal(nonce)
	if err != nil {
		return "", fmt.Errorf("encode failed: %w", err)
	}

	return base64.StdEncoding.EncodeToString(encoded), nil
}
+
// Validate verifies the nonce's Ed25519 signature and freshness, returning
// the decoded nonce on success.
//
// NOTE(review): there is no replay protection — a valid nonce can be
// presented any number of times within maxAge; and the freshness check only
// rejects nonces older than maxAge, so a timestamp in the future passes.
// The caller is responsible for matching AgentID/TargetVersion against the
// requesting agent; this function does not.
func (s *UpdateNonceService) Validate(encodedNonce string) (*UpdateNonce, error) {
	data, err := base64.StdEncoding.DecodeString(encodedNonce)
	if err != nil {
		return nil, fmt.Errorf("invalid base64: %w", err)
	}

	var nonce UpdateNonce
	if err := json.Unmarshal(data, &nonce); err != nil {
		return nil, fmt.Errorf("invalid format: %w", err)
	}

	// Check freshness
	if time.Now().Unix()-nonce.Timestamp > int64(s.maxAge.Seconds()) {
		return nil, fmt.Errorf("nonce expired")
	}

	// Verify signature
	signature, err := base64.StdEncoding.DecodeString(nonce.Signature)
	if err != nil {
		return nil, fmt.Errorf("invalid signature: %w", err)
	}

	// Re-create the exact bytes that were signed: the JSON encoding with
	// Signature cleared (mirrors Generate).
	nonce.Signature = ""
	verifyData, err := json.Marshal(nonce)
	if err != nil {
		return nil, fmt.Errorf("marshal verify data: %w", err)
	}

	if !ed25519.Verify(s.privateKey.Public().(ed25519.PublicKey), verifyData, signature) {
		return nil, fmt.Errorf("signature verification failed")
	}

	// Return validated nonce (Signature field is cleared at this point)
	return &nonce, nil
}
diff --git a/aggregator-web/src/components/AgentUpdate.tsx b/aggregator-web/src/components/AgentUpdate.tsx
new file mode 100644
index 0000000..b8b262c
--- /dev/null
+++ b/aggregator-web/src/components/AgentUpdate.tsx
@@ -0,0 +1,200 @@
+import React, { useState } from 'react';
+import { Upload, CheckCircle, XCircle, RotateCw, Download } from 'lucide-react';
+import { useAgentUpdate } from '@/hooks/useAgentUpdate';
+import { Agent } from '@/types';
+import { cn } from '@/lib/utils';
+import toast from 'react-hot-toast';
+
+interface AgentUpdateProps {
+ agent: Agent;
+ onUpdateComplete?: () => void;
+ className?: string;
+}
+
+export function AgentUpdate({ agent, onUpdateComplete, className }: AgentUpdateProps) {
+ const {
+ checkForUpdate,
+ triggerAgentUpdate,
+ updateStatus,
+ checkingUpdate,
+ updatingAgent,
+ hasUpdate,
+ availableVersion,
+ currentVersion
+ } = useAgentUpdate();
+
+ const [isChecking, setIsChecking] = useState(false);
+ const [showConfirmDialog, setShowConfirmDialog] = useState(false);
+ const [hasChecked, setHasChecked] = useState(false);
+
+ const handleCheckUpdate = async (e: React.MouseEvent) => {
+ e.stopPropagation();
+ setIsChecking(true);
+
+ try {
+ await checkForUpdate(agent.id);
+ setHasChecked(true);
+
+ if (hasUpdate && availableVersion) {
+ setShowConfirmDialog(true);
+ } else if (!hasUpdate && hasChecked) {
+ toast.info('Agent is already at latest version');
+ }
+ } catch (error) {
+ console.error('[UI] Failed to check for updates:', error);
+ toast.error('Failed to check for available updates');
+ } finally {
+ setIsChecking(false);
+ }
+ };
+
+ const handleConfirmUpdate = async () => {
+ if (!hasUpdate || !availableVersion) {
+ toast.error('No update available');
+ return;
+ }
+
+ setShowConfirmDialog(false);
+
+ try {
+ await triggerAgentUpdate(agent, availableVersion);
+
+ if (onUpdateComplete) {
+ onUpdateComplete();
+ }
+
+ } catch (error) {
+ console.error('[UI] Update failed:', error);
+ }
+ };
+
+ const buttonContent = () => {
+ if (updatingAgent) {
+ return (
+ <>
+
+
+ {updateStatus.status === 'downloading' && 'Downloading...'}
+ {updateStatus.status === 'installing' && 'Installing...'}
+ {updateStatus.status === 'pending' && 'Starting update...'}
+
+ >
+ );
+ }
+
+ if (agent.is_updating) {
+ return (
+ <>
+
+ Updating...
+ >
+ );
+ }
+
+ if (isChecking) {
+ return (
+ <>
+
+ Checking...
+ >
+ );
+ }
+
+ if (hasChecked && hasUpdate) {
+ return (
+ <>
+
+ Update to {availableVersion}
+ >
+ );
+ }
+
+ return (
+ <>
+
+ Check for Update
+ >
+ );
+ };
+
+ return (
+
+
+
+ {/* Progress indicator */}
+ {updatingAgent && updateStatus.progress && (
+
+ )}
+
+ {/* Status icon */}
+ {hasChecked && !updatingAgent && (
+
+ {hasUpdate ? (
+
+ ) : (
+
+ )}
+
+ )}
+
+ {/* Version info popup */}
+ {hasChecked && (
+
+ {currentVersion} โ {hasUpdate ? availableVersion : 'Latest'}
+
+ )}
+
+ {/* Confirmation Dialog */}
+ {showConfirmDialog && (
+
+
+
+ Update Agent: {agent.hostname}
+
+
+ Update agent from {currentVersion} to {availableVersion}?
+
+
+ This will temporarily take the agent offline during the update process.
+
+
+
+
+
+
+
+ )}
+
+ );
+}
\ No newline at end of file
diff --git a/aggregator-web/src/components/RelayList.tsx b/aggregator-web/src/components/RelayList.tsx
new file mode 100644
index 0000000..a9c1aad
--- /dev/null
+++ b/aggregator-web/src/components/RelayList.tsx
@@ -0,0 +1,208 @@
+import React, { useState } from 'react';
+import { Upload, RefreshCw } from 'lucide-react';
+import { agentApi } from '@/lib/api';
+import { Agent } from '@/types';
+import toast from 'react-hot-toast';
+
+interface BulkAgentUpdateProps {
+ agents: Agent[];
+ onBulkUpdateComplete?: () => void;
+}
+
+export function BulkAgentUpdate({ agents, onBulkUpdateComplete }: BulkAgentUpdateProps) {
+ const [updatingAgents, setUpdatingAgents] = useState>(new Set());
+ const [checkingUpdates, setCheckingUpdates] = useState>(new Set());
+
+ const handleBulkUpdate = async () => {
+ if (agents.length === 0) {
+ toast.error('No agents selected');
+ return;
+ }
+
+ // Check each agent for available updates first
+ let agentsNeedingUpdate: Agent[] = [];
+ let availableVersion: string | undefined;
+
+ // This will populate the checking state
+ agents.forEach(agent => setCheckingUpdates(prev => new Set(prev).add(agent.id)));
+
+ try {
+ const checkPromises = agents.map(async (agent) => {
+ try {
+ const result = await agentApi.checkForUpdateAvailable(agent.id);
+
+ if (result.hasUpdate && result.latestVersion) {
+ agentsNeedingUpdate.push(agent);
+ if (!availableVersion) {
+ availableVersion = result.latestVersion;
+ }
+ }
+ } catch (error) {
+ console.error(`Failed to check updates for agent ${agent.id}:`, error);
+ } finally {
+ setCheckingUpdates(prev => {
+ const newSet = new Set(prev);
+ newSet.delete(agent.id);
+ return newSet;
+ });
+ }
+ });
+
+ await Promise.all(checkPromises);
+
+ if (agentsNeedingUpdate.length === 0) {
+ toast.info('Selected agents are already up to date');
+ return;
+ }
+
+ // Generate nonces for each agent that needs updating
+ const noncePromises = agentsNeedingUpdate.map(async (agent) => {
+ if (availableVersion) {
+ try {
+ const nonceData = await agentApi.generateUpdateNonce(agent.id, availableVersion);
+
+ // Store nonce for use in update request
+ return {
+ agentId: agent.id,
+ hostname: agent.hostname,
+ nonce: nonceData.update_nonce,
+ targetVersion: availableVersion
+ };
+ } catch (error) {
+ console.error(`Failed to generate nonce for ${agent.hostname}:`, error);
+ return null;
+ }
+ }
+ return null;
+ });
+
+ const nonceResults = await Promise.all(noncePromises);
+ const validUpdates = nonceResults.filter(item => item !== null);
+
+ if (validUpdates.length === 0) {
+ toast.error('Failed to generate update nonces for any agents');
+ return;
+ }
+
+ // Perform bulk updates
+ const updateData = {
+ agent_ids: validUpdates.map(item => item.agentId),
+ version: availableVersion,
+        platform: 'linux-amd64', // TODO: derive per-agent from os_type/os_architecture instead of hardcoding
+ nonces: validUpdates.map(item => item.nonce)
+ };
+
+ // Mark agents as updating
+ validUpdates.forEach(item => {
+ setUpdatingAgents(prev => new Set(prev).add(item.agentId));
+ });
+
+ const result = await agentApi.updateMultipleAgents(updateData);
+
+ toast.success(`Initiated updates for ${result.updated.length} of ${agents.length} agents`);
+
+ if (result.failed.length > 0) {
+ toast.error(`Failed to update ${result.failed.length} agents`);
+ }
+
+ // Start polling for completion
+ startBulkUpdatePolling(validUpdates);
+
+ if (onBulkUpdateComplete) {
+ onBulkUpdateComplete();
+ }
+
+ } catch (error) {
+ console.error('Bulk update failed:', error);
+ toast.error(`Bulk update failed: ${error.message}`);
+ }
+ };
+
+ const startBulkUpdatePolling = (agents: Array<{agentId: string, hostname: string}>) => {
+ let attempts = 0;
+ const maxAttempts = 60; // 5 minutes max
+
+ const pollInterval = setInterval(async () => {
+ attempts++;
+
+ if (attempts >= maxAttempts || updatingAgents.size === 0) {
+ clearInterval(pollInterval);
+ setUpdatingAgents(new Set());
+ return;
+ }
+
+ const statusPromises = agents.map(async (item) => {
+ try {
+ const status = await agentApi.getUpdateStatus(item.agentId);
+
+ if (status.status === 'complete' || status.status === 'failed') {
+ // Remove from updating set
+ setUpdatingAgents(prev => {
+ const newSet = new Set(prev);
+ newSet.delete(item.agentId);
+ return newSet;
+ });
+
+ if (status.status === 'complete') {
+ toast.success(`${item.hostname} updated successfully`);
+ } else {
+ toast.error(`${item.hostname} update failed: ${status.error || 'Unknown error'}`);
+ }
+ }
+ } catch (error) {
+ console.error(`Failed to poll ${item.hostname}:`, error);
+ }
+ });
+
+ await Promise.allSettled(statusPromises);
+
+ }, 5000); // Check every 5 seconds
+
+ return () => clearInterval(pollInterval);
+ };
+
+ const isAnyAgentUpdating = (): boolean => {
+ return agents.some(agent => updatingAgents.has(agent.id));
+ };
+
+ const isAnyAgentChecking = (): boolean => {
+ return agents.some(agent => checkingUpdates.has(agent.id));
+ };
+
+ const getButtonContent = () => {
+ if (isAnyAgentUpdating() || isAnyAgentChecking()) {
+ return (
+ <>
+
+ {isAnyAgentChecking() ? "Checking..." : "Updating..."}
+ >
+ );
+ }
+
+ if (agents.length === 1) {
+ return (
+ <>
+
+ Update 1 Agent
+ >
+ );
+ }
+
+ return (
+ <>
+
+ Update {agents.length} Agents
+ >
+ );
+ };
+
+ return (
+
+ );
+}
\ No newline at end of file
diff --git a/aggregator-web/src/hooks/useAgentUpdate.ts b/aggregator-web/src/hooks/useAgentUpdate.ts
new file mode 100644
index 0000000..923cab5
--- /dev/null
+++ b/aggregator-web/src/hooks/useAgentUpdate.ts
@@ -0,0 +1,159 @@
+import { useState, useEffect } from 'react';
+import { useMutation, useQuery, useQueryClient } from '@tanstack/react-query';
+import { toast } from 'react-hot-toast';
+import { agentApi } from '@/lib/api';
+import { Agent } from '@/types';
+
+interface UseAgentUpdateReturn {
+ checkForUpdate: (agentId: string) => Promise;
+ triggerAgentUpdate: (agent: Agent, targetVersion: string) => Promise;
+ updateStatus: UpdateStatus;
+ checkingUpdate: boolean;
+ updatingAgent: boolean;
+ hasUpdate: boolean;
+ availableVersion?: string;
+ currentVersion?: string;
+}
+
+interface UpdateStatus {
+ status: 'idle' | 'checking' | 'pending' | 'downloading' | 'installing' | 'complete' | 'failed';
+ progress?: number;
+ error?: string;
+}
+
+export function useAgentUpdate(): UseAgentUpdateReturn {
+ const queryClient = useQueryClient();
+ const [updateStatus, setUpdateStatus] = useState({ status: 'idle' });
+ const [hasUpdate, setHasUpdate] = useState(false);
+ const [availableVersion, setAvailableVersion] = useState();
+ const [currentVersion, setCurrentVersion] = useState();
+
+ // Check if update available for agent
+ const checkMutation = useMutation({
+ mutationFn: agentApi.checkForUpdateAvailable,
+ onSuccess: (data) => {
+ setHasUpdate(data.hasUpdate);
+ setAvailableVersion(data.latestVersion);
+ setCurrentVersion(data.currentVersion);
+
+ if (!data.hasUpdate) {
+ toast.info('Agent is already at latest version');
+ }
+ },
+ onError: (error) => {
+ console.error('Failed to check for updates:', error);
+ toast.error(`Failed to check for updates: ${error.message}`);
+ }
+ });
+
+ // Check for update available
+ const checkForUpdate = async (agentId: string) => {
+ try {
+ await checkMutation.mutateAsync(agentId);
+ } catch (error) {
+ console.error('Error checking for update:', error);
+ }
+ };
+
+ // Trigger agent update with nonce generation
+ const triggerAgentUpdate = async (agent: Agent, targetVersion: string) => {
+ try {
+      // Step 1: Check for update availability (already performed by checkMutation)
+ if (!hasUpdate) {
+ await checkForUpdate(agent.id);
+ if (!hasUpdate) {
+ toast.info('No updates available');
+ return;
+ }
+ }
+
+ // Step 2: Generate nonce for authorized update
+ const nonceData = await agentApi.generateUpdateNonce(agent.id, targetVersion);
+
+ console.log('[UI] Update nonce generated:', nonceData);
+
+ // Step 3: Trigger the actual update
+ const updateResponse = await agentApi.updateAgent(agent.id, {
+ version: targetVersion,
+ platform: `${agent.os_type}-${agent.os_architecture}`,
+ // Include nonce in request for security
+ nonce: nonceData.update_nonce
+ });
+
+ setUpdateStatus({ status: 'pending', progress: 0 });
+
+ // Step 4: Start polling for progress
+ startUpdatePolling(agent.id);
+
+ // Step 5: Refresh agent data in cache
+ queryClient.invalidateQueries({ queryKey: ['agents'] });
+
+ console.log('[UI] Update initiated successfully:', updateResponse);
+
+ } catch (error) {
+ console.error('[UI] Update failed:', error);
+ toast.error(`Update failed: ${error.message}`);
+ setUpdateStatus({ status: 'failed', error: error.message });
+ }
+ };
+
+ // Poll for update progress
+ const startUpdatePolling = (agentId: string) => {
+ let attempts = 0;
+ const maxAttempts = 60; // 5 minutes with 5 second intervals
+
+ const pollInterval = setInterval(async () => {
+ attempts++;
+
+ if (attempts >= maxAttempts) {
+ clearInterval(pollInterval);
+ setUpdateStatus({ status: 'failed', error: 'Update timeout' });
+ toast.error('Update timed out after 5 minutes');
+ return;
+ }
+
+ try {
+ const status = await agentApi.getUpdateStatus(agentId);
+
+ switch (status.status) {
+ case 'complete':
+ clearInterval(pollInterval);
+ setUpdateStatus({ status: 'complete' });
+ toast.success('Agent updated successfully!');
+ setHasUpdate(false);
+ setAvailableVersion(undefined);
+ break;
+ case 'failed':
+ clearInterval(pollInterval);
+ setUpdateStatus({ status: 'failed', error: status.error || 'Update failed' });
+ toast.error(`Update failed: ${status.error || 'Unknown error'}`);
+ break;
+ case 'downloading':
+ setUpdateStatus({ status: 'downloading', progress: status.progress });
+ break;
+ case 'installing':
+ setUpdateStatus({ status: 'installing', progress: status.progress });
+ break;
+ default:
+ setUpdateStatus({ status: 'idle' });
+ }
+ } catch (error) {
+ console.error('[UI] Failed to get update status:', error);
+ clearInterval(pollInterval);
+ setUpdateStatus({ status: 'failed', error: 'Failed to get update status' });
+ }
+ }, 5000); // Poll every 5 seconds
+
+ return () => clearInterval(pollInterval);
+ };
+
+ return {
+ checkForUpdate,
+ triggerAgentUpdate,
+ updateStatus,
+ updatingAgent: updateStatus.status === 'downloading' || updateStatus.status === 'installing' || updateStatus.status === 'pending',
+ hasUpdate,
+ availableVersion,
+ currentVersion
+ };
+}
\ No newline at end of file
diff --git a/aggregator-web/src/hooks/useSecurity.ts b/aggregator-web/src/hooks/useSecurity.ts
new file mode 100644
index 0000000..c6800e0
--- /dev/null
+++ b/aggregator-web/src/hooks/useSecurity.ts
@@ -0,0 +1,25 @@
+import { useQuery } from '@tanstack/react-query';
+import api from '@/lib/api';
+
+export interface ServerKeySecurityStatus {
+ has_private_key: boolean;
+ public_key_fingerprint?: string;
+ algorithm?: string;
+}
+
+export const useServerKeySecurity = () => {
+ return useQuery({
+ queryKey: ['serverKeySecurity'],
+ queryFn: async () => {
+ const response = await api.get('/security/overview');
+ const overview = response.data;
+ const signingStatus = overview.subsystems.ed25519_signing;
+
+ return {
+ has_private_key: signingStatus.status === 'healthy',
+ public_key_fingerprint: signingStatus.checks?.public_key_fingerprint,
+ algorithm: signingStatus.checks?.algorithm,
+ };
+ },
+ });
+};
diff --git a/aggregator-web/src/pages/Agents.tsx b/aggregator-web/src/pages/Agents.tsx
index 5cdc94c..e9746c4 100644
--- a/aggregator-web/src/pages/Agents.tsx
+++ b/aggregator-web/src/pages/Agents.tsx
@@ -28,11 +28,13 @@ import {
Upload,
} from 'lucide-react';
import { useAgents, useAgent, useScanAgent, useScanMultipleAgents, useUnregisterAgent } from '@/hooks/useAgents';
+import { useAgentUpdate } from '@/hooks/useAgentUpdate';
import { useActiveCommands, useCancelCommand } from '@/hooks/useCommands';
import { useHeartbeatStatus, useInvalidateHeartbeat, useHeartbeatAgentSync } from '@/hooks/useHeartbeat';
import { agentApi } from '@/lib/api';
import { useQueryClient } from '@tanstack/react-query';
import { getStatusColor, formatRelativeTime, isOnline, formatBytes } from '@/lib/utils';
+import { AgentUpdate } from '@/components/AgentUpdate';
import { cn } from '@/lib/utils';
import toast from 'react-hot-toast';
import { AgentSystemUpdates } from '@/components/AgentUpdates';
@@ -40,6 +42,7 @@ import { AgentStorage } from '@/components/AgentStorage';
import { AgentUpdatesEnhanced } from '@/components/AgentUpdatesEnhanced';
import { AgentScanners } from '@/components/AgentScanners';
import { AgentUpdatesModal } from '@/components/AgentUpdatesModal';
+import { BulkAgentUpdate } from '@/components/RelayList';
import ChatTimeline from '@/components/ChatTimeline';
const Agents: React.FC = () => {
@@ -1167,13 +1170,12 @@ const Agents: React.FC = () => {
)}
Scan Selected ({selectedAgents.length})
-
+ selectedAgents.includes(agent.id))}
+ onBulkUpdateComplete={() => {
+ queryClient.invalidateQueries({ queryKey: ['agents'] });
+ }}
+ />
>
)}
@@ -1393,6 +1395,13 @@ const Agents: React.FC = () => {
>
+ {/* Agent Update with nonce security */}
+ {
+ queryClient.invalidateQueries({ queryKey: ['agents'] });
+ }}
+ />
+ {/* Important Messages / Security Alert */}
+ {serverKeySecurity && !serverKeySecurity.has_private_key && (
+
+
+
+
+ Security Upgrade Required:
+ Your server is missing a private key for secure agent updates. Please go to Agent Management to generate one.
+
+
+
+ )}
+
{/* Stats cards */}
{statCards.map((stat) => {
diff --git a/aggregator-web/src/pages/settings/AgentManagement.tsx b/aggregator-web/src/pages/settings/AgentManagement.tsx
index c428cc6..b299328 100644
--- a/aggregator-web/src/pages/settings/AgentManagement.tsx
+++ b/aggregator-web/src/pages/settings/AgentManagement.tsx
@@ -18,11 +18,15 @@ import {
import { useRegistrationTokens } from '@/hooks/useRegistrationTokens';
import { toast } from 'react-hot-toast';
+import { useServerKeySecurity } from '@/hooks/useSecurity';
+
const AgentManagement: React.FC = () => {
const navigate = useNavigate();
const [copiedCommand, setCopiedCommand] = useState(null);
const [selectedPlatform, setSelectedPlatform] = useState('linux');
const { data: tokens, isLoading: tokensLoading } = useRegistrationTokens({ is_active: true });
+ const { data: serverKeySecurity, isLoading: isLoadingServerKeySecurity, refetch: refetchServerKeySecurity } = useServerKeySecurity();
+ const [generatingKeys, setGeneratingKeys] = useState(false);
const platforms = [
{
@@ -303,6 +307,88 @@ const AgentManagement: React.FC = () => {
+ {/* Server Signing Key */}
+
+
๐ Server Signing Key
+ {isLoadingServerKeySecurity ? (
+
+
+
Loading key status...
+
+ ) : serverKeySecurity?.has_private_key ? (
+
+
+
+ โ Server has a private key for signing agent updates.
+
+
+
+
+
+
+
+
+
+
+
+ ) : (
+
+
+
+ Your server is missing a private key. Generate one to enable secure agent updates.
+
+
+
+
+ )}
+
+
๐ก๏ธ Security Model
diff --git a/discord/discord_manager.py b/discord/discord_manager.py
index 1d4a1bc..fb16043 100755
--- a/discord/discord_manager.py
+++ b/discord/discord_manager.py
@@ -52,9 +52,11 @@ class DiscordManager:
logger.info(f'โ
Bot logged in as {self.bot.user}')
logger.info(f'Serving server: {self.bot.user.name} (ID: {self.bot.user.id})')
- # Sync commands
+            # NOTE: guild is fetched here but commands are synced globally below; guild-specific sync is not used
+ guild = self.bot.get_guild(self.server_id)
+ # Sync commands globally
await self.bot.tree.sync()
- logger.info('โ
Commands synced')
+ logger.info('โ
Commands synced globally')
# Get server info
guild = self.bot.get_guild(self.server_id)
@@ -75,6 +77,15 @@ class DiscordManager:
else:
await ctx.send(f'โ An error occurred: {error}')
+ @self.bot.event
+ async def on_interaction_error(interaction, error):
+ """Handle interaction errors"""
+ logger.error(f'Interaction error: {error}')
+ if interaction.response.is_done():
+ await interaction.followup.send(f'โ An error occurred: {error}', ephemeral=True)
+ else:
+ await interaction.response.send_message(f'โ An error occurred: {error}', ephemeral=True)
+
def setup_commands(self):
"""Setup slash commands"""
@@ -105,6 +116,47 @@ class DiscordManager:
async def cmd_create_test_channel(interaction: discord.Interaction):
await self.cmd_create_test_channel(interaction)
+ @self.bot.tree.command(name="create-roles", description="Create RedFlag community roles")
+ async def cmd_create_roles(interaction: discord.Interaction):
+ await self.cmd_create_roles(interaction)
+
+ @self.bot.tree.command(name="role-menu", description="Show interactive role assignment menu")
+ async def cmd_role_menu(interaction: discord.Interaction):
+ await self.cmd_role_menu(interaction)
+
+ @self.bot.tree.command(name="assign-lead-dev", description="Assign RedFlag Lead Dev role *(Admin only)*")
+ async def cmd_assign_lead_dev(interaction: discord.Interaction, user: discord.Member):
+ await self.cmd_assign_lead_dev(interaction, user)
+
+ @self.bot.tree.command(name="setup-welcome", description="Setup welcome channel with message and role selector *(Admin only)*")
+ async def cmd_setup_welcome(interaction: discord.Interaction):
+ await self.cmd_setup_welcome(interaction)
+
+ @self.bot.tree.command(name="create-version-channels", description="Create version-related channels *(Admin only)*")
+ async def cmd_create_version_channels(interaction: discord.Interaction):
+ await self.cmd_create_version_channels(interaction)
+
+ @self.bot.tree.command(name="sync-commands", description="Force sync commands *(Admin only)*")
+ async def cmd_sync_commands(interaction: discord.Interaction):
+ await self.cmd_sync_commands(interaction)
+
+ @self.bot.tree.command(name="create-redflag-channels", description="Create RedFlag homelab management channels")
+ async def cmd_create_redflag_channels(interaction: discord.Interaction):
+ await self.cmd_create_redflag_channels(interaction)
+
+ @self.bot.tree.command(name="test", description="Test command")
+ async def cmd_test(interaction: discord.Interaction):
+ await interaction.response.send_message("โ
Test command works!", ephemeral=True)
+
+ @self.bot.tree.command(name="create-welcome-banner", description="Create a welcome banner in a channel")
+ @app_commands.describe(channel="Channel to create banner in")
+ async def cmd_create_welcome_banner(interaction: discord.Interaction, channel: discord.TextChannel):
+ await self.cmd_create_welcome_banner(interaction, channel)
+
+ @self.bot.tree.command(name="list-commands", description="List all available bot commands")
+ async def cmd_list_commands_debug(interaction: discord.Interaction):
+ await self.cmd_list_commands_debug(interaction)
+
@self.bot.tree.command(name="help", description="Show available commands")
async def cmd_help(interaction: discord.Interaction):
await self.cmd_help(interaction)
@@ -339,6 +391,568 @@ class DiscordManager:
ephemeral=True
)
+ async def cmd_create_redflag_channels(self, interaction: discord.Interaction):
+ """Create RedFlag development/support Discord channels"""
+ guild = self.bot.get_guild(self.server_id)
+ if not guild:
+ await interaction.response.send_message("โ Could not find server!", ephemeral=True)
+ return
+
+ await interaction.response.defer(ephemeral=True)
+ results = []
+
+ try:
+ # Create categories for community Discord
+ welcome_cat = await guild.create_category_channel("๐ Welcome & Info")
+ results.append("โ
Welcome & Info category")
+
+ support_cat = await guild.create_category_channel("๐ฌ Support & Help")
+ results.append("โ
Support & Help category")
+
+ dev_cat = await guild.create_category_channel("๐ง Development")
+ results.append("โ
Development category")
+
+ community_cat = await guild.create_category_channel("๐ Community")
+ results.append("โ
Community category")
+
+ await asyncio.sleep(1)
+
+ # Welcome & Info channels
+ rules = await guild.create_text_channel(
+ "rules-and-info",
+ category=welcome_cat,
+ reason="Community rules and project information"
+ )
+ results.append("โ
#rules-and-info")
+
+ announcements = await guild.create_text_channel(
+ "announcements",
+ category=welcome_cat,
+ reason="Project announcements and releases"
+ )
+ results.append("โ
#announcements")
+
+ await asyncio.sleep(1)
+
+ # Support & Help channels
+ general_support = await guild.create_text_channel(
+ "general-support",
+ category=support_cat,
+ reason="General RedFlag support and questions"
+ )
+ results.append("โ
#general-support")
+
+ installation = await guild.create_text_channel(
+ "installation-help",
+ category=support_cat,
+ reason="Help with RedFlag installation and setup"
+ )
+ results.append("โ
#installation-help")
+
+ bug_reports = await guild.create_text_channel(
+ "bug-reports",
+ category=support_cat,
+ reason="Bug reports and troubleshooting"
+ )
+ results.append("โ
#bug-reports")
+
+ await asyncio.sleep(1)
+
+ # Development channels
+ general_dev = await guild.create_text_channel(
+ "general-development",
+ category=dev_cat,
+ reason="General development discussions"
+ )
+ results.append("โ
#general-development")
+
+ feature_requests = await guild.create_text_channel(
+ "feature-requests",
+ category=dev_cat,
+ reason="Feature requests and ideas"
+ )
+ results.append("โ
#feature-requests")
+
+ code_review = await guild.create_text_channel(
+ "code-review",
+ category=dev_cat,
+ reason="Code review and development collaboration"
+ )
+ results.append("โ
#code-review")
+
+ await asyncio.sleep(1)
+
+ # Community channels
+ general_chat = await guild.create_text_channel(
+ "general-chat",
+ category=community_cat,
+ reason="Off-topic community chat"
+ )
+ results.append("โ
#general-chat")
+
+ homelab = await guild.create_text_channel(
+ "homelab-showcase",
+ category=community_cat,
+ reason="Share your homelab setups and RedFlag deployments"
+ )
+ results.append("โ
#homelab-showcase")
+
+ # Update .env with important channel IDs
+ discord_env.update_channel_ids("announcements", str(announcements.id))
+ discord_env.update_channel_ids("general-support", str(general_support.id))
+ discord_env.update_channel_ids("bug-reports", str(bug_reports.id))
+ discord_env.update_channel_ids("general-development", str(general_dev.id))
+
+ except Exception as e:
+ logger.error(f"Error creating RedFlag community channels: {e}")
+ results.append(f"โ Error: {e}")
+
+ embed = discord.Embed(
+ title="๐ RedFlag Community Discord Setup",
+ color=discord.Color.green() if "โ" not in str(results) else discord.Color.red(),
+ description="Created RedFlag development/support community channels:\n\n" + "\n".join(results)
+ )
+
+ await interaction.followup.send(embed=embed, ephemeral=True)
+
+ async def cmd_create_roles(self, interaction: discord.Interaction):
+ """Create RedFlag community roles"""
+ guild = self.bot.get_guild(self.server_id)
+ if not guild:
+ await interaction.response.send_message("โ Could not find server!", ephemeral=True)
+ return
+
+ # Only allow administrators to create roles
+ if not interaction.user.guild_permissions.administrator:
+ await interaction.response.send_message("โ Only administrators can create roles!", ephemeral=True)
+ return
+
+ await interaction.response.defer(ephemeral=True)
+ results = []
+
+ # Define RedFlag roles
+ redflag_roles = {
+ "๐ฉ RedFlag Lead Dev": discord.Color.red(),
+ "๐ Backend Dev": discord.Color.blue(),
+ "๐จ Frontend Dev": discord.Color.green(),
+ "๐ QA Tester": discord.Color.orange(),
+ "๐ฌ Community Helper": discord.Color.purple(),
+ "๐ค User": discord.Color.greyple(),
+ "๐ Lurker": discord.Color.dark_grey(),
+ }
+
+ for role_name, role_color in redflag_roles.items():
+ try:
+ # Check if role already exists
+ existing_role = discord.utils.get(guild.roles, name=role_name)
+ if existing_role:
+ results.append(f"โ ๏ธ {role_name} already exists")
+ continue
+
+ # Create the role
+ role = await guild.create_role(
+ name=role_name,
+ color=role_color,
+ reason="RedFlag community role creation",
+ mentionable=True
+ )
+ results.append(f"โ
Created {role_name}")
+
+ # Store role ID in .env for future reference
+ safe_name = role_name.replace("๐ฉ ", "").replace("๐ ", "").replace("๐จ ", "").replace("๐ ", "").replace("๐ฌ ", "").replace("๐ค ", "").replace("๐ ", "").lower().replace(" ", "_")
+ discord_env._config[f"ROLE_{safe_name.upper()}_ID"] = str(role.id)
+
+ except Exception as e:
+ logger.error(f"Error creating role {role_name}: {e}")
+ results.append(f"โ Failed to create {role_name}: {e}")
+
+ embed = discord.Embed(
+ title="๐ญ Role Creation Results",
+ color=discord.Color.green() if "โ" not in str(results) else discord.Color.red(),
+ description="\n".join(results)
+ )
+
+ await interaction.followup.send(embed=embed, ephemeral=True)
+
+ async def cmd_role_menu(self, interaction: discord.Interaction):
+ """Show interactive role assignment menu"""
+ guild = self.bot.get_guild(self.server_id)
+ if not guild:
+ await interaction.response.send_message("โ Could not find server!", ephemeral=True)
+ return
+
+ # Create the view with role buttons
+ view = discord.ui.View(timeout=180) # 3 minutes timeout
+
+ # Available roles for self-assignment (excluding Lead Dev)
+ available_roles = [
+ ("๐ Backend Dev", discord.Color.blue()),
+ ("๐จ Frontend Dev", discord.Color.green()),
+ ("๐ QA Tester", discord.Color.orange()),
+ ("๐ฌ Community Helper", discord.Color.purple()),
+ ("๐ค User", discord.Color.greyple()),
+ ("๐ Lurker", discord.Color.dark_grey()),
+ ]
+
+ # Create buttons for each role
+ for role_name, role_color in available_roles:
+ button = discord.ui.Button(
+ label=role_name.replace("๐ ", "").replace("๐จ ", "").replace("๐ ", "").replace("๐ฌ ", "").replace("๐ค ", "").replace("๐ ", ""),
+ emoji=role_name.split()[0], # Get the emoji
+ style=discord.ButtonStyle.secondary
+ )
+
+ async def button_callback(interaction: discord.Interaction, current_role_name=role_name):
+ await self.handle_role_assignment(interaction, current_role_name)
+
+ button.callback = button_callback
+ view.add_item(button)
+
+ embed = discord.Embed(
+ title="๐ญ Choose Your RedFlag Role",
+ description="Click a button below to assign yourself a role. You can change your role anytime!",
+ color=discord.Color.blue()
+ )
+ embed.add_field(
+ name="๐ฉ RedFlag Lead Dev",
+ value="This role is assigned by administrators only",
+ inline=False
+ )
+ embed.set_footer(text="You can only have one role at a time. Click again to change roles.")
+
+ await interaction.response.send_message(embed=embed, view=view, ephemeral=True)
+
+ async def cmd_assign_lead_dev(self, interaction: discord.Interaction, user: discord.Member):
+ """Assign RedFlag Lead Dev role (admin only)"""
+ guild = self.bot.get_guild(self.server_id)
+ if not guild:
+ await interaction.response.send_message("โ Could not find server!", ephemeral=True)
+ return
+
+ # Only allow administrators to assign Lead Dev role
+ if not interaction.user.guild_permissions.administrator:
+ await interaction.response.send_message("โ Only administrators can assign the Lead Dev role!", ephemeral=True)
+ return
+
+ await interaction.response.defer(ephemeral=True)
+
+ # Find the Lead Dev role
+ lead_role = discord.utils.get(guild.roles, name="๐ฉ RedFlag Lead Dev")
+ if not lead_role:
+ await interaction.followup.send("โ Lead Dev role not found! Please create roles first.", ephemeral=True)
+ return
+
+ try:
+ # Remove existing RedFlag roles from the user
+ redflag_role_prefixes = ["๐ฉ ", "๐ ", "๐จ ", "๐ ", "๐ฌ ", "๐ค ", "๐ "]
+ current_roles = [role for role in user.roles if any(role.name.startswith(prefix) for prefix in redflag_role_prefixes)]
+
+ if current_roles:
+ await user.remove_roles(*current_roles, reason="Assigned Lead Dev role")
+
+ # Assign Lead Dev role
+ await user.add_roles(lead_role, reason="Assigned by admin")
+ await interaction.followup.send(f"โ
Assigned **๐ฉ RedFlag Lead Dev** to {user.mention}", ephemeral=True)
+
+ except Exception as e:
+ logger.error(f"Error assigning Lead Dev role: {e}")
+ await interaction.followup.send(f"โ Failed to assign role: {e}", ephemeral=True)
+
+ async def cmd_setup_welcome(self, interaction: discord.Interaction):
+ """Setup welcome channel with message and role selector"""
+ guild = self.bot.get_guild(self.server_id)
+ if not guild:
+ await interaction.response.send_message("โ Could not find server!", ephemeral=True)
+ return
+
+ # Only allow administrators
+ if not interaction.user.guild_permissions.administrator:
+ await interaction.response.send_message("โ Only administrators can setup the welcome channel!", ephemeral=True)
+ return
+
+ await interaction.response.defer(ephemeral=True)
+ results = []
+
+ try:
+ # Find the general channel (try multiple names)
+ general_channel = None
+ possible_names = ["general", "๐ 127.0.0.1", "๐ localhost", "welcome", "welcome-and-info"]
+
+ for name in possible_names:
+ general_channel = discord.utils.get(guild.text_channels, name=name)
+ if general_channel:
+ break
+
+ if not general_channel:
+ # If no specific channel found, just use the first text channel (any category)
+ logger.info(f"Using first available text channel: {guild.text_channels[0].name}")
+ general_channel = guild.text_channels[0]
+ logger.info(f"Selected channel: {general_channel.name} (Category: {general_channel.category.name if general_channel.category else 'No category'})")
+
+ if not general_channel:
+ await interaction.followup.send("โ Could not find any text channel to use!", ephemeral=True)
+ return
+
+ # Rename the channel to localhost with house emoji
+ await general_channel.edit(name="๐ localhost", reason="Setup welcome channel")
+ results.append("โ
Renamed general to ๐ localhost")
+
+ # Create welcome message with role selector
+ welcome_embed = discord.Embed(
+ title="๐ Welcome to RedFlag",
+ description="**Self-hosted update management for homelabs**",
+ color=discord.Color.blue()
+ )
+
+ welcome_embed.add_field(
+ name="โ ๏ธ ALPHA SOFTWARE",
+ value="This is experimental software in active development. Features may be broken, bugs are expected, and breaking changes happen frequently. Use at your own risk, preferably on test systems only.",
+ inline=False
+ )
+
+ welcome_embed.add_field(
+ name="๐ค Community & Support",
+ value="""**Discord Maintenance:** Full disclosure - Discord community management isn't my strongest area. If we grow over 100 users, I'll be looking to vet a moderator to help keep things organized.
+
+**Response Times:** I *should* get alerts and will try to respond timely, but this place is a community for us all to grow and share in.
+
+**Community Guidelines:** Small requests that are slightly off-topic are totally fine. We're building a community around homelabs, update management, and practical solutions - not a corporate support channel.""",
+ inline=False
+ )
+
+ welcome_embed.add_field(
+ name="๐ Get Started",
+ value="1. **Choose Your Role** below - This helps us know how you're using RedFlag\n2. **Introduce Yourself** in #general-chat\n3. **Share Your Setup** in #homelab-showcase\n4. **Ask Questions** in #general-support",
+ inline=False
+ )
+
+ welcome_embed.set_footer(text="RedFlag - Simple, Honest, Homelab-first")
+ welcome_embed.set_thumbnail(url=guild.icon.url if guild.icon else None)
+
+ # Create role selector view
+ view = discord.ui.View(timeout=None) # Persistent view
+
+ # Available roles for self-assignment
+ available_roles = [
+ ("๐ Backend Dev", discord.Color.blue()),
+ ("๐จ Frontend Dev", discord.Color.green()),
+ ("๐ QA Tester", discord.Color.orange()),
+ ("๐ฌ Community Helper", discord.Color.purple()),
+ ("๐ค User", discord.Color.greyple()),
+ ("๐ Lurker", discord.Color.dark_grey()),
+ ]
+
+ # Create buttons for each role
+ for role_name, role_color in available_roles:
+ button = discord.ui.Button(
+ label=role_name.replace("๐ ", "").replace("๐จ ", "").replace("๐ ", "").replace("๐ฌ ", "").replace("๐ค ", "").replace("๐ ", ""),
+ emoji=role_name.split()[0],
+ style=discord.ButtonStyle.secondary,
+ custom_id=f"role_select_{role_name.replace(' ', '_').replace('๐ ', '').replace('๐จ', '').replace('๐', '').replace('๐ฌ', '').replace('๐ค', '').replace('๐', '')}"
+ )
+
+ async def button_callback(interaction: discord.Interaction, current_role_name=role_name):
+ await self.handle_role_assignment(interaction, current_role_name)
+
+ button.callback = button_callback
+ view.add_item(button)
+
+ # Set channel topic with important info
+ topic = "๐ Welcome! Use /role-menu to choose your role. RedFlag: Self-hosted update management for homelabs. ALPHA SOFTWARE - expect bugs!"
+ await general_channel.edit(topic=topic, reason="Set welcome channel topic")
+
+ # Send the welcome message
+ await general_channel.send(embed=welcome_embed, view=view)
+ results.append("โ
Posted welcome message with role selector and channel topic")
+
+ except Exception as e:
+ logger.error(f"Error setting up welcome channel: {e}")
+ results.append(f"โ Error: {e}")
+
+ embed = discord.Embed(
+ title="๐ Welcome Channel Setup Complete",
+ color=discord.Color.green() if "โ" not in str(results) else discord.Color.red(),
+ description="\n".join(results)
+ )
+
+ await interaction.followup.send(embed=embed, ephemeral=True)
+
+ async def cmd_create_version_channels(self, interaction: discord.Interaction):
+ """Create version-related channels"""
+ guild = self.bot.get_guild(self.server_id)
+ if not guild:
+ await interaction.response.send_message("โ Could not find server!", ephemeral=True)
+ return
+
+ # Only allow administrators
+ if not interaction.user.guild_permissions.administrator:
+ await interaction.response.send_message("โ Only administrators can create version channels!", ephemeral=True)
+ return
+
+ await interaction.response.defer(ephemeral=True)
+ results = []
+
+ try:
+ # Create version category
+ version_cat = await guild.create_category_channel("๐ฆ Version Management")
+ results.append("โ
Version Management category")
+
+ await asyncio.sleep(1)
+
+ # Main version channel
+ main_version = await guild.create_text_channel(
+ "๐ฏmain",
+ category=version_cat,
+ reason="Main stable version discussion"
+ )
+ results.append("โ
#main (stable version)")
+
+ # Tagged versions channel
+ tagged_versions = await guild.create_text_channel(
+ "๐ท๏ธtagged",
+ category=version_cat,
+ reason="Tagged release versions discussion"
+ )
+ results.append("โ
#tagged (release versions)")
+
+ # Unstable dev channel
+ unstable_dev = await guild.create_text_channel(
+ "๐ฎunstable-developer",
+ category=version_cat,
+ reason="Unstable developer branch discussion"
+ )
+ results.append("โ
#unstable-developer (dev branch)")
+
+ # Update .env with channel IDs
+ discord_env.update_channel_ids("main_version", str(main_version.id))
+ discord_env.update_channel_ids("tagged_versions", str(tagged_versions.id))
+ discord_env.update_channel_ids("unstable_dev", str(unstable_dev.id))
+
+ except Exception as e:
+ logger.error(f"Error creating version channels: {e}")
+ results.append(f"โ Error: {e}")
+
+ embed = discord.Embed(
+ title="๐ฆ Version Channels Created",
+ color=discord.Color.green() if "โ" not in str(results) else discord.Color.red(),
+ description="Created version management channels:\n\n" + "\n".join(results)
+ )
+
+ await interaction.followup.send(embed=embed, ephemeral=True)
+
+ async def cmd_sync_commands(self, interaction: discord.Interaction):
+ """Force sync commands"""
+ # Only allow administrators
+ if not interaction.user.guild_permissions.administrator:
+ await interaction.response.send_message("โ Only administrators can sync commands!", ephemeral=True)
+ return
+
+ await interaction.response.defer(ephemeral=True)
+
+ try:
+ # Sync commands globally
+ synced = await self.bot.tree.sync()
+ await interaction.followup.send(f"โ
Synced {len(synced)} commands globally!", ephemeral=True)
+ logger.info(f"Manually synced {len(synced)} commands")
+ except Exception as e:
+ logger.error(f"Error syncing commands: {e}")
+ await interaction.followup.send(f"โ Failed to sync commands: {e}", ephemeral=True)
+
+ async def handle_role_assignment(self, interaction: discord.Interaction, role_name: str):
+ """Handle role assignment from button click"""
+ guild = self.bot.get_guild(self.server_id)
+ if not guild:
+ await interaction.response.send_message("โ Could not find server!", ephemeral=True)
+ return
+
+ # Find the role
+ target_role = discord.utils.get(guild.roles, name=role_name)
+ if not target_role:
+ await interaction.response.send_message("โ Role not found! Please ask an admin to create roles first.", ephemeral=True)
+ return
+
+ # Get all RedFlag roles (for removal)
+ redflag_role_prefixes = ["๐ฉ ", "๐ ", "๐จ ", "๐ ", "๐ฌ ", "๐ค ", "๐ "]
+ current_roles = [role for role in interaction.user.roles if any(role.name.startswith(prefix) for prefix in redflag_role_prefixes)]
+
+ try:
+ # Remove existing RedFlag roles
+ if current_roles:
+ await interaction.user.remove_roles(*current_roles, reason="Role change via bot")
+
+ # Add new role
+ await interaction.user.add_roles(target_role, reason="Self-assigned via bot")
+
+ # Update the original message to show success
+ await interaction.response.edit_message(
+ content=f"โ
Successfully assigned role: **{role_name}**",
+ view=None # Remove buttons after selection
+ )
+
+ except Exception as e:
+ logger.error(f"Error assigning role {role_name}: {e}")
+ await interaction.response.send_message(f"โ Failed to assign role: {e}", ephemeral=True)
+
+ async def cmd_create_welcome_banner(self, interaction: discord.Interaction, channel: discord.TextChannel):
+ """Create a welcome banner in a channel"""
+ try:
+ # Check if user has admin permissions
+ if not interaction.user.guild_permissions.administrator:
+ await interaction.response.send_message("โ This command requires Administrator permissions.", ephemeral=True)
+ return
+
+ await interaction.response.defer()
+
+ # Create simple welcome embed
+ embed = discord.Embed(
+ title="๐ RedFlag",
+ description="Self-hosted update management for homelabs",
+ color=discord.Color.red()
+ )
+ embed.add_field(
+ name="Links",
+ value="[GitHub](https://github.com/Fimeg/RedFlag) โข [Issues](https://github.com/Fimeg/RedFlag/issues)",
+ inline=False
+ )
+ embed.set_thumbnail(url="https://raw.githubusercontent.com/Fimeg/RedFlag/main/website/public/favicon.svg")
+
+ # Send and pin the welcome message
+ message = await channel.send(embed=embed)
+ await message.pin()
+
+ await interaction.followup.send(f"โ
Created welcome banner in #{channel.name}!", ephemeral=True)
+ logger.info(f"Created welcome banner in #{channel.name}")
+
+ except Exception as e:
+ logger.error(f"Error converting announcement channel: {e}")
+ await interaction.followup.send(f"โ Error converting channel: {e}", ephemeral=True)
+
+ async def cmd_list_commands_debug(self, interaction: discord.Interaction):
+ """List all registered commands for debugging"""
+ try:
+ commands = self.bot.tree.get_commands(guild=discord.Object(id=self.server_id))
+ command_list = []
+
+ for cmd in commands:
+ if hasattr(cmd, 'name') and hasattr(cmd, 'description'):
+ command_list.append(f"**/{cmd.name}** - {cmd.description}")
+
+ embed = discord.Embed(
+ title="๐ Registered Commands Debug",
+ description=f"Found {len(command_list)} commands:",
+ color=discord.Color.gold()
+ )
+
+ if command_list:
+ embed.add_field(name="Available Commands", value="\n".join(command_list), inline=False)
+ else:
+ embed.description = "No commands found!"
+
+ await interaction.response.send_message(embed=embed, ephemeral=True)
+
+ except Exception as e:
+ await interaction.response.send_message(f"โ Error listing commands: {e}", ephemeral=True)
+
async def cmd_help(self, interaction: discord.Interaction):
"""Show help information"""
embed = discord.Embed(
@@ -350,6 +964,12 @@ class DiscordManager:
commands_info = [
("`/status`", "๐ Show server status"),
("`/create-channels`", "๐ง Create standard channels"),
+ ("`/create-redflag-channels`", "๐ Create RedFlag community channels"),
+ ("`/create-roles`", "๐ญ Create RedFlag community roles *(Admin only)*"),
+ ("`/setup-welcome`", "๐ Setup welcome channel with role selector *(Admin only)*"),
+ ("`/create-version-channels`", "๐ฆ Create version management channels *(Admin only)*"),
+ ("`/role-menu`", "๐ฎ Show interactive role assignment menu"),
+ ("`/assign-lead-dev`", "๐ฉ Assign Lead Dev role *(Admin only)*"),
("`/list-channels`", "๐ List all channels"),
("`/send-message`", "๐ฌ Send message to channel"),
("`/create-category`", "๐ Create new category"),
diff --git a/install.sh b/install.sh
new file mode 100755
index 0000000..792a93f
--- /dev/null
+++ b/install.sh
@@ -0,0 +1,383 @@
+#!/bin/bash
+set -e
+
+# RedFlag Agent Installation Script
+# This script installs the RedFlag agent as a systemd service with proper security hardening
+
+REDFLAG_SERVER="http://localhost:8080"
+AGENT_USER="redflag-agent"
+AGENT_HOME="/var/lib/redflag-agent"
+AGENT_BINARY="/usr/local/bin/redflag-agent"
+SUDOERS_FILE="/etc/sudoers.d/redflag-agent"
+SERVICE_FILE="/etc/systemd/system/redflag-agent.service"
+CONFIG_DIR="/etc/redflag"
+STATE_DIR="/var/lib/redflag"
+
+echo "=== RedFlag Agent Installation ==="
+echo ""
+
+# Check if running as root
+if [ "$EUID" -ne 0 ]; then
+ echo "ERROR: This script must be run as root (use sudo)"
+ exit 1
+fi
+
+# Detect architecture
+ARCH=$(uname -m)
+case "$ARCH" in
+ x86_64)
+ DOWNLOAD_ARCH="amd64"
+ ;;
+ aarch64|arm64)
+ DOWNLOAD_ARCH="arm64"
+ ;;
+ *)
+ echo "ERROR: Unsupported architecture: $ARCH"
+ echo "Supported: x86_64 (amd64), aarch64 (arm64)"
+ exit 1
+ ;;
+esac
+
+echo "Detected architecture: $ARCH (using linux-$DOWNLOAD_ARCH)"
+echo ""
+
+# Step 1: Create system user
+echo "Step 1: Creating system user..."
+if id "$AGENT_USER" &>/dev/null; then
+ echo "โ User $AGENT_USER already exists"
+else
+ useradd -r -s /bin/false -d "$AGENT_HOME" -m "$AGENT_USER"
+ echo "โ User $AGENT_USER created"
+fi
+
+# Create home directory if it doesn't exist
+if [ ! -d "$AGENT_HOME" ]; then
+ mkdir -p "$AGENT_HOME"
+ chown "$AGENT_USER:$AGENT_USER" "$AGENT_HOME"
+ echo "โ Home directory created"
+fi
+
+# Stop existing service if running (to allow binary update)
+if systemctl is-active --quiet redflag-agent 2>/dev/null; then
+ echo ""
+ echo "Existing service detected - stopping to allow update..."
+ systemctl stop redflag-agent
+ sleep 2
+ echo "โ Service stopped"
+fi
+
+# Step 2: Download agent binary
+echo ""
+echo "Step 2: Downloading agent binary..."
+echo "Downloading from ${REDFLAG_SERVER}/api/v1/downloads/linux-${DOWNLOAD_ARCH}..."
+
+# Download to temporary file first (to avoid root permission issues)
+TEMP_FILE="/tmp/redflag-agent-${DOWNLOAD_ARCH}"
+echo "Downloading to temporary file: $TEMP_FILE"
+
+# Try curl first (most reliable)
+if curl -sL "${REDFLAG_SERVER}/api/v1/downloads/linux-${DOWNLOAD_ARCH}" -o "$TEMP_FILE"; then
+ echo "โ Download successful, moving to final location"
+ mv "$TEMP_FILE" "${AGENT_BINARY}"
+ chmod 755 "${AGENT_BINARY}"
+ chown root:root "${AGENT_BINARY}"
+ echo "โ Agent binary downloaded and installed"
+else
+ echo "โ Download with curl failed"
+ # Fallback to wget if available
+ if command -v wget >/dev/null 2>&1; then
+ echo "Trying wget fallback..."
+ if wget -q "${REDFLAG_SERVER}/api/v1/downloads/linux-${DOWNLOAD_ARCH}" -O "$TEMP_FILE"; then
+ echo "โ Download successful with wget, moving to final location"
+ mv "$TEMP_FILE" "${AGENT_BINARY}"
+ chmod 755 "${AGENT_BINARY}"
+ chown root:root "${AGENT_BINARY}"
+ echo "โ Agent binary downloaded and installed (using wget fallback)"
+ else
+ echo "ERROR: Failed to download agent binary"
+ echo "Both curl and wget failed"
+ echo "Please ensure ${REDFLAG_SERVER} is accessible"
+ # Clean up temp file if it exists
+ rm -f "$TEMP_FILE"
+ exit 1
+ fi
+ else
+ echo "ERROR: Failed to download agent binary"
+ echo "curl failed and wget is not available"
+ echo "Please ensure ${REDFLAG_SERVER} is accessible"
+ # Clean up temp file if it exists
+ rm -f "$TEMP_FILE"
+ exit 1
+ fi
+fi
+
+# Clean up temp file if it still exists
+rm -f "$TEMP_FILE"
+
+# Set SELinux context for binary if SELinux is enabled
+if command -v getenforce >/dev/null 2>&1 && [ "$(getenforce)" != "Disabled" ]; then
+ echo "SELinux detected, setting file context for binary..."
+ restorecon -v "${AGENT_BINARY}" 2>/dev/null || true
+ echo "โ SELinux context set for binary"
+fi
+
+# Step 3: Install sudoers configuration
+echo ""
+echo "Step 3: Installing sudoers configuration..."
+cat > "$SUDOERS_FILE" <<'SUDOERS_EOF'
+# RedFlag Agent minimal sudo permissions
+# This file grants the redflag-agent user limited sudo access for package management
+# Generated automatically during RedFlag agent installation
+
+# APT package management commands (Debian/Ubuntu)
+redflag-agent ALL=(root) NOPASSWD: /usr/bin/apt-get update
+redflag-agent ALL=(root) NOPASSWD: /usr/bin/apt-get install -y *
+redflag-agent ALL=(root) NOPASSWD: /usr/bin/apt-get upgrade -y *
+redflag-agent ALL=(root) NOPASSWD: /usr/bin/apt-get install --dry-run --yes *
+
+# DNF package management commands (RHEL/Fedora/Rocky/Alma)
+redflag-agent ALL=(root) NOPASSWD: /usr/bin/dnf makecache
+redflag-agent ALL=(root) NOPASSWD: /usr/bin/dnf install -y *
+redflag-agent ALL=(root) NOPASSWD: /usr/bin/dnf upgrade -y *
+redflag-agent ALL=(root) NOPASSWD: /usr/bin/dnf install --assumeno --downloadonly *
+
+# Docker operations
+redflag-agent ALL=(root) NOPASSWD: /usr/bin/docker pull *
+redflag-agent ALL=(root) NOPASSWD: /usr/bin/docker image inspect *
+redflag-agent ALL=(root) NOPASSWD: /usr/bin/docker manifest inspect *
+
+# Directory operations for RedFlag
+redflag-agent ALL=(root) NOPASSWD: /bin/mkdir -p /etc/redflag
+redflag-agent ALL=(root) NOPASSWD: /bin/mkdir -p /var/lib/redflag
+redflag-agent ALL=(root) NOPASSWD: /bin/chown redflag-agent:redflag-agent /etc/redflag
+redflag-agent ALL=(root) NOPASSWD: /bin/chown redflag-agent:redflag-agent /var/lib/redflag
+redflag-agent ALL=(root) NOPASSWD: /bin/chmod 755 /etc/redflag
+redflag-agent ALL=(root) NOPASSWD: /bin/chmod 755 /var/lib/redflag
+
+# Migration operations (for existing installations)
+redflag-agent ALL=(root) NOPASSWD: /bin/mv /etc/aggregator /etc/redflag.backup.*
+redflag-agent ALL=(root) NOPASSWD: /bin/mv /var/lib/aggregator/* /var/lib/redflag/
+redflag-agent ALL=(root) NOPASSWD: /bin/rmdir /var/lib/aggregator 2>/dev/null || true
+redflag-agent ALL=(root) NOPASSWD: /bin/rmdir /etc/aggregator 2>/dev/null || true
+SUDOERS_EOF
+
+chmod 440 "$SUDOERS_FILE"
+
+# Validate sudoers file
+if visudo -c -f "$SUDOERS_FILE" &>/dev/null; then
+ echo "โ Sudoers configuration installed and validated"
+else
+ echo "ERROR: Sudoers configuration is invalid"
+ rm -f "$SUDOERS_FILE"
+ exit 1
+fi
+
+# Step 4: Create configuration and state directories
+echo ""
+echo "Step 4: Creating configuration and state directories..."
+mkdir -p "$CONFIG_DIR"
+chown "$AGENT_USER:$AGENT_USER" "$CONFIG_DIR"
+chmod 755 "$CONFIG_DIR"
+
+# Create state directory for acknowledgment tracking (v0.1.19+)
+mkdir -p "$STATE_DIR"
+chown "$AGENT_USER:$AGENT_USER" "$STATE_DIR"
+chmod 755 "$STATE_DIR"
+echo "โ Configuration and state directories created"
+
+# Set SELinux context for directories if SELinux is enabled
+if command -v getenforce >/dev/null 2>&1 && [ "$(getenforce)" != "Disabled" ]; then
+ echo "Setting SELinux context for directories..."
+ restorecon -Rv "$CONFIG_DIR" "$STATE_DIR" 2>/dev/null || true
+ echo "โ SELinux context set for directories"
+fi
+
+# Step 5: Install systemd service
+echo ""
+echo "Step 5: Installing systemd service..."
+cat > "$SERVICE_FILE" < " REGISTRATION_TOKEN
+ else
+ echo ""
+ echo "IMPORTANT: Registration token required!"
+ echo ""
+ echo "Since you're running this via pipe, you need to:"
+ echo ""
+ echo "Option 1 - One-liner with token:"
+ echo " curl -sfL ${REDFLAG_SERVER}/api/v1/install/linux | sudo bash -s -- YOUR_TOKEN"
+ echo ""
+ echo "Option 2 - Download and run interactively:"
+ echo " curl -sfL ${REDFLAG_SERVER}/api/v1/install/linux -o install.sh"
+ echo " chmod +x install.sh"
+ echo " sudo ./install.sh"
+ echo ""
+ echo "Skipping registration for now."
+ echo "Please register manually after installation."
+ fi
+fi
+
+# Check if agent is already registered
+if [ -f "$CONFIG_DIR/config.json" ]; then
+ echo ""
+ echo "[INFO] Agent already registered - configuration file exists"
+ echo "[INFO] Skipping registration to preserve agent history"
+ echo "[INFO] If you need to re-register, delete: $CONFIG_DIR/config.json"
+ echo ""
+elif [ -n "$REGISTRATION_TOKEN" ]; then
+ echo ""
+ echo "Registering agent..."
+
+ # Create config file and register
+ cat > "$CONFIG_DIR/config.json" <