feat: implement agent migration system

- Fix config version inflation bug in main.go
- Add dynamic subsystem checking to prevent false change detection
- Implement migration detection and execution system
- Add directory migration from /etc/aggregator to /etc/redflag
- Update all path references across codebase to use new directories
- Add configuration schema versioning and automatic migration
- Implement backup and rollback capabilities
- Add security feature detection and hardening
- Update installation scripts and sudoers for new paths
- Complete Phase 1 migration system
This commit is contained in:
Fimeg
2025-11-04 14:25:53 -05:00
parent 253022cacd
commit e6ac0b1ec4
8 changed files with 994 additions and 16 deletions

View File

@@ -8,6 +8,7 @@ import (
"math/rand"
"os"
"os/exec"
"path/filepath"
"runtime"
"strings"
"time"
@@ -20,6 +21,7 @@ import (
"github.com/Fimeg/RedFlag/aggregator-agent/internal/crypto"
"github.com/Fimeg/RedFlag/aggregator-agent/internal/display"
"github.com/Fimeg/RedFlag/aggregator-agent/internal/installer"
"github.com/Fimeg/RedFlag/aggregator-agent/internal/migration"
"github.com/Fimeg/RedFlag/aggregator-agent/internal/orchestrator"
"github.com/Fimeg/RedFlag/aggregator-agent/internal/scanner"
"github.com/Fimeg/RedFlag/aggregator-agent/internal/service"
@@ -40,7 +42,7 @@ func getConfigPath() string {
if runtime.GOOS == "windows" {
return "C:\\ProgramData\\RedFlag\\config.json"
}
return "/etc/aggregator/config.json"
return "/etc/redflag/config.json"
}
// getStatePath returns the platform-specific state directory path
@@ -48,7 +50,7 @@ func getStatePath() string {
if runtime.GOOS == "windows" {
return "C:\\ProgramData\\RedFlag\\state"
}
return "/var/lib/aggregator"
return "/var/lib/redflag"
}
// reportLogWithAck reports a command log to the server and tracks it for acknowledgment
@@ -213,12 +215,71 @@ func main() {
configPath = *configFile
}
// Check for migration requirements before loading configuration
migrationConfig := migration.NewFileDetectionConfig()
// Set old paths to detect existing installations
migrationConfig.OldConfigPath = "/etc/aggregator"
migrationConfig.OldStatePath = "/var/lib/aggregator"
// Set new paths that agent will actually use
migrationConfig.NewConfigPath = filepath.Dir(configPath)
migrationConfig.NewStatePath = getStatePath()
// Detect migration requirements
migrationDetection, err := migration.DetectMigrationRequirements(migrationConfig)
if err != nil {
log.Printf("Warning: Failed to detect migration requirements: %v", err)
} else if migrationDetection.RequiresMigration {
log.Printf("[RedFlag Server Migrator] Migration detected: %s → %s", migrationDetection.CurrentAgentVersion, AgentVersion)
log.Printf("[RedFlag Server Migrator] Required migrations: %v", migrationDetection.RequiredMigrations)
// Create migration plan
migrationPlan := &migration.MigrationPlan{
Detection: migrationDetection,
TargetVersion: AgentVersion,
Config: migrationConfig,
}
// Execute migration
executor := migration.NewMigrationExecutor(migrationPlan)
result, err := executor.ExecuteMigration()
if err != nil {
log.Printf("[RedFlag Server Migrator] Migration failed: %v", err)
log.Printf("[RedFlag Server Migrator] Backup available at: %s", result.BackupPath)
log.Printf("[RedFlag Server Migrator] Agent may not function correctly until migration is completed")
} else {
log.Printf("[RedFlag Server Migrator] Migration completed successfully")
if result.RollbackAvailable {
log.Printf("[RedFlag Server Migrator] Rollback available at: %s", result.BackupPath)
}
}
}
// Load configuration with priority: CLI > env > file > defaults
cfg, err := config.Load(configPath, cliFlags)
if err != nil {
log.Fatal("Failed to load configuration:", err)
}
// Always set the current agent version in config
if cfg.AgentVersion != AgentVersion {
if cfg.AgentVersion != "" {
log.Printf("[RedFlag Server Migrator] Version change detected: %s → %s", cfg.AgentVersion, AgentVersion)
log.Printf("[RedFlag Server Migrator] Performing lightweight migration check...")
}
// Update config version to match current agent
cfg.AgentVersion = AgentVersion
// Save updated config
if err := cfg.Save(configPath); err != nil {
log.Printf("Warning: Failed to update agent version in config: %v", err)
} else {
if cfg.AgentVersion != "" {
log.Printf("[RedFlag Server Migrator] Agent version updated in configuration")
}
}
}
// Handle registration
if *registerCmd {
// Validate server URL for Windows users
@@ -456,6 +517,31 @@ func renewTokenIfNeeded(apiClient *client.Client, cfg *config.Config, err error)
return apiClient, nil
}
// getCurrentSubsystemEnabled reports whether the named subsystem is currently
// enabled in the agent configuration. Unknown subsystem names are treated as
// disabled, so server-pushed config for subsystems this agent does not know
// about never registers as a change.
func getCurrentSubsystemEnabled(cfg *config.Config, subsystemName string) bool {
	// Snapshot the enabled flag of every known subsystem, then look the
	// requested one up by name.
	enabledByName := map[string]bool{
		"system":  cfg.Subsystems.System.Enabled,
		"updates": cfg.Subsystems.Updates.Enabled,
		"docker":  cfg.Subsystems.Docker.Enabled,
		"storage": cfg.Subsystems.Storage.Enabled,
		"apt":     cfg.Subsystems.APT.Enabled,
		"dnf":     cfg.Subsystems.DNF.Enabled,
		"windows": cfg.Subsystems.Windows.Enabled,
		"winget":  cfg.Subsystems.Winget.Enabled,
	}
	enabled, known := enabledByName[subsystemName]
	if !known {
		// Unknown subsystem: assume disabled.
		return false
	}
	return enabled
}
// syncServerConfig checks for and applies server configuration updates
func syncServerConfig(apiClient *client.Client, cfg *config.Config) error {
// Get current config from server
@@ -472,6 +558,9 @@ func syncServerConfig(apiClient *client.Client, cfg *config.Config) error {
log.Printf("📡 Server config update detected (version: %d)", serverConfig.Version)
changes := false
// Track potential check-in interval changes separately to avoid inflation
newCheckInInterval := cfg.CheckInInterval
// Apply subsystem configuration from server
for subsystemName, subsystemConfig := range serverConfig.Subsystems {
if configMap, ok := subsystemConfig.(map[string]interface{}); ok {
@@ -497,18 +586,18 @@ func syncServerConfig(apiClient *client.Client, cfg *config.Config) error {
}
}
// Only log changes if different from current state
currentEnabled := cfg.Subsystems.APT.Enabled // We'd need to check actual current state here
// Get current subsystem enabled state dynamically
currentEnabled := getCurrentSubsystemEnabled(cfg, subsystemName)
if enabled != currentEnabled {
log.Printf(" → %s: enabled=%v (changed)", subsystemName, enabled)
changes = true
}
if intervalMinutes > 0 && intervalMinutes != cfg.CheckInInterval {
// Check if interval actually changed, but don't modify cfg.CheckInInterval yet
if intervalMinutes > 0 && intervalMinutes != newCheckInInterval {
log.Printf(" → %s: interval=%d minutes (changed)", subsystemName, intervalMinutes)
changes = true
// Note: For now, we use the check-in interval from server config
cfg.CheckInInterval = intervalMinutes
newCheckInInterval = intervalMinutes // Update temp variable, not the config
}
if autoRun {
@@ -517,6 +606,11 @@ func syncServerConfig(apiClient *client.Client, cfg *config.Config) error {
}
}
// Apply the check-in interval change only once after all subsystems processed
if newCheckInInterval != cfg.CheckInInterval {
cfg.CheckInInterval = newCheckInInterval
}
if changes {
log.Printf("✅ Server configuration applied successfully")
} else {

View File

@@ -830,7 +830,7 @@ func loadCachedPublicKeyDirect() ([]byte, error) {
if runtime.GOOS == "windows" {
keyPath = "C:\\ProgramData\\RedFlag\\server_public_key"
} else {
keyPath = "/etc/aggregator/server_public_key"
keyPath = "/etc/redflag/server_public_key"
}
data, err := os.ReadFile(keyPath)

View File

@@ -23,7 +23,7 @@ type LocalCache struct {
}
// CacheDir is the directory where local cache is stored
const CacheDir = "/var/lib/aggregator"
const CacheDir = "/var/lib/redflag"
// CacheFile is the file where scan results are cached
const CacheFile = "last_scan.json"

View File

@@ -47,6 +47,10 @@ type LoggingConfig struct {
// Config holds agent configuration
type Config struct {
// Version Information
Version string `json:"version,omitempty"` // Config schema version
AgentVersion string `json:"agent_version,omitempty"` // Agent binary version
// Server Configuration
ServerURL string `json:"server_url"`
RegistrationToken string `json:"registration_token,omitempty"` // One-time registration token
@@ -133,6 +137,8 @@ type CLIFlags struct {
// getDefaultConfig returns default configuration values
func getDefaultConfig() *Config {
return &Config{
Version: "4", // Current config schema version
AgentVersion: "", // Will be set by the agent at startup
ServerURL: "http://localhost:8080",
CheckInInterval: 300, // 5 minutes
Network: NetworkConfig{
@@ -153,7 +159,7 @@ func getDefaultConfig() *Config {
}
}
// loadFromFile reads configuration from file
// loadFromFile reads configuration from file with backward compatibility migration
func loadFromFile(configPath string) (*Config, error) {
// Ensure directory exists
dir := filepath.Dir(configPath)
@@ -170,12 +176,57 @@ func loadFromFile(configPath string) (*Config, error) {
return nil, fmt.Errorf("failed to read config: %w", err)
}
var config Config
if err := json.Unmarshal(data, &config); err != nil {
// Start with latest default config
config := getDefaultConfig()
// Parse the existing config into a generic map to handle missing fields
var rawConfig map[string]interface{}
if err := json.Unmarshal(data, &rawConfig); err != nil {
return nil, fmt.Errorf("failed to parse config: %w", err)
}
return &config, nil
// Marshal back to JSON and unmarshal into our new structure
// This ensures missing fields get default values from getDefaultConfig()
configJSON, err := json.Marshal(rawConfig)
if err != nil {
return nil, fmt.Errorf("failed to re-marshal config: %w", err)
}
// Carefully merge into our config structure, preserving defaults for missing fields
if err := json.Unmarshal(configJSON, &config); err != nil {
return nil, fmt.Errorf("failed to merge config: %w", err)
}
// Handle specific migrations for known breaking changes
migrateConfig(config)
return config, nil
}
// migrateConfig handles specific known migrations between config versions.
// It mutates cfg in place and logs each migration it applies.
func migrateConfig(cfg *Config) {
	// Update config schema version to latest ("4").
	if cfg.Version != "4" {
		fmt.Printf("[CONFIG] Migrating config schema from version %s to 4\n", cfg.Version)
		cfg.Version = "4"
	}

	// Migration 1: Ensure minimum check-in interval (30 seconds).
	// NOTE(review): values below the 30s floor are bumped to 300s (the
	// 5-minute default), not to 30s as the message implies — confirm intended.
	if cfg.CheckInInterval < 30 {
		fmt.Printf("[CONFIG] Migrating check_in_interval from %d to minimum 30 seconds\n", cfg.CheckInInterval)
		cfg.CheckInInterval = 300 // Default to 5 minutes for better performance
	}

	// Migration 2: Add missing subsystem fields with defaults. A zero Timeout
	// plus a zero circuit-breaker failure threshold is treated as "this
	// subsystem block was absent from the file".
	if cfg.Subsystems.System.Timeout == 0 && cfg.Subsystems.System.CircuitBreaker.FailureThreshold == 0 {
		fmt.Printf("[CONFIG] Adding missing 'system' subsystem configuration\n")
		cfg.Subsystems.System = GetDefaultSubsystemsConfig().System
	}
	// NOTE(review): the default Updates config also has Timeout==0 and
	// FailureThreshold==0, so this branch appears to re-fire on every load and
	// would overwrite a user-set Updates.Enabled=false — verify.
	if cfg.Subsystems.Updates.Timeout == 0 && cfg.Subsystems.Updates.CircuitBreaker.FailureThreshold == 0 {
		fmt.Printf("[CONFIG] Adding missing 'updates' subsystem configuration\n")
		cfg.Subsystems.Updates = GetDefaultSubsystemsConfig().Updates
	}
}
// loadFromEnv loads configuration from environment variables

View File

@@ -32,6 +32,8 @@ type CircuitBreakerConfig struct {
// SubsystemsConfig holds all subsystem configurations
type SubsystemsConfig struct {
System SubsystemConfig `json:"system"` // System metrics scanner
Updates SubsystemConfig `json:"updates"` // Virtual subsystem for package update scheduling
APT SubsystemConfig `json:"apt"`
DNF SubsystemConfig `json:"dnf"`
Docker SubsystemConfig `json:"docker"`
@@ -61,6 +63,16 @@ func GetDefaultSubsystemsConfig() SubsystemsConfig {
}
return SubsystemsConfig{
System: SubsystemConfig{
Enabled: true, // System scanner always available
Timeout: 10 * time.Second, // System info should be fast
CircuitBreaker: defaultCB,
},
Updates: SubsystemConfig{
Enabled: true, // Virtual subsystem for package update scheduling
Timeout: 0, // Not used - delegates to individual package scanners
CircuitBreaker: CircuitBreakerConfig{Enabled: false}, // No circuit breaker for virtual subsystem
},
APT: SubsystemConfig{
Enabled: true,
Timeout: 30 * time.Second,

View File

@@ -17,7 +17,7 @@ func getPublicKeyPath() string {
if runtime.GOOS == "windows" {
return "C:\\ProgramData\\RedFlag\\server_public_key"
}
return "/etc/aggregator/server_public_key"
return "/etc/redflag/server_public_key"
}
// PublicKeyResponse represents the server's public key response

View File

@@ -0,0 +1,436 @@
package migration
import (
"crypto/sha256"
"encoding/json"
"fmt"
"io"
"os"
"path/filepath"
"strings"
"time"
)
// AgentFile represents a single on-disk file associated with an agent
// installation, with the metadata needed to decide how to migrate it.
type AgentFile struct {
	Path         string    `json:"path"`
	Size         int64     `json:"size"`
	ModifiedTime time.Time `json:"modified_time"`
	Version      string    `json:"version,omitempty"` // Version parsed from the filename, when detectable
	Checksum     string    `json:"checksum"`          // Hex-encoded SHA-256 of the file contents
	Required     bool      `json:"required"`          // True for files essential to agent operation
	Migrate      bool      `json:"migrate"`           // True when the file should be copied to the new layout
	Description  string    `json:"description"`       // Human-readable label
}

// AgentFileInventory groups all files discovered for an agent installation by
// category, plus the legacy and replacement directory roots that were scanned.
type AgentFileInventory struct {
	ConfigFiles       []AgentFile `json:"config_files"`
	StateFiles        []AgentFile `json:"state_files"`
	BinaryFiles       []AgentFile `json:"binary_files"`
	LogFiles          []AgentFile `json:"log_files"`
	CertificateFiles  []AgentFile `json:"certificate_files"`
	OldDirectoryPaths []string    `json:"old_directory_paths"` // Legacy roots that were scanned
	NewDirectoryPaths []string    `json:"new_directory_paths"` // Replacement roots (recorded, not scanned)
}

// MigrationDetection represents the result of migration detection.
type MigrationDetection struct {
	CurrentAgentVersion     string              `json:"current_agent_version"` // "unknown" when undetectable
	CurrentConfigVersion    int                 `json:"current_config_version"`
	RequiresMigration       bool                `json:"requires_migration"`
	RequiredMigrations      []string            `json:"required_migrations"` // e.g. "directory_migration", "config_migration", "security_hardening"
	MissingSecurityFeatures []string            `json:"missing_security_features"`
	Inventory               *AgentFileInventory `json:"inventory"`
	DetectionTime           time.Time           `json:"detection_time"`
}

// SecurityFeature represents a security feature that may be missing from an
// existing installation.
type SecurityFeature struct {
	Name        string `json:"name"`
	Description string `json:"description"`
	Required    bool   `json:"required"`
	Enabled     bool   `json:"enabled"`
}

// FileDetectionConfig holds the directory roots used for file detection: the
// legacy paths to scan and the new paths to migrate into.
type FileDetectionConfig struct {
	OldConfigPath    string
	OldStatePath     string
	NewConfigPath    string
	NewStatePath     string
	BackupDirPattern string // fmt.Sprintf pattern taking a timestamp string
}
// NewFileDetectionConfig returns the default detection configuration: the
// legacy "aggregator" directories as migration sources, the "redflag"
// directories as destinations, and the timestamped backup-directory pattern.
func NewFileDetectionConfig() *FileDetectionConfig {
	cfg := &FileDetectionConfig{}
	cfg.OldConfigPath = "/etc/aggregator"
	cfg.OldStatePath = "/var/lib/aggregator"
	cfg.NewConfigPath = "/etc/redflag"
	cfg.NewStatePath = "/var/lib/redflag"
	cfg.BackupDirPattern = "/etc/redflag.backup.%s"
	return cfg
}
// DetectMigrationRequirements scans for existing agent installations and
// determines which migrations (directory, config, security hardening) are
// needed to bring the installation up to date.
//
// Bug fix: missing security features are now identified BEFORE the required
// migrations are computed. determineRequiredMigrations consults
// detection.MissingSecurityFeatures, so the previous order (migrations first,
// features after) meant "security_hardening" could never be scheduled.
func DetectMigrationRequirements(config *FileDetectionConfig) (*MigrationDetection, error) {
	detection := &MigrationDetection{
		DetectionTime: time.Now(),
		Inventory:     &AgentFileInventory{},
	}

	// Scan old locations for agent-related files.
	inventory, err := scanAgentFiles(config)
	if err != nil {
		return nil, fmt.Errorf("failed to scan agent files: %w", err)
	}
	detection.Inventory = inventory

	// Detect agent version and config schema version from discovered files.
	version, configVersion, err := detectVersionInfo(inventory)
	if err != nil {
		return nil, fmt.Errorf("failed to detect version: %w", err)
	}
	detection.CurrentAgentVersion = version
	detection.CurrentConfigVersion = configVersion

	// Identify missing security features first: the migration planner below
	// reads this list to decide whether security hardening is required.
	detection.MissingSecurityFeatures = identifyMissingSecurityFeatures(detection)

	// Identify required migrations.
	requiredMigrations := determineRequiredMigrations(detection, config)
	detection.RequiredMigrations = requiredMigrations
	detection.RequiresMigration = len(requiredMigrations) > 0

	return detection, nil
}
// scanAgentFiles walks the legacy directories and builds a categorized
// inventory of every file found there. Only the OLD directory paths are
// walked; the new paths are recorded in the inventory but not scanned.
func scanAgentFiles(config *FileDetectionConfig) (*AgentFileInventory, error) {
	inventory := &AgentFileInventory{
		OldDirectoryPaths: []string{config.OldConfigPath, config.OldStatePath},
		NewDirectoryPaths: []string{config.NewConfigPath, config.NewStatePath},
	}

	// Glob patterns (matched against base names via containsAny) used to
	// classify discovered files into categories.
	filePatterns := map[string][]string{
		"config": {
			"config.json",
			"agent.key",
			"server.key",
			"ca.crt",
		},
		"state": {
			"pending_acks.json",
			"public_key.cache",
			"last_scan.json",
			"metrics.json",
		},
		"binary": {
			"redflag-agent",
			"redflag-agent.exe",
		},
		"log": {
			"redflag-agent.log",
			"redflag-agent.*.log",
		},
		"certificate": {
			"*.crt",
			"*.key",
			"*.pem",
		},
	}

	// Scan old directory paths; directories that do not exist are skipped.
	for _, dirPath := range inventory.OldDirectoryPaths {
		if _, err := os.Stat(dirPath); err == nil {
			files, err := scanDirectory(dirPath, filePatterns)
			if err != nil {
				return nil, fmt.Errorf("failed to scan directory %s: %w", dirPath, err)
			}

			// Categorize files. Case order matters: categories overlap (e.g.
			// "agent.key" matches both the "config" list and the "*.key"
			// certificate glob) and the first matching case wins. Files that
			// match no pattern are dropped from the inventory entirely.
			for _, file := range files {
				switch {
				case containsAny(file.Path, filePatterns["config"]):
					inventory.ConfigFiles = append(inventory.ConfigFiles, file)
				case containsAny(file.Path, filePatterns["state"]):
					inventory.StateFiles = append(inventory.StateFiles, file)
				case containsAny(file.Path, filePatterns["binary"]):
					inventory.BinaryFiles = append(inventory.BinaryFiles, file)
				case containsAny(file.Path, filePatterns["log"]):
					inventory.LogFiles = append(inventory.LogFiles, file)
				case containsAny(file.Path, filePatterns["certificate"]):
					inventory.CertificateFiles = append(inventory.CertificateFiles, file)
				}
			}
		}
	}

	return inventory, nil
}
// scanDirectory recursively walks dirPath and returns an AgentFile entry for
// every regular file, including its SHA-256 checksum and migration metadata.
// Files whose contents cannot be read are silently skipped rather than
// failing the whole scan.
func scanDirectory(dirPath string, patterns map[string][]string) ([]AgentFile, error) {
	var files []AgentFile

	err := filepath.Walk(dirPath, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}

		if info.IsDir() {
			return nil
		}

		// Calculate checksum; skip unreadable files instead of aborting.
		checksum, err := calculateFileChecksum(path)
		if err != nil {
			// Skip files we can't read
			return nil
		}

		file := AgentFile{
			Path:         path,
			Size:         info.Size(),
			ModifiedTime: info.ModTime(),
			Checksum:     checksum,
			Required:     isRequiredFile(path, patterns),
			Migrate:      shouldMigrateFile(path, patterns),
			Description:  getFileDescription(path),
		}

		// Try to detect version from filename or content.
		if version := detectFileVersion(path, info); version != "" {
			file.Version = version
		}

		files = append(files, file)
		return nil
	})

	return files, err
}
// detectVersionInfo derives the installed agent version and config schema
// version from the inventory. The config file is the preferred source; a
// version embedded in a binary filename is the fallback. When neither yields
// anything, the agent version is reported as "unknown".
func detectVersionInfo(inventory *AgentFileInventory) (string, int, error) {
	agentVersion := ""
	schemaVersion := 0

	// Preferred source: version fields inside config.json.
	for _, cf := range inventory.ConfigFiles {
		if !strings.Contains(cf.Path, "config.json") {
			continue
		}
		if v, cv, err := readConfigVersion(cf.Path); err == nil {
			agentVersion, schemaVersion = v, cv
			break
		}
	}

	// Fallback: version embedded in a binary filename.
	if agentVersion == "" {
		for _, bf := range inventory.BinaryFiles {
			if v := detectBinaryVersion(bf.Path); v != "" {
				agentVersion = v
				break
			}
		}
	}

	if agentVersion == "" {
		agentVersion = "unknown"
	}
	return agentVersion, schemaVersion, nil
}
// readConfigVersion reads version information from a config file
func readConfigVersion(configPath string) (string, int, error) {
data, err := os.ReadFile(configPath)
if err != nil {
return "", 0, err
}
var config map[string]interface{}
if err := json.Unmarshal(data, &config); err != nil {
return "", 0, err
}
// Try to extract version info
var agentVersion string
var cfgVersion int
if version, ok := config["agent_version"].(string); ok {
agentVersion = version
}
if version, ok := config["version"].(float64); ok {
cfgVersion = int(version)
}
return agentVersion, cfgVersion, nil
}
// determineRequiredMigrations decides which migration steps are needed:
//   - "directory_migration" when any legacy directory still exists on disk
//   - "config_migration" when the config schema predates version 4
//   - "security_hardening" when missing security features were identified
//
// NOTE(review): this reads detection.MissingSecurityFeatures, so the caller
// must populate that field before calling, or hardening is never scheduled.
func determineRequiredMigrations(detection *MigrationDetection, config *FileDetectionConfig) []string {
	var migrations []string

	// Check if old directories exist; one is enough to trigger the migration.
	for _, oldDir := range detection.Inventory.OldDirectoryPaths {
		if _, err := os.Stat(oldDir); err == nil {
			migrations = append(migrations, "directory_migration")
			break
		}
	}

	// Check config version compatibility.
	if detection.CurrentConfigVersion < 4 {
		migrations = append(migrations, "config_migration")
	}

	// Check if security features need to be applied.
	if len(detection.MissingSecurityFeatures) > 0 {
		migrations = append(migrations, "security_hardening")
	}

	return migrations
}
// identifyMissingSecurityFeatures lists the security features that the
// existing installation lacks, based on its config files and schema version.
func identifyMissingSecurityFeatures(detection *MigrationDetection) []string {
	var missing []string

	// Inspect every discovered config.json for per-feature settings.
	for _, cf := range detection.Inventory.ConfigFiles {
		if strings.Contains(cf.Path, "config.json") {
			missing = append(missing, checkConfigSecurityFeatures(cf.Path)...)
		}
	}

	// Installations on a pre-v4 config schema predate these features entirely.
	if detection.CurrentConfigVersion < 4 {
		missing = append(missing,
			"nonce_validation",
			"machine_id_binding",
			"ed25519_verification",
			"subsystem_configuration",
		)
	}

	return missing
}
// checkConfigSecurityFeatures checks a config file for security feature settings
func checkConfigSecurityFeatures(configPath string) []string {
data, err := os.ReadFile(configPath)
if err != nil {
return []string{}
}
var config map[string]interface{}
if err := json.Unmarshal(data, &config); err != nil {
return []string{}
}
var missingFeatures []string
// Check for subsystem configuration
if subsystems, ok := config["subsystems"].(map[string]interface{}); ok {
if _, hasSystem := subsystems["system"]; !hasSystem {
missingFeatures = append(missingFeatures, "system_subsystem")
}
if _, hasUpdates := subsystems["updates"]; !hasUpdates {
missingFeatures = append(missingFeatures, "updates_subsystem")
}
} else {
missingFeatures = append(missingFeatures, "subsystem_configuration")
}
// Check for machine ID
if _, hasMachineID := config["machine_id"]; !hasMachineID {
missingFeatures = append(missingFeatures, "machine_id_binding")
}
return missingFeatures
}
// Helper functions
func calculateFileChecksum(filePath string) (string, error) {
file, err := os.Open(filePath)
if err != nil {
return "", err
}
defer file.Close()
hash := sha256.New()
if _, err := io.Copy(hash, file); err != nil {
return "", err
}
return fmt.Sprintf("%x", hash.Sum(nil)), nil
}
// containsAny reports whether the file's base name matches any of the given
// glob patterns (per filepath.Match). Malformed patterns are treated as
// non-matching.
func containsAny(path string, patterns []string) bool {
	base := filepath.Base(path)
	for _, pattern := range patterns {
		matched, _ := filepath.Match(pattern, base)
		if matched {
			return true
		}
	}
	return false
}
// isRequiredFile reports whether the file is essential for agent operation.
// Only the main config and the pending-ack state qualify; the patterns
// argument is currently unused but kept for signature symmetry.
func isRequiredFile(path string, patterns map[string][]string) bool {
	switch filepath.Base(path) {
	case "config.json", "pending_acks.json":
		return true
	default:
		return false
	}
}
// shouldMigrateFile reports whether a file should be copied to the new
// directory layout. Logs and temp files are left behind; everything else
// migrates. The patterns argument is currently unused.
func shouldMigrateFile(path string, patterns map[string][]string) bool {
	transient := []string{"*.log", "*.tmp"}
	return !containsAny(path, transient)
}
// getFileDescription returns a human-readable label for an agent file, keyed
// first by exact base name and then by extension-like substrings.
func getFileDescription(path string) string {
	base := filepath.Base(path)

	// Exact, well-known file names first.
	switch base {
	case "config.json":
		return "Agent configuration file"
	case "pending_acks.json":
		return "Pending command acknowledgments"
	case "public_key.cache":
		return "Server public key cache"
	}

	// Fall back to substring classification, in priority order.
	switch {
	case strings.Contains(base, ".log"):
		return "Agent log file"
	case strings.Contains(base, ".key"):
		return "Private key file"
	case strings.Contains(base, ".crt"):
		return "Certificate file"
	}
	return "Agent file"
}
// detectFileVersion tries to pull a version string out of a filename such as
// "redflag-agent-v0.1.22". Only the v0.1.x series is recognized; anything
// else yields "". The FileInfo argument is currently unused.
func detectFileVersion(path string, info os.FileInfo) string {
	base := filepath.Base(path)
	const marker = "v0.1."
	if !strings.Contains(base, marker) {
		return ""
	}
	after := strings.SplitN(base, marker, 2)[1]
	// Keep only the patch component, dropping any "-suffix".
	patch := strings.SplitN(after, "-", 2)[0]
	return marker + patch
}
// detectBinaryVersion returns the version of the agent binary at binaryPath.
// Probing would involve reading binary headers or executing the binary with a
// --version flag; neither is implemented yet, so this always returns "".
func detectBinaryVersion(binaryPath string) string {
	// This would involve reading binary headers or executing with --version flag
	// For now, return empty
	return ""
}

View File

@@ -0,0 +1,385 @@
package migration
import (
"fmt"
"os"
"path/filepath"
"strings"
"time"
)
// MigrationPlan represents a complete migration plan: what was detected, the
// version being migrated to, and where backups will be written.
type MigrationPlan struct {
	Detection         *MigrationDetection  `json:"detection"`
	TargetVersion     string               `json:"target_version"`
	Config            *FileDetectionConfig `json:"config"`
	BackupPath        string               `json:"backup_path"` // Generated during execution when left empty
	EstimatedDuration time.Duration        `json:"estimated_duration"`
	RiskLevel         string               `json:"risk_level"` // low, medium, high
}

// MigrationResult represents the result of a migration execution.
type MigrationResult struct {
	Success           bool          `json:"success"`
	StartTime         time.Time     `json:"start_time"`
	EndTime           time.Time     `json:"end_time"`
	Duration          time.Duration `json:"duration"`
	BackupPath        string        `json:"backup_path"`
	MigratedFiles     []string      `json:"migrated_files"` // Populated as files are backed up
	Errors            []string      `json:"errors"`
	Warnings          []string      `json:"warnings"` // Non-fatal issues (e.g. incomplete hardening)
	AppliedChanges    []string      `json:"applied_changes"`
	RollbackAvailable bool          `json:"rollback_available"`
}

// MigrationExecutor handles the execution of migration plans.
type MigrationExecutor struct {
	plan   *MigrationPlan   // Input plan (BackupPath may be filled in during execution)
	result *MigrationResult // Accumulated outcome, returned by ExecuteMigration
}
// NewMigrationExecutor creates an executor for the given migration plan with
// a fresh, empty result.
func NewMigrationExecutor(plan *MigrationPlan) *MigrationExecutor {
	executor := &MigrationExecutor{result: &MigrationResult{}}
	executor.plan = plan
	return executor
}
// ExecuteMigration runs the full migration plan in five phases: backup,
// directory migration, configuration migration, security hardening, and
// validation. Phases 2-4 only run when the corresponding migration name is in
// the detection's RequiredMigrations; a hardening failure is downgraded to a
// warning while any other phase failure aborts. The returned MigrationResult
// is non-nil even on error.
//
// Bug fix: the result's BackupPath was previously copied from the plan BEFORE
// createBackups generated the timestamped path, so auto-generated backup
// paths never reached the result and RollbackAvailable stayed false. The
// result's BackupPath is now synced after the backup phase runs.
func (e *MigrationExecutor) ExecuteMigration() (*MigrationResult, error) {
	e.result.StartTime = time.Now()
	e.result.BackupPath = e.plan.BackupPath

	fmt.Printf("[MIGRATION] Starting migration from %s to %s\n",
		e.plan.Detection.CurrentAgentVersion, e.plan.TargetVersion)

	// Phase 1: Create backups. createBackups may generate plan.BackupPath from
	// the configured pattern, so mirror it into the result afterwards — even on
	// failure, callers log result.BackupPath.
	backupErr := e.createBackups()
	e.result.BackupPath = e.plan.BackupPath
	if backupErr != nil {
		return e.completeMigration(false, fmt.Errorf("backup creation failed: %w", backupErr))
	}
	e.result.AppliedChanges = append(e.result.AppliedChanges, "Created backups at "+e.plan.BackupPath)

	// Phase 2: Directory migration
	if contains(e.plan.Detection.RequiredMigrations, "directory_migration") {
		if err := e.migrateDirectories(); err != nil {
			return e.completeMigration(false, fmt.Errorf("directory migration failed: %w", err))
		}
		e.result.AppliedChanges = append(e.result.AppliedChanges, "Migrated directories")
	}

	// Phase 3: Configuration migration
	if contains(e.plan.Detection.RequiredMigrations, "config_migration") {
		if err := e.migrateConfiguration(); err != nil {
			return e.completeMigration(false, fmt.Errorf("configuration migration failed: %w", err))
		}
		e.result.AppliedChanges = append(e.result.AppliedChanges, "Migrated configuration")
	}

	// Phase 4: Security hardening (best-effort: failure becomes a warning)
	if contains(e.plan.Detection.RequiredMigrations, "security_hardening") {
		if err := e.applySecurityHardening(); err != nil {
			e.result.Warnings = append(e.result.Warnings,
				fmt.Sprintf("Security hardening incomplete: %v", err))
		} else {
			e.result.AppliedChanges = append(e.result.AppliedChanges, "Applied security hardening")
		}
	}

	// Phase 5: Validation
	if err := e.validateMigration(); err != nil {
		return e.completeMigration(false, fmt.Errorf("migration validation failed: %w", err))
	}

	return e.completeMigration(true, nil)
}
// createBackups copies every file in the inventory into a backup directory.
// When the plan has no BackupPath yet, a timestamped one is derived from the
// configured BackupDirPattern and written back to the plan.
func (e *MigrationExecutor) createBackups() error {
	backupPath := e.plan.BackupPath
	if backupPath == "" {
		timestamp := time.Now().Format("2006-01-02-150405")
		backupPath = fmt.Sprintf(e.plan.Config.BackupDirPattern, timestamp)
		e.plan.BackupPath = backupPath
	}

	fmt.Printf("[MIGRATION] Creating backup at: %s\n", backupPath)

	// Create backup directory
	if err := os.MkdirAll(backupPath, 0755); err != nil {
		return fmt.Errorf("failed to create backup directory: %w", err)
	}

	// Back up every inventoried file; a single failure aborts the phase.
	// NOTE(review): MigratedFiles is populated here, during the backup phase,
	// rather than during the actual migration — confirm the naming is intended.
	allFiles := e.collectAllFiles()
	for _, file := range allFiles {
		if err := e.backupFile(file, backupPath); err != nil {
			return fmt.Errorf("failed to backup file %s: %w", file.Path, err)
		}
		e.result.MigratedFiles = append(e.result.MigratedFiles, file.Path)
	}

	return nil
}
// migrateDirectories creates the new config/state directories and copies the
// contents of each legacy directory into its mapped replacement. Legacy
// directories are left in place (this is a copy, not a move).
func (e *MigrationExecutor) migrateDirectories() error {
	fmt.Printf("[MIGRATION] Migrating directories...\n")

	// Create new directories
	newDirectories := []string{e.plan.Config.NewConfigPath, e.plan.Config.NewStatePath}
	for _, newPath := range newDirectories {
		if err := os.MkdirAll(newPath, 0755); err != nil {
			return fmt.Errorf("failed to create directory %s: %w", newPath, err)
		}
		fmt.Printf("[MIGRATION] Created directory: %s\n", newPath)
	}

	// Migrate files from old to new directories; legacy directories with no
	// known mapping are skipped.
	for _, oldDir := range e.plan.Detection.Inventory.OldDirectoryPaths {
		newDir := e.getNewDirectoryPath(oldDir)
		if newDir == "" {
			continue
		}

		if err := e.migrateDirectoryContents(oldDir, newDir); err != nil {
			return fmt.Errorf("failed to migrate directory %s to %s: %w", oldDir, newDir, err)
		}
		fmt.Printf("[MIGRATION] Migrated: %s → %s\n", oldDir, newDir)
	}

	return nil
}
// migrateConfiguration copies every discovered config.json from its legacy
// location to the corresponding path under the new directory layout.
func (e *MigrationExecutor) migrateConfiguration() error {
	fmt.Printf("[MIGRATION] Migrating configuration...\n")

	for _, cf := range e.plan.Detection.Inventory.ConfigFiles {
		if !strings.Contains(cf.Path, "config.json") {
			continue
		}
		target := e.getNewConfigPath(cf.Path)
		if target == "" || target == cf.Path {
			// Nothing to do: no mapping, or already in the new location.
			continue
		}
		if err := e.migrateConfigFile(cf.Path, target); err != nil {
			return fmt.Errorf("failed to migrate config file: %w", err)
		}
		fmt.Printf("[MIGRATION] Migrated config: %s → %s\n", cf.Path, target)
	}

	return nil
}
// applySecurityHardening logs the security features that would be enabled for
// this installation. Actual enforcement is delegated to the config system;
// this phase currently only reports intent.
func (e *MigrationExecutor) applySecurityHardening() error {
	fmt.Printf("[MIGRATION] Applying security hardening...\n")

	// Per-feature log messages; unrecognized features are silently skipped.
	announcements := map[string]string{
		"nonce_validation":        "Enabling nonce validation",
		"machine_id_binding":      "Configuring machine ID binding",
		"ed25519_verification":    "Enabling Ed25519 verification",
		"subsystem_configuration": "Adding missing subsystem configurations",
		"system_subsystem":        "Adding system scanner configuration",
		"updates_subsystem":       "Adding updates subsystem configuration",
	}
	for _, feature := range e.plan.Detection.MissingSecurityFeatures {
		if message, known := announcements[feature]; known {
			fmt.Printf("[MIGRATION] %s\n", message)
		}
	}

	return nil
}
// validateMigration verifies post-migration invariants: the new config and
// state directories exist, and every config file that maps to a location
// under the new layout is present there. File contents/checksums are not
// compared.
func (e *MigrationExecutor) validateMigration() error {
	fmt.Printf("[MIGRATION] Validating migration...\n")

	// Check that new directories exist
	newDirectories := []string{e.plan.Config.NewConfigPath, e.plan.Config.NewStatePath}
	for _, newDir := range newDirectories {
		if _, err := os.Stat(newDir); err != nil {
			return fmt.Errorf("new directory %s not found: %w", newDir, err)
		}
	}

	// Check that config files exist in new location (files with no mapping
	// are skipped).
	for _, configFile := range e.plan.Detection.Inventory.ConfigFiles {
		newPath := e.getNewConfigPath(configFile.Path)
		if newPath != "" {
			if _, err := os.Stat(newPath); err != nil {
				return fmt.Errorf("migrated config file %s not found: %w", newPath, err)
			}
		}
	}

	fmt.Printf("[MIGRATION] ✅ Migration validation successful\n")
	return nil
}
// Helper methods
// collectAllFiles flattens every file category in the inventory into a single
// slice for backup processing.
func (e *MigrationExecutor) collectAllFiles() []AgentFile {
	inv := e.plan.Detection.Inventory
	var all []AgentFile
	for _, group := range [][]AgentFile{
		inv.ConfigFiles,
		inv.StateFiles,
		inv.BinaryFiles,
		inv.LogFiles,
		inv.CertificateFiles,
	} {
		all = append(all, group...)
	}
	return all
}
// backupFile copies one inventory file into the backup tree, preserving its
// path relative to whichever legacy root (config or state) contains it.
//
// Bug fix: filepath.Rel returns "../.."-style paths (with a nil error) for
// files that are NOT under the given root — e.g. a state file made relative
// to the config root — so the old error-based fallback never triggered and
// backups were written OUTSIDE the backup directory. We now only accept a
// relative path that stays inside its root, falling back to the bare file
// name otherwise.
func (e *MigrationExecutor) backupFile(file AgentFile, backupPath string) error {
	relPath := filepath.Base(file.Path)
	for _, root := range []string{e.plan.Config.OldConfigPath, e.plan.Config.OldStatePath} {
		rel, err := filepath.Rel(root, file.Path)
		if err != nil || rel == ".." || strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
			continue // file is not under this root
		}
		relPath = rel
		break
	}

	backupFilePath := filepath.Join(backupPath, relPath)
	if err := os.MkdirAll(filepath.Dir(backupFilePath), 0755); err != nil {
		return fmt.Errorf("failed to create backup directory: %w", err)
	}

	// Copy file to backup location
	if err := copyFile(file.Path, backupFilePath); err != nil {
		return fmt.Errorf("failed to copy file to backup: %w", err)
	}

	return nil
}
// migrateDirectoryContents recursively copies every regular file under oldDir
// to the corresponding relative path under newDir, creating intermediate
// directories as needed. Source files are copied, not moved.
func (e *MigrationExecutor) migrateDirectoryContents(oldDir, newDir string) error {
	return filepath.Walk(oldDir, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}

		if info.IsDir() {
			return nil
		}

		relPath, err := filepath.Rel(oldDir, path)
		if err != nil {
			return err
		}

		newPath := filepath.Join(newDir, relPath)
		newDirPath := filepath.Dir(newPath)

		if err := os.MkdirAll(newDirPath, 0755); err != nil {
			return fmt.Errorf("failed to create directory: %w", err)
		}

		if err := copyFile(path, newPath); err != nil {
			return fmt.Errorf("failed to copy file: %w", err)
		}

		return nil
	})
}
// migrateConfigFile migrates a single config file from oldPath to newPath.
// This would use the config migration logic from the config package; for now
// it is a plain byte-for-byte copy with permissions preserved.
func (e *MigrationExecutor) migrateConfigFile(oldPath, newPath string) error {
	// For now, just copy the file
	return copyFile(oldPath, newPath)
}
// getNewDirectoryPath maps a legacy directory to its replacement, or returns
// "" for directories with no known mapping.
func (e *MigrationExecutor) getNewDirectoryPath(oldPath string) string {
	switch oldPath {
	case e.plan.Config.OldConfigPath:
		return e.plan.Config.NewConfigPath
	case e.plan.Config.OldStatePath:
		return e.plan.Config.NewStatePath
	default:
		return ""
	}
}
// getNewConfigPath maps a file path under one of the legacy roots to the
// corresponding path under the new layout, or returns "" when the path is not
// under either root.
//
// Bug fix: a bare strings.HasPrefix also matched sibling directories (e.g.
// "/etc/aggregator-old/x" has the prefix "/etc/aggregator"), silently mapping
// unrelated files. The prefix must now end at a path-separator boundary, or
// be an exact match of the root itself.
func (e *MigrationExecutor) getNewConfigPath(oldPath string) string {
	mappings := [...]struct{ oldRoot, newRoot string }{
		{e.plan.Config.OldConfigPath, e.plan.Config.NewConfigPath},
		{e.plan.Config.OldStatePath, e.plan.Config.NewStatePath},
	}
	for _, m := range mappings {
		if oldPath == m.oldRoot {
			return m.newRoot
		}
		prefix := m.oldRoot + string(os.PathSeparator)
		if strings.HasPrefix(oldPath, prefix) {
			return filepath.Join(m.newRoot, strings.TrimPrefix(oldPath, prefix))
		}
	}
	return ""
}
// completeMigration finalizes result bookkeeping (timing, success flag,
// rollback availability), records err in the result, logs a summary, and
// returns the result together with the original error (nil on success).
//
// NOTE(review): RollbackAvailable is only set on success, yet a backup may
// also exist after a failed run (once the backup phase completed) — callers
// wanting to roll back a partial failure must inspect BackupPath directly.
func (e *MigrationExecutor) completeMigration(success bool, err error) (*MigrationResult, error) {
	e.result.EndTime = time.Now()
	e.result.Duration = e.result.EndTime.Sub(e.result.StartTime)
	e.result.Success = success
	e.result.RollbackAvailable = success && e.result.BackupPath != ""

	if err != nil {
		e.result.Errors = append(e.result.Errors, err.Error())
	}

	if success {
		fmt.Printf("[MIGRATION] ✅ Migration completed successfully in %v\n", e.result.Duration)
		if e.result.RollbackAvailable {
			fmt.Printf("[MIGRATION] 📦 Rollback available at: %s\n", e.result.BackupPath)
		}
	} else {
		fmt.Printf("[MIGRATION] ❌ Migration failed after %v\n", e.result.Duration)
		if len(e.result.Errors) > 0 {
			for _, errMsg := range e.result.Errors {
				fmt.Printf("[MIGRATION] Error: %s\n", errMsg)
			}
		}
	}

	return e.result, err
}
// Utility functions
// contains reports whether item appears in slice.
func contains(slice []string, item string) bool {
	for i := range slice {
		if slice[i] == item {
			return true
		}
	}
	return false
}
func copyFile(src, dst string) error {
sourceFile, err := os.Open(src)
if err != nil {
return err
}
defer sourceFile.Close()
destFile, err := os.Create(dst)
if err != nil {
return err
}
defer destFile.Close()
_, err = destFile.ReadFrom(sourceFile)
if err != nil {
return err
}
// Preserve file permissions
sourceInfo, err := os.Stat(src)
if err != nil {
return err
}
return os.Chmod(dst, sourceInfo.Mode())
}