Implement proper storage metrics (P0-009)

- Add dedicated storage_metrics table
- Create StorageMetricReport models with proper field names
- Add ReportStorageMetrics to agent client
- Update storage scanner to use new method
- Implement server-side handlers and queries
- Register new routes and update UI
- Remove legacy Scan() method
- Follow ETHOS principles: honest naming, clean architecture

This commit is contained in:
Fimeg
2025-12-17 16:38:36 -05:00
parent f7c8d23c5d
commit 0fff047cb5
43 changed files with 3641 additions and 248 deletions

View File

@@ -8,6 +8,7 @@ import (
"time"
"github.com/Fimeg/RedFlag/aggregator-agent/internal/client"
"github.com/Fimeg/RedFlag/aggregator-agent/internal/constants"
"github.com/google/uuid"
)
@@ -22,15 +23,12 @@ type LocalCache struct {
AgentStatus string `json:"agent_status"`
}
// CacheDir is the directory where local cache is stored
const CacheDir = "/var/lib/redflag-agent"
// CacheFile is the file where scan results are cached
const CacheFile = "last_scan.json"
// cacheFile is the file where scan results are cached
const cacheFile = "last_scan.json"
// GetCachePath returns the full path to the cache file
func GetCachePath() string {
return filepath.Join(CacheDir, CacheFile)
return filepath.Join(constants.GetAgentCacheDir(), cacheFile)
}
// Load reads the local cache from disk
@@ -62,7 +60,7 @@ func (c *LocalCache) Save() error {
cachePath := GetCachePath()
// Ensure cache directory exists
if err := os.MkdirAll(CacheDir, 0755); err != nil {
if err := os.MkdirAll(constants.GetAgentCacheDir(), 0755); err != nil {
return fmt.Errorf("failed to create cache directory: %w", err)
}

View File

@@ -576,6 +576,37 @@ func (c *Client) ReportDockerImages(agentID uuid.UUID, report DockerReport) erro
return nil
}
// ReportStorageMetrics sends storage metrics to the server via dedicated endpoint.
// The report is JSON-encoded and POSTed to /api/v1/agents/{id}/storage-metrics
// with the bearer token and machine-ID headers; any non-200 response is an error.
func (c *Client) ReportStorageMetrics(agentID uuid.UUID, report StorageMetricReport) error {
	payload, err := json.Marshal(report)
	if err != nil {
		return fmt.Errorf("failed to marshal storage metrics: %w", err)
	}

	endpoint := fmt.Sprintf("%s/api/v1/agents/%s/storage-metrics", c.baseURL, agentID)
	req, err := http.NewRequest("POST", endpoint, bytes.NewBuffer(payload))
	if err != nil {
		return fmt.Errorf("failed to create request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Authorization", "Bearer "+c.token)
	c.addMachineIDHeader(req) // Security: Validate machine binding (v0.1.22+)

	resp, err := c.http.Do(req)
	if err != nil {
		return fmt.Errorf("failed to send request: %w", err)
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		respBody, _ := io.ReadAll(resp.Body)
		return fmt.Errorf("failed to report storage metrics: %s - %s", resp.Status, string(respBody))
	}
	return nil
}
// LogReport represents an execution log
type LogReport struct {
CommandID string `json:"command_id"`

View File

@@ -31,7 +31,7 @@ func CalculateChecksum(filePath string) (string, error) {
// IsRequiredFile determines if a file is required for agent operation
func IsRequiredFile(path string) bool {
requiredFiles := []string{
"/etc/redflag/config.json",
"/etc/redflag/agent/config.json", // Agent config in nested structure
"/usr/local/bin/redflag-agent",
"/etc/systemd/system/redflag-agent.service",
}

View File

@@ -324,12 +324,13 @@ func migrateConfig(cfg *Config) {
}
// Migration 2: Add missing subsystem fields with defaults
if cfg.Subsystems.System.Timeout == 0 && cfg.Subsystems.System.CircuitBreaker.FailureThreshold == 0 {
// Check if subsystem is zero value (truly missing), not just has zero fields
if cfg.Subsystems.System == (SubsystemConfig{}) {
fmt.Printf("[CONFIG] Adding missing 'system' subsystem configuration\n")
cfg.Subsystems.System = GetDefaultSubsystemsConfig().System
}
if cfg.Subsystems.Updates.Timeout == 0 && cfg.Subsystems.Updates.CircuitBreaker.FailureThreshold == 0 {
if cfg.Subsystems.Updates == (SubsystemConfig{}) {
fmt.Printf("[CONFIG] Adding missing 'updates' subsystem configuration\n")
cfg.Subsystems.Updates = GetDefaultSubsystemsConfig().Updates
}

View File

@@ -0,0 +1,96 @@
// Package constants provides centralized path definitions for the RedFlag agent.
// This package ensures consistency across all components and makes path management
// maintainable and testable.
package constants
import (
"runtime"
"path/filepath"
)
// Base directories
const (
	LinuxBaseDir   = "/var/lib/redflag"
	WindowsBaseDir = "C:\\ProgramData\\RedFlag"
)

// Subdirectory structure
const (
	AgentDir        = "agent"
	ServerDir       = "server"
	CacheSubdir     = "cache"
	StateSubdir     = "state"
	MigrationSubdir = "migration_backups"
)

// Config paths
const (
	LinuxConfigBase   = "/etc/redflag"
	WindowsConfigBase = "C:\\ProgramData\\RedFlag"
	ConfigFile        = "config.json"
)

// Log paths
const (
	LinuxLogBase = "/var/log/redflag"
)

// Legacy paths for migration
const (
	LegacyConfigPath = "/etc/aggregator/config.json"
	LegacyStatePath  = "/var/lib/aggregator"
)

// configBase returns the platform-specific configuration root directory.
func configBase() string {
	if runtime.GOOS == "windows" {
		return WindowsConfigBase
	}
	return LinuxConfigBase
}

// GetBaseDir returns the platform-specific state base directory
// (/var/lib/redflag on Linux, C:\ProgramData\RedFlag on Windows).
func GetBaseDir() string {
	if runtime.GOOS == "windows" {
		return WindowsBaseDir
	}
	return LinuxBaseDir
}

// GetAgentStateDir returns the agent state directory, e.g. /var/lib/redflag/agent/state.
func GetAgentStateDir() string {
	return filepath.Join(GetBaseDir(), AgentDir, StateSubdir)
}

// GetAgentCacheDir returns the agent cache directory, e.g. /var/lib/redflag/agent/cache.
func GetAgentCacheDir() string {
	return filepath.Join(GetBaseDir(), AgentDir, CacheSubdir)
}

// GetMigrationBackupDir returns the migration backup directory,
// e.g. /var/lib/redflag/agent/migration_backups.
func GetMigrationBackupDir() string {
	return filepath.Join(GetBaseDir(), AgentDir, MigrationSubdir)
}

// GetAgentConfigPath returns the agent config file path, e.g. /etc/redflag/agent/config.json.
func GetAgentConfigPath() string {
	return filepath.Join(configBase(), AgentDir, ConfigFile)
}

// GetAgentConfigDir returns the agent config directory, e.g. /etc/redflag/agent.
func GetAgentConfigDir() string {
	return filepath.Join(configBase(), AgentDir)
}

// GetAgentLogDir returns the agent log directory, e.g. /var/log/redflag/agent.
// NOTE: Linux layout only; there is no Windows branch here.
func GetAgentLogDir() string {
	return filepath.Join(LinuxLogBase, AgentDir)
}

// GetLegacyAgentConfigPath returns the legacy /etc/aggregator/config.json path.
func GetLegacyAgentConfigPath() string {
	return LegacyConfigPath
}

// GetLegacyAgentStatePath returns the legacy /var/lib/aggregator path.
func GetLegacyAgentStatePath() string {
	return LegacyStatePath
}

View File

@@ -12,6 +12,7 @@ import (
"time"
"github.com/Fimeg/RedFlag/aggregator-agent/internal/common"
"github.com/Fimeg/RedFlag/aggregator-agent/internal/constants"
"github.com/Fimeg/RedFlag/aggregator-agent/internal/version"
)
@@ -58,11 +59,11 @@ type FileDetectionConfig struct {
// NewFileDetectionConfig creates a default detection configuration
func NewFileDetectionConfig() *FileDetectionConfig {
return &FileDetectionConfig{
OldConfigPath: "/etc/aggregator",
OldStatePath: "/var/lib/aggregator",
NewConfigPath: "/etc/redflag",
NewStatePath: "/var/lib/redflag-agent",
BackupDirPattern: "/var/lib/redflag-agent/migration_backups_%s",
OldConfigPath: constants.LegacyConfigPath,
OldStatePath: constants.LegacyStatePath,
NewConfigPath: constants.GetAgentConfigDir(),
NewStatePath: constants.GetAgentStateDir(),
BackupDirPattern: constants.GetMigrationBackupDir() + "/%d",
}
}

View File

@@ -0,0 +1,235 @@
package pathutils
import (
"fmt"
"os"
"path/filepath"
"strings"
)
// PathManager provides centralized path operations with validation.
// All paths are cleaned at construction time; methods reject relative paths
// and parent-directory traversal.
type PathManager struct {
	config *Config // cleaned migration path configuration
}

// Config holds path configuration for migration: the legacy (old) locations,
// their new replacements, and a printf-style pattern for backup directories.
type Config struct {
	OldConfigPath    string // legacy config directory/file location
	OldStatePath     string // legacy state directory location
	NewConfigPath    string // target config location after migration
	NewStatePath     string // target state location after migration
	BackupDirPattern string // printf-style pattern for backup directory names
}
// NewPathManager creates a new path manager, trimming whitespace and
// filepath.Clean-ing every configured path (the backup pattern is only
// trimmed, since cleaning would mangle its format verb).
func NewPathManager(config *Config) *PathManager {
	sanitize := func(p string) string {
		return filepath.Clean(strings.TrimSpace(p))
	}
	return &PathManager{
		config: &Config{
			OldConfigPath:    sanitize(config.OldConfigPath),
			OldStatePath:     sanitize(config.OldStatePath),
			NewConfigPath:    sanitize(config.NewConfigPath),
			NewStatePath:     sanitize(config.NewStatePath),
			BackupDirPattern: strings.TrimSpace(config.BackupDirPattern),
		},
	}
}
// NormalizeToAbsolute ensures a path is absolute and cleaned.
//
// The input is cleaned with filepath.Clean, checked for parent-directory
// ("..") path *elements*, and required to be absolute. Returns the cleaned
// path, or an error describing why the path was rejected.
//
// Fix: the previous substring test strings.Contains(cleaned, "..") also
// rejected legitimate names such as "/var/lib/foo..bar"; the check is now
// element-wise.
func (pm *PathManager) NormalizeToAbsolute(path string) (string, error) {
	if path == "" {
		return "", fmt.Errorf("path cannot be empty")
	}
	cleaned := filepath.Clean(path)
	// Check for path traversal attempts (whole ".." elements only).
	for _, part := range strings.Split(cleaned, string(filepath.Separator)) {
		if part == ".." {
			return "", fmt.Errorf("path contains parent directory reference: %s", path)
		}
	}
	// Ensure it's absolute.
	if !filepath.IsAbs(cleaned) {
		return "", fmt.Errorf("path must be absolute: %s", path)
	}
	return cleaned, nil
}
// ValidatePath validates that a single path exists and is accessible.
//
// The path is normalized first (absolute, cleaned, no ".." elements); then
// its existence is checked. For files under /etc/ an additional readability
// audit runs, since an unreadable config file would break the agent.
//
// Fixes:
//   - the not-exist error now wraps the os error (%w) so callers can classify
//     it with errors.Is(err, os.ErrNotExist);
//   - the previous permission check required world read/write bits
//     (perm&0004 == 0 && perm&0002 == 0), which wrongly rejected properly
//     secured files such as mode 0600; it now only rejects files with no
//     read bit at all.
func (pm *PathManager) ValidatePath(path string) error {
	if path == "" {
		return fmt.Errorf("path cannot be empty")
	}
	// Normalize path first.
	normalized, err := pm.NormalizeToAbsolute(path)
	if err != nil {
		return err
	}
	// Check existence.
	info, err := os.Stat(normalized)
	if err != nil {
		if os.IsNotExist(err) {
			return fmt.Errorf("path does not exist: %s: %w", normalized, err)
		}
		return fmt.Errorf("failed to access path %s: %w", normalized, err)
	}
	// Additional validation for config files under /etc/.
	if strings.HasPrefix(normalized, "/etc/") {
		// Reject only files that nobody (owner, group, or other) can read.
		if info.Mode().Perm()&0444 == 0 {
			return fmt.Errorf("config file is not readable: %s", normalized)
		}
	}
	return nil
}
// EnsureDirectory creates the directory (and any missing parents) if it does
// not already exist. An existing non-directory at the path is an error.
func (pm *PathManager) EnsureDirectory(path string) error {
	normalized, err := pm.NormalizeToAbsolute(path)
	if err != nil {
		return err
	}
	info, statErr := os.Stat(normalized)
	if statErr == nil {
		// Something already exists there: accept only a directory.
		if info.IsDir() {
			return nil
		}
		return fmt.Errorf("path exists but is not a directory: %s", normalized)
	}
	// Create directory with proper permissions.
	if mkErr := os.MkdirAll(normalized, 0755); mkErr != nil {
		return fmt.Errorf("failed to create directory %s: %w", normalized, mkErr)
	}
	return nil
}
// GetRelativePath returns fullPath relative to basePath.
//
// If fullPath is not inside basePath, or the relative form would traverse
// outside it, the bare filename is returned instead so callers never receive
// an escaping path.
//
// Fix: the containment test is now separator-aware; the previous plain
// strings.HasPrefix treated "/foo/barbaz" as inside "/foo/bar".
func (pm *PathManager) GetRelativePath(basePath, fullPath string) (string, error) {
	normBase, err := pm.NormalizeToAbsolute(basePath)
	if err != nil {
		return "", fmt.Errorf("invalid base path: %w", err)
	}
	normFull, err := pm.NormalizeToAbsolute(fullPath)
	if err != nil {
		return "", fmt.Errorf("invalid full path: %w", err)
	}
	// Separator-aware containment check.
	if normFull != normBase && !strings.HasPrefix(normFull, normBase+string(filepath.Separator)) {
		// Not under base path, use filename-only approach.
		return filepath.Base(normFull), nil
	}
	rel, err := filepath.Rel(normBase, normFull)
	if err != nil {
		return "", fmt.Errorf("failed to get relative path from %s to %s: %w", normBase, normFull, err)
	}
	// Final safety check: never return an escaping relative path.
	if strings.Contains(rel, "..") {
		return filepath.Base(normFull), nil
	}
	return rel, nil
}
// JoinPath joins path components onto base safely.
//
// Fix: the original signature `JoinPath(base, components ...string)` does not
// compile — Go permits the variadic `...` only on the final parameter, and
// the shared-type form made *both* parameters variadic. base is now declared
// with its own type.
func (pm *PathManager) JoinPath(base string, components ...string) string {
	// Normalize base when possible; keep the raw value otherwise.
	if absBase, err := pm.NormalizeToAbsolute(base); err == nil {
		base = absBase
	}
	// Clean all components before joining.
	cleanComponents := make([]string, len(components))
	for i, comp := range components {
		cleanComponents[i] = filepath.Clean(comp)
	}
	// Join all components (filepath.Join also cleans the result).
	result := filepath.Join(append([]string{base}, cleanComponents...)...)
	// Final safety check: any remaining ".." means the components escape base.
	if strings.Contains(result, "..") {
		// Fallback to string-based join if path traversal detected.
		return filepath.Join(base, filepath.Join(cleanComponents...))
	}
	return result
}
// GetConfig returns the path configuration this manager was created with.
// The returned pointer is the manager's internal config; callers should
// treat it as read-only.
func (pm *PathManager) GetConfig() *Config {
	return pm.config
}
// ValidateConfig checks that every configured path is non-empty and that all
// four directory paths are absolute. The backup pattern must be non-empty
// but is not required to be absolute.
func (pm *PathManager) ValidateConfig() error {
	cfg := pm.config
	switch {
	case cfg.OldConfigPath == "" || cfg.OldStatePath == "":
		return fmt.Errorf("old paths cannot be empty")
	case cfg.NewConfigPath == "" || cfg.NewStatePath == "":
		return fmt.Errorf("new paths cannot be empty")
	case cfg.BackupDirPattern == "":
		return fmt.Errorf("backup dir pattern cannot be empty")
	}
	// Validate paths are absolute.
	for _, p := range []string{cfg.OldConfigPath, cfg.OldStatePath, cfg.NewConfigPath, cfg.NewStatePath} {
		if !filepath.IsAbs(p) {
			return fmt.Errorf("path must be absolute: %s", p)
		}
	}
	return nil
}
// GetNewPathForOldPath maps a file path under one of the legacy locations to
// its equivalent path under the new layout. Paths outside both legacy roots
// are returned unchanged (normalized).
//
// Fix: containment is now separator-aware; the previous plain
// strings.HasPrefix treated e.g. "/etc/aggregator2/x" as inside
// "/etc/aggregator".
func (pm *PathManager) GetNewPathForOldPath(oldPath string) (string, error) {
	// Validate old path.
	normalizedOld, err := pm.NormalizeToAbsolute(oldPath)
	if err != nil {
		return "", fmt.Errorf("invalid old path: %w", err)
	}
	// under reports whether p is root itself or lives beneath it.
	under := func(p, root string) bool {
		return p == root || strings.HasPrefix(p, root+string(filepath.Separator))
	}
	// Check if it's in the old config path.
	if under(normalizedOld, pm.config.OldConfigPath) {
		relPath, err := pm.GetRelativePath(pm.config.OldConfigPath, normalizedOld)
		if err != nil {
			return "", err
		}
		return pm.JoinPath(pm.config.NewConfigPath, relPath), nil
	}
	// Check if it's in the old state path.
	if under(normalizedOld, pm.config.OldStatePath) {
		relPath, err := pm.GetRelativePath(pm.config.OldStatePath, normalizedOld)
		if err != nil {
			return "", err
		}
		return pm.JoinPath(pm.config.NewStatePath, relPath), nil
	}
	// File is not in expected old locations, return as is.
	return normalizedOld, nil
}

View File

@@ -0,0 +1,172 @@
package migration
import (
"encoding/json"
"fmt"
"os"
"time"
"github.com/Fimeg/RedFlag/aggregator-agent/internal/config"
)
// MigrationState is imported from config package to avoid duplication

// StateManager manages migration state persistence. State is stored inside
// the agent's JSON config file rather than a separate file, so every load
// and save round-trips through the full config.
type StateManager struct {
	configPath string // absolute path to the agent config JSON file
}

// NewStateManager creates a new state manager that reads and writes the
// config file at configPath.
func NewStateManager(configPath string) *StateManager {
	return &StateManager{
		configPath: configPath,
	}
}
// LoadState loads migration state from the config file.
//
// A fresh install (config file missing) or a config without migration state
// yields an initialized zero-progress state rather than an error; any other
// read/decode failure is returned.
func (sm *StateManager) LoadState() (*config.MigrationState, error) {
	// freshState builds an empty state carrying the given version info.
	freshState := func(agentVersion, configVersion string) *config.MigrationState {
		return &config.MigrationState{
			LastCompleted:       make(map[string]time.Time),
			AgentVersion:        agentVersion,
			ConfigVersion:       configVersion,
			Timestamp:           time.Now(),
			Success:             false,
			CompletedMigrations: []string{},
		}
	}
	cfg, err := sm.loadConfig()
	if err != nil {
		if os.IsNotExist(err) {
			// Fresh install - no migration state yet.
			return freshState("", ""), nil
		}
		return nil, fmt.Errorf("failed to load config: %w", err)
	}
	// Config exists but has never recorded migration state.
	if cfg.MigrationState == nil {
		return freshState(cfg.AgentVersion, cfg.Version), nil
	}
	return cfg.MigrationState, nil
}
// SaveState saves migration state to config file.
//
// The current config is re-read from disk, the state pointer is attached,
// the state's timestamp is refreshed, and the whole config is written back.
//
// NOTE(review): this fails on a fresh install where the config file does not
// exist yet (loadConfig returns the read error) — confirm callers only save
// state after the config has been written at least once.
func (sm *StateManager) SaveState(state *config.MigrationState) error {
	// Load current config
	cfg, err := sm.loadConfig()
	if err != nil {
		return fmt.Errorf("failed to load config for state save: %w", err)
	}
	// Update migration state (cfg holds the same pointer, so the timestamp
	// refresh below is reflected in what gets saved).
	cfg.MigrationState = state
	state.Timestamp = time.Now()
	// Save config with updated state
	return sm.saveConfig(cfg)
}
// IsMigrationCompleted reports whether the given migration type has already
// run, consulting both the current completed-migrations list and the legacy
// last_completed timestamp map.
func (sm *StateManager) IsMigrationCompleted(migrationType string) (bool, error) {
	state, err := sm.LoadState()
	if err != nil {
		return false, err
	}
	// Current format: membership in the completed list.
	for _, name := range state.CompletedMigrations {
		if name == migrationType {
			return true, nil
		}
	}
	// Also check legacy last_completed map for backward compatibility.
	ts, ok := state.LastCompleted[migrationType]
	if !ok {
		return false, nil
	}
	return !ts.IsZero(), nil
}
// MarkMigrationCompleted records that a migration finished successfully and
// persists the updated state back to the config file. rollbackPath, when
// non-empty, is stored so the migration can be reverted later.
//
// Fixes:
//   - guards against a nil LastCompleted map (a state decoded from JSON may
//     not have it initialized), which previously panicked on assignment;
//   - removes the duplicate timestamp write that happened both inside the
//     search loop and again after it.
func (sm *StateManager) MarkMigrationCompleted(migrationType string, rollbackPath string, agentVersion string) error {
	state, err := sm.LoadState()
	if err != nil {
		return err
	}
	if state.LastCompleted == nil {
		state.LastCompleted = make(map[string]time.Time)
	}
	// Add to the completed list only if not already present.
	found := false
	for _, completed := range state.CompletedMigrations {
		if completed == migrationType {
			found = true
			break
		}
	}
	if !found {
		state.CompletedMigrations = append(state.CompletedMigrations, migrationType)
	}
	state.LastCompleted[migrationType] = time.Now()
	state.AgentVersion = agentVersion
	state.Success = true
	if rollbackPath != "" {
		state.RollbackPath = rollbackPath
	}
	return sm.SaveState(state)
}
// CleanupOldDirectories removes the legacy aggregator directories after a
// successful migration. Removal failures are only logged as warnings — the
// cleanup is best-effort and always returns nil.
func (sm *StateManager) CleanupOldDirectories() error {
	for _, oldDir := range []string{"/etc/aggregator", "/var/lib/aggregator"} {
		if _, err := os.Stat(oldDir); err != nil {
			// Directory absent (or inaccessible): nothing to clean up.
			continue
		}
		fmt.Printf("[MIGRATION] Cleaning up old directory: %s\n", oldDir)
		if err := os.RemoveAll(oldDir); err != nil {
			fmt.Printf("[MIGRATION] Warning: Failed to remove old directory %s: %v\n", oldDir, err)
		}
	}
	return nil
}
// loadConfig reads and JSON-decodes the agent config file.
// The raw os.ReadFile error is returned unwrapped so callers can use
// os.IsNotExist on it.
func (sm *StateManager) loadConfig() (*config.Config, error) {
	raw, err := os.ReadFile(sm.configPath)
	if err != nil {
		return nil, err
	}
	cfg := &config.Config{}
	if err := json.Unmarshal(raw, cfg); err != nil {
		return nil, err
	}
	return cfg, nil
}
// saveConfig encodes the config as indented JSON and writes it back to the
// config path with mode 0644.
func (sm *StateManager) saveConfig(cfg *config.Config) error {
	encoded, err := json.MarshalIndent(cfg, "", " ")
	if err != nil {
		return err
	}
	return os.WriteFile(sm.configPath, encoded, 0644)
}

View File

@@ -0,0 +1,398 @@
package validation
import (
	"errors"
	"fmt"
	"os"
	"path/filepath"
	"strings"
	"time"

	"github.com/Fimeg/RedFlag/aggregator-agent/internal/common"
	"github.com/Fimeg/RedFlag/aggregator-agent/internal/event"
	"github.com/Fimeg/RedFlag/aggregator-agent/internal/migration/pathutils"
	"github.com/Fimeg/RedFlag/aggregator-agent/internal/models"
	"github.com/google/uuid"
)
// FileValidator handles comprehensive file validation for migration.
type FileValidator struct {
	pathManager *pathutils.PathManager // path normalization and existence checks
	eventBuffer *event.Buffer          // destination for validation events; may be nil
	agentID     uuid.UUID              // agent attributed to buffered events
}

// NewFileValidator creates a new file validator.
// eventBuffer may be nil, in which case validation events are dropped
// (see bufferEvent).
func NewFileValidator(pm *pathutils.PathManager, eventBuffer *event.Buffer, agentID uuid.UUID) *FileValidator {
	return &FileValidator{
		pathManager: pm,
		eventBuffer: eventBuffer,
		agentID:     agentID,
	}
}
// ValidationResult holds the outcome of a validation pass.
type ValidationResult struct {
	Valid      bool             `json:"valid"`      // false once any error is recorded
	Errors     []string         `json:"errors"`     // human-readable blocking problems
	Warnings   []string         `json:"warnings"`   // non-blocking observations
	Inventory  *FileInventory   `json:"inventory"`  // files grouped by validation outcome
	Statistics *ValidationStats `json:"statistics"` // counters and timing
}

// FileInventory represents validated files, grouped by outcome.
type FileInventory struct {
	ValidFiles   []common.AgentFile `json:"valid_files"`
	InvalidFiles []InvalidFile      `json:"invalid_files"`
	MissingFiles []string           `json:"missing_files"` // required patterns with no match
	SkippedFiles []SkippedFile      `json:"skipped_files"`
	Directories  []string           `json:"directories"` // unique parent dirs of valid files
}

// InvalidFile represents a file that failed validation.
type InvalidFile struct {
	Path      string `json:"path"`
	Reason    string `json:"reason"`
	ErrorType string `json:"error_type"` // "not_found", "permission", "traversal", "other"
	Expected  string `json:"expected"`
}

// SkippedFile represents a file that was intentionally skipped.
type SkippedFile struct {
	Path   string `json:"path"`
	Reason string `json:"reason"`
}

// ValidationStats holds statistics about validation.
type ValidationStats struct {
	TotalFiles     int   `json:"total_files"`
	ValidFiles     int   `json:"valid_files"`
	InvalidFiles   int   `json:"invalid_files"`
	MissingFiles   int   `json:"missing_files"`
	SkippedFiles   int   `json:"skipped_files"`
	ValidationTime int64 `json:"validation_time_ms"` // wall-clock duration, milliseconds
	TotalSizeBytes int64 `json:"total_size_bytes"`   // sum of valid file sizes
}
// ValidateInventory performs comprehensive validation of a file inventory.
//
// Each file is either skipped (log/temp files), rejected (path validation
// failure), or accepted into the valid set. Afterwards every required
// pattern must match at least one valid file's basename. Events are buffered
// throughout; the error return is reserved for internal failures and is
// currently always nil.
//
// Fix: failure classification now uses errors.Is, which unwraps wrapped
// errors. The previous os.IsNotExist/os.IsPermission calls do not unwrap, so
// errors produced by ValidatePath (which wraps with %w) always fell through
// to errorType "other".
func (v *FileValidator) ValidateInventory(files []common.AgentFile, requiredPatterns []string) (*ValidationResult, error) {
	start := time.Now()
	result := &ValidationResult{
		Valid:    true,
		Errors:   []string{},
		Warnings: []string{},
		Inventory: &FileInventory{
			ValidFiles:   []common.AgentFile{},
			InvalidFiles: []InvalidFile{},
			MissingFiles: []string{},
			SkippedFiles: []SkippedFile{},
			Directories:  []string{},
		},
		Statistics: &ValidationStats{},
	}

	// Group files by directory and collect statistics.
	dirMap := make(map[string]bool)
	var totalSize int64
	for _, file := range files {
		result.Statistics.TotalFiles++

		// Skip log files (.log, .tmp) as they shouldn't be migrated.
		if containsAny(file.Path, []string{"*.log", "*.tmp"}) {
			result.Inventory.SkippedFiles = append(result.Inventory.SkippedFiles, SkippedFile{
				Path:   file.Path,
				Reason: "Log/temp files are not migrated",
			})
			result.Statistics.SkippedFiles++
			continue
		}

		// Validate file path and existence.
		if err := v.pathManager.ValidatePath(file.Path); err != nil {
			result.Valid = false
			result.Statistics.InvalidFiles++

			// Classify the failure with errors.Is (unwraps %w chains).
			errorType := "other"
			reason := err.Error()
			if errors.Is(err, os.ErrNotExist) {
				errorType = "not_found"
				reason = fmt.Sprintf("File does not exist: %s", file.Path)
			} else if errors.Is(err, os.ErrPermission) {
				errorType = "permission"
				reason = fmt.Sprintf("Permission denied: %s", file.Path)
			}
			result.Errors = append(result.Errors, reason)
			result.Inventory.InvalidFiles = append(result.Inventory.InvalidFiles, InvalidFile{
				Path:      file.Path,
				Reason:    reason,
				ErrorType: errorType,
			})
			// Log the validation failure.
			v.bufferEvent("file_validation_failed", "warning", "migration_validator",
				reason,
				map[string]interface{}{
					"file_path":  file.Path,
					"error_type": errorType,
					"file_size":  file.Size,
				})
			continue
		}

		// Track the containing directory (first occurrence only).
		dir := filepath.Dir(file.Path)
		if !dirMap[dir] {
			dirMap[dir] = true
			result.Inventory.Directories = append(result.Inventory.Directories, dir)
		}
		result.Inventory.ValidFiles = append(result.Inventory.ValidFiles, file)
		result.Statistics.ValidFiles++
		totalSize += file.Size
	}
	result.Statistics.TotalSizeBytes = totalSize

	// Check that every required pattern matches some valid file's basename.
	for _, pattern := range requiredPatterns {
		found := false
		for _, file := range result.Inventory.ValidFiles {
			if matched, _ := filepath.Match(pattern, filepath.Base(file.Path)); matched {
				found = true
				break
			}
		}
		if !found {
			result.Valid = false
			missing := fmt.Sprintf("Required file pattern not found: %s", pattern)
			result.Errors = append(result.Errors, missing)
			result.Inventory.MissingFiles = append(result.Inventory.MissingFiles, pattern)
			result.Statistics.MissingFiles++
			// Log missing required file.
			v.bufferEvent("required_file_missing", "error", "migration_validator",
				missing,
				map[string]interface{}{
					"required_pattern": pattern,
					"phase":            "validation",
				})
		}
	}

	result.Statistics.ValidationTime = time.Since(start).Milliseconds()

	// Log validation completion.
	v.bufferEvent("validation_completed", "info", "migration_validator",
		fmt.Sprintf("File validation completed: %d total, %d valid, %d invalid, %d skipped",
			result.Statistics.TotalFiles,
			result.Statistics.ValidFiles,
			result.Statistics.InvalidFiles,
			result.Statistics.SkippedFiles),
		map[string]interface{}{
			"stats": result.Statistics,
			"valid": result.Valid,
		})

	return result, nil
}
// ValidateBackupLocation verifies the backup destination is safe and usable:
// an absolute path outside system binary directories, whose parent exists
// (or can be created) and is writable.
func (v *FileValidator) ValidateBackupLocation(backupPath string) error {
	normalized, err := v.pathManager.NormalizeToAbsolute(backupPath)
	if err != nil {
		return fmt.Errorf("invalid backup path: %w", err)
	}
	// Refuse to write backups into system binary directories.
	for _, sysDir := range []string{"/bin/", "/sbin/", "/usr/bin/", "/usr/sbin/"} {
		if strings.HasPrefix(normalized, sysDir) {
			return fmt.Errorf("backup path cannot be in system directory: %s", normalized)
		}
	}
	// Ensure the parent directory exists.
	parent := filepath.Dir(normalized)
	if err := v.pathManager.EnsureDirectory(parent); err != nil {
		return fmt.Errorf("cannot create backup directory: %w", err)
	}
	// Probe write permission with a throwaway file, then remove it.
	probe := filepath.Join(parent, ".migration_test_"+uuid.New().String()[:8])
	if err := os.WriteFile(probe, []byte("test"), 0600); err != nil {
		return fmt.Errorf("backup directory not writable: %w", err)
	}
	_ = os.Remove(probe)
	return nil
}
// PreValidate validates all conditions before migration starts.
//
// It validates the detected file inventory against required patterns, checks
// that the backup location is usable, and verifies the new target
// directories look creatable. Problems accumulate in the returned
// ValidationResult (result.Valid plus Errors/Warnings); the error return is
// only used when inventory validation itself fails. Progress and outcomes
// are recorded via the event buffer throughout.
func (v *FileValidator) PreValidate(detection *MigrationDetection, backupPath string) (*ValidationResult, error) {
	v.bufferEvent("pre_validation_started", "info", "migration_validator",
		"Starting comprehensive migration validation",
		map[string]interface{}{
			"agent_version":  detection.CurrentAgentVersion,
			"config_version": detection.CurrentConfigVersion,
		})
	// Collect all files from inventory
	allFiles := v.collectAllFiles(detection.Inventory)
	// Define required patterns based on migration needs
	requiredPatterns := []string{
		"config.json", // Config is essential
		// Note: agent.key files are generated if missing
	}
	// Validate inventory
	result, err := v.ValidateInventory(allFiles, requiredPatterns)
	if err != nil {
		v.bufferEvent("validation_error", "error", "migration_validator",
			fmt.Sprintf("Validation failed: %v", err),
			map[string]interface{}{
				"error": err.Error(),
				"phase": "pre_validation",
			})
		return nil, fmt.Errorf("validation failed: %w", err)
	}
	// Validate backup location; failure marks the result invalid but does not abort.
	if err := v.ValidateBackupLocation(backupPath); err != nil {
		result.Valid = false
		result.Errors = append(result.Errors, fmt.Sprintf("Backup location invalid: %v", err))
		v.bufferEvent("backup_validation_failed", "error", "migration_validator",
			fmt.Sprintf("Backup validation failed: %v", err),
			map[string]interface{}{
				"backup_path": backupPath,
				"error":       err.Error(),
				"phase":       "validation",
			})
	}
	// Validate new directories can be created (but don't create them yet)
	newDirs := []string{
		v.pathManager.GetConfig().NewConfigPath,
		v.pathManager.GetConfig().NewStatePath,
	}
	for _, dir := range newDirs {
		normalized, err := v.pathManager.NormalizeToAbsolute(dir)
		if err != nil {
			result.Valid = false
			result.Errors = append(result.Errors, fmt.Sprintf("Invalid new directory %s: %v", dir, err))
			continue
		}
		// Check if parent is writable
		parent := filepath.Dir(normalized)
		if _, err := os.Stat(parent); err != nil {
			if os.IsNotExist(err) {
				// A missing parent is only a warning: migration's MkdirAll can create it.
				result.Warnings = append(result.Warnings, fmt.Sprintf("Parent directory for %s does not exist: %s", dir, parent))
			}
		}
	}
	// Log final validation status
	v.bufferEvent("pre_validation_completed", "info", "migration_validator",
		fmt.Sprintf("Pre-validation completed: %s", func() string {
			if result.Valid {
				return "PASSED"
			}
			return "FAILED"
		}()),
		map[string]interface{}{
			"errors_count":   len(result.Errors),
			"warnings_count": len(result.Warnings),
			"files_valid":    result.Statistics.ValidFiles,
			"files_invalid":  result.Statistics.InvalidFiles,
			"files_skipped":  result.Statistics.SkippedFiles,
		})
	return result, nil
}
// collectAllFiles flattens every category of the migration inventory into a
// single slice. A nil inventory yields a nil slice.
func (v *FileValidator) collectAllFiles(inventory *AgentFileInventory) []common.AgentFile {
	if inventory == nil {
		return nil
	}
	var all []common.AgentFile
	for _, group := range [][]common.AgentFile{
		inventory.ConfigFiles,
		inventory.StateFiles,
		inventory.BinaryFiles,
		inventory.LogFiles,
		inventory.CertificateFiles,
	} {
		all = append(all, group...)
	}
	return all
}
// bufferEvent records a migration event in the event buffer, if one is
// configured. Buffer failures are logged to stdout and otherwise ignored.
func (v *FileValidator) bufferEvent(eventSubtype, severity, component, message string, metadata map[string]interface{}) {
	if v.eventBuffer == nil {
		return
	}
	// Named evt (not "event") to avoid shadowing the imported event package.
	evt := &models.SystemEvent{
		ID:           uuid.New(),
		AgentID:      &v.agentID,
		EventType:    models.EventTypeAgentMigration, // Using model constant
		EventSubtype: eventSubtype,
		Severity:     severity,
		Component:    component,
		Message:      message,
		Metadata:     metadata,
		CreatedAt:    time.Now(),
	}
	if err := v.eventBuffer.BufferEvent(evt); err != nil {
		fmt.Printf("[VALIDATION] Warning: Failed to buffer event: %v\n", err)
	}
}
// containsAny reports whether the basename of path matches any of the given
// glob patterns (filepath.Match semantics; malformed patterns never match).
func containsAny(path string, patterns []string) bool {
	base := filepath.Base(path)
	for _, pattern := range patterns {
		ok, err := filepath.Match(pattern, base)
		if err == nil && ok {
			return true
		}
	}
	return false
}
// ValidateFileForBackup validates a single file before it is backed up.
// Missing files are logged and returned as errors so the caller can skip
// them; sensitive files (key material and configs) additionally get a
// world-readability audit, which only warns via the event buffer.
//
// Fix: the file is now stat'ed once and the result reused for both the
// existence check and the permission audit (the original called os.Stat
// twice on the same path).
func (v *FileValidator) ValidateFileForBackup(file common.AgentFile) error {
	info, err := os.Stat(file.Path)
	if err != nil {
		if os.IsNotExist(err) {
			v.bufferEvent("backup_file_missing", "warning", "migration_validator",
				fmt.Sprintf("Skipping backup of non-existent file: %s", file.Path),
				map[string]interface{}{
					"file_path": file.Path,
					"phase":     "backup",
				})
			return fmt.Errorf("file does not exist: %s", file.Path)
		}
		return fmt.Errorf("failed to access file %s: %w", file.Path, err)
	}
	// Additional validation for sensitive files: keys and configs should not
	// be world-readable.
	if strings.Contains(file.Path, ".key") || strings.Contains(file.Path, "config") {
		perm := info.Mode().Perm()
		// Check if others have read permission.
		if perm&0004 != 0 {
			v.bufferEvent("insecure_file_permissions", "warning", "migration_validator",
				fmt.Sprintf("Sensitive file has world-readable permissions: %s (0%o)", file.Path, perm),
				map[string]interface{}{
					"file_path":   file.Path,
					"permissions": perm,
				})
		}
	}
	return nil
}

View File

@@ -0,0 +1,31 @@
package models
import (
"time"
"github.com/google/uuid"
)
// StorageMetricReport represents storage metrics from an agent.
// It is the request body POSTed to /api/v1/agents/{id}/storage-metrics.
type StorageMetricReport struct {
	AgentID   uuid.UUID       `json:"agent_id"`   // reporting agent's ID
	CommandID string          `json:"command_id"` // originating command, if any
	Timestamp time.Time       `json:"timestamp"`  // when the metrics were collected
	Metrics   []StorageMetric `json:"metrics"`    // one entry per disk/mountpoint
}

// StorageMetric represents a single disk/storage metric (one mountpoint).
type StorageMetric struct {
	Mountpoint     string                 `json:"mountpoint"`         // filesystem mount point, e.g. "/"
	Device         string                 `json:"device"`             // backing device identifier
	DiskType       string                 `json:"disk_type"`          // disk type label — semantics set by the scanner
	Filesystem     string                 `json:"filesystem"`         // filesystem type name
	TotalBytes     int64                  `json:"total_bytes"`        // total capacity in bytes
	UsedBytes      int64                  `json:"used_bytes"`         // bytes in use
	AvailableBytes int64                  `json:"available_bytes"`    // bytes available
	UsedPercent    float64                `json:"used_percent"`       // used capacity as a percentage
	IsRoot         bool                   `json:"is_root"`            // true for the root filesystem entry
	IsLargest      bool                   `json:"is_largest"`         // true for the largest disk in the report
	Severity       string                 `json:"severity"`           // severity classification assigned by the scanner
	Metadata       map[string]interface{} `json:"metadata,omitempty"` // optional extra scanner data
}

View File

@@ -68,35 +68,6 @@ func (s *StorageScanner) Name() string {
return "Disk Usage Reporter"
}
// --- Legacy Compatibility Methods ---
// Scan collects disk usage information and returns it as "updates" for reporting (LEGACY)
// This method is kept for backwards compatibility with the old Scanner interface
func (s *StorageScanner) Scan() ([]client.UpdateReportItem, error) {
metrics, err := s.ScanStorage()
if err != nil {
return nil, err
}
// Convert proper StorageMetric back to legacy UpdateReportItem format
var items []client.UpdateReportItem
for _, metric := range metrics {
item := client.UpdateReportItem{
PackageName: fmt.Sprintf("disk-%s", metric.Mountpoint),
CurrentVersion: fmt.Sprintf("%.1f%% used", metric.UsedPercent),
AvailableVersion: fmt.Sprintf("%d GB available", metric.AvailableBytes/(1024*1024*1024)),
PackageType: "storage",
Severity: metric.Severity,
PackageDescription: fmt.Sprintf("Disk: %s (%s) - %s", metric.Mountpoint, metric.Filesystem, metric.Device),
Metadata: metric.Metadata,
}
items = append(items, item)
}
return items, nil
}
// --- Typed Scanner Implementation ---
// GetType returns the scanner type

View File

@@ -7,25 +7,14 @@ import (
"time"
)
// Build-time injected version information
// These will be set via ldflags during build (SERVER AUTHORITY)
// Build-time injected version information (SERVER AUTHORITY)
// Injected by server during build via ldflags
var (
// Version is the agent version (e.g., "0.1.23.6")
// Injected by server during build: -ldflags "-X github.com/redflag/redflag/internal/version.Version=0.1.23.6"
Version = "dev"
// ConfigVersion is the config schema version this agent expects (e.g., "6")
// Injected by server during build: -ldflags "-X github.com/redflag/redflag/internal/version.ConfigVersion=6"
ConfigVersion = "dev"
// BuildTime is when this binary was built
BuildTime = "unknown"
// GitCommit is the git commit hash
GitCommit = "unknown"
// GoVersion is the Go version used to build
GoVersion = runtime.Version()
Version = "dev" // Agent version (format: 0.1.26.0)
ConfigVersion = "dev" // Config schema version (format: 0, 1, 2, etc.)
BuildTime = "unknown"
GitCommit = "unknown"
GoVersion = runtime.Version()
)
// ExtractConfigVersionFromAgent extracts the config version from the agent version