WIP: Save current state - security subsystems, migrations, logging

This commit is contained in:
Fimeg
2025-12-16 14:19:59 -05:00
parent f792ab23c7
commit f7c8d23c5d
89 changed files with 8884 additions and 1394 deletions

View File

@@ -13,17 +13,17 @@ import (
// LocalCache stores scan results locally for offline viewing
type LocalCache struct {
LastScanTime time.Time `json:"last_scan_time"`
LastCheckIn time.Time `json:"last_check_in"`
AgentID uuid.UUID `json:"agent_id"`
ServerURL string `json:"server_url"`
UpdateCount int `json:"update_count"`
Updates []client.UpdateReportItem `json:"updates"`
AgentStatus string `json:"agent_status"`
LastScanTime time.Time `json:"last_scan_time"`
LastCheckIn time.Time `json:"last_check_in"`
AgentID uuid.UUID `json:"agent_id"`
ServerURL string `json:"server_url"`
UpdateCount int `json:"update_count"`
Updates []client.UpdateReportItem `json:"updates"`
AgentStatus string `json:"agent_status"`
}
// CacheDir is the directory where local cache is stored
const CacheDir = "/var/lib/redflag"
const CacheDir = "/var/lib/redflag-agent"
// CacheFile is the file where scan results are cached
const CacheFile = "last_scan.json"
@@ -126,4 +126,4 @@ func (c *LocalCache) Clear() {
c.UpdateCount = 0
c.Updates = []client.UpdateReportItem{}
c.AgentStatus = ""
}
}

View File

@@ -7,10 +7,13 @@ import (
"io"
"net/http"
"os"
"path/filepath"
"runtime"
"strings"
"time"
"github.com/Fimeg/RedFlag/aggregator-agent/internal/event"
"github.com/Fimeg/RedFlag/aggregator-agent/internal/models"
"github.com/Fimeg/RedFlag/aggregator-agent/internal/system"
"github.com/google/uuid"
)
@@ -23,6 +26,8 @@ type Client struct {
RapidPollingEnabled bool
RapidPollingUntil time.Time
machineID string // Cached machine ID for security binding
eventBuffer *event.Buffer
agentID uuid.UUID
}
// NewClient creates a new API client
@@ -45,6 +50,58 @@ func NewClient(baseURL, token string) *Client {
}
}
// NewClientWithEventBuffer creates a new API client with event buffering capability
func NewClientWithEventBuffer(baseURL, token string, statePath string, agentID uuid.UUID) *Client {
client := NewClient(baseURL, token)
client.agentID = agentID
// Initialize event buffer if state path is provided
if statePath != "" {
eventBufferPath := filepath.Join(statePath, "events_buffer.json")
client.eventBuffer = event.NewBuffer(eventBufferPath)
}
return client
}
// bufferEvent records a system event in the local event buffer so it can
// be reported to the server later (e.g. after an outage). It is a no-op
// when event buffering is not enabled, and buffering failures are only
// logged, never propagated — event capture is strictly best effort.
func (c *Client) bufferEvent(eventType, eventSubtype, severity, component, message string, metadata map[string]interface{}) {
	if c.eventBuffer == nil {
		return // Event buffering not enabled
	}

	// Attach the agent ID when known; before registration the agent has
	// no ID yet and events are buffered with a nil agent ID.
	var agentIDPtr *uuid.UUID
	if c.agentID != uuid.Nil {
		agentIDPtr = &c.agentID
	}

	// Named "ev" (not "event") so the imported event package is not shadowed.
	ev := &models.SystemEvent{
		ID:           uuid.New(),
		AgentID:      agentIDPtr,
		EventType:    eventType,
		EventSubtype: eventSubtype,
		Severity:     severity,
		Component:    component,
		Message:      message,
		Metadata:     metadata,
		CreatedAt:    time.Now(),
	}

	// Buffer the event (best effort - don't fail if buffering fails)
	if err := c.eventBuffer.BufferEvent(ev); err != nil {
		fmt.Printf("Warning: Failed to buffer event: %v\n", err)
	}
}
// GetBufferedEvents returns all buffered events and clears the buffer.
// It returns (nil, nil) when event buffering was not enabled for this
// client (see NewClientWithEventBuffer), so callers can treat "no buffer"
// and "empty buffer" the same way.
func (c *Client) GetBufferedEvents() ([]*models.SystemEvent, error) {
	if c.eventBuffer == nil {
		return nil, nil // Event buffering not enabled
	}
	return c.eventBuffer.GetBufferedEvents()
}
// addMachineIDHeader adds X-Machine-ID header to authenticated requests (v0.1.22+)
func (c *Client) addMachineIDHeader(req *http.Request) {
if c.machineID != "" {
@@ -95,11 +152,25 @@ func (c *Client) Register(req RegisterRequest) (*RegisterResponse, error) {
body, err := json.Marshal(req)
if err != nil {
// Buffer registration failure event
c.bufferEvent("registration_failure", "marshal_error", "error", "client",
fmt.Sprintf("Failed to marshal registration request: %v", err),
map[string]interface{}{
"error": err.Error(),
"hostname": req.Hostname,
})
return nil, err
}
httpReq, err := http.NewRequest("POST", url, bytes.NewBuffer(body))
if err != nil {
// Buffer registration failure event
c.bufferEvent("registration_failure", "request_creation_error", "error", "client",
fmt.Sprintf("Failed to create registration request: %v", err),
map[string]interface{}{
"error": err.Error(),
"hostname": req.Hostname,
})
return nil, err
}
httpReq.Header.Set("Content-Type", "application/json")
@@ -112,22 +183,49 @@ func (c *Client) Register(req RegisterRequest) (*RegisterResponse, error) {
resp, err := c.http.Do(httpReq)
if err != nil {
// Buffer registration failure event
c.bufferEvent("registration_failure", "network_error", "error", "client",
fmt.Sprintf("Registration request failed: %v", err),
map[string]interface{}{
"error": err.Error(),
"hostname": req.Hostname,
"server_url": c.baseURL,
})
return nil, err
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
bodyBytes, _ := io.ReadAll(resp.Body)
return nil, fmt.Errorf("registration failed: %s - %s", resp.Status, string(bodyBytes))
errorMsg := fmt.Sprintf("registration failed: %s - %s", resp.Status, string(bodyBytes))
// Buffer registration failure event
c.bufferEvent("registration_failure", "api_error", "error", "client",
errorMsg,
map[string]interface{}{
"status_code": resp.StatusCode,
"response_body": string(bodyBytes),
"hostname": req.Hostname,
"server_url": c.baseURL,
})
return nil, fmt.Errorf(errorMsg)
}
var result RegisterResponse
if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
// Buffer registration failure event
c.bufferEvent("registration_failure", "decode_error", "error", "client",
fmt.Sprintf("Failed to decode registration response: %v", err),
map[string]interface{}{
"error": err.Error(),
"hostname": req.Hostname,
})
return nil, err
}
// Update client token
// Update client token and agent ID
c.token = result.Token
c.agentID = result.AgentID
return &result, nil
}
@@ -136,6 +234,7 @@ func (c *Client) Register(req RegisterRequest) (*RegisterResponse, error) {
type TokenRenewalRequest struct {
AgentID uuid.UUID `json:"agent_id"`
RefreshToken string `json:"refresh_token"`
AgentVersion string `json:"agent_version,omitempty"` // Agent's current version for upgrade tracking
}
// TokenRenewalResponse is returned after successful token renewal
@@ -144,38 +243,79 @@ type TokenRenewalResponse struct {
}
// RenewToken uses refresh token to get a new access token (proper implementation)
func (c *Client) RenewToken(agentID uuid.UUID, refreshToken string) error {
func (c *Client) RenewToken(agentID uuid.UUID, refreshToken string, agentVersion string) error {
url := fmt.Sprintf("%s/api/v1/agents/renew", c.baseURL)
renewalReq := TokenRenewalRequest{
AgentID: agentID,
RefreshToken: refreshToken,
AgentVersion: agentVersion,
}
body, err := json.Marshal(renewalReq)
if err != nil {
// Buffer token renewal failure event
c.bufferEvent("token_renewal_failure", "marshal_error", "error", "client",
fmt.Sprintf("Failed to marshal token renewal request: %v", err),
map[string]interface{}{
"error": err.Error(),
"agent_id": agentID.String(),
})
return err
}
httpReq, err := http.NewRequest("POST", url, bytes.NewBuffer(body))
if err != nil {
// Buffer token renewal failure event
c.bufferEvent("token_renewal_failure", "request_creation_error", "error", "client",
fmt.Sprintf("Failed to create token renewal request: %v", err),
map[string]interface{}{
"error": err.Error(),
"agent_id": agentID.String(),
})
return err
}
httpReq.Header.Set("Content-Type", "application/json")
resp, err := c.http.Do(httpReq)
if err != nil {
// Buffer token renewal failure event
c.bufferEvent("token_renewal_failure", "network_error", "error", "client",
fmt.Sprintf("Token renewal request failed: %v", err),
map[string]interface{}{
"error": err.Error(),
"agent_id": agentID.String(),
"server_url": c.baseURL,
})
return err
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
bodyBytes, _ := io.ReadAll(resp.Body)
return fmt.Errorf("token renewal failed: %s - %s", resp.Status, string(bodyBytes))
errorMsg := fmt.Sprintf("token renewal failed: %s - %s", resp.Status, string(bodyBytes))
// Buffer token renewal failure event
c.bufferEvent("token_renewal_failure", "api_error", "error", "client",
errorMsg,
map[string]interface{}{
"status_code": resp.StatusCode,
"response_body": string(bodyBytes),
"agent_id": agentID.String(),
"server_url": c.baseURL,
})
return fmt.Errorf(errorMsg)
}
var result TokenRenewalResponse
if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
// Buffer token renewal failure event
c.bufferEvent("token_renewal_failure", "decode_error", "error", "client",
fmt.Sprintf("Failed to decode token renewal response: %v", err),
map[string]interface{}{
"error": err.Error(),
"agent_id": agentID.String(),
})
return err
}
@@ -187,11 +327,15 @@ func (c *Client) RenewToken(agentID uuid.UUID, refreshToken string) error {
// Command represents a command from the server
type Command struct {
ID string `json:"id"`
Type string `json:"type"`
Params map[string]interface{} `json:"params"`
ID string `json:"id"`
Type string `json:"type"`
Params map[string]interface{} `json:"params"`
Signature string `json:"signature,omitempty"` // Ed25519 signature of the command
}
// CommandItem is an alias for Command for consistency with server models
type CommandItem = Command
// CommandsResponse contains pending commands
type CommandsResponse struct {
Commands []Command `json:"commands"`

View File

@@ -0,0 +1,44 @@
package common
import (
	"crypto/sha256"
	"encoding/hex"
	"io"
	"os"
	"time"
)
// AgentFile describes one file that belongs to the agent installation
// (binary, config, systemd unit, ...), together with the metadata that
// migration/verification code needs to decide how to handle it.
type AgentFile struct {
	Path         string    `json:"path"`              // Absolute path on disk
	Size         int64     `json:"size"`              // File size in bytes
	ModifiedTime time.Time `json:"modified_time"`     // Last modification time
	Version      string    `json:"version,omitempty"` // Associated agent version, if known
	Checksum     string    `json:"checksum"`          // Hex digest — presumably SHA256 via CalculateChecksum; confirm at call sites
	Required     bool      `json:"required"`          // True if the agent cannot operate without this file (see IsRequiredFile)
	Migrate      bool      `json:"migrate"`           // True if the file should be carried across upgrades
	Description  string    `json:"description"`       // Human-readable description of the file
}
// CalculateChecksum computes SHA256 checksum of a file
func CalculateChecksum(filePath string) (string, error) {
data, err := os.ReadFile(filePath)
if err != nil {
return "", err
}
hash := sha256.Sum256(data)
return hex.EncodeToString(hash[:]), nil
}
// IsRequiredFile reports whether path is one of the files the agent
// cannot operate without: its config file, the agent binary, or the
// systemd unit. Matching is by exact path string.
func IsRequiredFile(path string) bool {
	switch path {
	case "/etc/redflag/config.json",
		"/usr/local/bin/redflag-agent",
		"/etc/systemd/system/redflag-agent.service":
		return true
	}
	return false
}

View File

@@ -5,11 +5,24 @@ import (
"fmt"
"os"
"path/filepath"
"strings"
"time"
"github.com/Fimeg/RedFlag/aggregator-agent/internal/version"
"github.com/google/uuid"
)
// MigrationState tracks migration completion status (used by migration package)
type MigrationState struct {
	LastCompleted       map[string]time.Time `json:"last_completed"`          // Completion time per migration name
	AgentVersion        string               `json:"agent_version"`           // Agent version associated with this state
	ConfigVersion       string               `json:"config_version"`          // Config schema version associated with this state
	Timestamp           time.Time            `json:"timestamp"`               // When this state record was written
	Success             bool                 `json:"success"`                 // Whether the recorded migration run succeeded
	RollbackPath        string               `json:"rollback_path,omitempty"` // Backup location for rollback, if one was made
	CompletedMigrations []string             `json:"completed_migrations"`    // Identifiers of migrations already applied
}
// ProxyConfig holds proxy configuration
type ProxyConfig struct {
Enabled bool `json:"enabled"`
@@ -45,6 +58,24 @@ type LoggingConfig struct {
MaxAge int `json:"max_age"` // Max age of log files in days
}
// SecurityLogConfig holds configuration for security logging.
// Each field can be overridden by the environment variable named in its
// `env` tag; the `default` tags mirror the values in getDefaultConfig().
type SecurityLogConfig struct {
	Enabled      bool   `json:"enabled" env:"REDFLAG_AGENT_SECURITY_LOG_ENABLED" default:"true"` // Master on/off switch for security logging
	Level        string `json:"level" env:"REDFLAG_AGENT_SECURITY_LOG_LEVEL" default:"warning"`  // none, error, warn, info, debug
	LogSuccesses bool   `json:"log_successes" env:"REDFLAG_AGENT_SECURITY_LOG_SUCCESSES" default:"false"` // Also record successful security checks
	FilePath     string `json:"file_path" env:"REDFLAG_AGENT_SECURITY_LOG_PATH"` // Relative to agent data directory
	MaxSizeMB    int    `json:"max_size_mb" env:"REDFLAG_AGENT_SECURITY_LOG_MAX_SIZE" default:"50"` // Max log file size in MB
	MaxFiles     int    `json:"max_files" env:"REDFLAG_AGENT_SECURITY_LOG_MAX_FILES" default:"5"`   // Max number of log files to keep
	BatchSize    int    `json:"batch_size" env:"REDFLAG_AGENT_SECURITY_LOG_BATCH_SIZE" default:"10"` // Batch size for server reporting
	SendToServer bool   `json:"send_to_server" env:"REDFLAG_AGENT_SECURITY_LOG_SEND" default:"true"` // Whether to forward security logs to the server
}
// CommandSigningConfig holds configuration for command signature verification.
// Both fields can be overridden via the environment variables in the `env` tags.
type CommandSigningConfig struct {
	Enabled         bool   `json:"enabled" env:"REDFLAG_AGENT_COMMAND_SIGNING_ENABLED" default:"true"` // Verify signatures on incoming commands
	EnforcementMode string `json:"enforcement_mode" env:"REDFLAG_AGENT_COMMAND_ENFORCEMENT_MODE" default:"strict"` // strict, warning, disabled
}
// Config holds agent configuration
type Config struct {
// Version Information
@@ -79,6 +110,12 @@ type Config struct {
// Logging Configuration
Logging LoggingConfig `json:"logging,omitempty"`
// Security Logging Configuration
SecurityLogging SecurityLogConfig `json:"security_logging,omitempty"`
// Command Signing Configuration
CommandSigning CommandSigningConfig `json:"command_signing,omitempty"`
// Agent Metadata
Tags []string `json:"tags,omitempty"` // User-defined tags
Metadata map[string]string `json:"metadata,omitempty"` // Custom metadata
@@ -87,6 +124,9 @@ type Config struct {
// Subsystem Configuration
Subsystems SubsystemsConfig `json:"subsystems,omitempty"` // Scanner subsystem configs
// Migration State
MigrationState *MigrationState `json:"migration_state,omitempty"` // Migration completion tracking
}
// Load reads configuration from multiple sources with priority order:
@@ -95,12 +135,11 @@ type Config struct {
// 3. Configuration file
// 4. Default values
func Load(configPath string, cliFlags *CLIFlags) (*Config, error) {
// Start with defaults
config := getDefaultConfig()
// Load from config file if it exists
if fileConfig, err := loadFromFile(configPath); err == nil {
mergeConfig(config, fileConfig)
// Load existing config from file first
config, err := loadFromFile(configPath)
if err != nil {
// Only use defaults if file doesn't exist or can't be read
config = getDefaultConfig()
}
// Override with environment variables
@@ -134,13 +173,53 @@ type CLIFlags struct {
InsecureTLS bool
}
// getConfigVersionForAgent derives the config schema version from a
// four-octet agent version string, e.g. "v0.1.23.6" -> "6".
// Any version that does not have exactly four dot-separated octets falls
// back to "6", matching the current agent release.
func getConfigVersionForAgent(agentVersion string) string {
	octets := strings.Split(strings.TrimPrefix(agentVersion, "v"), ".")
	if len(octets) != 4 {
		// TODO: Integrate with global error logging system when available
		// For now, default to "6" to match current agent version
		return "6"
	}
	// v0.1.23.6 → "6"
	return octets[3]
}
// getDefaultConfig returns default configuration values
func getDefaultConfig() *Config {
// Use version package for single source of truth
configVersion := version.ConfigVersion
if configVersion == "dev" {
// Fallback to extracting from agent version if not injected
configVersion = version.ExtractConfigVersionFromAgent(version.Version)
}
return &Config{
Version: "4", // Current config schema version
AgentVersion: "", // Will be set by the agent at startup
Version: configVersion, // Config schema version from version package
AgentVersion: version.Version, // Agent version from version package
ServerURL: "http://localhost:8080",
CheckInInterval: 300, // 5 minutes
// Server Authentication
RegistrationToken: "", // One-time registration token (embedded by install script)
AgentID: uuid.Nil, // Will be set during registration
Token: "", // Will be set during registration
RefreshToken: "", // Will be set during registration
// Agent Behavior
RapidPollingEnabled: false,
RapidPollingUntil: time.Time{},
// Network Security
Proxy: ProxyConfig{},
TLS: TLSConfig{},
Network: NetworkConfig{
Timeout: 30 * time.Second,
RetryCount: 3,
@@ -153,6 +232,20 @@ func getDefaultConfig() *Config {
MaxBackups: 3,
MaxAge: 28, // 28 days
},
SecurityLogging: SecurityLogConfig{
Enabled: true,
Level: "warning",
LogSuccesses: false,
FilePath: "security.log",
MaxSizeMB: 50,
MaxFiles: 5,
BatchSize: 10,
SendToServer: true,
},
CommandSigning: CommandSigningConfig{
Enabled: true,
EnforcementMode: "strict",
},
Subsystems: GetDefaultSubsystemsConfig(),
Tags: []string{},
Metadata: make(map[string]string),
@@ -171,32 +264,36 @@ func loadFromFile(configPath string) (*Config, error) {
data, err := os.ReadFile(configPath)
if err != nil {
if os.IsNotExist(err) {
return getDefaultConfig(), nil // Return defaults if file doesn't exist
return nil, fmt.Errorf("config file does not exist") // Return error so caller uses defaults
}
return nil, fmt.Errorf("failed to read config: %w", err)
}
// Start with latest default config
config := getDefaultConfig()
// Parse the existing config into a generic map to handle missing fields
// Parse the existing config into a generic map to preserve all fields
var rawConfig map[string]interface{}
if err := json.Unmarshal(data, &rawConfig); err != nil {
return nil, fmt.Errorf("failed to parse config: %w", err)
}
// Marshal back to JSON and unmarshal into our new structure
// This ensures missing fields get default values from getDefaultConfig()
// Create a new config with ALL defaults to fill missing fields
config := getDefaultConfig()
// Carefully merge the loaded config into our defaults
// This preserves existing values while filling missing ones with defaults
configJSON, err := json.Marshal(rawConfig)
if err != nil {
return nil, fmt.Errorf("failed to re-marshal config: %w", err)
}
// Carefully merge into our config structure, preserving defaults for missing fields
if err := json.Unmarshal(configJSON, &config); err != nil {
return nil, fmt.Errorf("failed to merge config: %w", err)
// Create a temporary config to hold loaded values
tempConfig := &Config{}
if err := json.Unmarshal(configJSON, &tempConfig); err != nil {
return nil, fmt.Errorf("failed to unmarshal temp config: %w", err)
}
// Merge loaded config into defaults (only non-zero values)
mergeConfigPreservingDefaults(config, tempConfig)
// Handle specific migrations for known breaking changes
migrateConfig(config)
@@ -205,10 +302,19 @@ func loadFromFile(configPath string) (*Config, error) {
// migrateConfig handles specific known migrations between config versions
func migrateConfig(cfg *Config) {
// Save the registration token before migration
savedRegistrationToken := cfg.RegistrationToken
// Update config schema version to latest
if cfg.Version != "5" {
fmt.Printf("[CONFIG] Migrating config schema from version %s to 5\n", cfg.Version)
cfg.Version = "5"
targetVersion := version.ConfigVersion
if targetVersion == "dev" {
// Fallback to extracting from agent version
targetVersion = version.ExtractConfigVersionFromAgent(version.Version)
}
if cfg.Version != targetVersion {
fmt.Printf("[CONFIG] Migrating config schema from version %s to %s\n", cfg.Version, targetVersion)
cfg.Version = targetVersion
}
// Migration 1: Ensure minimum check-in interval (30 seconds)
@@ -227,6 +333,12 @@ func migrateConfig(cfg *Config) {
fmt.Printf("[CONFIG] Adding missing 'updates' subsystem configuration\n")
cfg.Subsystems.Updates = GetDefaultSubsystemsConfig().Updates
}
// CRITICAL: Restore the registration token after migration
// This ensures the token is never overwritten by migration logic
if savedRegistrationToken != "" {
cfg.RegistrationToken = savedRegistrationToken
}
}
// loadFromEnv loads configuration from environment variables
@@ -263,6 +375,32 @@ func loadFromEnv() *Config {
config.DisplayName = displayName
}
// Security logging environment variables
if secEnabled := os.Getenv("REDFLAG_AGENT_SECURITY_LOG_ENABLED"); secEnabled != "" {
if config.SecurityLogging == (SecurityLogConfig{}) {
config.SecurityLogging = SecurityLogConfig{}
}
config.SecurityLogging.Enabled = secEnabled == "true"
}
if secLevel := os.Getenv("REDFLAG_AGENT_SECURITY_LOG_LEVEL"); secLevel != "" {
if config.SecurityLogging == (SecurityLogConfig{}) {
config.SecurityLogging = SecurityLogConfig{}
}
config.SecurityLogging.Level = secLevel
}
if secLogSucc := os.Getenv("REDFLAG_AGENT_SECURITY_LOG_SUCCESSES"); secLogSucc != "" {
if config.SecurityLogging == (SecurityLogConfig{}) {
config.SecurityLogging = SecurityLogConfig{}
}
config.SecurityLogging.LogSuccesses = secLogSucc == "true"
}
if secPath := os.Getenv("REDFLAG_AGENT_SECURITY_LOG_PATH"); secPath != "" {
if config.SecurityLogging == (SecurityLogConfig{}) {
config.SecurityLogging = SecurityLogConfig{}
}
config.SecurityLogging.FilePath = secPath
}
return config
}
@@ -341,6 +479,12 @@ func mergeConfig(target, source *Config) {
if source.Logging != (LoggingConfig{}) {
target.Logging = source.Logging
}
if source.SecurityLogging != (SecurityLogConfig{}) {
target.SecurityLogging = source.SecurityLogging
}
if source.CommandSigning != (CommandSigningConfig{}) {
target.CommandSigning = source.CommandSigning
}
// Merge metadata
if source.Tags != nil {
@@ -436,3 +580,89 @@ func (c *Config) NeedsRegistration() bool {
// HasRegistrationToken reports whether a one-time registration token is
// present in the configuration (embedded by the install script).
func (c *Config) HasRegistrationToken() bool {
	return c.RegistrationToken != ""
}
// mergeConfigPreservingDefaults merges source config into target config,
// overwriting a target field only when the corresponding source field is
// explicitly set (non-zero). Unlike mergeConfig, which blindly copies
// non-zero values, this keeps freshly generated defaults in target for
// anything the loaded file did not specify.
func mergeConfigPreservingDefaults(target, source *Config) {
	// Server Configuration: a ServerURL equal to the stock default is
	// treated as "not set" so a stale saved default never wins.
	if source.ServerURL != "" && source.ServerURL != getDefaultConfig().ServerURL {
		target.ServerURL = source.ServerURL
	}

	// IMPORTANT: Never overwrite registration token if target already has one
	if source.RegistrationToken != "" && target.RegistrationToken == "" {
		target.RegistrationToken = source.RegistrationToken
	}

	// Agent Configuration
	if source.CheckInInterval != 0 {
		target.CheckInInterval = source.CheckInInterval
	}
	if source.AgentID != uuid.Nil {
		target.AgentID = source.AgentID
	}
	if source.Token != "" {
		target.Token = source.Token
	}
	if source.RefreshToken != "" {
		target.RefreshToken = source.RefreshToken
	}

	// Nested configs are copied wholesale when they differ from their
	// zero value; no per-field merging is attempted inside them.
	if source.Network != (NetworkConfig{}) {
		target.Network = source.Network
	}
	if source.Proxy != (ProxyConfig{}) {
		target.Proxy = source.Proxy
	}
	if source.TLS != (TLSConfig{}) {
		target.TLS = source.TLS
	}
	if source.Logging != (LoggingConfig{}) && source.Logging.Level != "" {
		target.Logging = source.Logging
	}
	if source.SecurityLogging != (SecurityLogConfig{}) {
		target.SecurityLogging = source.SecurityLogging
	}
	if source.CommandSigning != (CommandSigningConfig{}) {
		target.CommandSigning = source.CommandSigning
	}

	// Metadata: tags replace as a whole, metadata merges key-by-key.
	if len(source.Tags) > 0 {
		target.Tags = source.Tags
	}
	if source.Metadata != nil {
		if target.Metadata == nil {
			target.Metadata = make(map[string]string)
		}
		for k, v := range source.Metadata {
			target.Metadata[k] = v
		}
	}

	if source.DisplayName != "" {
		target.DisplayName = source.DisplayName
	}
	if source.Organization != "" {
		target.Organization = source.Organization
	}

	// Rapid polling: the boolean always mirrors the loaded config
	// (false is meaningful here); the deadline only when it is set.
	target.RapidPollingEnabled = source.RapidPollingEnabled
	if !source.RapidPollingUntil.IsZero() {
		target.RapidPollingUntil = source.RapidPollingUntil
	}

	// Merge subsystems config
	if source.Subsystems != (SubsystemsConfig{}) {
		target.Subsystems = source.Subsystems
	}

	// Version info
	if source.Version != "" {
		target.Version = source.Version
	}
	if source.AgentVersion != "" {
		target.AgentVersion = source.AgentVersion
	}
}

View File

@@ -7,6 +7,10 @@ type SubsystemConfig struct {
// Execution settings
Enabled bool `json:"enabled"`
Timeout time.Duration `json:"timeout"` // Timeout for this subsystem
// Interval for this subsystem (in minutes)
// This controls how often the server schedules scans for this subsystem
IntervalMinutes int `json:"interval_minutes,omitempty"`
// Circuit breaker settings
CircuitBreaker CircuitBreakerConfig `json:"circuit_breaker"`
@@ -64,44 +68,52 @@ func GetDefaultSubsystemsConfig() SubsystemsConfig {
return SubsystemsConfig{
System: SubsystemConfig{
Enabled: true, // System scanner always available
Timeout: 10 * time.Second, // System info should be fast
CircuitBreaker: defaultCB,
Enabled: true, // System scanner always available
Timeout: 10 * time.Second, // System info should be fast
IntervalMinutes: 5, // Default: 5 minutes
CircuitBreaker: defaultCB,
},
Updates: SubsystemConfig{
Enabled: true, // Virtual subsystem for package update scheduling
Timeout: 0, // Not used - delegates to individual package scanners
CircuitBreaker: CircuitBreakerConfig{Enabled: false}, // No circuit breaker for virtual subsystem
Enabled: true, // Virtual subsystem for package update scheduling
Timeout: 0, // Not used - delegates to individual package scanners
IntervalMinutes: 15, // Default: 15 minutes
CircuitBreaker: CircuitBreakerConfig{Enabled: false}, // No circuit breaker for virtual subsystem
},
APT: SubsystemConfig{
Enabled: true,
Timeout: 30 * time.Second,
CircuitBreaker: defaultCB,
Enabled: true,
Timeout: 30 * time.Second,
IntervalMinutes: 15, // Default: 15 minutes
CircuitBreaker: defaultCB,
},
DNF: SubsystemConfig{
Enabled: true,
Timeout: 15 * time.Minute, // TODO: Make scanner timeouts user-adjustable via settings. DNF operations can take a long time on large systems
CircuitBreaker: defaultCB,
Enabled: true,
Timeout: 15 * time.Minute, // TODO: Make scanner timeouts user-adjustable via settings. DNF operations can take a long time on large systems
IntervalMinutes: 15, // Default: 15 minutes
CircuitBreaker: defaultCB,
},
Docker: SubsystemConfig{
Enabled: true,
Timeout: 60 * time.Second, // Registry queries can be slow
CircuitBreaker: defaultCB,
Enabled: true,
Timeout: 60 * time.Second, // Registry queries can be slow
IntervalMinutes: 15, // Default: 15 minutes
CircuitBreaker: defaultCB,
},
Windows: SubsystemConfig{
Enabled: true,
Timeout: 10 * time.Minute, // Windows Update can be VERY slow
CircuitBreaker: windowsCB,
Enabled: true,
Timeout: 10 * time.Minute, // Windows Update can be VERY slow
IntervalMinutes: 15, // Default: 15 minutes
CircuitBreaker: windowsCB,
},
Winget: SubsystemConfig{
Enabled: true,
Timeout: 2 * time.Minute, // Winget has multiple retry strategies
CircuitBreaker: defaultCB,
Enabled: true,
Timeout: 2 * time.Minute, // Winget has multiple retry strategies
IntervalMinutes: 15, // Default: 15 minutes
CircuitBreaker: defaultCB,
},
Storage: SubsystemConfig{
Enabled: true,
Timeout: 10 * time.Second, // Disk info should be fast
CircuitBreaker: defaultCB,
Enabled: true,
Timeout: 10 * time.Second, // Disk info should be fast
IntervalMinutes: 5, // Default: 5 minutes
CircuitBreaker: defaultCB,
},
}
}

View File

@@ -0,0 +1,152 @@
package crypto
import (
"crypto/ed25519"
"crypto/sha256"
"encoding/hex"
"encoding/json"
"fmt"
"time"
"github.com/Fimeg/RedFlag/aggregator-agent/internal/client"
)
// CommandVerifier handles Ed25519 signature verification for commands
// received from the server. It currently holds no state; the struct
// exists so future key-management features have a natural home.
type CommandVerifier struct {
	// In the future, this could include:
	// - Key rotation support
	// - Multiple trusted keys
	// - Revocation checking
}
// NewCommandVerifier creates a new command verifier. The returned value
// carries no state of its own; the trusted public key is supplied per
// call to VerifyCommand.
func NewCommandVerifier() *CommandVerifier {
	return &CommandVerifier{}
}
// VerifyCommand checks that cmd carries a valid Ed25519 signature from
// the server identified by serverPubKey. It returns a descriptive error
// when the signature is absent, malformed, of the wrong length, or does
// not verify against the reconstructed signed message.
func (v *CommandVerifier) VerifyCommand(cmd client.Command, serverPubKey ed25519.PublicKey) error {
	if cmd.Signature == "" {
		return fmt.Errorf("command missing signature")
	}

	// The signature travels as a hex string; decode and sanity-check it.
	rawSig, err := hex.DecodeString(cmd.Signature)
	if err != nil {
		return fmt.Errorf("invalid signature encoding: %w", err)
	}
	if len(rawSig) != ed25519.SignatureSize {
		return fmt.Errorf("invalid signature length: expected %d bytes, got %d",
			ed25519.SignatureSize, len(rawSig))
	}

	// Rebuild the exact byte sequence the server signed.
	signed, err := v.reconstructMessage(cmd)
	if err != nil {
		return fmt.Errorf("failed to reconstruct message: %w", err)
	}

	if ed25519.Verify(serverPubKey, signed, rawSig) {
		return nil
	}
	return fmt.Errorf("signature verification failed")
}
// reconstructMessage recreates the exact message the server signed:
// "ID:CommandType:hex(sha256(paramsJSON))". It must stay in lockstep
// with the server-side signing implementation.
func (v *CommandVerifier) reconstructMessage(cmd client.Command) ([]byte, error) {
	encodedParams, err := json.Marshal(cmd.Params)
	if err != nil {
		return nil, fmt.Errorf("failed to marshal parameters: %w", err)
	}

	// Parameters are hashed so the signed message has a fixed shape.
	digest := sha256.Sum256(encodedParams)

	// Format: "ID:CommandType:ParamsHash"
	signed := cmd.ID + ":" + cmd.Type + ":" + hex.EncodeToString(digest[:])
	return []byte(signed), nil
}
// VerifyCommandWithTimestamp verifies a command's signature and is
// intended to also enforce a maximum command age to prevent replay of
// old commands. Timestamp enforcement is currently a no-op because the
// CreatedAt field is not part of the payload sent to agents, so only the
// signature check runs and maxAge is ignored.
// TODO: enforce maxAge once the command payload carries CreatedAt.
func (v *CommandVerifier) VerifyCommandWithTimestamp(
	cmd client.Command,
	serverPubKey ed25519.PublicKey,
	maxAge time.Duration,
) error {
	return v.VerifyCommand(cmd, serverPubKey)
}
// VerifyCommandBatch verifies multiple commands and returns one error
// slot per command, nil where verification succeeded. This is useful
// when processing a poll response containing several commands at once.
func (v *CommandVerifier) VerifyCommandBatch(
	commands []client.Command,
	serverPubKey ed25519.PublicKey,
) []error {
	// Named errs (not errors) so the standard library errors package is
	// never shadowed if it is later imported into this file.
	errs := make([]error, len(commands))
	for i, cmd := range commands {
		errs[i] = v.VerifyCommand(cmd, serverPubKey)
	}
	return errs
}
// ExtractCommandIDFromSignature verifies signature against
// expectedMessage using serverPubKey and returns the command ID on
// success. The returned ID is currently always empty, because the ID is
// embedded in the message itself rather than in the signature; the
// helper exists for debugging and logging.
func (v *CommandVerifier) ExtractCommandIDFromSignature(
	signature string,
	expectedMessage string,
	serverPubKey ed25519.PublicKey,
) (string, error) {
	rawSig, decodeErr := hex.DecodeString(signature)
	if decodeErr != nil {
		return "", fmt.Errorf("invalid signature encoding: %w", decodeErr)
	}

	if !ed25519.Verify(serverPubKey, []byte(expectedMessage), rawSig) {
		return "", fmt.Errorf("signature verification failed")
	}

	// In a real implementation, we might embed the command ID in the signature
	// For now, we return an empty string since the ID is part of the message
	return "", nil
}
// CheckKeyRotation checks if a public key needs to be rotated.
// This is a placeholder for future key rotation support: it currently
// always returns the input key unchanged, with rotated == false and a
// nil error.
func (v *CommandVerifier) CheckKeyRotation(currentKey ed25519.PublicKey) (ed25519.PublicKey, bool, error) {
	// In the future, this could:
	// - Check a key rotation endpoint
	// - Load multiple trusted keys
	// - Implement key pinning with fallback
	// - Handle emergency key revocation

	// For now, just return the current key
	return currentKey, false, nil
}

View File

@@ -0,0 +1,135 @@
package event
import (
"encoding/json"
"fmt"
"os"
"path/filepath"
"github.com/google/uuid"
"sync"
"github.com/Fimeg/RedFlag/aggregator-agent/internal/models"
)
const (
defaultMaxBufferSize = 1000 // Max events to buffer
)
// Buffer handles local event buffering for offline resilience. Events
// are persisted to a single JSON file so they survive agent restarts;
// every operation takes mu, so a Buffer is safe for concurrent use.
type Buffer struct {
	filePath string     // JSON file that holds the buffered events
	maxSize  int        // Max events kept; oldest are dropped first
	mu       sync.Mutex // Serializes all reads/writes of the buffer file
}
// NewBuffer creates a new event buffer backed by the file at filePath.
// The buffer is capped at defaultMaxBufferSize events; use SetMaxSize to
// change the cap. The file is created lazily on the first BufferEvent.
func NewBuffer(filePath string) *Buffer {
	return &Buffer{
		filePath: filePath,
		maxSize:  defaultMaxBufferSize,
	}
}
// BufferEvent appends an event to the on-disk buffer file, creating the
// file and its directory as needed. The buffer is capped at maxSize
// events (oldest dropped first), and the file is replaced atomically via
// a temp-file-plus-rename so a crash mid-write can never corrupt the
// previously buffered events.
func (b *Buffer) BufferEvent(event *models.SystemEvent) error {
	b.mu.Lock()
	defer b.mu.Unlock()

	// Ensure event has an ID
	if event.ID == uuid.Nil {
		return fmt.Errorf("event ID cannot be nil")
	}

	// Create directory if needed
	dir := filepath.Dir(b.filePath)
	if err := os.MkdirAll(dir, 0755); err != nil {
		return fmt.Errorf("failed to create buffer directory: %w", err)
	}

	// Read existing buffer; an unreadable or corrupt file starts fresh
	var events []*models.SystemEvent
	if data, err := os.ReadFile(b.filePath); err == nil {
		if err := json.Unmarshal(data, &events); err != nil {
			events = []*models.SystemEvent{}
		}
	}

	// Append new event
	events = append(events, event)

	// Keep only last N events if buffer too large (circular buffer)
	if len(events) > b.maxSize {
		events = events[len(events)-b.maxSize:]
	}

	data, err := json.Marshal(events)
	if err != nil {
		return fmt.Errorf("failed to marshal events: %w", err)
	}

	// Write to a temp file in the same directory, then rename over the
	// buffer file; rename is atomic on POSIX filesystems.
	tmp, err := os.CreateTemp(dir, ".events_buffer-*.tmp")
	if err != nil {
		return fmt.Errorf("failed to write buffer file: %w", err)
	}
	tmpName := tmp.Name()
	if _, err := tmp.Write(data); err != nil {
		tmp.Close()
		os.Remove(tmpName)
		return fmt.Errorf("failed to write buffer file: %w", err)
	}
	if err := tmp.Close(); err != nil {
		os.Remove(tmpName)
		return fmt.Errorf("failed to write buffer file: %w", err)
	}
	// Keep the 0644 mode the buffer file has always used
	// (os.CreateTemp defaults to 0600).
	if err := os.Chmod(tmpName, 0644); err != nil {
		os.Remove(tmpName)
		return fmt.Errorf("failed to write buffer file: %w", err)
	}
	if err := os.Rename(tmpName, b.filePath); err != nil {
		os.Remove(tmpName)
		return fmt.Errorf("failed to write buffer file: %w", err)
	}

	return nil
}
// GetBufferedEvents retrieves all buffered events and clears the buffer.
// A missing buffer file is not an error; it simply means no events.
func (b *Buffer) GetBufferedEvents() ([]*models.SystemEvent, error) {
	b.mu.Lock()
	defer b.mu.Unlock()
	data, err := os.ReadFile(b.filePath)
	if os.IsNotExist(err) {
		return nil, nil // no buffer file means no events
	}
	if err != nil {
		return nil, fmt.Errorf("failed to read buffer file: %w", err)
	}
	var events []*models.SystemEvent
	if err = json.Unmarshal(data, &events); err != nil {
		return nil, fmt.Errorf("failed to unmarshal events: %w", err)
	}
	// Best-effort clear: events are already in hand, so a failed delete
	// only means they may be delivered again on the next drain.
	if err = os.Remove(b.filePath); err != nil && !os.IsNotExist(err) {
		fmt.Printf("Warning: Failed to clear buffer file: %v\n", err)
	}
	return events, nil
}
// SetMaxSize sets the maximum number of events retained in the buffer.
func (b *Buffer) SetMaxSize(size int) {
	b.mu.Lock()
	b.maxSize = size
	b.mu.Unlock()
}
// GetStats returns the number of events currently buffered on disk.
func (b *Buffer) GetStats() (int, error) {
	b.mu.Lock()
	defer b.mu.Unlock()
	data, err := os.ReadFile(b.filePath)
	if os.IsNotExist(err) {
		return 0, nil // no file, no events
	}
	if err != nil {
		return 0, err
	}
	var events []*models.SystemEvent
	if err = json.Unmarshal(data, &events); err != nil {
		return 0, err
	}
	return len(events), nil
}

View File

@@ -0,0 +1,138 @@
package logging
// This file contains example code showing how to integrate the security logger
// into various parts of the agent application.
import (
"fmt"
"time"
"github.com/Fimeg/RedFlag/aggregator-agent/internal/config"
"github.com/denisbrodbeck/machineid"
)
// ExampleInitializeSecurityLogger shows how to construct the security
// logger during agent startup (e.g. from main.go).
func ExampleInitializeSecurityLogger(cfg *config.Config, dataDir string) (*SecurityLogger, error) {
	securityLogger, err := NewSecurityLogger(cfg, dataDir)
	if err != nil {
		return nil, err
	}
	return securityLogger, nil
}
// ExampleCommandExecution shows how the command executor should report
// signature verification results to the security logger.
func ExampleCommandExecution(securityLogger *SecurityLogger, command string, signature string) {
	// Simulated verification result; real code would actually verify.
	signatureValid := false

	if signatureValid {
		// Success events are only emitted when LogSuccesses is enabled.
		securityLogger.Log(&SecurityEvent{
			Timestamp: time.Now().UTC(),
			Level:     "INFO",
			EventType: SecurityEventTypes.CmdSignatureVerificationSuccess,
			Message:   "Command signature verified successfully",
		})
		return
	}

	securityLogger.LogCommandVerificationFailure(
		"cmd-123",
		"signature verification failed: crypto/rsa: verification error",
	)
}
// ExampleUpdateHandler shows how the update handler should report nonce
// and signature validation failures.
func ExampleUpdateHandler(securityLogger *SecurityLogger, updateID string, updateData []byte, signature string) {
	// Simulated nonce validation (real code performs actual checks).
	if nonceValid := false; !nonceValid {
		securityLogger.LogNonceValidationFailure(
			"deadbeef-1234-5678-9abc-1234567890ef",
			"nonce expired or reused",
		)
	}
	// Simulated signature verification.
	if signatureValid := false; !signatureValid {
		securityLogger.LogUpdateSignatureVerificationFailure(
			updateID,
			"signature does not match update data",
		)
	}
}
// ExampleMachineIDMonitoring shows how to detect and log machine ID
// changes. Real code would persist the previous ID between runs.
func ExampleMachineIDMonitoring(securityLogger *SecurityLogger) {
	currentID, err := machineid.ID()
	if err != nil {
		return // cannot read the machine ID; nothing to compare
	}
	// Placeholder for the previously stored machine ID.
	previousID := "previous-machine-id-here"
	if previousID != currentID {
		securityLogger.LogMachineIDChangeDetected(previousID, currentID)
	}
}
// ExampleConfigMonitoring shows how to log suspected configuration
// tampering. Real code would hash the config and validate periodically.
func ExampleConfigMonitoring(securityLogger *SecurityLogger, configPath string) {
	configTampered := true // simulated detection result
	if !configTampered {
		return
	}
	securityLogger.LogConfigTamperingWarning(
		configPath,
		"configuration hash mismatch",
	)
}
// ExampleUnauthorizedCommand shows how to log attempts to run commands
// outside the allowed list.
func ExampleUnauthorizedCommand(securityLogger *SecurityLogger, command string) {
	// Membership test over a handful of constants: a switch suffices.
	switch command {
	case "scan", "update", "cleanup":
		// Allowed; nothing to log.
	default:
		securityLogger.LogUnauthorizedCommandAttempt(
			command,
			"command not in allowed list",
		)
	}
}
// ExampleSendSecurityEvents shows how to forward batched security events
// to the server and clear them after a successful send.
func ExampleSendSecurityEvents(securityLogger *SecurityLogger, client interface{}) {
	// Get batch of security events
	events := securityLogger.GetBatch()
	if len(events) > 0 {
		// In real code, you would send these to the server
		// If successful:
		fmt.Printf("Sending %d security events to server...\n", len(events))
		// Simulate successful send
		success := true
		if success {
			securityLogger.ClearBatch()
			fmt.Printf("Security events sent successfully\n")
		} else {
			// Events remain in buffer for next attempt
			// NOTE(review): GetBatch currently empties the buffer when it
			// returns the batch, so on a failed send these events would in
			// fact be lost — confirm the intended GetBatch/ClearBatch contract.
			fmt.Printf("Failed to send security events, will retry\n")
		}
	}
}

View File

@@ -0,0 +1,444 @@
package logging
import (
	"encoding/json"
	"fmt"
	"log"
	"os"
	"path/filepath"
	"strings"
	"sync"
	"time"

	"github.com/Fimeg/RedFlag/aggregator-agent/internal/config"
)
// SecurityEvent represents a security event on the agent side.
// This is a simplified version of the server model to avoid circular dependencies.
type SecurityEvent struct {
	Timestamp time.Time              `json:"timestamp"`
	Level     string                 `json:"level"` // CRITICAL, WARNING, INFO, DEBUG
	EventType string                 `json:"event_type"` // e.g. a value from SecurityEventTypes
	Message   string                 `json:"message"`
	Details   map[string]interface{} `json:"details,omitempty"` // event-specific context (IDs, reasons, ...)
}
// SecurityLogConfig holds configuration for security logging on the agent.
type SecurityLogConfig struct {
	Enabled      bool   `json:"enabled" env:"REDFLAG_AGENT_SECURITY_LOG_ENABLED" default:"true"`
	Level        string `json:"level" env:"REDFLAG_AGENT_SECURITY_LOG_LEVEL" default:"warning"` // none, error, warn, info, debug
	LogSuccesses bool   `json:"log_successes" env:"REDFLAG_AGENT_SECURITY_LOG_SUCCESSES" default:"false"`
	FilePath     string `json:"file_path" env:"REDFLAG_AGENT_SECURITY_LOG_PATH"` // Relative to agent data directory
	MaxSizeMB    int    `json:"max_size_mb" env:"REDFLAG_AGENT_SECURITY_LOG_MAX_SIZE" default:"50"`
	MaxFiles     int    `json:"max_files" env:"REDFLAG_AGENT_SECURITY_LOG_MAX_FILES" default:"5"`
	BatchSize    int    `json:"batch_size" env:"REDFLAG_AGENT_SECURITY_LOG_BATCH_SIZE" default:"10"` // buffered events per flush batch
	SendToServer bool   `json:"send_to_server" env:"REDFLAG_AGENT_SECURITY_LOG_SEND" default:"true"`
}
// SecurityLogger handles security event logging on the agent.
// buffer, closed, and file writes are guarded by mu.
type SecurityLogger struct {
	config     SecurityLogConfig
	logger     *log.Logger      // writes "[SECURITY]"-prefixed JSON lines to file
	file       *os.File         // underlying log file handle
	mu         sync.Mutex       // guards buffer, closed, and writes
	buffer     []*SecurityEvent // events awaiting a batch flush
	flushTimer *time.Timer      // 30-second periodic flush trigger
	lastFlush  time.Time        // time of the most recent flush
	closed     bool             // set by Close; rejects further logging
}
// SecurityEventTypes defines all possible security event types on the agent.
var SecurityEventTypes = struct {
	CmdSignatureVerificationFailed    string
	CmdSignatureVerificationSuccess   string
	UpdateNonceInvalid                string
	UpdateSignatureVerificationFailed string
	MachineIDChangeDetected           string
	ConfigTamperingWarning            string
	UnauthorizedCommandAttempt        string
}{
	CmdSignatureVerificationFailed:    "CMD_SIGNATURE_VERIFICATION_FAILED",
	CmdSignatureVerificationSuccess:   "CMD_SIGNATURE_VERIFICATION_SUCCESS",
	UpdateNonceInvalid:                "UPDATE_NONCE_INVALID",
	UpdateSignatureVerificationFailed: "UPDATE_SIGNATURE_VERIFICATION_FAILED",
	MachineIDChangeDetected:           "MACHINE_ID_CHANGE_DETECTED",
	ConfigTamperingWarning:            "CONFIG_TAMPERING_WARNING",
	UnauthorizedCommandAttempt:        "UNAUTHORIZED_COMMAND_ATTEMPT",
}
// NewSecurityLogger creates a new agent security logger.
//
// The log file (mode 0600) is created/appended under logDir and events
// are written as JSON lines tagged "[SECURITY]". A 30-second timer
// periodically flushes buffered events to disk.
//
// NOTE(review): agentConfig is currently unused — the settings below are
// hardcoded defaults rather than read from the agent configuration;
// confirm whether they should be sourced from agentConfig.
func NewSecurityLogger(agentConfig *config.Config, logDir string) (*SecurityLogger, error) {
	// Create default security log config
	secConfig := SecurityLogConfig{
		Enabled:      true,
		Level:        "warning",
		LogSuccesses: false,
		FilePath:     "security.log",
		MaxSizeMB:    50,
		MaxFiles:     5,
		BatchSize:    10,
		SendToServer: true,
	}
	// Ensure log directory exists
	if err := os.MkdirAll(logDir, 0755); err != nil {
		return nil, fmt.Errorf("failed to create security log directory: %w", err)
	}
	// Open log file (0600: security events may contain sensitive details)
	logPath := filepath.Join(logDir, secConfig.FilePath)
	file, err := os.OpenFile(logPath, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0600)
	if err != nil {
		return nil, fmt.Errorf("failed to open security log file: %w", err)
	}
	logger := &SecurityLogger{
		config:    secConfig,
		logger:    log.New(file, "[SECURITY] ", log.LstdFlags|log.LUTC),
		file:      file,
		buffer:    make([]*SecurityEvent, 0, secConfig.BatchSize),
		lastFlush: time.Now(),
	}
	// Start flush timer; flushBuffer rearms it after each firing.
	logger.flushTimer = time.AfterFunc(30*time.Second, logger.flushBuffer)
	return logger, nil
}
// Log writes a security event.
//
// Events may be dropped according to configuration: when logging is
// disabled, when successes are filtered out (LogSuccesses), or when the
// event's level is below the configured threshold. CRITICAL events are
// written to the log file immediately; everything else is buffered and
// flushed in batches of BatchSize (or by the periodic flush timer).
//
// The caller's event is not mutated: a copy carrying the "SECURITY: "
// message prefix is stored instead, so reusing or retrying the same
// event cannot accumulate duplicate prefixes.
func (sl *SecurityLogger) Log(event *SecurityEvent) error {
	if !sl.config.Enabled || sl.config.Level == "none" {
		return nil
	}
	// Skip successes unless configured to log them
	if !sl.config.LogSuccesses && event.EventType == SecurityEventTypes.CmdSignatureVerificationSuccess {
		return nil
	}
	// Filter by log level
	if !sl.shouldLogLevel(event.Level) {
		return nil
	}
	sl.mu.Lock()
	defer sl.mu.Unlock()
	if sl.closed {
		return fmt.Errorf("security logger is closed")
	}
	// Shallow-copy the event and prefix the copy's message so the
	// caller's event is left untouched.
	entry := *event
	entry.Message = "SECURITY: " + entry.Message
	// Write immediately for critical events
	if entry.Level == "CRITICAL" {
		return sl.writeEvent(&entry)
	}
	// Add to buffer
	sl.buffer = append(sl.buffer, &entry)
	// Flush if buffer is full
	if len(sl.buffer) >= sl.config.BatchSize {
		sl.flushBufferUnsafe()
	}
	return nil
}
// LogCommandVerificationFailure logs a command signature verification
// failure. Safe to call on a nil logger (no-op).
func (sl *SecurityLogger) LogCommandVerificationFailure(commandID string, reason string) {
	if sl == nil {
		return
	}
	_ = sl.Log(&SecurityEvent{
		Timestamp: time.Now().UTC(),
		Level:     "CRITICAL",
		EventType: SecurityEventTypes.CmdSignatureVerificationFailed,
		Message:   "Command signature verification failed",
		Details: map[string]interface{}{
			"command_id": commandID,
			"reason":     reason,
		},
	})
}
// LogNonceValidationFailure logs a nonce validation failure.
//
// The nonce is truncated to at most 16 characters before logging so the
// full value never lands in the log file; an ellipsis is appended only
// when the nonce was actually shortened (previously "..." was always
// appended, even when the complete nonce was shown).
// Safe to call on a nil logger (no-op).
func (sl *SecurityLogger) LogNonceValidationFailure(nonce string, reason string) {
	if sl == nil {
		return
	}
	// Truncate for security; mark with "..." only when shortened.
	truncated := nonce
	if len(nonce) > 16 {
		truncated = nonce[:16] + "..."
	}
	event := &SecurityEvent{
		Timestamp: time.Now().UTC(),
		Level:     "WARNING",
		EventType: SecurityEventTypes.UpdateNonceInvalid,
		Message:   "Update nonce validation failed",
		Details: map[string]interface{}{
			"nonce":  truncated,
			"reason": reason,
		},
	}
	_ = sl.Log(event)
}
// LogUpdateSignatureVerificationFailure logs an update signature
// verification failure. Safe to call on a nil logger (no-op).
func (sl *SecurityLogger) LogUpdateSignatureVerificationFailure(updateID string, reason string) {
	if sl == nil {
		return
	}
	_ = sl.Log(&SecurityEvent{
		Timestamp: time.Now().UTC(),
		Level:     "CRITICAL",
		EventType: SecurityEventTypes.UpdateSignatureVerificationFailed,
		Message:   "Update signature verification failed",
		Details: map[string]interface{}{
			"update_id": updateID,
			"reason":    reason,
		},
	})
}
// LogMachineIDChangeDetected logs when the machine ID changes.
// Safe to call on a nil logger (no-op).
func (sl *SecurityLogger) LogMachineIDChangeDetected(oldID, newID string) {
	if sl == nil {
		return
	}
	_ = sl.Log(&SecurityEvent{
		Timestamp: time.Now().UTC(),
		Level:     "WARNING",
		EventType: SecurityEventTypes.MachineIDChangeDetected,
		Message:   "Machine ID change detected",
		Details: map[string]interface{}{
			"old_machine_id": oldID,
			"new_machine_id": newID,
		},
	})
}
// LogConfigTamperingWarning logs when configuration tampering is
// suspected. Safe to call on a nil logger (no-op).
func (sl *SecurityLogger) LogConfigTamperingWarning(configPath string, reason string) {
	if sl == nil {
		return
	}
	_ = sl.Log(&SecurityEvent{
		Timestamp: time.Now().UTC(),
		Level:     "WARNING",
		EventType: SecurityEventTypes.ConfigTamperingWarning,
		Message:   "Configuration file tampering detected",
		Details: map[string]interface{}{
			"config_file": configPath,
			"reason":      reason,
		},
	})
}
// LogUnauthorizedCommandAttempt logs an attempt to run a command that is
// not authorized. Safe to call on a nil logger (no-op).
func (sl *SecurityLogger) LogUnauthorizedCommandAttempt(command string, reason string) {
	if sl == nil {
		return
	}
	_ = sl.Log(&SecurityEvent{
		Timestamp: time.Now().UTC(),
		Level:     "WARNING",
		EventType: SecurityEventTypes.UnauthorizedCommandAttempt,
		Message:   "Unauthorized command execution attempt",
		Details: map[string]interface{}{
			"command": command,
			"reason":  reason,
		},
	})
}
// LogCommandVerificationSuccess logs a successful command signature
// verification. Emission is still subject to the LogSuccesses setting.
// Safe to call on a nil logger (no-op).
func (sl *SecurityLogger) LogCommandVerificationSuccess(commandID string) {
	if sl == nil {
		return
	}
	_ = sl.Log(&SecurityEvent{
		Timestamp: time.Now().UTC(),
		Level:     "INFO",
		EventType: SecurityEventTypes.CmdSignatureVerificationSuccess,
		Message:   "Command signature verified successfully",
		Details: map[string]interface{}{
			"command_id": commandID,
		},
	})
}
// LogCommandVerificationFailed logs a failed command signature
// verification. Safe to call on a nil logger (no-op).
//
// Deprecated: this was an exact line-for-line duplicate of
// LogCommandVerificationFailure and now delegates to it; prefer calling
// LogCommandVerificationFailure directly.
func (sl *SecurityLogger) LogCommandVerificationFailed(commandID, reason string) {
	if sl == nil {
		return
	}
	sl.LogCommandVerificationFailure(commandID, reason)
}
// LogCommandSkipped logs when a command is skipped because of the
// signing configuration. Safe to call on a nil logger (no-op).
func (sl *SecurityLogger) LogCommandSkipped(commandID, reason string) {
	if sl == nil {
		return
	}
	_ = sl.Log(&SecurityEvent{
		Timestamp: time.Now().UTC(),
		Level:     "INFO",
		EventType: "COMMAND_SKIPPED",
		Message:   "Command skipped due to signing configuration",
		Details: map[string]interface{}{
			"command_id": commandID,
			"reason":     reason,
		},
	})
}
// GetBatch returns a snapshot of buffered events for sending to the
// server, or nil if the buffer is empty.
//
// The buffer is intentionally NOT cleared here: callers must invoke
// ClearBatch after a successful send. Previously the buffer was emptied
// inside GetBatch, which made ClearBatch a no-op and silently lost every
// event whenever the send to the server failed.
func (sl *SecurityLogger) GetBatch() []*SecurityEvent {
	sl.mu.Lock()
	defer sl.mu.Unlock()
	if len(sl.buffer) == 0 {
		return nil
	}
	// Copy so callers cannot observe later mutations of the buffer slice.
	batch := make([]*SecurityEvent, len(sl.buffer))
	copy(batch, sl.buffer)
	return batch
}
// ClearBatch discards buffered events after a successful send to the server.
func (sl *SecurityLogger) ClearBatch() {
	sl.mu.Lock()
	sl.buffer = sl.buffer[:0]
	sl.mu.Unlock()
}
// writeEvent marshals a single event to JSON and appends it to the log
// file via the "[SECURITY]"-prefixed logger. Callers must hold sl.mu.
func (sl *SecurityLogger) writeEvent(event *SecurityEvent) error {
	payload, err := json.Marshal(event)
	if err != nil {
		return fmt.Errorf("failed to marshal security event: %w", err)
	}
	sl.logger.Println(string(payload))
	return nil
}
// flushBuffer flushes all buffered events to file. It is the callback
// invoked by the periodic flush timer.
func (sl *SecurityLogger) flushBuffer() {
	sl.mu.Lock()
	sl.flushBufferUnsafe()
	sl.mu.Unlock()
}
// flushBufferUnsafe writes every buffered event to the log file, empties
// the buffer, and rearms the 30-second flush timer. Callers must hold
// sl.mu.
func (sl *SecurityLogger) flushBufferUnsafe() {
	for _, ev := range sl.buffer {
		if err := sl.writeEvent(ev); err != nil {
			log.Printf("[ERROR] Failed to write security event: %v", err)
		}
	}
	sl.buffer = sl.buffer[:0]
	sl.lastFlush = time.Now()
	// Rearm the periodic timer unless the logger is shutting down.
	if sl.flushTimer != nil && !sl.closed {
		sl.flushTimer.Stop()
		sl.flushTimer.Reset(30 * time.Second)
	}
}
// shouldLogLevel reports whether an event at eventLevel passes the
// configured verbosity threshold.
//
// Fixes two defects in the original comparison:
//   - The configured level (lowercase, e.g. the "warning" default) never
//     matched the uppercase map keys, so levels[sl.config.Level] was 0
//     ("none") and every event — including CRITICAL ones — was dropped.
//     Both sides are now normalized with strings.ToUpper, and the "warn"
//     spelling from the config documentation is accepted.
//   - "CRITICAL" was absent from the table and fell back to the WARNING
//     rank; it now ranks alongside ERROR so it is logged at every
//     threshold except "none".
func (sl *SecurityLogger) shouldLogLevel(eventLevel string) bool {
	levels := map[string]int{
		"NONE":     0,
		"ERROR":    1,
		"CRITICAL": 1,
		"WARN":     2,
		"WARNING":  2,
		"INFO":     3,
		"DEBUG":    4,
	}
	// Unknown config values map to 0 and suppress everything, matching
	// the original's behavior for unrecognized settings.
	configLevel := levels[strings.ToUpper(sl.config.Level)]
	eventLvl, exists := levels[strings.ToUpper(eventLevel)]
	if !exists {
		eventLvl = 2 // Default to WARNING
	}
	return eventLvl <= configLevel
}
// Close stops the flush timer, flushes any remaining events, and closes
// the underlying log file. It is idempotent: subsequent calls return nil.
//
// closed is set *before* the final flush so flushBufferUnsafe does not
// rearm the timer; previously the flush inside Close re-armed the timer
// it had just stopped, leaving it to fire 30s later against a closed file.
func (sl *SecurityLogger) Close() error {
	sl.mu.Lock()
	defer sl.mu.Unlock()
	if sl.closed {
		return nil
	}
	// Stop flush timer
	if sl.flushTimer != nil {
		sl.flushTimer.Stop()
	}
	// Mark closed first so the flush below cannot rearm the timer.
	sl.closed = true
	// Flush remaining events
	sl.flushBufferUnsafe()
	// Close file
	if sl.file != nil {
		return sl.file.Close()
	}
	return nil
}
// min returns the minimum of two integers.
// NOTE: Go 1.21+ has a built-in min; this file-local helper takes
// precedence within the package and exists for older-toolchain
// compatibility.
func min(a, b int) int {
	if a < b {
		return a
	}
	return b
}

View File

@@ -7,10 +7,12 @@ import (
"io"
"os"
"path/filepath"
"strconv"
"strings"
"time"
"github.com/Fimeg/RedFlag/aggregator/pkg/common"
"github.com/Fimeg/RedFlag/aggregator-agent/internal/common"
"github.com/Fimeg/RedFlag/aggregator-agent/internal/version"
)
// AgentFileInventory represents all files associated with an agent installation
@@ -26,14 +28,14 @@ type AgentFileInventory struct {
// MigrationDetection represents the result of migration detection
type MigrationDetection struct {
CurrentAgentVersion string `json:"current_agent_version"`
CurrentConfigVersion int `json:"current_config_version"`
RequiresMigration bool `json:"requires_migration"`
RequiredMigrations []string `json:"required_migrations"`
MissingSecurityFeatures []string `json:"missing_security_features"`
CurrentAgentVersion string `json:"current_agent_version"`
CurrentConfigVersion int `json:"current_config_version"`
RequiresMigration bool `json:"requires_migration"`
RequiredMigrations []string `json:"required_migrations"`
MissingSecurityFeatures []string `json:"missing_security_features"`
Inventory *AgentFileInventory `json:"inventory"`
DockerDetection *DockerDetection `json:"docker_detection,omitempty"`
DetectionTime time.Time `json:"detection_time"`
DockerDetection *DockerDetection `json:"docker_detection,omitempty"`
DetectionTime time.Time `json:"detection_time"`
}
// SecurityFeature represents a security feature that may be missing
@@ -59,8 +61,8 @@ func NewFileDetectionConfig() *FileDetectionConfig {
OldConfigPath: "/etc/aggregator",
OldStatePath: "/var/lib/aggregator",
NewConfigPath: "/etc/redflag",
NewStatePath: "/var/lib/redflag",
BackupDirPattern: "/etc/redflag.backup.%s",
NewStatePath: "/var/lib/redflag-agent",
BackupDirPattern: "/var/lib/redflag-agent/migration_backups_%s",
}
}
@@ -155,15 +157,15 @@ func scanAgentFiles(config *FileDetectionConfig) (*AgentFileInventory, error) {
// Categorize files
for _, file := range files {
switch {
case containsAny(file.Path, filePatterns["config"]):
case ContainsAny(file.Path, filePatterns["config"]):
inventory.ConfigFiles = append(inventory.ConfigFiles, file)
case containsAny(file.Path, filePatterns["state"]):
case ContainsAny(file.Path, filePatterns["state"]):
inventory.StateFiles = append(inventory.StateFiles, file)
case containsAny(file.Path, filePatterns["binary"]):
case ContainsAny(file.Path, filePatterns["binary"]):
inventory.BinaryFiles = append(inventory.BinaryFiles, file)
case containsAny(file.Path, filePatterns["log"]):
case ContainsAny(file.Path, filePatterns["log"]):
inventory.LogFiles = append(inventory.LogFiles, file)
case containsAny(file.Path, filePatterns["certificate"]):
case ContainsAny(file.Path, filePatterns["certificate"]):
inventory.CertificateFiles = append(inventory.CertificateFiles, file)
}
}
@@ -280,32 +282,98 @@ func readConfigVersion(configPath string) (string, int, error) {
func determineRequiredMigrations(detection *MigrationDetection, config *FileDetectionConfig) []string {
var migrations []string
// Check migration state to skip already completed migrations
configPath := filepath.Join(config.NewConfigPath, "config.json")
stateManager := NewStateManager(configPath)
// Check if old directories exist
for _, oldDir := range detection.Inventory.OldDirectoryPaths {
if _, err := os.Stat(oldDir); err == nil {
migrations = append(migrations, "directory_migration")
// Check if directory migration was already completed
completed, err := stateManager.IsMigrationCompleted("directory_migration")
if err == nil && !completed {
migrations = append(migrations, "directory_migration")
}
break
}
}
// Check config version compatibility
if detection.CurrentConfigVersion < 4 {
migrations = append(migrations, "config_migration")
// Check for legacy installation (old path migration)
hasLegacyDirs := false
for _, oldDir := range detection.Inventory.OldDirectoryPaths {
if _, err := os.Stat(oldDir); err == nil {
hasLegacyDirs = true
break
}
}
// Check if Docker secrets migration is needed (v5)
if detection.CurrentConfigVersion < 5 {
migrations = append(migrations, "config_v5_migration")
// Legacy migration: always migrate if old directories exist
if hasLegacyDirs {
if detection.CurrentConfigVersion < 4 {
// Check if already completed
completed, err := stateManager.IsMigrationCompleted("config_migration")
if err == nil && !completed {
migrations = append(migrations, "config_migration")
}
}
// Check if Docker secrets migration is needed (v5)
if detection.CurrentConfigVersion < 5 {
// Check if already completed
completed, err := stateManager.IsMigrationCompleted("config_v5_migration")
if err == nil && !completed {
migrations = append(migrations, "config_v5_migration")
}
}
} else {
// Version-based migration: compare current config version with expected
// This handles upgrades for agents already in correct location
// Use version package for single source of truth
agentVersion := version.Version
expectedConfigVersionStr := version.ExtractConfigVersionFromAgent(agentVersion)
// Convert to int for comparison (e.g., "6" -> 6)
expectedConfigVersion := 6 // Default fallback
if expectedConfigInt, err := strconv.Atoi(expectedConfigVersionStr); err == nil {
expectedConfigVersion = expectedConfigInt
}
// If config file exists but version is old, migrate
if detection.CurrentConfigVersion < expectedConfigVersion {
if detection.CurrentConfigVersion < 4 {
// Check if already completed
completed, err := stateManager.IsMigrationCompleted("config_migration")
if err == nil && !completed {
migrations = append(migrations, "config_migration")
}
}
// Check if Docker secrets migration is needed (v5)
if detection.CurrentConfigVersion < 5 {
// Check if already completed
completed, err := stateManager.IsMigrationCompleted("config_v5_migration")
if err == nil && !completed {
migrations = append(migrations, "config_v5_migration")
}
}
}
}
// Check if Docker secrets migration is needed
if detection.DockerDetection != nil && detection.DockerDetection.MigrateToSecrets {
migrations = append(migrations, "docker_secrets_migration")
// Check if already completed
completed, err := stateManager.IsMigrationCompleted("docker_secrets_migration")
if err == nil && !completed {
migrations = append(migrations, "docker_secrets_migration")
}
}
// Check if security features need to be applied
if len(detection.MissingSecurityFeatures) > 0 {
migrations = append(migrations, "security_hardening")
// Check if already completed
completed, err := stateManager.IsMigrationCompleted("security_hardening")
if err == nil && !completed {
migrations = append(migrations, "security_hardening")
}
}
return migrations
@@ -389,7 +457,7 @@ func calculateFileChecksum(filePath string) (string, error) {
return fmt.Sprintf("%x", hash.Sum(nil)), nil
}
func containsAny(path string, patterns []string) bool {
func ContainsAny(path string, patterns []string) bool {
for _, pattern := range patterns {
if matched, _ := filepath.Match(pattern, filepath.Base(path)); matched {
return true
@@ -404,7 +472,7 @@ func isRequiredFile(path string, patterns map[string][]string) bool {
}
func shouldMigrateFile(path string, patterns map[string][]string) bool {
return !containsAny(path, []string{"*.log", "*.tmp"})
return !ContainsAny(path, []string{"*.log", "*.tmp"})
}
func getFileDescription(path string) string {
@@ -444,4 +512,4 @@ func detectBinaryVersion(binaryPath string) string {
// This would involve reading binary headers or executing with --version flag
// For now, return empty
return ""
}
}

View File

@@ -15,7 +15,7 @@ import (
"strings"
"time"
"github.com/Fimeg/RedFlag/aggregator/pkg/common"
"github.com/Fimeg/RedFlag/aggregator-agent/internal/common"
)
// DockerDetection represents Docker secrets detection results

View File

@@ -8,7 +8,7 @@ import (
"strings"
"time"
"github.com/Fimeg/RedFlag/aggregator/pkg/common"
"github.com/Fimeg/RedFlag/aggregator-agent/internal/common"
)
// DockerSecretsExecutor handles the execution of Docker secrets migration

View File

@@ -7,7 +7,10 @@ import (
"strings"
"time"
"github.com/Fimeg/RedFlag/aggregator/pkg/common"
"github.com/Fimeg/RedFlag/aggregator-agent/internal/common"
"github.com/Fimeg/RedFlag/aggregator-agent/internal/event"
"github.com/Fimeg/RedFlag/aggregator-agent/internal/models"
"github.com/google/uuid"
)
// MigrationPlan represents a complete migration plan
@@ -36,15 +39,60 @@ type MigrationResult struct {
// MigrationExecutor handles the execution of migration plans
type MigrationExecutor struct {
plan *MigrationPlan
result *MigrationResult
plan *MigrationPlan
result *MigrationResult
eventBuffer *event.Buffer
agentID uuid.UUID
stateManager *StateManager
}
// NewMigrationExecutor creates a new migration executor
func NewMigrationExecutor(plan *MigrationPlan) *MigrationExecutor {
func NewMigrationExecutor(plan *MigrationPlan, configPath string) *MigrationExecutor {
return &MigrationExecutor{
plan: plan,
result: &MigrationResult{},
plan: plan,
result: &MigrationResult{},
stateManager: NewStateManager(configPath),
}
}
// NewMigrationExecutorWithEvents creates a new migration executor that
// additionally buffers migration-failure events via eventBuffer, tagged
// with agentID. Migration completion state is tracked by a StateManager
// rooted at configPath.
func NewMigrationExecutorWithEvents(plan *MigrationPlan, eventBuffer *event.Buffer, agentID uuid.UUID, configPath string) *MigrationExecutor {
	return &MigrationExecutor{
		plan:         plan,
		result:       &MigrationResult{},
		eventBuffer:  eventBuffer,
		agentID:      agentID,
		stateManager: NewStateManager(configPath),
	}
}
// bufferEvent buffers a migration failure event for later delivery.
//
// No-op when event buffering is not configured. The event is stamped
// with a fresh UUID, the "migration_failure" event type, and the agent's
// ID when one is set. Buffering is best effort: failures are printed as
// warnings and never abort the migration.
func (e *MigrationExecutor) bufferEvent(eventSubtype, severity, component, message string, metadata map[string]interface{}) {
	if e.eventBuffer == nil {
		return // Event buffering not enabled
	}
	// Use agent ID if available; leave nil when the agent is unregistered.
	var agentIDPtr *uuid.UUID
	if e.agentID != uuid.Nil {
		agentIDPtr = &e.agentID
	}
	event := &models.SystemEvent{
		ID:           uuid.New(),
		AgentID:      agentIDPtr,
		EventType:    "migration_failure",
		EventSubtype: eventSubtype,
		Severity:     severity,
		Component:    component,
		Message:      message,
		Metadata:     metadata,
		CreatedAt:    time.Now(),
	}
	// Buffer the event (best effort)
	if err := e.eventBuffer.BufferEvent(event); err != nil {
		fmt.Printf("Warning: Failed to buffer migration event: %v\n", err)
	}
}
@@ -58,6 +106,13 @@ func (e *MigrationExecutor) ExecuteMigration() (*MigrationResult, error) {
// Phase 1: Create backups
if err := e.createBackups(); err != nil {
e.bufferEvent("backup_creation_failure", "error", "migration_executor",
fmt.Sprintf("Backup creation failed: %v", err),
map[string]interface{}{
"error": err.Error(),
"backup_path": e.plan.BackupPath,
"phase": "backup_creation",
})
return e.completeMigration(false, fmt.Errorf("backup creation failed: %w", err))
}
e.result.AppliedChanges = append(e.result.AppliedChanges, "Created backups at "+e.plan.BackupPath)
@@ -65,30 +120,69 @@ func (e *MigrationExecutor) ExecuteMigration() (*MigrationResult, error) {
// Phase 2: Directory migration
if contains(e.plan.Detection.RequiredMigrations, "directory_migration") {
if err := e.migrateDirectories(); err != nil {
e.bufferEvent("directory_migration_failure", "error", "migration_executor",
fmt.Sprintf("Directory migration failed: %v", err),
map[string]interface{}{
"error": err.Error(),
"phase": "directory_migration",
})
return e.completeMigration(false, fmt.Errorf("directory migration failed: %w", err))
}
e.result.AppliedChanges = append(e.result.AppliedChanges, "Migrated directories")
// Mark directory migration as completed
if err := e.stateManager.MarkMigrationCompleted("directory_migration", e.plan.BackupPath, e.plan.TargetVersion); err != nil {
fmt.Printf("[MIGRATION] Warning: Failed to mark directory migration as completed: %v\n", err)
}
}
// Phase 3: Configuration migration
if contains(e.plan.Detection.RequiredMigrations, "config_migration") {
if err := e.migrateConfiguration(); err != nil {
e.bufferEvent("configuration_migration_failure", "error", "migration_executor",
fmt.Sprintf("Configuration migration failed: %v", err),
map[string]interface{}{
"error": err.Error(),
"phase": "configuration_migration",
})
return e.completeMigration(false, fmt.Errorf("configuration migration failed: %w", err))
}
e.result.AppliedChanges = append(e.result.AppliedChanges, "Migrated configuration")
// Mark configuration migration as completed
if err := e.stateManager.MarkMigrationCompleted("config_migration", e.plan.BackupPath, e.plan.TargetVersion); err != nil {
fmt.Printf("[MIGRATION] Warning: Failed to mark configuration migration as completed: %v\n", err)
}
}
// Phase 4: Docker secrets migration (if available)
if contains(e.plan.Detection.RequiredMigrations, "docker_secrets_migration") {
if e.plan.Detection.DockerDetection == nil {
e.bufferEvent("docker_migration_failure", "error", "migration_executor",
"Docker secrets migration requested but detection data missing",
map[string]interface{}{
"error": "missing detection data",
"phase": "docker_secrets_migration",
})
return e.completeMigration(false, fmt.Errorf("docker secrets migration requested but detection data missing"))
}
dockerExecutor := NewDockerSecretsExecutor(e.plan.Detection.DockerDetection, e.plan.Config)
if err := dockerExecutor.ExecuteDockerSecretsMigration(); err != nil {
e.bufferEvent("docker_migration_failure", "error", "migration_executor",
fmt.Sprintf("Docker secrets migration failed: %v", err),
map[string]interface{}{
"error": err.Error(),
"phase": "docker_secrets_migration",
})
return e.completeMigration(false, fmt.Errorf("docker secrets migration failed: %w", err))
}
e.result.AppliedChanges = append(e.result.AppliedChanges, "Migrated to Docker secrets")
// Mark docker secrets migration as completed
if err := e.stateManager.MarkMigrationCompleted("docker_secrets_migration", e.plan.BackupPath, e.plan.TargetVersion); err != nil {
fmt.Printf("[MIGRATION] Warning: Failed to mark docker secrets migration as completed: %v\n", err)
}
}
// Phase 5: Security hardening
@@ -98,11 +192,22 @@ func (e *MigrationExecutor) ExecuteMigration() (*MigrationResult, error) {
fmt.Sprintf("Security hardening incomplete: %v", err))
} else {
e.result.AppliedChanges = append(e.result.AppliedChanges, "Applied security hardening")
// Mark security hardening as completed
if err := e.stateManager.MarkMigrationCompleted("security_hardening", e.plan.BackupPath, e.plan.TargetVersion); err != nil {
fmt.Printf("[MIGRATION] Warning: Failed to mark security hardening as completed: %v\n", err)
}
}
}
// Phase 6: Validation
if err := e.validateMigration(); err != nil {
e.bufferEvent("migration_validation_failure", "error", "migration_executor",
fmt.Sprintf("Migration validation failed: %v", err),
map[string]interface{}{
"error": err.Error(),
"phase": "validation",
})
return e.completeMigration(false, fmt.Errorf("migration validation failed: %w", err))
}
@@ -252,27 +357,78 @@ func (e *MigrationExecutor) collectAllFiles() []common.AgentFile {
}
func (e *MigrationExecutor) backupFile(file common.AgentFile, backupPath string) error {
relPath, err := filepath.Rel(e.plan.Config.OldConfigPath, file.Path)
if err != nil {
// Try relative to old state path
relPath, err = filepath.Rel(e.plan.Config.OldStatePath, file.Path)
if err != nil {
relPath = filepath.Base(file.Path)
// Check if file exists before attempting backup
if _, err := os.Stat(file.Path); err != nil {
if os.IsNotExist(err) {
// File doesn't exist, log and skip
fmt.Printf("[MIGRATION] [agent] [migration_executor] File does not exist, skipping backup: %s\n", file.Path)
e.bufferEvent("backup_file_missing", "warning", "migration_executor",
fmt.Sprintf("File does not exist, skipping backup: %s", file.Path),
map[string]interface{}{
"file_path": file.Path,
"phase": "backup",
})
return nil
}
return fmt.Errorf("migration: failed to stat file %s: %w", file.Path, err)
}
// Clean paths to fix trailing slash issues
cleanOldConfig := filepath.Clean(e.plan.Config.OldConfigPath)
cleanOldState := filepath.Clean(e.plan.Config.OldStatePath)
cleanPath := filepath.Clean(file.Path)
var relPath string
var err error
// Try to get relative path based on expected file location
// If file is under old config path, use that as base
if strings.HasPrefix(cleanPath, cleanOldConfig) {
relPath, err = filepath.Rel(cleanOldConfig, cleanPath)
if err != nil || strings.Contains(relPath, "..") {
// Fallback to filename if path traversal or error
relPath = filepath.Base(cleanPath)
}
} else if strings.HasPrefix(cleanPath, cleanOldState) {
relPath, err = filepath.Rel(cleanOldState, cleanPath)
if err != nil || strings.Contains(relPath, "..") {
// Fallback to filename if path traversal or error
relPath = filepath.Base(cleanPath)
}
} else {
// File is not in expected old locations - use just the filename
// This happens for files already in the new location
relPath = filepath.Base(cleanPath)
// Add subdirectory based on file type to avoid collisions
switch {
case ContainsAny(cleanPath, []string{"config.json", "agent.key", "server.key", "ca.crt"}):
relPath = filepath.Join("config", relPath)
case ContainsAny(cleanPath, []string{
"pending_acks.json", "public_key.cache", "last_scan.json", "metrics.json"}):
relPath = filepath.Join("state", relPath)
}
}
backupFilePath := filepath.Join(backupPath, relPath)
// Ensure backup path is clean
cleanBackupPath := filepath.Clean(backupPath)
backupFilePath := filepath.Join(cleanBackupPath, relPath)
backupFilePath = filepath.Clean(backupFilePath)
backupDir := filepath.Dir(backupFilePath)
// Final safety check
if strings.Contains(backupFilePath, "..") {
return fmt.Errorf("migration: backup path contains parent directory reference: %s", backupFilePath)
}
if err := os.MkdirAll(backupDir, 0755); err != nil {
return fmt.Errorf("failed to create backup directory: %w", err)
return fmt.Errorf("migration: failed to create backup directory %s: %w", backupDir, err)
}
// Copy file to backup location
if err := copyFile(file.Path, backupFilePath); err != nil {
return fmt.Errorf("failed to copy file to backup: %w", err)
if err := copyFile(cleanPath, backupFilePath); err != nil {
return fmt.Errorf("migration: failed to copy file to backup: %w", err)
}
fmt.Printf("[MIGRATION] [agent] [migration_executor] Successfully backed up: %s\n", cleanPath)
return nil
}
@@ -349,6 +505,11 @@ func (e *MigrationExecutor) completeMigration(success bool, err error) (*Migrati
if e.result.RollbackAvailable {
fmt.Printf("[MIGRATION] 📦 Rollback available at: %s\n", e.result.BackupPath)
}
// Clean up old directories after successful migration
if err := e.stateManager.CleanupOldDirectories(); err != nil {
fmt.Printf("[MIGRATION] Warning: Failed to cleanup old directories: %v\n", err)
}
} else {
fmt.Printf("[MIGRATION] ❌ Migration failed after %v\n", e.result.Duration)
if len(e.result.Errors) > 0 {

View File

@@ -0,0 +1,79 @@
package models
import (
"time"
"github.com/google/uuid"
)
// SystemEvent represents a unified event log entry for all system events.
// This is a copy of the server model to avoid circular dependencies between
// the agent and server modules; keep the two definitions in sync.
type SystemEvent struct {
	ID           uuid.UUID              `json:"id" db:"id"`                       // Unique event identifier
	AgentID      *uuid.UUID             `json:"agent_id,omitempty" db:"agent_id"` // Pointer to allow NULL for server events
	EventType    string                 `json:"event_type" db:"event_type"`       // e.g., 'agent_update', 'agent_startup', 'server_build'
	EventSubtype string                 `json:"event_subtype" db:"event_subtype"` // e.g., 'success', 'failed', 'info', 'warning'
	Severity     string                 `json:"severity" db:"severity"`           // 'info', 'warning', 'error', 'critical'
	Component    string                 `json:"component" db:"component"`         // 'agent', 'server', 'build', 'download', 'config', etc.
	Message      string                 `json:"message" db:"message"`             // Human-readable event description
	Metadata     map[string]interface{} `json:"metadata,omitempty" db:"metadata"` // JSONB for structured data
	CreatedAt    time.Time              `json:"created_at" db:"created_at"`       // Event creation timestamp
}
// Event type constants: high-level categories for SystemEvent.EventType.
const (
	EventTypeAgentStartup      = "agent_startup"
	EventTypeAgentRegistration = "agent_registration"
	EventTypeAgentCheckIn      = "agent_checkin"
	EventTypeAgentScan         = "agent_scan"
	EventTypeAgentUpdate       = "agent_update"
	EventTypeAgentConfig       = "agent_config"
	EventTypeAgentMigration    = "agent_migration"
	EventTypeAgentShutdown     = "agent_shutdown"
	EventTypeServerBuild       = "server_build"
	EventTypeServerDownload    = "server_download"
	EventTypeServerConfig      = "server_config"
	EventTypeServerAuth        = "server_auth"
	EventTypeDownload          = "download"
	EventTypeMigration         = "migration"
	EventTypeError             = "error"
)
// Event subtype constants: fine-grained outcome/reason values for
// SystemEvent.EventSubtype.
const (
	SubtypeSuccess            = "success"
	SubtypeFailed             = "failed"
	SubtypeInfo               = "info"
	SubtypeWarning            = "warning"
	SubtypeCritical           = "critical"
	SubtypeDownloadFailed     = "download_failed"
	SubtypeValidationFailed   = "validation_failed"
	SubtypeConfigCorrupted    = "config_corrupted"
	SubtypeMigrationNeeded    = "migration_needed"
	SubtypePanicRecovered     = "panic_recovered"
	SubtypeTokenExpired       = "token_expired"
	SubtypeNetworkTimeout     = "network_timeout"
	SubtypePermissionDenied   = "permission_denied"
	SubtypeServiceUnavailable = "service_unavailable"
)
// Severity constants: allowed values for SystemEvent.Severity, listed from
// least to most serious.
const (
	SeverityInfo     = "info"
	SeverityWarning  = "warning"
	SeverityError    = "error"
	SeverityCritical = "critical"
)
// Component constants: allowed values for SystemEvent.Component, identifying
// which subsystem emitted the event.
const (
	ComponentAgent     = "agent"
	ComponentServer    = "server"
	ComponentBuild     = "build"
	ComponentDownload  = "download"
	ComponentConfig    = "config"
	ComponentDatabase  = "database"
	ComponentNetwork   = "network"
	ComponentSecurity  = "security"
	ComponentMigration = "migration"
)

View File

@@ -0,0 +1,104 @@
package orchestrator
import (
"crypto/ed25519"
"fmt"
"log"
"github.com/Fimeg/RedFlag/aggregator-agent/internal/client"
"github.com/Fimeg/RedFlag/aggregator-agent/internal/config"
"github.com/Fimeg/RedFlag/aggregator-agent/internal/crypto"
"github.com/Fimeg/RedFlag/aggregator-agent/internal/logging"
"github.com/google/uuid"
)
// CommandHandler handles command processing with Ed25519 signature
// verification against the server's public key.
type CommandHandler struct {
	verifier        *crypto.CommandVerifier // Performs the actual signature checks
	securityLogger  *logging.SecurityLogger // Records verification successes/failures
	serverPublicKey ed25519.PublicKey       // Server key used to verify command signatures
	logger          *log.Logger             // General-purpose agent logger
}
// CommandSigningConfig holds configuration for command signing.
// EnforcementMode controls how verification is applied: "strict" rejects
// unsigned/invalid commands, "warning" logs failures but allows execution,
// "disabled" skips verification.
type CommandSigningConfig struct {
	Enabled         bool   `json:"enabled" env:"REDFLAG_AGENT_COMMAND_SIGNING_ENABLED" default:"true"`
	EnforcementMode string `json:"enforcement_mode" env:"REDFLAG_AGENT_COMMAND_ENFORCEMENT_MODE" default:"strict"` // strict, warning, disabled
}
// NewCommandHandler builds a CommandHandler. When command signing is enabled
// in cfg, it resolves the server's Ed25519 public key — cache first, then a
// fetch from the server — and fails construction if neither is available.
func NewCommandHandler(cfg *config.Config, securityLogger *logging.SecurityLogger, logger *log.Logger) (*CommandHandler, error) {
	h := &CommandHandler{
		verifier:       crypto.NewCommandVerifier(),
		securityLogger: securityLogger,
		logger:         logger,
	}

	if !cfg.CommandSigning.Enabled {
		// Signing disabled: no key is needed.
		return h, nil
	}

	// Prefer the locally cached key; fall back to fetching from the server.
	key, err := crypto.LoadCachedPublicKey()
	if err != nil {
		key, err = crypto.GetPublicKey(cfg.ServerURL)
		if err != nil {
			return nil, fmt.Errorf("failed to load server public key: %w", err)
		}
	}
	h.serverPublicKey = key

	return h, nil
}
// ProcessCommand verifies a command's signature according to the configured
// enforcement mode before it may be executed.
//
// Modes:
//   - "strict":  unsigned or badly-signed commands are rejected with an error.
//   - "warning": verification failures are logged but the command is allowed.
//   - any other value (e.g. "disabled"): verification is skipped entirely.
//
// NOTE(review): an unrecognized enforcement mode (including a typo of
// "strict") therefore fails OPEN — confirm that is the intended default.
func (h *CommandHandler) ProcessCommand(cmd client.CommandItem, cfg *config.Config, agentID uuid.UUID) error {
	// Renamed from "config" to avoid shadowing the imported config package.
	signing := cfg.CommandSigning

	if !signing.Enabled {
		if cmd.Signature != "" {
			// Signing is disabled but command has signature - log info.
			h.logger.Printf("[INFO] Command has signature but signing is disabled")
		}
		return nil
	}

	switch signing.EnforcementMode {
	case "strict":
		// Strict mode: Verification is required.
		if cmd.Signature == "" {
			err := fmt.Errorf("strict enforcement enabled but command not signed")
			h.securityLogger.LogCommandVerificationFailure(cmd.ID, "missing signature")
			return fmt.Errorf("command verification failed: %w", err)
		}
		if err := h.verifier.VerifyCommand(cmd, h.serverPublicKey); err != nil {
			h.securityLogger.LogCommandVerificationFailure(cmd.ID, err.Error())
			return fmt.Errorf("command verification failed: %w", err)
		}
		h.securityLogger.LogCommandVerificationSuccess(cmd.ID)

	case "warning":
		// Warning mode: Log failures but allow execution.
		if cmd.Signature == "" {
			h.logger.Printf("[WARNING] Command not signed but allowed in warning mode")
			break
		}
		if err := h.verifier.VerifyCommand(cmd, h.serverPublicKey); err != nil {
			h.logger.Printf("[WARNING] Command verification failed but allowed in warning mode: %v", err)
			h.securityLogger.LogCommandVerificationFailure(cmd.ID, err.Error())
		} else {
			h.securityLogger.LogCommandVerificationSuccess(cmd.ID)
		}
	}
	// Any other mode (e.g. "disabled"): skip verification entirely.
	return nil
}
// UpdateServerPublicKey re-fetches the server's public key, refreshes the
// local cache, and installs the key on the handler for later verifications.
func (h *CommandHandler) UpdateServerPublicKey(serverURL string) error {
	key, err := crypto.FetchAndCacheServerPublicKey(serverURL)
	if err != nil {
		return fmt.Errorf("failed to update server public key: %w", err)
	}

	h.serverPublicKey = key
	h.logger.Printf("Server public key updated successfully")
	return nil
}

View File

@@ -9,6 +9,8 @@ import (
"github.com/Fimeg/RedFlag/aggregator-agent/internal/circuitbreaker"
"github.com/Fimeg/RedFlag/aggregator-agent/internal/client"
"github.com/Fimeg/RedFlag/aggregator-agent/internal/event"
"github.com/Fimeg/RedFlag/aggregator-agent/internal/models"
)
// Scanner represents a generic update scanner
@@ -42,8 +44,9 @@ type ScanResult struct {
// Orchestrator manages and coordinates multiple scanners
type Orchestrator struct {
scanners map[string]*ScannerConfig
mu sync.RWMutex
scanners map[string]*ScannerConfig
eventBuffer *event.Buffer
mu sync.RWMutex
}
// NewOrchestrator creates a new scanner orchestrator
@@ -53,6 +56,14 @@ func NewOrchestrator() *Orchestrator {
}
}
// NewOrchestratorWithEvents creates a new scanner orchestrator that also
// buffers scan lifecycle events (skipped/failed/completed) into the given
// event buffer for later delivery to the server.
func NewOrchestratorWithEvents(buffer *event.Buffer) *Orchestrator {
	return &Orchestrator{
		scanners:    make(map[string]*ScannerConfig),
		eventBuffer: buffer,
	}
}
// RegisterScanner adds a scanner to the orchestrator
func (o *Orchestrator) RegisterScanner(name string, scanner Scanner, cb *circuitbreaker.CircuitBreaker, timeout time.Duration, enabled bool) {
o.mu.Lock()
@@ -135,6 +146,27 @@ func (o *Orchestrator) executeScan(ctx context.Context, name string, cfg *Scanne
if !cfg.Enabled {
result.Status = "disabled"
log.Printf("[%s] Scanner disabled via configuration", name)
// Buffer disabled event if event buffer is available
if o.eventBuffer != nil {
event := &models.SystemEvent{
EventType: "agent_scan",
EventSubtype: "skipped",
Severity: "info",
Component: "scanner",
Message: fmt.Sprintf("Scanner %s is disabled via configuration", name),
Metadata: map[string]interface{}{
"scanner_name": name,
"status": "disabled",
"reason": "configuration",
},
CreatedAt: time.Now(),
}
if err := o.eventBuffer.BufferEvent(event); err != nil {
log.Printf("Warning: Failed to buffer scanner disabled event: %v", err)
}
}
return result
}
@@ -142,6 +174,27 @@ func (o *Orchestrator) executeScan(ctx context.Context, name string, cfg *Scanne
if !cfg.Scanner.IsAvailable() {
result.Status = "unavailable"
log.Printf("[%s] Scanner not available on this system", name)
// Buffer unavailable event if event buffer is available
if o.eventBuffer != nil {
event := &models.SystemEvent{
EventType: "agent_scan",
EventSubtype: "skipped",
Severity: "info",
Component: "scanner",
Message: fmt.Sprintf("Scanner %s is not available on this system", name),
Metadata: map[string]interface{}{
"scanner_name": name,
"status": "unavailable",
"reason": "system_incompatible",
},
CreatedAt: time.Now(),
}
if err := o.eventBuffer.BufferEvent(event); err != nil {
log.Printf("Warning: Failed to buffer scanner unavailable event: %v", err)
}
}
return result
}
@@ -185,12 +238,55 @@ func (o *Orchestrator) executeScan(ctx context.Context, name string, cfg *Scanne
result.Error = err
result.Status = "failed"
log.Printf("[%s] Scan failed: %v", name, err)
// Buffer event if event buffer is available
if o.eventBuffer != nil {
event := &models.SystemEvent{
EventType: "agent_scan",
EventSubtype: "failed",
Severity: "error",
Component: "scanner",
Message: fmt.Sprintf("Scanner %s failed: %v", name, err),
Metadata: map[string]interface{}{
"scanner_name": name,
"error_type": "scan_failed",
"error_details": err.Error(),
"duration_ms": result.Duration.Milliseconds(),
},
CreatedAt: time.Now(),
}
if err := o.eventBuffer.BufferEvent(event); err != nil {
log.Printf("Warning: Failed to buffer scanner failure event: %v", err)
}
}
return result
}
result.Updates = updates
result.Status = "success"
log.Printf("[%s] Scan completed: found %d updates (took %v)", name, len(updates), result.Duration)
// Buffer success event if event buffer is available
if o.eventBuffer != nil {
event := &models.SystemEvent{
EventType: "agent_scan",
EventSubtype: "completed",
Severity: "info",
Component: "scanner",
Message: fmt.Sprintf("Scanner %s completed successfully", name),
Metadata: map[string]interface{}{
"scanner_name": name,
"updates_found": len(updates),
"duration_ms": result.Duration.Milliseconds(),
"status": "success",
},
CreatedAt: time.Now(),
}
if err := o.eventBuffer.BufferEvent(event); err != nil {
log.Printf("Warning: Failed to buffer scanner success event: %v", err)
}
}
return result
}

View File

@@ -536,7 +536,7 @@ func (s *redflagService) renewTokenIfNeeded(apiClient *client.Client, err error)
tempClient := client.NewClient(s.agent.ServerURL, "")
// Attempt to renew access token using refresh token
if err := tempClient.RenewToken(s.agent.AgentID, s.agent.RefreshToken); err != nil {
if err := tempClient.RenewToken(s.agent.AgentID, s.agent.RefreshToken, AgentVersion); err != nil {
log.Printf("❌ Refresh token renewal failed: %v", err)
elog.Error(1, fmt.Sprintf("Refresh token renewal failed: %v", err))
log.Printf("💡 Refresh token may be expired (>90 days) - re-registration required")

View File

@@ -0,0 +1,123 @@
package version
import (
"fmt"
"runtime"
"strings"
"time"
)
// Build-time injected version information.
// These will be set via ldflags during build (SERVER AUTHORITY).
var (
	// Version is the agent version (e.g., "0.1.23.6").
	// Injected by server during build: -ldflags "-X github.com/redflag/redflag/internal/version.Version=0.1.23.6"
	Version = "dev"

	// ConfigVersion is the config schema version this agent expects (e.g., "6").
	// Injected by server during build: -ldflags "-X github.com/redflag/redflag/internal/version.ConfigVersion=6"
	ConfigVersion = "dev"

	// BuildTime is when this binary was built.
	BuildTime = "unknown"

	// GitCommit is the git commit hash.
	GitCommit = "unknown"

	// GoVersion is the Go version used to build.
	GoVersion = runtime.Version()
)

// ExtractConfigVersionFromAgent extracts the config version from the agent
// version. Agent version format: v0.1.23.6, where the fourth octet (.6) maps
// to the config version. When no usable fourth octet is present, it falls
// back to the build-time injected ConfigVersion, then to "6".
func ExtractConfigVersionFromAgent(agentVer string) string {
	// Strip 'v' prefix if present.
	cleanVersion := strings.TrimPrefix(agentVer, "v")

	// Split version parts.
	parts := strings.Split(cleanVersion, ".")
	// Fix: require a non-empty fourth octet — a trailing dot ("v0.1.23.")
	// previously returned "" instead of using the fallbacks below.
	if len(parts) == 4 && parts[3] != "" {
		// v0.1.23.6 → "6"
		return parts[3]
	}

	// If we have a build-time injected ConfigVersion, use it.
	if ConfigVersion != "dev" {
		return ConfigVersion
	}

	// Default fallback.
	return "6"
}
// Info holds complete version information for reporting to the server and
// for human-readable output.
type Info struct {
	AgentVersion   string `json:"agent_version"`   // Agent version string (e.g., "0.1.23.6")
	ConfigVersion  string `json:"config_version"`  // Config schema version this agent expects
	BuildTime      string `json:"build_time"`      // Build time string, or "unknown"
	GitCommit      string `json:"git_commit"`      // Git commit hash, or "unknown"
	GoVersion      string `json:"go_version"`      // Go toolchain version used to build
	BuildTimestamp int64  `json:"build_timestamp"` // Unix seconds parsed from BuildTime (falls back to time of call)
}
// GetInfo assembles and returns the complete version information.
// BuildTimestamp comes from the injected BuildTime when it parses as
// RFC3339; otherwise the current time is used.
func GetInfo() Info {
	info := Info{
		AgentVersion:   Version,
		ConfigVersion:  ConfigVersion,
		BuildTime:      BuildTime,
		GitCommit:      GitCommit,
		GoVersion:      GoVersion,
		BuildTimestamp: time.Now().Unix(),
	}

	// Prefer the real build timestamp when one was injected and parses.
	if BuildTime != "unknown" {
		if t, err := time.Parse(time.RFC3339, BuildTime); err == nil {
			info.BuildTimestamp = t.Unix()
		}
	}
	return info
}
// String returns a human-readable version string
func String() string {
return fmt.Sprintf("RedFlag Agent v%s (config v%s)", Version, ConfigVersion)
}
// FullString returns detailed, multi-line version information suitable for
// a --version style printout.
func FullString() string {
	i := GetInfo()
	lines := []string{
		fmt.Sprintf("RedFlag Agent v%s (config v%s)", i.AgentVersion, i.ConfigVersion),
		"Built: " + i.BuildTime,
		"Commit: " + i.GitCommit,
		"Go: " + i.GoVersion,
	}
	return strings.Join(lines, "\n")
}
// CheckCompatible reports whether the given config schema version is usable
// by this agent. For now an exact match with ConfigVersion is required; a
// backward/forward compatibility matrix may relax this in the future.
func CheckCompatible(configVer string) error {
	switch {
	case configVer == "":
		return fmt.Errorf("config version is empty")
	case configVer != ConfigVersion:
		return fmt.Errorf("config version mismatch: agent expects v%s, config has v%s",
			ConfigVersion, configVer)
	default:
		return nil
	}
}
// Valid reports whether build-time version information was actually
// injected, i.e. neither Version nor ConfigVersion is still the "dev"
// placeholder.
func Valid() bool {
	return !(Version == "dev" || ConfigVersion == "dev")
}