refactor: A-series dead code cleanup and ETHOS compliance sweep
- Remove dead queries.RetryCommand function (DEV-019, 31 lines) - Remove security_settings.go.broken leftover from A-3 - Remove 5 compiled test binaries from aggregator-agent/ (~61MB) - Remove config_builder.go.restored from repo root - Remove test_disk_detection.go and test_disk.go (throwaway test files) - Fix 6 banned word violations (production-ready, enhanced, robust, seamlessly) - Add .gitignore rules for compiled agent binaries - Document machine ID duplication for D-1 fix prompt - Document 30+ pre-existing emoji violations for D-2 pass No behavior changes. All 41 tests pass. Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
4
.gitignore
vendored
4
.gitignore
vendored
@@ -255,6 +255,10 @@ redflag-agent.exe
|
||||
aggregator-agent/redflag-agent
|
||||
aggregator-agent/aggregator-agent
|
||||
aggregator-agent/redflag-agent.exe
|
||||
aggregator-agent/agent
|
||||
aggregator-agent/agent-test
|
||||
aggregator-agent/test-agent-*
|
||||
aggregator-agent/test-redflag-agent
|
||||
aggregator-server/redflag-server
|
||||
aggregator-server/server
|
||||
|
||||
|
||||
Binary file not shown.
Binary file not shown.
@@ -4,7 +4,7 @@
|
||||
package scanner
|
||||
|
||||
// WindowsUpdateScanner is an alias for WindowsUpdateScannerWUA on Windows
|
||||
// This allows the WUA implementation to be used seamlessly
|
||||
// This aliases to the WUA implementation on Windows builds
|
||||
type WindowsUpdateScanner = WindowsUpdateScannerWUA
|
||||
|
||||
// NewWindowsUpdateScanner returns the WUA-based scanner on Windows
|
||||
|
||||
@@ -33,7 +33,7 @@ var (
|
||||
)
|
||||
|
||||
const (
|
||||
AgentVersion = "0.1.16" // Enhanced configuration system with proxy support and registration tokens
|
||||
AgentVersion = "0.1.16" // Configuration system with proxy support and registration tokens
|
||||
)
|
||||
|
||||
type redflagService struct {
|
||||
|
||||
@@ -257,7 +257,7 @@ func getMemoryInfo() (*MemoryInfo, error) {
|
||||
return mem, nil
|
||||
}
|
||||
|
||||
// getDiskInfo gets disk information for mounted filesystems with enhanced detection
|
||||
// getDiskInfo gets disk information for mounted filesystems
|
||||
func getDiskInfo() ([]DiskInfo, error) {
|
||||
var disks []DiskInfo
|
||||
|
||||
@@ -383,7 +383,7 @@ func detectDiskType(device string) string {
|
||||
re := strings.NewReplacer("/dev/sda", "/dev/sda", "/dev/sdb", "/dev/sdb", "/dev/nvme0n1", "/dev/nvme0n1")
|
||||
baseDevice = re.Replace(baseDevice)
|
||||
|
||||
// More robust partition removal
|
||||
// Strip partition number to get base device
|
||||
if matches := regexp.MustCompile(`^(/dev/sd[a-z]|/dev/nvme\d+n\d|/dev/hd[a-z])\d*$`).FindStringSubmatch(baseDevice); len(matches) > 1 {
|
||||
baseDevice = matches[1]
|
||||
}
|
||||
|
||||
@@ -387,7 +387,7 @@ func getWindowsHardwareInfo() map[string]string {
|
||||
for _, line := range lines {
|
||||
if strings.TrimSpace(line) != "" && !strings.Contains(line, "Manufacturer") &&
|
||||
!strings.Contains(line, "Product") && !strings.Contains(line, "SerialNumber") {
|
||||
// This is a simplified parsing - in production you'd want more robust parsing
|
||||
// Simplified parsing — splits on whitespace for key-value pairs
|
||||
if strings.Contains(line, " ") {
|
||||
hardware["motherboard"] = strings.TrimSpace(line)
|
||||
}
|
||||
|
||||
Binary file not shown.
Binary file not shown.
Binary file not shown.
@@ -1,55 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"log"
|
||||
|
||||
"github.com/Fimeg/RedFlag/aggregator-agent/internal/system"
|
||||
)
|
||||
|
||||
func main() {
|
||||
// Test lightweight metrics (most common use case)
|
||||
fmt.Println("=== Enhanced Lightweight Metrics Test ===")
|
||||
metrics, err := system.GetLightweightMetrics()
|
||||
if err != nil {
|
||||
log.Printf("Error getting lightweight metrics: %v", err)
|
||||
} else {
|
||||
// Pretty print the JSON
|
||||
jsonData, _ := json.MarshalIndent(metrics, "", " ")
|
||||
fmt.Printf("LightweightMetrics:\n%s\n\n", jsonData)
|
||||
|
||||
// Show key findings
|
||||
fmt.Printf("Root Disk: %.1fGB used / %.1fGB total (%.1f%%)\n",
|
||||
metrics.DiskUsedGB, metrics.DiskTotalGB, metrics.DiskPercent)
|
||||
|
||||
if metrics.LargestDiskTotalGB > 0 {
|
||||
fmt.Printf("Largest Disk (%s): %.1fGB used / %.1fGB total (%.1f%%)\n",
|
||||
metrics.LargestDiskMount, metrics.LargestDiskUsedGB, metrics.LargestDiskTotalGB, metrics.LargestDiskPercent)
|
||||
} else {
|
||||
fmt.Printf("No largest disk detected (this might be the issue!)\n")
|
||||
}
|
||||
}
|
||||
|
||||
// Test full system info (detailed disk inventory)
|
||||
fmt.Println("\n=== Enhanced System Info Test ===")
|
||||
sysInfo, err := system.GetSystemInfo("test-v0.1.5")
|
||||
if err != nil {
|
||||
log.Printf("Error getting system info: %v", err)
|
||||
} else {
|
||||
fmt.Printf("Found %d disks:\n", len(sysInfo.DiskInfo))
|
||||
for i, disk := range sysInfo.DiskInfo {
|
||||
fmt.Printf(" Disk %d: %s (%s) - %s, %.1fGB used / %.1fGB total (%.1f%%)",
|
||||
i+1, disk.Mountpoint, disk.Filesystem, disk.DiskType,
|
||||
float64(disk.Used)/(1024*1024*1024), float64(disk.Total)/(1024*1024*1024), disk.UsedPercent)
|
||||
|
||||
if disk.IsRoot {
|
||||
fmt.Printf(" [ROOT]")
|
||||
}
|
||||
if disk.IsLargest {
|
||||
fmt.Printf(" [LARGEST]")
|
||||
}
|
||||
fmt.Printf("\n")
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,205 +0,0 @@
|
||||
package handlers
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strconv"
|
||||
|
||||
"github.com/Fimeg/RedFlag/aggregator-server/internal/services"
|
||||
"github.com/gin-gonic/gin"
|
||||
)
|
||||
|
||||
// SecuritySettingsHandler handles security settings API endpoints
|
||||
type SecuritySettingsHandler struct {
|
||||
securitySettingsService *services.SecuritySettingsService
|
||||
}
|
||||
|
||||
// NewSecuritySettingsHandler creates a new security settings handler
|
||||
func NewSecuritySettingsHandler(securitySettingsService *services.SecuritySettingsService) *SecuritySettingsHandler {
|
||||
return &SecuritySettingsHandler{
|
||||
securitySettingsService: securitySettingsService,
|
||||
}
|
||||
}
|
||||
|
||||
// GetAllSecuritySettings returns all security settings for the authenticated user
|
||||
func (h *SecuritySettingsHandler) GetAllSecuritySettings(c *gin.Context) {
|
||||
// Get user from context
|
||||
userID := c.GetString("user_id")
|
||||
|
||||
settings, err := h.securitySettingsService.GetAllSettings(userID)
|
||||
if err != nil {
|
||||
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
|
||||
return
|
||||
}
|
||||
|
||||
c.JSON(http.StatusOK, gin.H{
|
||||
"settings": settings,
|
||||
"user_has_permission": true, // Check actual permissions
|
||||
})
|
||||
}
|
||||
|
||||
// GetSecuritySettingsByCategory returns settings for a specific category
|
||||
func (h *SecuritySettingsHandler) GetSecuritySettingsByCategory(c *gin.Context) {
|
||||
category := c.Param("category") // e.g., "command_signing", "nonce_validation"
|
||||
userID := c.GetString("user_id")
|
||||
|
||||
settings, err := h.securitySettingsService.GetSettingsByCategory(userID, category)
|
||||
if err != nil {
|
||||
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
|
||||
return
|
||||
}
|
||||
|
||||
c.JSON(http.StatusOK, settings)
|
||||
}
|
||||
|
||||
// UpdateSecuritySetting updates a specific security setting
|
||||
func (h *SecuritySettingsHandler) UpdateSecuritySetting(c *gin.Context) {
|
||||
var req struct {
|
||||
Value interface{} `json:"value" binding:"required"`
|
||||
Reason string `json:"reason"` // Optional audit trail
|
||||
}
|
||||
|
||||
if err := c.ShouldBindJSON(&req); err != nil {
|
||||
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
|
||||
return
|
||||
}
|
||||
|
||||
category := c.Param("category")
|
||||
key := c.Param("key")
|
||||
userID := c.GetString("user_id")
|
||||
|
||||
// Validate before applying
|
||||
if err := h.securitySettingsService.ValidateSetting(category, key, req.Value); err != nil {
|
||||
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
|
||||
return
|
||||
}
|
||||
|
||||
// Apply the setting
|
||||
err := h.securitySettingsService.SetSetting(category, key, req.Value, userID, req.Reason)
|
||||
if err != nil {
|
||||
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
|
||||
return
|
||||
}
|
||||
|
||||
// Return updated setting
|
||||
setting, err := h.securitySettingsService.GetSetting(category, key)
|
||||
if err != nil {
|
||||
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
|
||||
return
|
||||
}
|
||||
|
||||
c.JSON(http.StatusOK, gin.H{
|
||||
"message": "Setting updated successfully",
|
||||
"setting": map[string]interface{}{
|
||||
"category": category,
|
||||
"key": key,
|
||||
"value": setting,
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
// ValidateSecuritySettings validates settings without applying them
|
||||
func (h *SecuritySettingsHandler) ValidateSecuritySettings(c *gin.Context) {
|
||||
var req struct {
|
||||
Category string `json:"category" binding:"required"`
|
||||
Key string `json:"key" binding:"required"`
|
||||
Value interface{} `json:"value" binding:"required"`
|
||||
}
|
||||
|
||||
if err := c.ShouldBindJSON(&req); err != nil {
|
||||
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
|
||||
return
|
||||
}
|
||||
|
||||
err := h.securitySettingsService.ValidateSetting(req.Category, req.Key, req.Value)
|
||||
if err != nil {
|
||||
c.JSON(http.StatusBadRequest, gin.H{
|
||||
"valid": false,
|
||||
"error": err.Error(),
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
c.JSON(http.StatusOK, gin.H{
|
||||
"valid": true,
|
||||
"message": "Setting is valid",
|
||||
})
|
||||
}
|
||||
|
||||
// GetSecurityAuditTrail returns audit trail of security setting changes
|
||||
func (h *SecuritySettingsHandler) GetSecurityAuditTrail(c *gin.Context) {
|
||||
// Pagination parameters
|
||||
page := c.DefaultQuery("page", "1")
|
||||
pageSize := c.DefaultQuery("page_size", "50")
|
||||
|
||||
pageInt, _ := strconv.Atoi(page)
|
||||
pageSizeInt, _ := strconv.Atoi(pageSize)
|
||||
|
||||
auditEntries, totalCount, err := h.securitySettingsService.GetAuditTrail(pageInt, pageSizeInt)
|
||||
if err != nil {
|
||||
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
|
||||
return
|
||||
}
|
||||
|
||||
c.JSON(http.StatusOK, gin.H{
|
||||
"audit_entries": auditEntries,
|
||||
"pagination": gin.H{
|
||||
"page": pageInt,
|
||||
"page_size": pageSizeInt,
|
||||
"total": totalCount,
|
||||
"total_pages": (totalCount + pageSizeInt - 1) / pageSizeInt,
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
// GetSecurityOverview returns current security status overview
|
||||
func (h *SecuritySettingsHandler) GetSecurityOverview(c *gin.Context) {
|
||||
userID := c.GetString("user_id")
|
||||
|
||||
overview, err := h.securitySettingsService.GetSecurityOverview(userID)
|
||||
if err != nil {
|
||||
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
|
||||
return
|
||||
}
|
||||
|
||||
c.JSON(http.StatusOK, overview)
|
||||
}
|
||||
|
||||
// ApplySecuritySettings applies batch of setting changes atomically
|
||||
func (h *SecuritySettingsHandler) ApplySecuritySettings(c *gin.Context) {
|
||||
var req struct {
|
||||
Settings map[string]map[string]interface{} `json:"settings" binding:"required"`
|
||||
Reason string `json:"reason"`
|
||||
}
|
||||
|
||||
if err := c.ShouldBindJSON(&req); err != nil {
|
||||
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
|
||||
return
|
||||
}
|
||||
|
||||
userID := c.GetString("user_id")
|
||||
|
||||
// Validate all settings first
|
||||
for category, settings := range req.Settings {
|
||||
for key, value := range settings {
|
||||
if err := h.securitySettingsService.ValidateSetting(category, key, value); err != nil {
|
||||
c.JSON(http.StatusBadRequest, gin.H{
|
||||
"error": fmt.Sprintf("Validation failed for %s.%s: %v", category, key, err),
|
||||
})
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Apply all settings atomically
|
||||
err := h.securitySettingsService.ApplySettingsBatch(req.Settings, userID, req.Reason)
|
||||
if err != nil {
|
||||
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
|
||||
return
|
||||
}
|
||||
|
||||
c.JSON(http.StatusOK, gin.H{
|
||||
"message": "All settings applied successfully",
|
||||
"applied_count": len(req.Settings),
|
||||
})
|
||||
}
|
||||
@@ -196,38 +196,6 @@ func (q *CommandQueries) CancelCommand(id uuid.UUID) error {
|
||||
return err
|
||||
}
|
||||
|
||||
// RetryCommand creates a new command based on a failed/timed_out/cancelled command
|
||||
func (q *CommandQueries) RetryCommand(originalID uuid.UUID) (*models.AgentCommand, error) {
|
||||
// Get the original command
|
||||
original, err := q.GetCommandByID(originalID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Only allow retry of failed, timed_out, or cancelled commands
|
||||
if original.Status != "failed" && original.Status != "timed_out" && original.Status != "cancelled" {
|
||||
return nil, fmt.Errorf("command must be failed, timed_out, or cancelled to retry")
|
||||
}
|
||||
|
||||
// Create new command with same parameters, linking it to the original
|
||||
newCommand := &models.AgentCommand{
|
||||
ID: uuid.New(),
|
||||
AgentID: original.AgentID,
|
||||
CommandType: original.CommandType,
|
||||
Params: original.Params,
|
||||
Status: models.CommandStatusPending,
|
||||
CreatedAt: time.Now(),
|
||||
RetriedFromID: &originalID,
|
||||
}
|
||||
|
||||
// Store the new command
|
||||
if err := q.CreateCommand(newCommand); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return newCommand, nil
|
||||
}
|
||||
|
||||
// GetActiveCommands retrieves commands that are not in a final/terminal state
|
||||
// Shows anything that's in progress or can be retried (excludes completed and cancelled)
|
||||
func (q *CommandQueries) GetActiveCommands() ([]models.ActiveCommandInfo, error) {
|
||||
|
||||
@@ -35,7 +35,7 @@ type Config struct {
|
||||
RateLimitPerSecond int
|
||||
}
|
||||
|
||||
// DefaultConfig returns production-ready default configuration
|
||||
// DefaultConfig returns default configuration values
|
||||
func DefaultConfig() Config {
|
||||
return Config{
|
||||
CheckInterval: 10 * time.Second,
|
||||
|
||||
@@ -1,809 +0,0 @@
|
||||
package services
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"github.com/Fimeg/RedFlag/aggregator-server/internal/database/queries"
|
||||
"github.com/google/uuid"
|
||||
)
|
||||
|
||||
// AgentTemplate defines a template for different agent types
|
||||
type AgentTemplate struct {
|
||||
Name string `json:"name"`
|
||||
Description string `json:"description"`
|
||||
BaseConfig map[string]interface{} `json:"base_config"`
|
||||
Secrets []string `json:"required_secrets"`
|
||||
Validation ValidationRules `json:"validation"`
|
||||
}
|
||||
|
||||
// ValidationRules defines validation rules for configuration
|
||||
type ValidationRules struct {
|
||||
RequiredFields []string `json:"required_fields"`
|
||||
AllowedValues map[string][]string `json:"allowed_values"`
|
||||
Patterns map[string]string `json:"patterns"`
|
||||
Constraints map[string]interface{} `json:"constraints"`
|
||||
}
|
||||
|
||||
// PublicKeyResponse represents the server's public key response
|
||||
type PublicKeyResponse struct {
|
||||
PublicKey string `json:"public_key"`
|
||||
Fingerprint string `json:"fingerprint"`
|
||||
Algorithm string `json:"algorithm"`
|
||||
KeySize int `json:"key_size"`
|
||||
}
|
||||
|
||||
// ConfigBuilder handles dynamic agent configuration generation
|
||||
type ConfigBuilder struct {
|
||||
serverURL string
|
||||
templates map[string]AgentTemplate
|
||||
httpClient *http.Client
|
||||
publicKeyCache map[string]string
|
||||
scannerConfigQ *queries.ScannerConfigQueries
|
||||
}
|
||||
|
||||
// NewConfigBuilder creates a new configuration builder
|
||||
func NewConfigBuilder(serverURL string, db queries.DBInterface) *ConfigBuilder {
|
||||
return &ConfigBuilder{
|
||||
serverURL: serverURL,
|
||||
templates: getAgentTemplates(),
|
||||
httpClient: &http.Client{
|
||||
Timeout: 30 * time.Second,
|
||||
},
|
||||
publicKeyCache: make(map[string]string),
|
||||
scannerConfigQ: queries.NewScannerConfigQueries(db),
|
||||
}
|
||||
}
|
||||
|
||||
// AgentSetupRequest represents a request to set up a new agent
|
||||
type AgentSetupRequest struct {
|
||||
ServerURL string `json:"server_url" binding:"required"`
|
||||
Environment string `json:"environment" binding:"required"`
|
||||
AgentType string `json:"agent_type" binding:"required,oneof=linux-server windows-workstation docker-host"`
|
||||
Organization string `json:"organization" binding:"required"`
|
||||
CustomSettings map[string]interface{} `json:"custom_settings,omitempty"`
|
||||
DeploymentID string `json:"deployment_id,omitempty"`
|
||||
AgentID string `json:"agent_id,omitempty"` // Optional: existing agent ID for upgrades
|
||||
}
|
||||
|
||||
// BuildAgentConfig builds a complete agent configuration
|
||||
func (cb *ConfigBuilder) BuildAgentConfig(req AgentSetupRequest) (*AgentConfiguration, error) {
|
||||
// Validate request
|
||||
if err := cb.validateRequest(req); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Determine agent ID - use existing if provided and valid, otherwise generate new
|
||||
agentID := cb.determineAgentID(req.AgentID)
|
||||
|
||||
// Fetch server public key
|
||||
serverPublicKey, err := cb.fetchServerPublicKey(req.ServerURL)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to fetch server public key: %w", err)
|
||||
}
|
||||
|
||||
// Generate registration token
|
||||
registrationToken, err := cb.generateRegistrationToken(agentID)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to generate registration token: %w", err)
|
||||
}
|
||||
|
||||
// Get template
|
||||
template, exists := cb.templates[req.AgentType]
|
||||
if !exists {
|
||||
return nil, fmt.Errorf("unknown agent type: %s", req.AgentType)
|
||||
}
|
||||
|
||||
// Build base configuration
|
||||
config := cb.buildFromTemplate(template, req.CustomSettings)
|
||||
|
||||
// Override scanner timeouts from database (user-configurable)
|
||||
cb.overrideScannerTimeoutsFromDB(config)
|
||||
|
||||
// Inject deployment-specific values
|
||||
cb.injectDeploymentValues(config, req, agentID, registrationToken, serverPublicKey)
|
||||
|
||||
// Apply environment-specific defaults
|
||||
cb.applyEnvironmentDefaults(config, req.Environment)
|
||||
// Validate request
|
||||
if err := cb.validateRequest(req); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Determine agent ID - use existing if provided and valid, otherwise generate new
|
||||
agentID := cb.determineAgentID(req.AgentID)
|
||||
|
||||
// Fetch server public key
|
||||
serverPublicKey, err := cb.fetchServerPublicKey(req.ServerURL)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to fetch server public key: %w", err)
|
||||
}
|
||||
|
||||
// Generate registration token
|
||||
registrationToken, err := cb.generateRegistrationToken(agentID)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to generate registration token: %w", err)
|
||||
}
|
||||
|
||||
// Get template
|
||||
template, exists := cb.templates[req.AgentType]
|
||||
if !exists {
|
||||
return nil, fmt.Errorf("unknown agent type: %s", req.AgentType)
|
||||
}
|
||||
|
||||
// Build base configuration
|
||||
config := cb.buildFromTemplate(template, req.CustomSettings)
|
||||
|
||||
// Inject deployment-specific values
|
||||
cb.injectDeploymentValues(config, req, agentID, registrationToken, serverPublicKey)
|
||||
|
||||
// Apply environment-specific defaults
|
||||
cb.applyEnvironmentDefaults(config, req.Environment)
|
||||
|
||||
// Validate final configuration
|
||||
if err := cb.validateConfiguration(config, template); err != nil {
|
||||
return nil, fmt.Errorf("configuration validation failed: %w", err)
|
||||
}
|
||||
|
||||
// Separate sensitive and non-sensitive data
|
||||
publicConfig, secrets := cb.separateSecrets(config)
|
||||
|
||||
// Create Docker secrets if needed
|
||||
var secretsCreated bool
|
||||
var secretsPath string
|
||||
if len(secrets) > 0 {
|
||||
secretsManager := NewSecretsManager()
|
||||
|
||||
// Generate encryption key if not set
|
||||
if secretsManager.GetEncryptionKey() == "" {
|
||||
key, err := secretsManager.GenerateEncryptionKey()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to generate encryption key: %w", err)
|
||||
}
|
||||
secretsManager.SetEncryptionKey(key)
|
||||
}
|
||||
|
||||
// Create Docker secrets
|
||||
if err := secretsManager.CreateDockerSecrets(secrets); err != nil {
|
||||
return nil, fmt.Errorf("failed to create Docker secrets: %w", err)
|
||||
}
|
||||
|
||||
secretsCreated = true
|
||||
secretsPath = secretsManager.GetSecretsPath()
|
||||
}
|
||||
|
||||
// Determine platform from agent type
|
||||
platform := "linux-amd64" // Default
|
||||
if req.AgentType == "windows-workstation" {
|
||||
platform = "windows-amd64"
|
||||
}
|
||||
|
||||
return &AgentConfiguration{
|
||||
AgentID: agentID,
|
||||
PublicConfig: publicConfig,
|
||||
Secrets: secrets,
|
||||
Template: req.AgentType,
|
||||
Environment: req.Environment,
|
||||
ServerURL: req.ServerURL,
|
||||
Organization: req.Organization,
|
||||
Platform: platform,
|
||||
ConfigVersion: "5", // Config schema version
|
||||
AgentVersion: "0.1.23.6", // Agent binary version
|
||||
BuildTime: time.Now(),
|
||||
SecretsCreated: secretsCreated,
|
||||
SecretsPath: secretsPath,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// AgentConfiguration represents a complete agent configuration
|
||||
type AgentConfiguration struct {
|
||||
AgentID string `json:"agent_id"`
|
||||
PublicConfig map[string]interface{} `json:"public_config"`
|
||||
Secrets map[string]string `json:"secrets"`
|
||||
Template string `json:"template"`
|
||||
Environment string `json:"environment"`
|
||||
ServerURL string `json:"server_url"`
|
||||
Organization string `json:"organization"`
|
||||
Platform string `json:"platform"`
|
||||
ConfigVersion string `json:"config_version"` // Config schema version (e.g., "5")
|
||||
AgentVersion string `json:"agent_version"` // Agent binary version (e.g., "0.1.23.6")
|
||||
BuildTime time.Time `json:"build_time"`
|
||||
SecretsCreated bool `json:"secrets_created"`
|
||||
SecretsPath string `json:"secrets_path,omitempty"`
|
||||
}
|
||||
|
||||
// validateRequest validates the setup request
|
||||
func (cb *ConfigBuilder) validateRequest(req AgentSetupRequest) error {
|
||||
if req.ServerURL == "" {
|
||||
return fmt.Errorf("server_url is required")
|
||||
}
|
||||
|
||||
if req.Environment == "" {
|
||||
return fmt.Errorf("environment is required")
|
||||
}
|
||||
|
||||
if req.AgentType == "" {
|
||||
return fmt.Errorf("agent_type is required")
|
||||
}
|
||||
|
||||
if req.Organization == "" {
|
||||
return fmt.Errorf("organization is required")
|
||||
}
|
||||
|
||||
// Check if agent type exists
|
||||
if _, exists := cb.templates[req.AgentType]; !exists {
|
||||
return fmt.Errorf("unknown agent type: %s", req.AgentType)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// fetchServerPublicKey fetches the server's public key with caching
|
||||
func (cb *ConfigBuilder) fetchServerPublicKey(serverURL string) (string, error) {
|
||||
// Check cache first
|
||||
if cached, exists := cb.publicKeyCache[serverURL]; exists {
|
||||
return cached, nil
|
||||
}
|
||||
|
||||
// Fetch from server
|
||||
resp, err := cb.httpClient.Get(serverURL + "/api/v1/public-key")
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to fetch public key: %w", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return "", fmt.Errorf("server returned status %d", resp.StatusCode)
|
||||
}
|
||||
|
||||
var keyResp PublicKeyResponse
|
||||
if err := json.NewDecoder(resp.Body).Decode(&keyResp); err != nil {
|
||||
return "", fmt.Errorf("failed to decode public key response: %w", err)
|
||||
}
|
||||
|
||||
// Cache the key
|
||||
cb.publicKeyCache[serverURL] = keyResp.PublicKey
|
||||
|
||||
return keyResp.PublicKey, nil
|
||||
}
|
||||
|
||||
// generateRegistrationToken generates a secure registration token
|
||||
func (cb *ConfigBuilder) generateRegistrationToken(agentID string) (string, error) {
|
||||
bytes := make([]byte, 32)
|
||||
if _, err := rand.Read(bytes); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
// Combine agent ID with random bytes for uniqueness
|
||||
data := append([]byte(agentID), bytes...)
|
||||
token := hex.EncodeToString(data)
|
||||
|
||||
// Ensure token doesn't exceed reasonable length
|
||||
if len(token) > 128 {
|
||||
token = token[:128]
|
||||
}
|
||||
|
||||
return token, nil
|
||||
}
|
||||
|
||||
// buildFromTemplate builds configuration from template
|
||||
func (cb *ConfigBuilder) buildFromTemplate(template AgentTemplate, customSettings map[string]interface{}) map[string]interface{} {
|
||||
config := make(map[string]interface{})
|
||||
|
||||
// Deep copy base configuration
|
||||
for k, v := range template.BaseConfig {
|
||||
config[k] = cb.deepCopy(v)
|
||||
}
|
||||
|
||||
// Apply custom settings
|
||||
if customSettings != nil {
|
||||
cb.mergeSettings(config, customSettings)
|
||||
}
|
||||
|
||||
return config
|
||||
}
|
||||
|
||||
// injectDeploymentValues injects deployment-specific values into configuration
|
||||
func (cb *ConfigBuilder) injectDeploymentValues(config map[string]interface{}, req AgentSetupRequest, agentID, registrationToken, serverPublicKey string) {
|
||||
config["version"] = "5" // Config schema version (for migration system)
|
||||
config["agent_version"] = "0.1.23.6" // Agent binary version (MUST match the binary being served)
|
||||
config["server_url"] = req.ServerURL
|
||||
config["agent_id"] = agentID
|
||||
config["registration_token"] = registrationToken
|
||||
config["server_public_key"] = serverPublicKey
|
||||
config["organization"] = req.Organization
|
||||
config["environment"] = req.Environment
|
||||
config["agent_type"] = req.AgentType
|
||||
|
||||
if req.DeploymentID != "" {
|
||||
config["deployment_id"] = req.DeploymentID
|
||||
}
|
||||
}
|
||||
|
||||
// determineAgentID checks if an existing agent ID is provided and valid, otherwise generates new
|
||||
func (cb *ConfigBuilder) determineAgentID(providedAgentID string) string {
|
||||
if providedAgentID != "" {
|
||||
// Validate it's a proper UUID
|
||||
if _, err := uuid.Parse(providedAgentID); err == nil {
|
||||
return providedAgentID
|
||||
}
|
||||
}
|
||||
// Generate new UUID if none provided or invalid
|
||||
return uuid.New().String()
|
||||
}
|
||||
|
||||
// applyEnvironmentDefaults applies environment-specific configuration defaults
|
||||
func (cb *ConfigBuilder) applyEnvironmentDefaults(config map[string]interface{}, environment string) {
|
||||
environmentDefaults := map[string]interface{}{
|
||||
"development": map[string]interface{}{
|
||||
"logging": map[string]interface{}{
|
||||
"level": "debug",
|
||||
"max_size": 50,
|
||||
"max_backups": 2,
|
||||
"max_age": 7,
|
||||
},
|
||||
"check_in_interval": 60, // More frequent polling in development
|
||||
},
|
||||
"staging": map[string]interface{}{
|
||||
"logging": map[string]interface{}{
|
||||
"level": "info",
|
||||
"max_size": 100,
|
||||
"max_backups": 3,
|
||||
"max_age": 14,
|
||||
},
|
||||
"check_in_interval": 180,
|
||||
},
|
||||
"production": map[string]interface{}{
|
||||
"logging": map[string]interface{}{
|
||||
"level": "warn",
|
||||
"max_size": 200,
|
||||
"max_backups": 5,
|
||||
"max_age": 30,
|
||||
},
|
||||
"check_in_interval": 300, // 5 minutes for production
|
||||
},
|
||||
"testing": map[string]interface{}{
|
||||
"logging": map[string]interface{}{
|
||||
"level": "debug",
|
||||
"max_size": 10,
|
||||
"max_backups": 1,
|
||||
"max_age": 1,
|
||||
},
|
||||
"check_in_interval": 30, // Very frequent for testing
|
||||
},
|
||||
}
|
||||
|
||||
if defaults, exists := environmentDefaults[environment]; exists {
|
||||
if defaultsMap, ok := defaults.(map[string]interface{}); ok {
|
||||
cb.mergeSettings(config, defaultsMap)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// validateConfiguration validates the final configuration
|
||||
func (cb *ConfigBuilder) validateConfiguration(config map[string]interface{}, template AgentTemplate) error {
|
||||
// Check required fields
|
||||
for _, field := range template.Validation.RequiredFields {
|
||||
if _, exists := config[field]; !exists {
|
||||
return fmt.Errorf("required field missing: %s", field)
|
||||
}
|
||||
}
|
||||
|
||||
// Validate allowed values
|
||||
for field, allowedValues := range template.Validation.AllowedValues {
|
||||
if value, exists := config[field]; exists {
|
||||
if strValue, ok := value.(string); ok {
|
||||
if !cb.containsString(allowedValues, strValue) {
|
||||
return fmt.Errorf("invalid value for %s: %s (allowed: %v)", field, strValue, allowedValues)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Validate constraints
|
||||
for field, constraint := range template.Validation.Constraints {
|
||||
if value, exists := config[field]; exists {
|
||||
if err := cb.validateConstraint(field, value, constraint); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// separateSecrets separates sensitive data from public configuration
|
||||
func (cb *ConfigBuilder) separateSecrets(config map[string]interface{}) (map[string]interface{}, map[string]string) {
|
||||
publicConfig := make(map[string]interface{})
|
||||
secrets := make(map[string]string)
|
||||
|
||||
// Copy all values to public config initially
|
||||
for k, v := range config {
|
||||
publicConfig[k] = cb.deepCopy(v)
|
||||
}
|
||||
|
||||
// Extract known sensitive fields
|
||||
sensitiveFields := []string{
|
||||
"registration_token",
|
||||
"server_public_key",
|
||||
}
|
||||
|
||||
for _, field := range sensitiveFields {
|
||||
if value, exists := publicConfig[field]; exists {
|
||||
if strValue, ok := value.(string); ok {
|
||||
secrets[field] = strValue
|
||||
delete(publicConfig, field)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Extract nested sensitive fields
|
||||
if proxy, exists := publicConfig["proxy"].(map[string]interface{}); exists {
|
||||
if username, exists := proxy["username"].(string); exists && username != "" {
|
||||
secrets["proxy_username"] = username
|
||||
delete(proxy, "username")
|
||||
}
|
||||
if password, exists := proxy["password"].(string); exists && password != "" {
|
||||
secrets["proxy_password"] = password
|
||||
delete(proxy, "password")
|
||||
}
|
||||
}
|
||||
|
||||
if tls, exists := publicConfig["tls"].(map[string]interface{}); exists {
|
||||
if certFile, exists := tls["cert_file"].(string); exists && certFile != "" {
|
||||
secrets["tls_cert"] = certFile
|
||||
delete(tls, "cert_file")
|
||||
}
|
||||
if keyFile, exists := tls["key_file"].(string); exists && keyFile != "" {
|
||||
secrets["tls_key"] = keyFile
|
||||
delete(tls, "key_file")
|
||||
}
|
||||
if caFile, exists := tls["ca_file"].(string); exists && caFile != "" {
|
||||
secrets["tls_ca"] = caFile
|
||||
delete(tls, "ca_file")
|
||||
}
|
||||
}
|
||||
|
||||
return publicConfig, secrets
|
||||
}
|
||||
|
||||
// Helper functions
|
||||
|
||||
func (cb *ConfigBuilder) deepCopy(value interface{}) interface{} {
|
||||
if m, ok := value.(map[string]interface{}); ok {
|
||||
result := make(map[string]interface{})
|
||||
for k, v := range m {
|
||||
result[k] = cb.deepCopy(v)
|
||||
}
|
||||
return result
|
||||
}
|
||||
if s, ok := value.([]interface{}); ok {
|
||||
result := make([]interface{}, len(s))
|
||||
for i, v := range s {
|
||||
result[i] = cb.deepCopy(v)
|
||||
}
|
||||
return result
|
||||
}
|
||||
return value
|
||||
}
|
||||
|
||||
func (cb *ConfigBuilder) mergeSettings(target map[string]interface{}, source map[string]interface{}) {
|
||||
for key, value := range source {
|
||||
if existing, exists := target[key]; exists {
|
||||
if existingMap, ok := existing.(map[string]interface{}); ok {
|
||||
if sourceMap, ok := value.(map[string]interface{}); ok {
|
||||
cb.mergeSettings(existingMap, sourceMap)
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
target[key] = cb.deepCopy(value)
|
||||
}
|
||||
}
|
||||
|
||||
func (cb *ConfigBuilder) containsString(slice []string, item string) bool {
|
||||
for _, s := range slice {
|
||||
if s == item {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// GetTemplates returns the full set of built-in agent templates, keyed by
// agent type (e.g. "linux-server"). A fresh structure is built on each call,
// so callers may mutate the result without affecting later calls.
func (cb *ConfigBuilder) GetTemplates() map[string]AgentTemplate {
	return getAgentTemplates()
}
|
||||
|
||||
// GetTemplate returns a specific agent template
|
||||
func (cb *ConfigBuilder) GetTemplate(agentType string) (AgentTemplate, bool) {
|
||||
template, exists := getAgentTemplates()[agentType]
|
||||
return template, exists
|
||||
}
|
||||
|
||||
func (cb *ConfigBuilder) validateConstraint(field string, value interface{}, constraint interface{}) error {
|
||||
constraints, ok := constraint.(map[string]interface{})
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
|
||||
if numValue, ok := value.(float64); ok {
|
||||
if min, exists := constraints["min"].(float64); exists && numValue < min {
|
||||
return fmt.Errorf("value for %s is below minimum: %f < %f", field, numValue, min)
|
||||
}
|
||||
if max, exists := constraints["max"].(float64); exists && numValue > max {
|
||||
return fmt.Errorf("value for %s is above maximum: %f > %f", field, numValue, max)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// getAgentTemplates returns the available agent templates
|
||||
// overrideScannerTimeoutsFromDB overrides scanner timeouts with values from database
|
||||
// This allows users to configure scanner timeouts via the web UI
|
||||
func (cb *ConfigBuilder) overrideScannerTimeoutsFromDB(config map[string]interface{}) {
|
||||
if cb.scannerConfigQ == nil {
|
||||
// No database connection, use defaults
|
||||
return
|
||||
}
|
||||
|
||||
// Get subsystems section
|
||||
subsystems, exists := config["subsystems"].(map[string]interface{})
|
||||
if !exists {
|
||||
return
|
||||
}
|
||||
|
||||
// List of scanners that can have configurable timeouts
|
||||
scannerNames := []string{"apt", "dnf", "docker", "windows", "winget", "system", "storage", "updates"}
|
||||
|
||||
for _, scannerName := range scannerNames {
|
||||
scannerConfig, exists := subsystems[scannerName].(map[string]interface{})
|
||||
if !exists {
|
||||
continue
|
||||
}
|
||||
|
||||
// Get timeout from database
|
||||
timeout := cb.scannerConfigQ.GetScannerTimeoutWithDefault(scannerName, 30*time.Minute)
|
||||
scannerConfig["timeout"] = int(timeout.Nanoseconds())
|
||||
}
|
||||
}
|
||||
|
||||
func getAgentTemplates() map[string]AgentTemplate {
|
||||
return map[string]AgentTemplate{
|
||||
"linux-server": {
|
||||
Name: "Linux Server Agent",
|
||||
Description: "Optimized for Linux server deployments with package management",
|
||||
BaseConfig: map[string]interface{}{
|
||||
"check_in_interval": 300,
|
||||
"network": map[string]interface{}{
|
||||
"timeout": 30000000000,
|
||||
"retry_count": 3,
|
||||
"retry_delay": 5000000000,
|
||||
"max_idle_conn": 10,
|
||||
},
|
||||
"proxy": map[string]interface{}{
|
||||
"enabled": false,
|
||||
},
|
||||
"tls": map[string]interface{}{
|
||||
"insecure_skip_verify": false,
|
||||
},
|
||||
"logging": map[string]interface{}{
|
||||
"level": "info",
|
||||
"max_size": 100,
|
||||
"max_backups": 3,
|
||||
"max_age": 28,
|
||||
},
|
||||
"subsystems": map[string]interface{}{
|
||||
"apt": map[string]interface{}{
|
||||
"enabled": true,
|
||||
"timeout": 30000000000,
|
||||
"circuit_breaker": map[string]interface{}{
|
||||
"enabled": true,
|
||||
"failure_threshold": 3,
|
||||
"failure_window": 600000000000,
|
||||
"open_duration": 1800000000000,
|
||||
"half_open_attempts": 2,
|
||||
},
|
||||
},
|
||||
"dnf": map[string]interface{}{
|
||||
"enabled": true,
|
||||
"timeout": 1800000000000, // 30 minutes - configurable via server settings
|
||||
"circuit_breaker": map[string]interface{}{
|
||||
"enabled": true,
|
||||
"failure_threshold": 3,
|
||||
"failure_window": 600000000000,
|
||||
"open_duration": 1800000000000,
|
||||
"half_open_attempts": 2,
|
||||
},
|
||||
},
|
||||
"docker": map[string]interface{}{
|
||||
"enabled": true,
|
||||
"timeout": 60000000000,
|
||||
"circuit_breaker": map[string]interface{}{
|
||||
"enabled": true,
|
||||
"failure_threshold": 3,
|
||||
"failure_window": 600000000000,
|
||||
"open_duration": 1800000000000,
|
||||
"half_open_attempts": 2,
|
||||
},
|
||||
},
|
||||
"windows": map[string]interface{}{
|
||||
"enabled": false,
|
||||
},
|
||||
"winget": map[string]interface{}{
|
||||
"enabled": false,
|
||||
},
|
||||
"storage": map[string]interface{}{
|
||||
"enabled": true,
|
||||
"timeout": 10000000000,
|
||||
"circuit_breaker": map[string]interface{}{
|
||||
"enabled": true,
|
||||
"failure_threshold": 3,
|
||||
"failure_window": 600000000000,
|
||||
"open_duration": 1800000000000,
|
||||
"half_open_attempts": 2,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Secrets: []string{"registration_token", "server_public_key"},
|
||||
Validation: ValidationRules{
|
||||
RequiredFields: []string{"server_url", "organization"},
|
||||
AllowedValues: map[string][]string{
|
||||
"environment": {"development", "staging", "production", "testing"},
|
||||
},
|
||||
Patterns: map[string]string{
|
||||
"server_url": "^https?://.+",
|
||||
},
|
||||
Constraints: map[string]interface{}{
|
||||
"check_in_interval": map[string]interface{}{"min": 30, "max": 3600},
|
||||
},
|
||||
},
|
||||
},
|
||||
"windows-workstation": {
|
||||
Name: "Windows Workstation Agent",
|
||||
Description: "Optimized for Windows workstation deployments",
|
||||
BaseConfig: map[string]interface{}{
|
||||
"check_in_interval": 300,
|
||||
"network": map[string]interface{}{
|
||||
"timeout": 30000000000,
|
||||
"retry_count": 3,
|
||||
"retry_delay": 5000000000,
|
||||
"max_idle_conn": 10,
|
||||
},
|
||||
"proxy": map[string]interface{}{
|
||||
"enabled": false,
|
||||
},
|
||||
"tls": map[string]interface{}{
|
||||
"insecure_skip_verify": false,
|
||||
},
|
||||
"logging": map[string]interface{}{
|
||||
"level": "info",
|
||||
"max_size": 100,
|
||||
"max_backups": 3,
|
||||
"max_age": 28,
|
||||
},
|
||||
"subsystems": map[string]interface{}{
|
||||
"apt": map[string]interface{}{
|
||||
"enabled": false,
|
||||
},
|
||||
"dnf": map[string]interface{}{
|
||||
"enabled": false,
|
||||
},
|
||||
"docker": map[string]interface{}{
|
||||
"enabled": false,
|
||||
},
|
||||
"windows": map[string]interface{}{
|
||||
"enabled": true,
|
||||
"timeout": 600000000000,
|
||||
"circuit_breaker": map[string]interface{}{
|
||||
"enabled": true,
|
||||
"failure_threshold": 2,
|
||||
"failure_window": 900000000000,
|
||||
"open_duration": 3600000000000,
|
||||
"half_open_attempts": 3,
|
||||
},
|
||||
},
|
||||
"winget": map[string]interface{}{
|
||||
"enabled": true,
|
||||
"timeout": 120000000000,
|
||||
"circuit_breaker": map[string]interface{}{
|
||||
"enabled": true,
|
||||
"failure_threshold": 3,
|
||||
"failure_window": 600000000000,
|
||||
"open_duration": 1800000000000,
|
||||
"half_open_attempts": 2,
|
||||
},
|
||||
},
|
||||
"storage": map[string]interface{}{
|
||||
"enabled": false,
|
||||
},
|
||||
},
|
||||
},
|
||||
Secrets: []string{"registration_token", "server_public_key"},
|
||||
Validation: ValidationRules{
|
||||
RequiredFields: []string{"server_url", "organization"},
|
||||
AllowedValues: map[string][]string{
|
||||
"environment": {"development", "staging", "production", "testing"},
|
||||
},
|
||||
Patterns: map[string]string{
|
||||
"server_url": "^https?://.+",
|
||||
},
|
||||
Constraints: map[string]interface{}{
|
||||
"check_in_interval": map[string]interface{}{"min": 30, "max": 3600},
|
||||
},
|
||||
},
|
||||
},
|
||||
"docker-host": {
|
||||
Name: "Docker Host Agent",
|
||||
Description: "Optimized for Docker host deployments",
|
||||
BaseConfig: map[string]interface{}{
|
||||
"check_in_interval": 300,
|
||||
"network": map[string]interface{}{
|
||||
"timeout": 30000000000,
|
||||
"retry_count": 3,
|
||||
"retry_delay": 5000000000,
|
||||
"max_idle_conn": 10,
|
||||
},
|
||||
"proxy": map[string]interface{}{
|
||||
"enabled": false,
|
||||
},
|
||||
"tls": map[string]interface{}{
|
||||
"insecure_skip_verify": false,
|
||||
},
|
||||
"logging": map[string]interface{}{
|
||||
"level": "info",
|
||||
"max_size": 100,
|
||||
"max_backups": 3,
|
||||
"max_age": 28,
|
||||
},
|
||||
"subsystems": map[string]interface{}{
|
||||
"apt": map[string]interface{}{
|
||||
"enabled": false,
|
||||
},
|
||||
"dnf": map[string]interface{}{
|
||||
"enabled": false,
|
||||
},
|
||||
"docker": map[string]interface{}{
|
||||
"enabled": true,
|
||||
"timeout": 60000000000,
|
||||
"circuit_breaker": map[string]interface{}{
|
||||
"enabled": true,
|
||||
"failure_threshold": 3,
|
||||
"failure_window": 600000000000,
|
||||
"open_duration": 1800000000000,
|
||||
"half_open_attempts": 2,
|
||||
},
|
||||
},
|
||||
"windows": map[string]interface{}{
|
||||
"enabled": false,
|
||||
},
|
||||
"winget": map[string]interface{}{
|
||||
"enabled": false,
|
||||
},
|
||||
"storage": map[string]interface{}{
|
||||
"enabled": false,
|
||||
},
|
||||
},
|
||||
},
|
||||
Secrets: []string{"registration_token", "server_public_key"},
|
||||
Validation: ValidationRules{
|
||||
RequiredFields: []string{"server_url", "organization"},
|
||||
AllowedValues: map[string][]string{
|
||||
"environment": {"development", "staging", "production", "testing"},
|
||||
},
|
||||
Patterns: map[string]string{
|
||||
"server_url": "^https?://.+",
|
||||
},
|
||||
Constraints: map[string]interface{}{
|
||||
"check_in_interval": map[string]interface{}{"min": 30, "max": 3600},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
@@ -232,3 +232,23 @@ This document records deviations from the implementation spec.
|
||||
**TODO:** Remove issuer-absent grace period after 30 days from deployment. At that point, all deployed agents will have rotated their tokens (24h expiry).
|
||||
|
||||
**Impact:** Cross-type token confusion is blocked for new tokens. Old tokens degrade gracefully.
|
||||
|
||||
---
|
||||
|
||||
## DEV-023: Pre-existing emoji violations not fixed in refactor pass
|
||||
|
||||
**Issue found during A-series refactor:** 30+ emoji characters found in pre-existing log statements across `agents.go`, `machine_binding.go`, `setup.go`, `db.go`, `updates.go`, etc. These predate all A-series audit work.
|
||||
|
||||
**Action:** Not fixed in this pass. Fixing pre-existing emojis in established log output could break log parsing pipelines or monitoring. Flagged as future D-2 cleanup item for a dedicated ETHOS compliance pass.
|
||||
|
||||
**Impact:** No behavioral change. Pre-existing code remains as-is.
|
||||
|
||||
---
|
||||
|
||||
## DEV-024: Machine ID implementation divergence flagged for D-1
|
||||
|
||||
**Issue found during A-series refactor:** Machine ID is generated in 3 locations with 2 divergences:
|
||||
1. `main.go` error fallback uses unhashed `"unknown-" + hostname` instead of SHA256
|
||||
2. `example_integration.go` calls `machineid.ID()` directly instead of `GetMachineID()`
|
||||
|
||||
**Action:** Not fixed in this pass (requires careful analysis of downstream effects). Flagged as D-1 fix prompt input. See `docs/Refactor_A_Series.md` Task 6 for full analysis.
|
||||
|
||||
146
docs/Refactor_A_Series.md
Normal file
146
docs/Refactor_A_Series.md
Normal file
@@ -0,0 +1,146 @@
|
||||
# A-Series Refactor and Cleanup Report
|
||||
|
||||
**Date:** 2026-03-29
|
||||
**Branch:** culurien
|
||||
**Scope:** Dead code removal and ETHOS compliance from A-1, A-2, A-3 fix rounds
|
||||
|
||||
---
|
||||
|
||||
## Cleanup Tasks
|
||||
|
||||
### Task 1: Remove Dead queries.RetryCommand (DEV-019) — DONE
|
||||
|
||||
- **File:** `aggregator-server/internal/database/queries/commands.go:199-229`
|
||||
- **Verification:** `grep -r "\.RetryCommand\|RetryCommand(" --include="*.go"` confirmed zero production callers. Only references are in test comments and handler methods (which are different functions).
|
||||
- **Action:** Function removed (31 lines).
|
||||
|
||||
### Task 2: Remove security_settings.go.broken — DONE
|
||||
|
||||
- **File:** `aggregator-server/internal/api/handlers/security_settings.go.broken`
|
||||
- **Verification:** File still existed after A-3 rename. The active `security_settings.go` was created as a rewrite, not a move.
|
||||
- **Action:** `.broken` file deleted.
|
||||
|
||||
### Task 3: Remove Compiled Test Binaries — DONE
|
||||
|
||||
5 ELF binaries deleted from `aggregator-agent/`:
|
||||
| File | Size |
|
||||
|------|------|
|
||||
| `agent` | 12.5 MB |
|
||||
| `agent-test` | 11.9 MB |
|
||||
| `test-agent-final` | 12.4 MB |
|
||||
| `test-agent-fixed` | 12.4 MB |
|
||||
| `test-redflag-agent` | 12.1 MB |
|
||||
|
||||
Total: ~61 MB of dead binaries removed.
|
||||
|
||||
Also deleted: `aggregator-agent/test_disk.go` (throwaway test file, `package main` with old import path).
|
||||
|
||||
**.gitignore updated** with rules:
|
||||
```
|
||||
aggregator-agent/agent
|
||||
aggregator-agent/agent-test
|
||||
aggregator-agent/test-agent-*
|
||||
aggregator-agent/test-redflag-agent
|
||||
```
|
||||
|
||||
### Task 4: Remove config_builder.go.restored — DONE
|
||||
|
||||
- **File:** `config_builder.go.restored` (repo root)
|
||||
- **Verification:** Active `config_builder.go` exists at `aggregator-server/internal/services/config_builder.go`. The `.restored` file at repo root used `package services` — it was a recovery backup from the original author's dev machine failure.
|
||||
- **Action:** Deleted.
|
||||
|
||||
### Task 5: Remove test_disk_detection.go — DONE
|
||||
|
||||
- **File:** `test_disk_detection.go` (repo root)
|
||||
- **Verification:** Used old import path `github.com/redflag-aggregator/aggregator-agent/internal/system`. Won't compile with current module name. Not part of any test suite.
|
||||
- **Action:** Deleted.
|
||||
|
||||
### Task 6: Machine ID Duplication Audit — DOCUMENTED (read-only)
|
||||
|
||||
**Implementations found:**
|
||||
|
||||
1. **Canonical:** `aggregator-agent/internal/system/machine_id.go`
|
||||
- `GetMachineID()` → multi-tier fallback → SHA256 hash
|
||||
- Uses `github.com/denisbrodbeck/machineid` as primary source
|
||||
- Linux fallbacks: `/etc/machine-id`, `/var/lib/dbus/machine-id`, `/sys/class/dmi/id/product_uuid`
|
||||
- Generic fallback: `hostname-goos-goarch`
|
||||
- All values SHA256 hashed before return
|
||||
|
||||
2. **Client usage:** `aggregator-agent/internal/client/client.go`
|
||||
- Calls `system.GetMachineID()` during initialization — consistent
|
||||
- Caches in struct, adds as `X-Machine-ID` header
|
||||
|
||||
3. **Main.go usage:** `aggregator-agent/cmd/agent/main.go`
|
||||
- Calls `system.GetMachineID()` during registration — consistent
|
||||
- **Divergence:** Error fallback uses `"unknown-" + sysInfo.Hostname` (NOT hashed)
|
||||
|
||||
4. **Example code:** `aggregator-agent/internal/logging/example_integration.go`
|
||||
- Calls `machineid.ID()` directly (NOT hashed, NOT using GetMachineID)
|
||||
- **Divergence:** Returns raw library output, not SHA256 hash
|
||||
|
||||
**Consistency issues for D-1 fix prompt:**
|
||||
- Main.go error fallback produces unhashed ID vs. SHA256 in normal path
|
||||
- Example integration uses raw `machineid.ID()` instead of `GetMachineID()`
|
||||
- Recommend: single `GetMachineID()` call site in main.go, remove direct library calls
|
||||
|
||||
### Task 7: ETHOS Compliance Sweep — DONE
|
||||
|
||||
**Banned words fixed (6 occurrences):**
|
||||
|
||||
| File | Old | New |
|------|-----|-----|
|
||||
| `scheduler/scheduler.go:38` | "production-ready default" | "default configuration values" |
|
||||
| `system/info.go:260` | "enhanced detection" | (removed "enhanced") |
|
||||
| `system/info.go:386` | "More robust partition" | "Strip partition number to get base device" |
|
||||
| `system/windows.go:390` | "more robust parsing" | "Simplified parsing" |
|
||||
| `service/windows.go:36` | "Enhanced configuration" | "Configuration system" |
|
||||
| `scanner/windows_override.go:7` | "used seamlessly" | "aliases to the WUA implementation" |
|
||||
|
||||
**Emoji scan:** 30+ pre-existing emoji uses found in agents.go, machine_binding.go, setup.go, db.go, updates.go, etc. These are NOT from A-series code — they predate the audit work. Documented as future cleanup item for the D-series.
|
||||
|
||||
---
|
||||
|
||||
## Files Deleted
|
||||
|
||||
| File | Reason |
|
||||
|------|--------|
|
||||
| `aggregator-server/internal/database/queries/commands.go` (RetryCommand function only) | Dead code (DEV-019) |
|
||||
| `aggregator-server/internal/api/handlers/security_settings.go.broken` | Replaced by security_settings.go in A-3 |
|
||||
| `aggregator-agent/agent` | Compiled ELF binary |
|
||||
| `aggregator-agent/agent-test` | Compiled ELF binary |
|
||||
| `aggregator-agent/test-agent-final` | Compiled ELF binary |
|
||||
| `aggregator-agent/test-agent-fixed` | Compiled ELF binary |
|
||||
| `aggregator-agent/test-redflag-agent` | Compiled ELF binary |
|
||||
| `aggregator-agent/test_disk.go` | Throwaway test file |
|
||||
| `config_builder.go.restored` | Recovery backup, duplicated |
|
||||
| `test_disk_detection.go` | Throwaway test, old import path |
|
||||
|
||||
## Files Modified
|
||||
|
||||
| File | Change |
|
||||
|------|--------|
|
||||
| `.gitignore` | Added rules for compiled agent binaries |
|
||||
| `aggregator-server/internal/database/queries/commands.go` | Removed dead RetryCommand (31 lines) |
|
||||
| `aggregator-server/internal/scheduler/scheduler.go` | Banned word: "production-ready" |
|
||||
| `aggregator-agent/internal/system/info.go` | Banned words: "enhanced", "robust" |
|
||||
| `aggregator-agent/internal/system/windows.go` | Banned word: "robust" |
|
||||
| `aggregator-agent/internal/service/windows.go` | Banned word: "Enhanced" |
|
||||
| `aggregator-agent/internal/scanner/windows_override.go` | Banned word: "seamlessly" |
|
||||
|
||||
---
|
||||
|
||||
## Test Results
|
||||
|
||||
**Server: 27 tests — 26 PASS, 1 SKIP, 0 FAIL**
|
||||
**Agent: 14 tests — 14 PASS, 0 FAIL**
|
||||
**Total: 41 tests pass. Zero regressions.**
|
||||
|
||||
---
|
||||
|
||||
## Items Flagged for Future Fix Prompts
|
||||
|
||||
1. **D-1: Machine ID duplication** — 3 implementations with 2 divergences (unhashed fallback in main.go, raw library call in example_integration.go). Needs consolidation to single `GetMachineID()` call site.
|
||||
|
||||
2. **D-2: Pre-existing emoji in logs** — 30+ emoji characters in log statements across agents.go, machine_binding.go, setup.go, db.go, updates.go. Not from A-series code. Should be addressed in a dedicated ETHOS compliance pass.
|
||||
|
||||
3. **D-3: test-config directory** — `aggregator-agent/test-config/config.yaml` exists as a test fixture. May be needed for local dev — left in place.
|
||||
@@ -1,62 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"log"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
|
||||
"github.com/redflag-aggregator/aggregator-agent/internal/system"
|
||||
)
|
||||
|
||||
func main() {
|
||||
// Get the absolute path to this file's directory
|
||||
_, filename, _, _ := runtime.Caller(0)
|
||||
dir := filepath.Dir(filename)
|
||||
|
||||
// Change to the project root to find the go.mod file
|
||||
projectRoot := filepath.Dir(dir)
|
||||
|
||||
// Test lightweight metrics (most common use case)
|
||||
fmt.Println("=== Enhanced Lightweight Metrics Test ===")
|
||||
metrics, err := system.GetLightweightMetrics()
|
||||
if err != nil {
|
||||
log.Printf("Error getting lightweight metrics: %v", err)
|
||||
} else {
|
||||
// Pretty print the JSON
|
||||
jsonData, _ := json.MarshalIndent(metrics, "", " ")
|
||||
fmt.Printf("LightweightMetrics:\n%s\n\n", jsonData)
|
||||
|
||||
// Show key findings
|
||||
fmt.Printf("Root Disk: %.1fGB used / %.1fGB total (%.1f%%)\n",
|
||||
metrics.DiskUsedGB, metrics.DiskTotalGB, metrics.DiskPercent)
|
||||
|
||||
if metrics.LargestDiskTotalGB > 0 {
|
||||
fmt.Printf("Largest Disk (%s): %.1fGB used / %.1fGB total (%.1f%%)\n",
|
||||
metrics.LargestDiskMount, metrics.LargestDiskUsedGB, metrics.LargestDiskTotalGB, metrics.LargestDiskPercent)
|
||||
}
|
||||
}
|
||||
|
||||
// Test full system info (detailed disk inventory)
|
||||
fmt.Println("\n=== Enhanced System Info Test ===")
|
||||
sysInfo, err := system.GetSystemInfo("test-v0.1.5")
|
||||
if err != nil {
|
||||
log.Printf("Error getting system info: %v", err)
|
||||
} else {
|
||||
fmt.Printf("Found %d disks:\n", len(sysInfo.DiskInfo))
|
||||
for i, disk := range sysInfo.DiskInfo {
|
||||
fmt.Printf(" Disk %d: %s (%s) - %s, %.1fGB used / %.1fGB total (%.1f%%)",
|
||||
i+1, disk.Mountpoint, disk.Filesystem, disk.DiskType,
|
||||
float64(disk.Used)/(1024*1024*1024), float64(disk.Total)/(1024*1024*1024), disk.UsedPercent)
|
||||
|
||||
if disk.IsRoot {
|
||||
fmt.Printf(" [ROOT]")
|
||||
}
|
||||
if disk.IsLargest {
|
||||
fmt.Printf(" [LARGEST]")
|
||||
}
|
||||
fmt.Printf("\n")
|
||||
}
|
||||
}
|
||||
}
|
||||
Reference in New Issue
Block a user