Complete RedFlag codebase with two major security audit implementations.
== A-1: Ed25519 Key Rotation Support ==
Server:
- SignCommand sets SignedAt timestamp and KeyID on every signature
- signing_keys database table (migration 020) for multi-key rotation
- InitializePrimaryKey registers active key at startup
- /api/v1/public-keys endpoint for rotation-aware agents
- SigningKeyQueries for key lifecycle management
Agent:
- Key-ID-aware verification via CheckKeyRotation
- FetchAndCacheAllActiveKeys for rotation pre-caching
- Cache metadata with TTL and staleness fallback
- SecurityLogger events for key rotation and command signing
== A-2: Replay Attack Fixes (F-1 through F-7) ==
F-5 CRITICAL - RetryCommand now signs via signAndCreateCommand
F-1 HIGH - v3 format: "{agent_id}:{cmd_id}:{type}:{hash}:{ts}"
F-7 HIGH - Migration 026: expires_at column with partial index
F-6 HIGH - GetPendingCommands/GetStuckCommands filter by expires_at
F-2 HIGH - Agent-side executedIDs dedup map with cleanup
F-4 HIGH - commandMaxAge reduced from 24h to 4h
F-3 CRITICAL - Old-format commands rejected after 48h via CreatedAt
Verification fixes: migration idempotency (ETHOS #4), log format
compliance (ETHOS #1), stale comments updated.
All 24 tests passing. Docker --no-cache build verified.
See docs/ for full audit reports and deviation log (DEV-001 to DEV-019).
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
159 lines
5.4 KiB
Go
159 lines
5.4 KiB
Go
package handlers
|
|
|
|
import (
|
|
"log"
|
|
"net/http"
|
|
"time"
|
|
|
|
"github.com/Fimeg/RedFlag/aggregator-server/internal/database/queries"
|
|
"github.com/Fimeg/RedFlag/aggregator-server/internal/models"
|
|
"github.com/gin-gonic/gin"
|
|
"github.com/google/uuid"
|
|
)
|
|
|
|
// StorageMetricsHandler handles storage metrics endpoints: agents report
// metrics via POST and the dashboard reads them via GET.
type StorageMetricsHandler struct {
	// queries is the data-access layer for storage-metric rows.
	queries *queries.StorageMetricsQueries
}
|
|
|
|
// NewStorageMetricsHandler creates a new storage metrics handler
|
|
func NewStorageMetricsHandler(queries *queries.StorageMetricsQueries) *StorageMetricsHandler {
|
|
return &StorageMetricsHandler{
|
|
queries: queries,
|
|
}
|
|
}
|
|
|
|
// ReportStorageMetrics handles POST /api/v1/agents/:id/storage-metrics
|
|
func (h *StorageMetricsHandler) ReportStorageMetrics(c *gin.Context) {
|
|
// Get agent ID from context (set by middleware)
|
|
agentID := c.MustGet("agent_id").(uuid.UUID)
|
|
|
|
// Parse request body
|
|
var req models.StorageMetricRequest
|
|
if err := c.ShouldBindJSON(&req); err != nil {
|
|
c.JSON(http.StatusBadRequest, gin.H{"error": "Invalid request body"})
|
|
return
|
|
}
|
|
|
|
// Validate agent ID matches
|
|
if req.AgentID != agentID {
|
|
c.JSON(http.StatusBadRequest, gin.H{"error": "Agent ID mismatch"})
|
|
return
|
|
}
|
|
|
|
// Insert storage metrics with error isolation
|
|
for _, metric := range req.Metrics {
|
|
dbMetric := models.StorageMetric{
|
|
ID: uuid.New(),
|
|
AgentID: req.AgentID,
|
|
Mountpoint: metric.Mountpoint,
|
|
Device: metric.Device,
|
|
DiskType: metric.DiskType,
|
|
Filesystem: metric.Filesystem,
|
|
TotalBytes: metric.TotalBytes,
|
|
UsedBytes: metric.UsedBytes,
|
|
AvailableBytes: metric.AvailableBytes,
|
|
UsedPercent: metric.UsedPercent,
|
|
Severity: metric.Severity,
|
|
Metadata: metric.Metadata,
|
|
CreatedAt: time.Now(),
|
|
}
|
|
|
|
if err := h.queries.InsertStorageMetric(c.Request.Context(), dbMetric); err != nil {
|
|
log.Printf("[ERROR] Failed to insert storage metric for agent %s: %v\n", agentID, err)
|
|
c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to insert storage metric"})
|
|
return
|
|
}
|
|
}
|
|
|
|
c.JSON(http.StatusOK, gin.H{
|
|
"status": "success",
|
|
"message": "Storage metrics reported successfully",
|
|
})
|
|
}
|
|
|
|
// StorageMetricResponse represents the response format for storage metrics
// returned to the dashboard. Field names match frontend expectations rather
// than the database column names (e.g. "total" instead of "total_bytes").
type StorageMetricResponse struct {
	ID          uuid.UUID              `json:"id"`
	AgentID     uuid.UUID              `json:"agent_id"`
	Mountpoint  string                 `json:"mountpoint"`
	Device      string                 `json:"device"`
	DiskType    string                 `json:"disk_type"`
	Filesystem  string                 `json:"filesystem"`
	Total       int64                  `json:"total"`     // Changed from total_bytes
	Used        int64                  `json:"used"`      // Changed from used_bytes
	Available   int64                  `json:"available"` // Changed from available_bytes
	UsedPercent float64                `json:"used_percent"`
	Severity    string                 `json:"severity"`
	IsRoot      bool                   `json:"is_root"`    // true when Mountpoint == "/"
	IsLargest   bool                   `json:"is_largest"` // set on the metric with the greatest Total
	Metadata    map[string]interface{} `json:"metadata,omitempty"`
	CreatedAt   time.Time              `json:"created_at"`
}
|
|
|
|
// GetStorageMetrics handles GET /api/v1/agents/:id/storage-metrics
|
|
func (h *StorageMetricsHandler) GetStorageMetrics(c *gin.Context) {
|
|
// Get agent ID from URL parameter (this is a dashboard endpoint, not agent endpoint)
|
|
agentIDStr := c.Param("id")
|
|
agentID, err := uuid.Parse(agentIDStr)
|
|
if err != nil {
|
|
log.Printf("[ERROR] Invalid agent ID %s: %v\n", agentIDStr, err)
|
|
c.JSON(http.StatusBadRequest, gin.H{"error": "Invalid agent ID"})
|
|
return
|
|
}
|
|
|
|
// Get the latest storage metrics (one per mountpoint)
|
|
latestMetrics, err := h.queries.GetLatestStorageMetrics(c.Request.Context(), agentID)
|
|
if err != nil {
|
|
log.Printf("[ERROR] Failed to retrieve storage metrics for agent %s: %v\n", agentID, err)
|
|
c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to retrieve storage metrics"})
|
|
return
|
|
}
|
|
|
|
// Transform to response format
|
|
var responseMetrics []StorageMetricResponse
|
|
for _, metric := range latestMetrics {
|
|
// Check if this is the root mountpoint
|
|
isRoot := metric.Mountpoint == "/"
|
|
|
|
// Create response with fields matching frontend expectations
|
|
responseMetric := StorageMetricResponse{
|
|
ID: metric.ID,
|
|
AgentID: metric.AgentID,
|
|
Mountpoint: metric.Mountpoint,
|
|
Device: metric.Device,
|
|
DiskType: metric.DiskType,
|
|
Filesystem: metric.Filesystem,
|
|
Total: metric.TotalBytes, // Map total_bytes -> total
|
|
Used: metric.UsedBytes, // Map used_bytes -> used
|
|
Available: metric.AvailableBytes, // Map available_bytes -> available
|
|
UsedPercent: metric.UsedPercent,
|
|
Severity: metric.Severity,
|
|
IsRoot: isRoot,
|
|
IsLargest: false, // Will be determined below
|
|
Metadata: metric.Metadata,
|
|
CreatedAt: metric.CreatedAt,
|
|
}
|
|
responseMetrics = append(responseMetrics, responseMetric)
|
|
}
|
|
|
|
// Determine which disk is the largest
|
|
if len(responseMetrics) > 0 {
|
|
var maxSize int64
|
|
var maxIndex int
|
|
for i, metric := range responseMetrics {
|
|
if metric.Total > maxSize {
|
|
maxSize = metric.Total
|
|
maxIndex = i
|
|
}
|
|
}
|
|
// Mark the largest disk
|
|
responseMetrics[maxIndex].IsLargest = true
|
|
}
|
|
|
|
c.JSON(http.StatusOK, gin.H{
|
|
"metrics": responseMetrics,
|
|
"total": len(responseMetrics),
|
|
})
|
|
}
|