Implement proper storage metrics (P0-009)

- Add dedicated storage_metrics table
- Create StorageMetricReport models with proper field names
- Add ReportStorageMetrics to agent client
- Update storage scanner to use new method
- Implement server-side handlers and queries
- Register new routes and update UI
- Remove legacy Scan() method
- Follow ETHOS principles: honest naming, clean architecture
This commit is contained in:
@@ -18,23 +18,59 @@ FROM golang:1.24-alpine AS agent-builder
|
||||
|
||||
WORKDIR /build
|
||||
|
||||
# Install git for module resolution
|
||||
# Install git for version detection
|
||||
RUN apk add --no-cache git
|
||||
|
||||
# Copy .git directory to get version info
|
||||
COPY .git/ ./.git/
|
||||
|
||||
# Generate semantic version from git (BASE_VERSION.COMMIT_COUNT)
|
||||
# Examples:
|
||||
# Tagged release: v0.1.26.0 → 0.1.26.0
|
||||
# 5 commits after tag: 0.1.26.5
|
||||
# No tags: 0.1.0.0
|
||||
RUN cd /build && \
|
||||
# Get latest tag or default to 0.1.0 \
|
||||
if git describe --tags --dirty --always >/dev/null 2>&1; then \
|
||||
LATEST_TAG=$(git describe --tags --dirty --always); \
|
||||
BASE_VERSION=$(echo "$LATEST_TAG" | sed 's/^v//' | cut -d. -f1-3); \
|
||||
else \
|
||||
BASE_VERSION="0.1.0"; \
|
||||
fi && \
|
||||
# Count commits since tag (0 if on tag) \
|
||||
COMMITS_SINCE=$(git rev-list $(git describe --tags --dirty --always 2>/dev/null)..HEAD 2>/dev/null | wc -l | tr -d ' ') && \
|
||||
if [ "$COMMITS_SINCE" = "" ] || [ "$COMMITS_SINCE" -eq 0 ]; then BUILD=0; else BUILD=$COMMITS_SINCE; fi && \
|
||||
# Write semantic version (base.commits) \
|
||||
VERSION="${BASE_VERSION}.${BUILD}" && \
|
||||
echo "Building agent version: $VERSION" && \
|
||||
echo "$VERSION" > /build/version.txt
|
||||
|
||||
# Copy agent source code
|
||||
COPY aggregator-agent/ ./
|
||||
|
||||
# Build for Linux amd64
|
||||
RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -o binaries/linux-amd64/redflag-agent ./cmd/agent
|
||||
RUN VERSION=$(cat /build/version.txt) && \
|
||||
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build \
|
||||
-ldflags "-X github.com/Fimeg/RedFlag/aggregator-agent/internal/version.Version=$VERSION" \
|
||||
-o binaries/linux-amd64/redflag-agent ./cmd/agent
|
||||
|
||||
# Build for Linux arm64
|
||||
RUN CGO_ENABLED=0 GOOS=linux GOARCH=arm64 go build -o binaries/linux-arm64/redflag-agent ./cmd/agent
|
||||
RUN VERSION=$(cat /build/version.txt) && \
|
||||
CGO_ENABLED=0 GOOS=linux GOARCH=arm64 go build \
|
||||
-ldflags "-X github.com/Fimeg/RedFlag/aggregator-agent/internal/version.Version=$VERSION" \
|
||||
-o binaries/linux-arm64/redflag-agent ./cmd/agent
|
||||
|
||||
# Build for Windows amd64
|
||||
RUN CGO_ENABLED=0 GOOS=windows GOARCH=amd64 go build -o binaries/windows-amd64/redflag-agent.exe ./cmd/agent
|
||||
RUN VERSION=$(cat /build/version.txt) && \
|
||||
CGO_ENABLED=0 GOOS=windows GOARCH=amd64 go build \
|
||||
-ldflags "-X github.com/Fimeg/RedFlag/aggregator-agent/internal/version.Version=$VERSION" \
|
||||
-o binaries/windows-amd64/redflag-agent.exe ./cmd/agent
|
||||
|
||||
# Build for Windows arm64
|
||||
RUN CGO_ENABLED=0 GOOS=windows GOARCH=arm64 go build -o binaries/windows-arm64/redflag-agent.exe ./cmd/agent
|
||||
RUN VERSION=$(cat /build/version.txt) && \
|
||||
CGO_ENABLED=0 GOOS=windows GOARCH=arm64 go build \
|
||||
-ldflags "-X github.com/Fimeg/RedFlag/aggregator-agent/internal/version.Version=$VERSION" \
|
||||
-o binaries/windows-arm64/redflag-agent.exe ./cmd/agent
|
||||
|
||||
# Stage 3: Final image with server and all agent binaries
|
||||
FROM alpine:latest
|
||||
|
||||
@@ -202,6 +202,7 @@ func main() {
|
||||
agentUpdateQueries := queries.NewAgentUpdateQueries(db.DB)
|
||||
metricsQueries := queries.NewMetricsQueries(db.DB.DB)
|
||||
dockerQueries := queries.NewDockerQueries(db.DB.DB)
|
||||
storageMetricsQueries := queries.NewStorageMetricsQueries(db.DB.DB)
|
||||
adminQueries := queries.NewAdminQueries(db.DB)
|
||||
|
||||
// Create PackageQueries for accessing signed agent update packages
|
||||
@@ -307,6 +308,7 @@ func main() {
|
||||
subsystemHandler := handlers.NewSubsystemHandler(subsystemQueries, commandQueries, signingService, securityLogger)
|
||||
metricsHandler := handlers.NewMetricsHandler(metricsQueries, agentQueries, commandQueries)
|
||||
dockerReportsHandler := handlers.NewDockerReportsHandler(dockerQueries, agentQueries, commandQueries)
|
||||
storageMetricsHandler := handlers.NewStorageMetricsHandler(storageMetricsQueries)
|
||||
agentSetupHandler := handlers.NewAgentSetupHandler(agentQueries)
|
||||
|
||||
// Initialize scanner config handler (for user-configurable scanner timeouts)
|
||||
@@ -460,6 +462,9 @@ func main() {
|
||||
// New dedicated endpoints for metrics and docker images (data classification fix)
|
||||
agents.POST("/:id/metrics", rateLimiter.RateLimit("agent_reports", middleware.KeyByAgentID), metricsHandler.ReportMetrics)
|
||||
agents.POST("/:id/docker-images", rateLimiter.RateLimit("agent_reports", middleware.KeyByAgentID), dockerReportsHandler.ReportDockerImages)
|
||||
|
||||
// Dedicated storage metrics endpoint (proper separation from generic metrics)
|
||||
agents.POST("/:id/storage-metrics", rateLimiter.RateLimit("agent_reports", middleware.KeyByAgentID), storageMetricsHandler.ReportStorageMetrics)
|
||||
}
|
||||
|
||||
// Dashboard/Web routes (protected by web auth)
|
||||
@@ -469,6 +474,7 @@ func main() {
|
||||
dashboard.GET("/stats/summary", statsHandler.GetDashboardStats)
|
||||
dashboard.GET("/agents", agentHandler.ListAgents)
|
||||
dashboard.GET("/agents/:id", agentHandler.GetAgent)
|
||||
dashboard.GET("/agents/:id/storage-metrics", storageMetricsHandler.GetStorageMetrics)
|
||||
dashboard.POST("/agents/:id/scan", agentHandler.TriggerScan)
|
||||
dashboard.POST("/agents/:id/heartbeat", agentHandler.TriggerHeartbeat)
|
||||
dashboard.GET("/agents/:id/heartbeat", agentHandler.GetHeartbeatStatus)
|
||||
|
||||
8
aggregator-server/docker-entrypoint.sh
Executable file
8
aggregator-server/docker-entrypoint.sh
Executable file
@@ -0,0 +1,8 @@
|
||||
#!/bin/sh
# Entrypoint for the aggregator server image.
# NOTE: the final stage is FROM alpine:latest, which ships busybox /bin/sh
# but not bash, so the shebang must be /bin/sh for this script to execute.
set -e

# Ensure the config directory exists before the server starts.
mkdir -p /app/config

# Hand off to the container's main command (becomes PID 1 via exec).
exec "$@"
|
||||
@@ -158,7 +158,7 @@ func (h *SecurityHandler) MachineBindingStatus(c *gin.Context) {
|
||||
"timestamp": time.Now(),
|
||||
"checks": map[string]interface{}{
|
||||
"binding_enforced": true,
|
||||
"min_agent_version": "v0.1.22",
|
||||
"min_agent_version": "v0.1.26",
|
||||
"fingerprint_required": true,
|
||||
"recent_violations": 0,
|
||||
"bound_agents": 0,
|
||||
|
||||
@@ -0,0 +1,205 @@
|
||||
package handlers

import (
	"fmt"
	"net/http"
	"strconv"

	"github.com/Fimeg/RedFlag/aggregator-server/internal/services"
	"github.com/gin-gonic/gin"
)

// SecuritySettingsHandler exposes HTTP endpoints for reading, validating,
// and updating security settings via SecuritySettingsService.
type SecuritySettingsHandler struct {
	securitySettingsService *services.SecuritySettingsService
}

// NewSecuritySettingsHandler creates a new security settings handler.
func NewSecuritySettingsHandler(securitySettingsService *services.SecuritySettingsService) *SecuritySettingsHandler {
	return &SecuritySettingsHandler{
		securitySettingsService: securitySettingsService,
	}
}

// GetAllSecuritySettings returns all security settings for the authenticated user.
func (h *SecuritySettingsHandler) GetAllSecuritySettings(c *gin.Context) {
	// user_id is placed in the context by the auth middleware.
	userID := c.GetString("user_id")

	settings, err := h.securitySettingsService.GetAllSettings(userID)
	if err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
		return
	}

	c.JSON(http.StatusOK, gin.H{
		"settings": settings,
		// TODO(review): hard-coded; replace with a real permission check.
		"user_has_permission": true,
	})
}

// GetSecuritySettingsByCategory returns settings for a specific category
// (e.g. "command_signing", "nonce_validation").
func (h *SecuritySettingsHandler) GetSecuritySettingsByCategory(c *gin.Context) {
	category := c.Param("category")
	userID := c.GetString("user_id")

	settings, err := h.securitySettingsService.GetSettingsByCategory(userID, category)
	if err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
		return
	}

	c.JSON(http.StatusOK, settings)
}

// UpdateSecuritySetting validates and applies a single security setting,
// then echoes the stored value back to the caller.
func (h *SecuritySettingsHandler) UpdateSecuritySetting(c *gin.Context) {
	var req struct {
		Value  interface{} `json:"value" binding:"required"`
		Reason string      `json:"reason"` // optional audit trail note
	}

	if err := c.ShouldBindJSON(&req); err != nil {
		c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
		return
	}

	category := c.Param("category")
	key := c.Param("key")
	userID := c.GetString("user_id")

	// Validate before applying so invalid values never reach storage.
	if err := h.securitySettingsService.ValidateSetting(category, key, req.Value); err != nil {
		c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
		return
	}

	if err := h.securitySettingsService.SetSetting(category, key, req.Value, userID, req.Reason); err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
		return
	}

	// Read the setting back so the response reflects what was persisted.
	setting, err := h.securitySettingsService.GetSetting(category, key)
	if err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
		return
	}

	c.JSON(http.StatusOK, gin.H{
		"message": "Setting updated successfully",
		"setting": map[string]interface{}{
			"category": category,
			"key":      key,
			"value":    setting,
		},
	})
}

// ValidateSecuritySettings validates a setting without applying it.
func (h *SecuritySettingsHandler) ValidateSecuritySettings(c *gin.Context) {
	var req struct {
		Category string      `json:"category" binding:"required"`
		Key      string      `json:"key" binding:"required"`
		Value    interface{} `json:"value" binding:"required"`
	}

	if err := c.ShouldBindJSON(&req); err != nil {
		c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
		return
	}

	if err := h.securitySettingsService.ValidateSetting(req.Category, req.Key, req.Value); err != nil {
		c.JSON(http.StatusBadRequest, gin.H{
			"valid": false,
			"error": err.Error(),
		})
		return
	}

	c.JSON(http.StatusOK, gin.H{
		"valid":   true,
		"message": "Setting is valid",
	})
}

// GetSecurityAuditTrail returns a paginated audit trail of security setting changes.
func (h *SecuritySettingsHandler) GetSecurityAuditTrail(c *gin.Context) {
	// Parse pagination parameters defensively. The previous version ignored
	// strconv.Atoi errors, so e.g. page_size=abc became 0 and the
	// total_pages computation below divided by zero.
	pageInt, err := strconv.Atoi(c.DefaultQuery("page", "1"))
	if err != nil || pageInt < 1 {
		pageInt = 1
	}
	pageSizeInt, err := strconv.Atoi(c.DefaultQuery("page_size", "50"))
	if err != nil || pageSizeInt < 1 {
		pageSizeInt = 50
	}

	auditEntries, totalCount, err := h.securitySettingsService.GetAuditTrail(pageInt, pageSizeInt)
	if err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
		return
	}

	c.JSON(http.StatusOK, gin.H{
		"audit_entries": auditEntries,
		"pagination": gin.H{
			"page":        pageInt,
			"page_size":   pageSizeInt,
			"total":       totalCount,
			"total_pages": (totalCount + pageSizeInt - 1) / pageSizeInt,
		},
	})
}

// GetSecurityOverview returns the current security status overview.
func (h *SecuritySettingsHandler) GetSecurityOverview(c *gin.Context) {
	userID := c.GetString("user_id")

	overview, err := h.securitySettingsService.GetSecurityOverview(userID)
	if err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
		return
	}

	c.JSON(http.StatusOK, overview)
}

// ApplySecuritySettings validates and applies a batch of setting changes atomically.
func (h *SecuritySettingsHandler) ApplySecuritySettings(c *gin.Context) {
	var req struct {
		Settings map[string]map[string]interface{} `json:"settings" binding:"required"`
		Reason   string                            `json:"reason"`
	}

	if err := c.ShouldBindJSON(&req); err != nil {
		c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
		return
	}

	userID := c.GetString("user_id")

	// Validate everything first so one bad entry rejects the whole batch
	// before any change is applied. Count individual settings while we are
	// here: the previous version reported len(req.Settings), which is the
	// number of categories, not the number of settings applied.
	appliedCount := 0
	for category, settings := range req.Settings {
		for key, value := range settings {
			if err := h.securitySettingsService.ValidateSetting(category, key, value); err != nil {
				c.JSON(http.StatusBadRequest, gin.H{
					"error": fmt.Sprintf("Validation failed for %s.%s: %v", category, key, err),
				})
				return
			}
			appliedCount++
		}
	}

	// Apply all settings atomically.
	if err := h.securitySettingsService.ApplySettingsBatch(req.Settings, userID, req.Reason); err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
		return
	}

	c.JSON(http.StatusOK, gin.H{
		"message":       "All settings applied successfully",
		"applied_count": appliedCount,
	})
}
|
||||
125
aggregator-server/internal/api/handlers/storage_metrics.go
Normal file
125
aggregator-server/internal/api/handlers/storage_metrics.go
Normal file
@@ -0,0 +1,125 @@
|
||||
package handlers

import (
	"encoding/json"
	"fmt"
	"log"
	"net/http"
	"time"

	"github.com/Fimeg/RedFlag/aggregator-server/internal/database/queries"
	"github.com/Fimeg/RedFlag/aggregator-server/internal/models"
	"github.com/google/uuid"
	"github.com/gorilla/mux"
)

// NOTE: the original file called log.Printf without importing "log" and
// imported "github.com/lib/pq" without using it — both are compile errors
// in Go. Fixed here by adding "log" and dropping "pq".

// StorageMetricsHandler handles storage metrics endpoints.
//
// NOTE(review): these handlers use net/http + gorilla/mux signatures, while
// other report handlers appear to be registered on gin routes — confirm an
// adapter exists or port these to gin.HandlerFunc.
type StorageMetricsHandler struct {
	queries *queries.StorageMetricsQueries
}

// NewStorageMetricsHandler creates a new storage metrics handler.
func NewStorageMetricsHandler(queries *queries.StorageMetricsQueries) *StorageMetricsHandler {
	return &StorageMetricsHandler{
		queries: queries,
	}
}

// ReportStorageMetrics handles POST /api/v1/agents/{id}/storage-metrics.
// It validates the agent ID, decodes the report body, and inserts each
// metric row; the first insert failure aborts the request with a 500.
func (h *StorageMetricsHandler) ReportStorageMetrics(w http.ResponseWriter, r *http.Request) {
	agentID, err := uuid.Parse(mux.Vars(r)["id"])
	if err != nil {
		http.Error(w, "Invalid agent ID", http.StatusBadRequest)
		return
	}

	var req models.StorageMetricRequest
	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		http.Error(w, "Invalid request body", http.StatusBadRequest)
		return
	}

	// The body must report for the same agent named in the URL.
	if req.AgentID != agentID {
		http.Error(w, "Agent ID mismatch", http.StatusBadRequest)
		return
	}

	for _, metric := range req.Metrics {
		dbMetric := models.StorageMetric{
			ID:             uuid.New(),
			AgentID:        req.AgentID,
			Mountpoint:     metric.Mountpoint,
			Device:         metric.Device,
			DiskType:       metric.DiskType,
			Filesystem:     metric.Filesystem,
			TotalBytes:     metric.TotalBytes,
			UsedBytes:      metric.UsedBytes,
			AvailableBytes: metric.AvailableBytes,
			UsedPercent:    metric.UsedPercent,
			Severity:       metric.Severity,
			Metadata:       metric.Metadata,
			CreatedAt:      time.Now(),
		}

		if err := h.queries.InsertStorageMetric(r.Context(), dbMetric); err != nil {
			log.Printf("[ERROR] Failed to insert storage metric for agent %s: %v\n", agentID, err)
			http.Error(w, "Failed to insert storage metric", http.StatusInternalServerError)
			return
		}
	}

	// Content-Type was missing on this response in the original.
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusOK)
	json.NewEncoder(w).Encode(map[string]string{
		"status":  "success",
		"message": "Storage metrics reported successfully",
	})
}

// GetStorageMetrics handles GET /api/v1/agents/{id}/storage-metrics with
// optional "limit" and "offset" query parameters.
func (h *StorageMetricsHandler) GetStorageMetrics(w http.ResponseWriter, r *http.Request) {
	agentID, err := uuid.Parse(mux.Vars(r)["id"])
	if err != nil {
		http.Error(w, "Invalid agent ID", http.StatusBadRequest)
		return
	}

	limit := parseIntQueryParam(r, "limit", 100)
	offset := parseIntQueryParam(r, "offset", 0)

	metrics, err := h.queries.GetStorageMetricsByAgentID(r.Context(), agentID, limit, offset)
	if err != nil {
		log.Printf("[ERROR] Failed to retrieve storage metrics for agent %s: %v\n", agentID, err)
		http.Error(w, "Failed to retrieve storage metrics", http.StatusInternalServerError)
		return
	}

	w.Header().Set("Content-Type", "application/json")
	json.NewEncoder(w).Encode(map[string]interface{}{
		"metrics": metrics,
		"total":   len(metrics),
	})
}

// parseIntQueryParam parses an integer query parameter. Missing,
// unparseable, or non-positive values fall back to defaultValue.
func parseIntQueryParam(r *http.Request, key string, defaultValue int) int {
	if val := r.URL.Query().Get(key); val != "" {
		var result int
		if _, err := fmt.Sscanf(val, "%d", &result); err == nil && result > 0 {
			return result
		}
	}
	return defaultValue
}
|
||||
@@ -160,7 +160,7 @@ func loadFromEnv(cfg *Config, skipSensitive bool) error {
|
||||
cfg.CheckInInterval = checkInInterval
|
||||
cfg.OfflineThreshold = offlineThreshold
|
||||
cfg.Timezone = getEnv("TIMEZONE", "UTC")
|
||||
cfg.LatestAgentVersion = getEnv("LATEST_AGENT_VERSION", "0.1.23.6")
|
||||
cfg.LatestAgentVersion = getEnv("LATEST_AGENT_VERSION", "0.1.26")
|
||||
cfg.MinAgentVersion = getEnv("MIN_AGENT_VERSION", "0.1.22")
|
||||
|
||||
if !skipSensitive {
|
||||
|
||||
@@ -0,0 +1,27 @@
|
||||
-- Migration 021: dedicated storage_metrics table.
-- Storage data previously went into the generic metrics table; this gives it
-- a proper home with filesystem-specific columns.

CREATE TABLE storage_metrics (
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    agent_id UUID NOT NULL REFERENCES agents(id) ON DELETE CASCADE,
    mountpoint VARCHAR(255) NOT NULL,
    device VARCHAR(255),
    disk_type VARCHAR(50),
    filesystem VARCHAR(50),
    total_bytes BIGINT,
    used_bytes BIGINT,
    available_bytes BIGINT,
    used_percent FLOAT,
    severity VARCHAR(20),
    metadata JSONB,
    created_at TIMESTAMP NOT NULL DEFAULT NOW()
);

-- Indexes covering the expected access paths: per-agent lookups,
-- time-ordered listings, per-mountpoint filtering, and the composite
-- (agent, mountpoint, newest-first) query.
CREATE INDEX idx_storage_metrics_agent_id ON storage_metrics(agent_id);
CREATE INDEX idx_storage_metrics_created_at ON storage_metrics(created_at DESC);
CREATE INDEX idx_storage_metrics_mountpoint ON storage_metrics(mountpoint);
CREATE INDEX idx_storage_metrics_agent_mount ON storage_metrics(agent_id, mountpoint, created_at DESC);

-- Record this migration as applied.
INSERT INTO schema_migrations (version, description) VALUES ('021', 'Create storage_metrics table');
|
||||
162
aggregator-server/internal/database/queries/admin.go
Normal file
162
aggregator-server/internal/database/queries/admin.go
Normal file
@@ -0,0 +1,162 @@
|
||||
package queries

import (
	"context"
	"database/sql"
	"fmt"
	"time"

	"github.com/alexedwards/argon2id"
	"github.com/google/uuid"
	"github.com/jmoiron/sqlx"
)

// AdminQueries provides database access for administrator accounts stored
// in the users table.
type AdminQueries struct {
	db *sqlx.DB
}

// NewAdminQueries creates a new AdminQueries backed by db.
func NewAdminQueries(db *sqlx.DB) *AdminQueries {
	return &AdminQueries{db: db}
}

// Admin represents an administrator row from the users table. Password
// holds the argon2id hash (never the plaintext) and is excluded from JSON
// output via the "-" tag.
type Admin struct {
	ID        uuid.UUID `json:"id"`
	Username  string    `json:"username"`
	Email     string    `json:"email"`
	Password  string    `json:"-"`
	CreatedAt time.Time `json:"created_at"`
}

// CreateAdminIfNotExists creates an admin user if they don't already exist.
// It is a no-op when a user with the given username is already present.
func (q *AdminQueries) CreateAdminIfNotExists(username, email, password string) error {
	ctx := context.Background()

	// Check if admin already exists.
	var exists bool
	err := q.db.QueryRowContext(ctx, "SELECT EXISTS(SELECT 1 FROM users WHERE username = $1)", username).Scan(&exists)
	if err != nil {
		return fmt.Errorf("failed to check if admin exists: %w", err)
	}

	if exists {
		return nil // admin already exists, nothing to do
	}

	// Never store the plaintext; hash with argon2id default parameters.
	hashedPassword, err := argon2id.CreateHash(password, argon2id.DefaultParams)
	if err != nil {
		return fmt.Errorf("failed to hash password: %w", err)
	}

	query := `
		INSERT INTO users (username, email, password_hash, created_at)
		VALUES ($1, $2, $3, NOW())
	`
	if _, err = q.db.ExecContext(ctx, query, username, email, hashedPassword); err != nil {
		return fmt.Errorf("failed to create admin: %w", err)
	}

	return nil
}

// UpdateAdminPassword updates the admin's password (always updates from .env).
func (q *AdminQueries) UpdateAdminPassword(username, password string) error {
	ctx := context.Background()

	// Hash the new password before it touches the database.
	hashedPassword, err := argon2id.CreateHash(password, argon2id.DefaultParams)
	if err != nil {
		return fmt.Errorf("failed to hash password: %w", err)
	}

	query := `
		UPDATE users
		SET password_hash = $1
		WHERE username = $2
	`
	result, err := q.db.ExecContext(ctx, query, hashedPassword, username)
	if err != nil {
		return fmt.Errorf("failed to update admin password: %w", err)
	}

	rowsAffected, err := result.RowsAffected()
	if err != nil {
		return fmt.Errorf("failed to get rows affected: %w", err)
	}

	if rowsAffected == 0 {
		return fmt.Errorf("admin not found")
	}

	return nil
}

// VerifyAdminCredentials validates username and password against the stored
// argon2id hash and returns the admin record on success.
//
// Both "unknown username" and "wrong password" return the same generic
// "invalid credentials" error so a login endpoint built on this cannot be
// used to enumerate valid usernames (the previous version returned a
// distinguishable "admin not found" for unknown users).
func (q *AdminQueries) VerifyAdminCredentials(username, password string) (*Admin, error) {
	ctx := context.Background()

	var admin Admin
	query := `
		SELECT id, username, email, password_hash, created_at
		FROM users
		WHERE username = $1
	`

	err := q.db.QueryRowContext(ctx, query, username).Scan(
		&admin.ID,
		&admin.Username,
		&admin.Email,
		&admin.Password,
		&admin.CreatedAt,
	)
	if err == sql.ErrNoRows {
		return nil, fmt.Errorf("invalid credentials")
	}
	if err != nil {
		return nil, fmt.Errorf("failed to query admin: %w", err)
	}

	match, err := argon2id.ComparePasswordAndHash(password, admin.Password)
	if err != nil {
		return nil, fmt.Errorf("failed to compare password: %w", err)
	}

	if !match {
		return nil, fmt.Errorf("invalid credentials")
	}

	return &admin, nil
}

// GetAdminByUsername retrieves an admin by username without the password
// hash (used for JWT claims).
func (q *AdminQueries) GetAdminByUsername(username string) (*Admin, error) {
	ctx := context.Background()

	var admin Admin
	query := `
		SELECT id, username, email, created_at
		FROM users
		WHERE username = $1
	`

	err := q.db.QueryRowContext(ctx, query, username).Scan(
		&admin.ID,
		&admin.Username,
		&admin.Email,
		&admin.CreatedAt,
	)
	if err == sql.ErrNoRows {
		return nil, fmt.Errorf("admin not found")
	}
	if err != nil {
		return nil, fmt.Errorf("failed to query admin: %w", err)
	}

	return &admin, nil
}
|
||||
35
aggregator-server/internal/database/queries/packages.go
Normal file
35
aggregator-server/internal/database/queries/packages.go
Normal file
@@ -0,0 +1,35 @@
|
||||
package queries
|
||||
|
||||
import (
|
||||
"github.com/Fimeg/RedFlag/aggregator-server/internal/models"
|
||||
"github.com/google/uuid"
|
||||
"github.com/jmoiron/sqlx"
|
||||
)
|
||||
|
||||
// PackageQueries provides an alias for AgentUpdateQueries to match the expected interface
|
||||
// This maintains backward compatibility while using the existing agent update package system
|
||||
type PackageQueries struct {
|
||||
*AgentUpdateQueries
|
||||
}
|
||||
|
||||
// NewPackageQueries creates a new PackageQueries instance
|
||||
func NewPackageQueries(db *sqlx.DB) *PackageQueries {
|
||||
return &PackageQueries{
|
||||
AgentUpdateQueries: NewAgentUpdateQueries(db),
|
||||
}
|
||||
}
|
||||
|
||||
// StoreSignedPackage stores a signed agent package (alias for CreateUpdatePackage)
|
||||
func (pq *PackageQueries) StoreSignedPackage(pkg *models.AgentUpdatePackage) error {
|
||||
return pq.CreateUpdatePackage(pkg)
|
||||
}
|
||||
|
||||
// GetSignedPackage retrieves a signed package (alias for GetUpdatePackageByVersion)
|
||||
func (pq *PackageQueries) GetSignedPackage(version, platform, architecture string) (*models.AgentUpdatePackage, error) {
|
||||
return pq.GetUpdatePackageByVersion(version, platform, architecture)
|
||||
}
|
||||
|
||||
// GetSignedPackageByID retrieves a signed package by ID (alias for GetUpdatePackage)
|
||||
func (pq *PackageQueries) GetSignedPackageByID(id uuid.UUID) (*models.AgentUpdatePackage, error) {
|
||||
return pq.GetUpdatePackage(id)
|
||||
}
|
||||
131
aggregator-server/internal/database/queries/scanner_config.go
Normal file
131
aggregator-server/internal/database/queries/scanner_config.go
Normal file
@@ -0,0 +1,131 @@
|
||||
package queries

import (
	"database/sql"
	"fmt"
	"time"

	"github.com/jmoiron/sqlx"
)

// ScannerConfigQueries persists per-scanner timeout configuration in the
// scanner_config table.
type ScannerConfigQueries struct {
	db *sqlx.DB
}

// NewScannerConfigQueries creates new scanner config queries.
func NewScannerConfigQueries(db *sqlx.DB) *ScannerConfigQueries {
	return &ScannerConfigQueries{db: db}
}

// ScannerTimeoutConfig is one row of scanner timeout configuration.
type ScannerTimeoutConfig struct {
	ScannerName string    `db:"scanner_name" json:"scanner_name"`
	TimeoutMs   int       `db:"timeout_ms" json:"timeout_ms"`
	UpdatedAt   time.Time `db:"updated_at" json:"updated_at"`
}

// UpsertScannerConfig inserts or updates the timeout for a scanner.
func (q *ScannerConfigQueries) UpsertScannerConfig(scannerName string, timeout time.Duration) error {
	if q.db == nil {
		return fmt.Errorf("database connection not available")
	}

	const stmt = `
		INSERT INTO scanner_config (scanner_name, timeout_ms, updated_at)
		VALUES ($1, $2, CURRENT_TIMESTAMP)
		ON CONFLICT (scanner_name)
		DO UPDATE SET
			timeout_ms = EXCLUDED.timeout_ms,
			updated_at = CURRENT_TIMESTAMP
	`
	// Store durations as integer milliseconds.
	if _, err := q.db.Exec(stmt, scannerName, timeout.Milliseconds()); err != nil {
		return fmt.Errorf("failed to upsert scanner config: %w", err)
	}
	return nil
}

// GetScannerConfig retrieves the timeout configuration for one scanner.
// Returns (nil, nil) when no row exists for the given name.
func (q *ScannerConfigQueries) GetScannerConfig(scannerName string) (*ScannerTimeoutConfig, error) {
	if q.db == nil {
		return nil, fmt.Errorf("database connection not available")
	}

	const stmt = `SELECT scanner_name, timeout_ms, updated_at FROM scanner_config WHERE scanner_name = $1`

	var row ScannerTimeoutConfig
	if err := q.db.Get(&row, stmt, scannerName); err != nil {
		if err == sql.ErrNoRows {
			return nil, nil // not found is not an error
		}
		return nil, fmt.Errorf("failed to get scanner config: %w", err)
	}
	return &row, nil
}

// GetAllScannerConfigs retrieves every scanner timeout configuration,
// keyed by scanner name.
func (q *ScannerConfigQueries) GetAllScannerConfigs() (map[string]ScannerTimeoutConfig, error) {
	if q.db == nil {
		return nil, fmt.Errorf("database connection not available")
	}

	const stmt = `SELECT scanner_name, timeout_ms, updated_at FROM scanner_config ORDER BY scanner_name`

	var rows []ScannerTimeoutConfig
	if err := q.db.Select(&rows, stmt); err != nil {
		return nil, fmt.Errorf("failed to get all scanner configs: %w", err)
	}

	byName := make(map[string]ScannerTimeoutConfig, len(rows))
	for _, row := range rows {
		byName[row.ScannerName] = row
	}
	return byName, nil
}

// DeleteScannerConfig removes a scanner's timeout configuration. Returns
// sql.ErrNoRows when nothing matched.
func (q *ScannerConfigQueries) DeleteScannerConfig(scannerName string) error {
	if q.db == nil {
		return fmt.Errorf("database connection not available")
	}

	res, err := q.db.Exec(`DELETE FROM scanner_config WHERE scanner_name = $1`, scannerName)
	if err != nil {
		return fmt.Errorf("failed to delete scanner config: %w", err)
	}

	affected, err := res.RowsAffected()
	if err != nil {
		return fmt.Errorf("failed to verify delete: %w", err)
	}
	if affected == 0 {
		return sql.ErrNoRows
	}
	return nil
}

// GetScannerTimeoutWithDefault returns the configured timeout for a scanner,
// falling back to defaultTimeout on lookup error or missing row
// (deliberately best-effort: a config read failure never blocks a scan).
func (q *ScannerConfigQueries) GetScannerTimeoutWithDefault(scannerName string, defaultTimeout time.Duration) time.Duration {
	cfg, err := q.GetScannerConfig(scannerName)
	if err != nil || cfg == nil {
		return defaultTimeout
	}
	return time.Duration(cfg.TimeoutMs) * time.Millisecond
}
|
||||
255
aggregator-server/internal/database/queries/security_settings.go
Normal file
255
aggregator-server/internal/database/queries/security_settings.go
Normal file
@@ -0,0 +1,255 @@
|
||||
package queries
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/Fimeg/RedFlag/aggregator-server/internal/models"
|
||||
"github.com/google/uuid"
|
||||
"github.com/jmoiron/sqlx"
|
||||
)
|
||||
|
||||
type SecuritySettingsQueries struct {
|
||||
db *sqlx.DB
|
||||
}
|
||||
|
||||
func NewSecuritySettingsQueries(db *sqlx.DB) *SecuritySettingsQueries {
|
||||
return &SecuritySettingsQueries{db: db}
|
||||
}
|
||||
|
||||
// GetSetting retrieves a specific security setting by category and key
|
||||
func (q *SecuritySettingsQueries) GetSetting(category, key string) (*models.SecuritySetting, error) {
|
||||
query := `
|
||||
SELECT id, category, key, value, is_encrypted, created_at, updated_at, created_by, updated_by
|
||||
FROM security_settings
|
||||
WHERE category = $1 AND key = $2
|
||||
`
|
||||
|
||||
var setting models.SecuritySetting
|
||||
err := q.db.Get(&setting, query, category, key)
|
||||
if err != nil {
|
||||
if err == sql.ErrNoRows {
|
||||
return nil, nil
|
||||
}
|
||||
return nil, fmt.Errorf("failed to get security setting: %w", err)
|
||||
}
|
||||
|
||||
return &setting, nil
|
||||
}
|
||||
|
||||
// GetAllSettings retrieves all security settings
|
||||
func (q *SecuritySettingsQueries) GetAllSettings() ([]models.SecuritySetting, error) {
|
||||
query := `
|
||||
SELECT id, category, key, value, is_encrypted, created_at, updated_at, created_by, updated_by
|
||||
FROM security_settings
|
||||
ORDER BY category, key
|
||||
`
|
||||
|
||||
var settings []models.SecuritySetting
|
||||
err := q.db.Select(&settings, query)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get all security settings: %w", err)
|
||||
}
|
||||
|
||||
return settings, nil
|
||||
}
|
||||
|
||||
// GetSettingsByCategory retrieves all settings for a specific category
|
||||
func (q *SecuritySettingsQueries) GetSettingsByCategory(category string) ([]models.SecuritySetting, error) {
|
||||
query := `
|
||||
SELECT id, category, key, value, is_encrypted, created_at, updated_at, created_by, updated_by
|
||||
FROM security_settings
|
||||
WHERE category = $1
|
||||
ORDER BY key
|
||||
`
|
||||
|
||||
var settings []models.SecuritySetting
|
||||
err := q.db.Select(&settings, query, category)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get security settings by category: %w", err)
|
||||
}
|
||||
|
||||
return settings, nil
|
||||
}
|
||||
|
||||
// CreateSetting creates a new security setting
|
||||
func (q *SecuritySettingsQueries) CreateSetting(category, key string, value interface{}, isEncrypted bool, createdBy *uuid.UUID) (*models.SecuritySetting, error) {
|
||||
// Convert value to JSON string
|
||||
valueJSON, err := json.Marshal(value)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to marshal setting value: %w", err)
|
||||
}
|
||||
|
||||
setting := &models.SecuritySetting{
|
||||
ID: uuid.New(),
|
||||
Category: category,
|
||||
Key: key,
|
||||
Value: string(valueJSON),
|
||||
IsEncrypted: isEncrypted,
|
||||
CreatedAt: time.Now().UTC(),
|
||||
CreatedBy: createdBy,
|
||||
}
|
||||
|
||||
query := `
|
||||
INSERT INTO security_settings (
|
||||
id, category, key, value, is_encrypted, created_at, created_by
|
||||
) VALUES (
|
||||
:id, :category, :key, :value, :is_encrypted, :created_at, :created_by
|
||||
)
|
||||
RETURNING *
|
||||
`
|
||||
|
||||
rows, err := q.db.NamedQuery(query, setting)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create security setting: %w", err)
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
if rows.Next() {
|
||||
var createdSetting models.SecuritySetting
|
||||
if err := rows.StructScan(&createdSetting); err != nil {
|
||||
return nil, fmt.Errorf("failed to scan created setting: %w", err)
|
||||
}
|
||||
return &createdSetting, nil
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("failed to create security setting: no rows returned")
|
||||
}
|
||||
|
||||
// UpdateSetting updates an existing security setting
|
||||
func (q *SecuritySettingsQueries) UpdateSetting(category, key string, value interface{}, updatedBy *uuid.UUID) (*models.SecuritySetting, *string, error) {
|
||||
// Get the old value first
|
||||
oldSetting, err := q.GetSetting(category, key)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to get old setting: %w", err)
|
||||
}
|
||||
if oldSetting == nil {
|
||||
return nil, nil, fmt.Errorf("setting not found")
|
||||
}
|
||||
|
||||
var oldValue *string
|
||||
if oldSetting != nil {
|
||||
oldValue = &oldSetting.Value
|
||||
}
|
||||
|
||||
// Convert new value to JSON string
|
||||
valueJSON, err := json.Marshal(value)
|
||||
if err != nil {
|
||||
return nil, oldValue, fmt.Errorf("failed to marshal setting value: %w", err)
|
||||
}
|
||||
|
||||
now := time.Now().UTC()
|
||||
query := `
|
||||
UPDATE security_settings
|
||||
SET value = $1, updated_at = $2, updated_by = $3
|
||||
WHERE category = $4 AND key = $5
|
||||
RETURNING id, category, key, value, is_encrypted, created_at, updated_at, created_by, updated_by
|
||||
`
|
||||
|
||||
var updatedSetting models.SecuritySetting
|
||||
err = q.db.QueryRow(query, string(valueJSON), now, updatedBy, category, key).Scan(
|
||||
&updatedSetting.ID,
|
||||
&updatedSetting.Category,
|
||||
&updatedSetting.Key,
|
||||
&updatedSetting.Value,
|
||||
&updatedSetting.IsEncrypted,
|
||||
&updatedSetting.CreatedAt,
|
||||
&updatedSetting.UpdatedAt,
|
||||
&updatedSetting.CreatedBy,
|
||||
&updatedSetting.UpdatedBy,
|
||||
)
|
||||
|
||||
if err != nil {
|
||||
return nil, oldValue, fmt.Errorf("failed to update security setting: %w", err)
|
||||
}
|
||||
|
||||
return &updatedSetting, oldValue, nil
|
||||
}
|
||||
|
||||
// DeleteSetting deletes a security setting
|
||||
func (q *SecuritySettingsQueries) DeleteSetting(category, key string) (*string, error) {
|
||||
// Get the old value first
|
||||
oldSetting, err := q.GetSetting(category, key)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get old setting: %w", err)
|
||||
}
|
||||
if oldSetting == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
query := `
|
||||
DELETE FROM security_settings
|
||||
WHERE category = $1 AND key = $2
|
||||
RETURNING value
|
||||
`
|
||||
|
||||
var oldValue string
|
||||
err = q.db.QueryRow(query, category, key).Scan(&oldValue)
|
||||
if err != nil {
|
||||
if err == sql.ErrNoRows {
|
||||
return nil, nil
|
||||
}
|
||||
return nil, fmt.Errorf("failed to delete security setting: %w", err)
|
||||
}
|
||||
|
||||
return &oldValue, nil
|
||||
}
|
||||
|
||||
// CreateAuditLog creates an audit log entry for setting changes
|
||||
func (q *SecuritySettingsQueries) CreateAuditLog(settingID, userID uuid.UUID, action, oldValue, newValue, reason string) error {
|
||||
audit := &models.SecuritySettingAudit{
|
||||
ID: uuid.New(),
|
||||
SettingID: settingID,
|
||||
UserID: userID,
|
||||
Action: action,
|
||||
OldValue: &oldValue,
|
||||
NewValue: &newValue,
|
||||
Reason: reason,
|
||||
CreatedAt: time.Now().UTC(),
|
||||
}
|
||||
|
||||
// Handle null values for old/new values
|
||||
if oldValue == "" {
|
||||
audit.OldValue = nil
|
||||
}
|
||||
if newValue == "" {
|
||||
audit.NewValue = nil
|
||||
}
|
||||
|
||||
query := `
|
||||
INSERT INTO security_setting_audit (
|
||||
id, setting_id, user_id, action, old_value, new_value, reason, created_at
|
||||
) VALUES (
|
||||
:id, :setting_id, :user_id, :action, :old_value, :new_value, :reason, :created_at
|
||||
)
|
||||
`
|
||||
|
||||
_, err := q.db.NamedExec(query, audit)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create audit log: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetAuditLogs retrieves audit logs for a setting
|
||||
func (q *SecuritySettingsQueries) GetAuditLogs(category, key string, limit int) ([]models.SecuritySettingAudit, error) {
|
||||
query := `
|
||||
SELECT sa.id, sa.setting_id, sa.user_id, sa.action, sa.old_value, sa.new_value, sa.reason, sa.created_at
|
||||
FROM security_setting_audit sa
|
||||
INNER JOIN security_settings s ON sa.setting_id = s.id
|
||||
WHERE s.category = $1 AND s.key = $2
|
||||
ORDER BY sa.created_at DESC
|
||||
LIMIT $3
|
||||
`
|
||||
|
||||
var audits []models.SecuritySettingAudit
|
||||
err := q.db.Select(&audits, query, category, key, limit)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get audit logs: %w", err)
|
||||
}
|
||||
|
||||
return audits, nil
|
||||
}
|
||||
167
aggregator-server/internal/database/queries/storage_metrics.go
Normal file
167
aggregator-server/internal/database/queries/storage_metrics.go
Normal file
@@ -0,0 +1,167 @@
|
||||
package queries
|
||||
|
||||
import (
	"context"
	"database/sql"
	"encoding/json"
	"fmt"
	"time"

	"github.com/Fimeg/RedFlag/aggregator-server/internal/models"
	"github.com/google/uuid"
	"github.com/lib/pq" // NOTE(review): unused once InsertStorageMetric stops calling pq.Array — remove at that point
)
|
||||
|
||||
// StorageMetricsQueries handles storage metrics database operations.
type StorageMetricsQueries struct {
	db *sql.DB // plain database/sql handle (not sqlx, unlike sibling query types)
}

// NewStorageMetricsQueries creates a new storage metrics queries instance
// backed by the given database handle.
func NewStorageMetricsQueries(db *sql.DB) *StorageMetricsQueries {
	return &StorageMetricsQueries{db: db}
}
|
||||
|
||||
// InsertStorageMetric inserts a new storage metric
|
||||
func (q *StorageMetricsQueries) InsertStorageMetric(ctx context.Context, metric models.StorageMetric) error {
|
||||
query := `
|
||||
INSERT INTO storage_metrics (
|
||||
id, agent_id, mountpoint, device, disk_type, filesystem,
|
||||
total_bytes, used_bytes, available_bytes, used_percent,
|
||||
severity, metadata, created_at
|
||||
) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13)
|
||||
`
|
||||
|
||||
_, err := q.db.ExecContext(ctx, query,
|
||||
metric.ID, metric.AgentID, metric.Mountpoint, metric.Device,
|
||||
metric.DiskType, metric.Filesystem, metric.TotalBytes,
|
||||
metric.UsedBytes, metric.AvailableBytes, metric.UsedPercent,
|
||||
metric.Severity, pq.Array(metric.Metadata), metric.CreatedAt,
|
||||
)
|
||||
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to insert storage metric: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetStorageMetricsByAgentID retrieves storage metrics for an agent
|
||||
func (q *StorageMetricsQueries) GetStorageMetricsByAgentID(ctx context.Context, agentID uuid.UUID, limit, offset int) ([]models.StorageMetric, error) {
|
||||
query := `
|
||||
SELECT id, agent_id, mountpoint, device, disk_type, filesystem,
|
||||
total_bytes, used_bytes, available_bytes, used_percent,
|
||||
severity, metadata, created_at
|
||||
FROM storage_metrics
|
||||
WHERE agent_id = $1
|
||||
ORDER BY created_at DESC
|
||||
LIMIT $2 OFFSET $3
|
||||
`
|
||||
|
||||
rows, err := q.db.QueryContext(ctx, query, agentID, limit, offset)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to query storage metrics: %w", err)
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
var metrics []models.StorageMetric
|
||||
for rows.Next() {
|
||||
var metric models.StorageMetric
|
||||
var metadataMap map[string]interface{}
|
||||
|
||||
err := rows.Scan(
|
||||
&metric.ID, &metric.AgentID, &metric.Mountpoint, &metric.Device,
|
||||
&metric.DiskType, &metric.Filesystem, &metric.TotalBytes,
|
||||
&metric.UsedBytes, &metric.AvailableBytes, &metric.UsedPercent,
|
||||
&metric.Severity, &metadataMap, &metric.CreatedAt,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to scan storage metric: %w", err)
|
||||
}
|
||||
|
||||
metric.Metadata = metadataMap
|
||||
metrics = append(metrics, metric)
|
||||
}
|
||||
|
||||
if err := rows.Err(); err != nil {
|
||||
return nil, fmt.Errorf("error iterating storage metrics: %w", err)
|
||||
}
|
||||
|
||||
return metrics, nil
|
||||
}
|
||||
|
||||
// GetLatestStorageMetrics retrieves the most recent storage metrics per mountpoint
|
||||
func (q *StorageMetricsQueries) GetLatestStorageMetrics(ctx context.Context, agentID uuid.UUID) ([]models.StorageMetric, error) {
|
||||
query := `
|
||||
SELECT DISTINCT ON (mountpoint)
|
||||
id, agent_id, mountpoint, device, disk_type, filesystem,
|
||||
total_bytes, used_bytes, available_bytes, used_percent,
|
||||
severity, metadata, created_at
|
||||
FROM storage_metrics
|
||||
WHERE agent_id = $1
|
||||
ORDER BY mountpoint, created_at DESC
|
||||
`
|
||||
|
||||
rows, err := q.db.QueryContext(ctx, query, agentID)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to query latest storage metrics: %w", err)
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
var metrics []models.StorageMetric
|
||||
for rows.Next() {
|
||||
var metric models.StorageMetric
|
||||
var metadataMap map[string]interface{}
|
||||
|
||||
err := rows.Scan(
|
||||
&metric.ID, &metric.AgentID, &metric.Mountpoint, &metric.Device,
|
||||
&metric.DiskType, &metric.Filesystem, &metric.TotalBytes,
|
||||
&metric.UsedBytes, &metric.AvailableBytes, &metric.UsedPercent,
|
||||
&metric.Severity, &metadataMap, &metric.CreatedAt,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to scan storage metric: %w", err)
|
||||
}
|
||||
|
||||
metric.Metadata = metadataMap
|
||||
metrics = append(metrics, metric)
|
||||
}
|
||||
|
||||
if err := rows.Err(); err != nil {
|
||||
return nil, fmt.Errorf("error iterating latest storage metrics: %w", err)
|
||||
}
|
||||
|
||||
return metrics, nil
|
||||
}
|
||||
|
||||
// GetStorageMetricsSummary returns summary statistics for an agent
|
||||
func (q *StorageMetricsQueries) GetStorageMetricsSummary(ctx context.Context, agentID uuid.UUID) (map[string]interface{}, error) {
|
||||
query := `
|
||||
SELECT
|
||||
COUNT(*) as total_disks,
|
||||
COUNT(CASE WHEN severity = 'critical' THEN 1 END) as critical_disks,
|
||||
COUNT(CASE WHEN severity = 'important' THEN 1 END) as important_disks,
|
||||
AVG(used_percent) as avg_used_percent,
|
||||
MAX(used_percent) as max_used_percent,
|
||||
MIN(created_at) as first_collected_at,
|
||||
MAX(created_at) as last_collected_at
|
||||
FROM storage_metrics
|
||||
WHERE agent_id = $1
|
||||
AND created_at >= NOW() - INTERVAL '24 hours'
|
||||
`
|
||||
|
||||
var summary map[string]interface{}
|
||||
err := q.db.QueryRowContext(ctx, query, agentID).Scan(
|
||||
&summary["total_disks"],
|
||||
&summary["critical_disks"],
|
||||
&summary["important_disks"],
|
||||
&summary["avg_used_percent"],
|
||||
&summary["max_used_percent"],
|
||||
&summary["first_collected_at"],
|
||||
&summary["last_collected_at"],
|
||||
)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get storage metrics summary: %w", err)
|
||||
}
|
||||
|
||||
return summary, nil
|
||||
}
|
||||
40
aggregator-server/internal/models/storage_metrics.go
Normal file
40
aggregator-server/internal/models/storage_metrics.go
Normal file
@@ -0,0 +1,40 @@
|
||||
package models
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/google/uuid"
|
||||
)
|
||||
|
||||
// StorageMetric represents a storage metric from an agent: one observation
// of a single filesystem/mountpoint at a point in time.
type StorageMetric struct {
	ID             uuid.UUID              `json:"id" db:"id"`
	AgentID        uuid.UUID              `json:"agent_id" db:"agent_id"`
	Mountpoint     string                 `json:"mountpoint" db:"mountpoint"`
	Device         string                 `json:"device" db:"device"`
	DiskType       string                 `json:"disk_type" db:"disk_type"`
	Filesystem     string                 `json:"filesystem" db:"filesystem"`
	TotalBytes     int64                  `json:"total_bytes" db:"total_bytes"`
	UsedBytes      int64                  `json:"used_bytes" db:"used_bytes"`
	AvailableBytes int64                  `json:"available_bytes" db:"available_bytes"`
	UsedPercent    float64                `json:"used_percent" db:"used_percent"`
	Severity       string                 `json:"severity" db:"severity"`
	Metadata       map[string]interface{} `json:"metadata,omitempty" db:"metadata"` // free-form extra fields; omitted from JSON when nil
	CreatedAt      time.Time              `json:"created_at" db:"created_at"`
}

// StorageMetricRequest represents the request payload for storage metrics
// submitted by an agent in a single report.
type StorageMetricRequest struct {
	AgentID   uuid.UUID       `json:"agent_id"`
	CommandID string          `json:"command_id"` // correlates the report with the server-issued command
	Timestamp time.Time       `json:"timestamp"`
	Metrics   []StorageMetric `json:"metrics"`
}

// StorageMetricsList represents a list of storage metrics with pagination
// information for API responses.
type StorageMetricsList struct {
	Metrics []StorageMetric `json:"metrics"`
	Total   int             `json:"total"`
	Page    int             `json:"page"`
	PerPage int             `json:"per_page"`
}
|
||||
@@ -27,27 +27,31 @@ func NewInstallTemplateService() *InstallTemplateService {
|
||||
func (s *InstallTemplateService) RenderInstallScript(agent *models.Agent, binaryURL, configURL string) (string, error) {
|
||||
// Define template data
|
||||
data := struct {
|
||||
AgentID string
|
||||
BinaryURL string
|
||||
ConfigURL string
|
||||
Platform string
|
||||
Architecture string
|
||||
Version string
|
||||
AgentUser string
|
||||
AgentHome string
|
||||
ConfigDir string
|
||||
LogDir string
|
||||
AgentID string
|
||||
BinaryURL string
|
||||
ConfigURL string
|
||||
Platform string
|
||||
Architecture string
|
||||
Version string
|
||||
AgentUser string
|
||||
AgentHome string
|
||||
ConfigDir string
|
||||
LogDir string
|
||||
AgentConfigDir string
|
||||
AgentLogDir string
|
||||
}{
|
||||
AgentID: agent.ID.String(),
|
||||
BinaryURL: binaryURL,
|
||||
ConfigURL: configURL,
|
||||
Platform: agent.OSType,
|
||||
Architecture: agent.OSArchitecture,
|
||||
Version: agent.CurrentVersion,
|
||||
AgentUser: "redflag-agent",
|
||||
AgentHome: "/var/lib/redflag-agent",
|
||||
ConfigDir: "/etc/redflag",
|
||||
LogDir: "/var/log/redflag",
|
||||
AgentID: agent.ID.String(),
|
||||
BinaryURL: binaryURL,
|
||||
ConfigURL: configURL,
|
||||
Platform: agent.OSType,
|
||||
Architecture: agent.OSArchitecture,
|
||||
Version: agent.CurrentVersion,
|
||||
AgentUser: "redflag-agent",
|
||||
AgentHome: "/var/lib/redflag/agent",
|
||||
ConfigDir: "/etc/redflag",
|
||||
LogDir: "/var/log/redflag",
|
||||
AgentConfigDir: "/etc/redflag/agent",
|
||||
AgentLogDir: "/var/log/redflag/agent",
|
||||
}
|
||||
|
||||
// Choose template based on platform
|
||||
@@ -102,6 +106,8 @@ func (s *InstallTemplateService) RenderInstallScriptFromBuild(
|
||||
AgentHome string
|
||||
ConfigDir string
|
||||
LogDir string
|
||||
AgentConfigDir string
|
||||
AgentLogDir string
|
||||
}{
|
||||
AgentID: agentID,
|
||||
BinaryURL: binaryURL,
|
||||
@@ -112,9 +118,11 @@ func (s *InstallTemplateService) RenderInstallScriptFromBuild(
|
||||
ServerURL: serverURL,
|
||||
RegistrationToken: registrationToken,
|
||||
AgentUser: "redflag-agent",
|
||||
AgentHome: "/var/lib/redflag-agent",
|
||||
AgentHome: "/var/lib/redflag/agent",
|
||||
ConfigDir: "/etc/redflag",
|
||||
LogDir: "/var/log/redflag",
|
||||
AgentConfigDir: "/etc/redflag/agent",
|
||||
AgentLogDir: "/var/log/redflag/agent",
|
||||
}
|
||||
|
||||
templateName := "templates/install/scripts/linux.sh.tmpl"
|
||||
|
||||
@@ -14,7 +14,11 @@ if [ "$EUID" -ne 0 ]; then
|
||||
fi
|
||||
|
||||
AGENT_USER="redflag-agent"
|
||||
AGENT_HOME="/var/lib/redflag-agent"
|
||||
BASE_DIR="/var/lib/redflag"
|
||||
CONFIG_DIR="/etc/redflag"
|
||||
AGENT_CONFIG_DIR="/etc/redflag/agent"
|
||||
LOG_DIR="/var/log/redflag"
|
||||
AGENT_LOG_DIR="/var/log/redflag/agent"
|
||||
SUDOERS_FILE="/etc/sudoers.d/redflag-agent"
|
||||
|
||||
# Function to detect package manager
|
||||
@@ -45,7 +49,7 @@ VERSION="{{.Version}}"
|
||||
LOG_DIR="/var/log/redflag"
|
||||
BACKUP_DIR="${CONFIG_DIR}/backups/backup.$(date +%s)"
|
||||
AGENT_USER="redflag-agent"
|
||||
AGENT_HOME="/var/lib/redflag-agent"
|
||||
AGENT_HOME="{{.AgentHome}}"
|
||||
SUDOERS_FILE="/etc/sudoers.d/redflag-agent"
|
||||
|
||||
echo "=== RedFlag Agent v${VERSION} Installation ==="
|
||||
@@ -99,12 +103,29 @@ else
|
||||
echo "✓ User $AGENT_USER created"
|
||||
fi
|
||||
|
||||
# Create home directory
|
||||
# Create home directory structure
|
||||
if [ ! -d "$AGENT_HOME" ]; then
|
||||
# Create nested directory structure
|
||||
sudo mkdir -p "$BASE_DIR"
|
||||
sudo mkdir -p "$AGENT_HOME"
|
||||
sudo chown "$AGENT_USER:$AGENT_USER" "$AGENT_HOME"
|
||||
sudo mkdir -p "$AGENT_HOME/cache"
|
||||
sudo mkdir -p "$AGENT_HOME/state"
|
||||
sudo mkdir -p "$AGENT_CONFIG_DIR"
|
||||
sudo mkdir -p "$AGENT_LOG_DIR"
|
||||
|
||||
# Set ownership and permissions
|
||||
sudo chown -R "$AGENT_USER:$AGENT_USER" "$BASE_DIR"
|
||||
sudo chmod 750 "$BASE_DIR"
|
||||
sudo chmod 750 "$AGENT_HOME"
|
||||
echo "✓ Home directory created at $AGENT_HOME"
|
||||
sudo chmod 750 "$AGENT_HOME/cache"
|
||||
sudo chmod 750 "$AGENT_HOME/state"
|
||||
sudo chmod 755 "$AGENT_CONFIG_DIR"
|
||||
sudo chmod 755 "$AGENT_LOG_DIR"
|
||||
|
||||
echo "✓ Agent directory structure created:"
|
||||
echo " - Agent home: $AGENT_HOME"
|
||||
echo " - Config: $AGENT_CONFIG_DIR"
|
||||
echo " - Logs: $AGENT_LOG_DIR"
|
||||
fi
|
||||
|
||||
# Step 4: Install sudoers configuration with OS-specific commands
|
||||
@@ -173,10 +194,10 @@ fi
|
||||
|
||||
# Step 4: Create directories
|
||||
echo "Creating directories..."
|
||||
sudo mkdir -p "${CONFIG_DIR}"
|
||||
sudo mkdir -p "${CONFIG_DIR}/backups"
|
||||
sudo mkdir -p "${AGENT_CONFIG_DIR}"
|
||||
sudo mkdir -p "${CONFIG_DIR}/backups" # Legacy backup location
|
||||
sudo mkdir -p "$AGENT_HOME"
|
||||
sudo mkdir -p "/var/log/redflag"
|
||||
sudo mkdir -p "$AGENT_LOG_DIR"
|
||||
|
||||
# Step 5: Download agent binary
|
||||
echo "Downloading agent binary..."
|
||||
@@ -186,14 +207,14 @@ sudo chmod +x "${INSTALL_DIR}/${SERVICE_NAME}"
|
||||
# Step 6: Handle configuration
|
||||
# IMPORTANT: The agent handles its own migration on first start.
|
||||
# We either preserve existing config OR create a minimal template.
|
||||
if [ -f "${CONFIG_DIR}/config.json" ]; then
|
||||
if [ -f "${AGENT_CONFIG_DIR}/config.json" ]; then
|
||||
echo "[CONFIG] Upgrade detected - preserving existing configuration"
|
||||
echo "[CONFIG] Agent will handle migration automatically on first start"
|
||||
echo "[CONFIG] Backup created at: ${BACKUP_DIR}"
|
||||
else
|
||||
echo "[CONFIG] Fresh install - generating minimal configuration with registration token"
|
||||
# Create minimal config template - agent will populate missing fields on first start
|
||||
sudo tee "${CONFIG_DIR}/config.json" > /dev/null <<EOF
|
||||
sudo tee "${AGENT_CONFIG_DIR}/config.json" > /dev/null <<EOF
|
||||
{
|
||||
"version": 5,
|
||||
"agent_version": "${VERSION}",
|
||||
@@ -241,7 +262,7 @@ EOF
|
||||
fi
|
||||
|
||||
# Step 7: Set permissions on config file
|
||||
sudo chmod 600 "${CONFIG_DIR}/config.json"
|
||||
sudo chmod 600 "${AGENT_CONFIG_DIR}/config.json"
|
||||
|
||||
# Step 8: Create systemd service with security hardening
|
||||
echo "Creating systemd service with security configuration..."
|
||||
@@ -266,7 +287,7 @@ RestartPreventExitStatus=255
|
||||
# Note: NoNewPrivileges disabled to allow sudo for package management
|
||||
ProtectSystem=strict
|
||||
ProtectHome=true
|
||||
ReadWritePaths={{.AgentHome}} {{.ConfigDir}} {{.LogDir}}
|
||||
ReadWritePaths={{.AgentHome}} {{.AgentHome}}/cache {{.AgentHome}}/state {{.AgentHome}}/migration_backups {{.AgentConfigDir}} {{.AgentLogDir}}
|
||||
PrivateTmp=true
|
||||
ProtectKernelTunables=true
|
||||
ProtectKernelModules=true
|
||||
@@ -286,13 +307,36 @@ EOF
|
||||
|
||||
# Set proper permissions on directories
|
||||
echo "Setting directory permissions..."
|
||||
sudo chown -R {{.AgentUser}}:{{.AgentUser}} "{{.ConfigDir}}"
|
||||
sudo chown {{.AgentUser}}:{{.AgentUser}} "{{.ConfigDir}}/config.json"
|
||||
sudo chmod 600 "{{.ConfigDir}}/config.json"
|
||||
sudo chown -R {{.AgentUser}}:{{.AgentUser}} "{{.AgentConfigDir}}"
|
||||
sudo chown {{.AgentUser}}:{{.AgentUser}} "{{.AgentConfigDir}}/config.json"
|
||||
sudo chmod 600 "{{.AgentConfigDir}}/config.json"
|
||||
sudo chown -R {{.AgentUser}}:{{.AgentUser}} "{{.AgentHome}}"
|
||||
sudo chmod 750 "{{.AgentHome}}"
|
||||
sudo chown -R {{.AgentUser}}:{{.AgentUser}} "{{.LogDir}}"
|
||||
sudo chmod 750 "{{.LogDir}}"
|
||||
sudo chown -R {{.AgentUser}}:{{.AgentUser}} "{{.AgentLogDir}}"
|
||||
sudo chmod 750 "{{.AgentLogDir}}"
|
||||
|
||||
# Register agent with server (if token provided)
|
||||
if [ -n "{{.RegistrationToken}}" ]; then
|
||||
echo "[INFO] [installer] [register] Registering agent with server..."
|
||||
if sudo -u "{{.AgentUser}}" "${INSTALL_DIR}/${SERVICE_NAME}" --server "{{.ServerURL}}" --token "{{.RegistrationToken}}" --register; then
|
||||
echo "[SUCCESS] [installer] [register] Agent registered successfully"
|
||||
echo "[INFO] [installer] [register] Agent ID assigned, configuration updated"
|
||||
else
|
||||
echo "[ERROR] [installer] [register] Registration failed - check token validity and server connectivity"
|
||||
echo "[WARN] [installer] [register] Agent installed but not registered. Service will not start."
|
||||
echo ""
|
||||
echo "[INFO] [installer] [register] To retry registration manually:"
|
||||
echo "[INFO] [installer] [register] sudo -u {{.AgentUser}} ${INSTALL_DIR}/${SERVICE_NAME} --server {{.ServerURL}} --token YOUR_TOKEN --register"
|
||||
echo "[INFO] [installer] [register] Then start service:"
|
||||
echo "[INFO] [installer] [register] sudo systemctl start ${SERVICE_NAME}"
|
||||
exit 1
|
||||
fi
|
||||
else
|
||||
echo "[INFO] [installer] [register] No registration token provided - skipping registration"
|
||||
echo "[INFO] [installer] [register] Service will start but agent will exit until registered"
|
||||
echo "[INFO] [installer] [register] To register manually:"
|
||||
echo "[INFO] [installer] [register] sudo -u {{.AgentUser}} ${INSTALL_DIR}/${SERVICE_NAME} --server {{.ServerURL}} --token YOUR_TOKEN --register"
|
||||
fi
|
||||
|
||||
# Step 9: Enable and start service
|
||||
echo "Enabling and starting service..."
|
||||
|
||||
@@ -198,6 +198,31 @@ if (Test-Path $ConfigPath) {
|
||||
Write-Host "Setting file permissions..." -ForegroundColor Yellow
|
||||
icacls $ConfigPath /inheritance:r /grant:r "SYSTEM:(OI)(CI)F" /grant:r "Administrators:(OI)(CI)F" | Out-Null
|
||||
|
||||
# Register agent with server (if token provided)
|
||||
if ("{{.RegistrationToken}}" -ne "") {
|
||||
Write-Host "[INFO] [installer] [register] Registering agent with server..." -ForegroundColor Cyan
|
||||
$AgentBinary = if ($AgentPath) { "$AgentPath" } else { "$AgentDir\redflag-agent.exe" }
|
||||
$RegisterProcess = Start-Process -FilePath $AgentBinary -ArgumentList "--server", "{{.ServerURL}}", "--token", "{{.RegistrationToken}}", "--register" -Wait -PassThru -NoNewWindow
|
||||
if ($RegisterProcess.ExitCode -eq 0) {
|
||||
Write-Host "[SUCCESS] [installer] [register] Agent registered successfully" -ForegroundColor Green
|
||||
Write-Host "[INFO] [installer] [register] Agent ID assigned, configuration updated" -ForegroundColor Gray
|
||||
} else {
|
||||
Write-Host "[ERROR] [installer] [register] Registration failed - check token validity and server connectivity" -ForegroundColor Red
|
||||
Write-Host "[WARN] [installer] [register] Agent installed but not registered. Service will not start." -ForegroundColor Yellow
|
||||
Write-Host ""
|
||||
Write-Host "[INFO] [installer] [register] To retry registration manually:" -ForegroundColor Gray
|
||||
Write-Host "[INFO] [installer] [register] $AgentBinary --server {{.ServerURL}} --token YOUR_TOKEN --register" -ForegroundColor Gray
|
||||
Write-Host "[INFO] [installer] [register] Then start service:" -ForegroundColor Gray
|
||||
Write-Host "[INFO] [installer] [register] Start-Service -Name $ServiceName" -ForegroundColor Gray
|
||||
exit 1
|
||||
}
|
||||
} else {
|
||||
Write-Host "[INFO] [installer] [register] No registration token provided - skipping registration" -ForegroundColor Gray
|
||||
Write-Host "[INFO] [installer] [register] Service will start but agent will exit until registered" -ForegroundColor Gray
|
||||
Write-Host "[INFO] [installer] [register] To register manually:" -ForegroundColor Gray
|
||||
Write-Host "[INFO] [installer] [register] $AgentBinary --server {{.ServerURL}} --token YOUR_TOKEN --register" -ForegroundColor Gray
|
||||
}
|
||||
|
||||
# Step 6: Install Windows service (if not skipped)
|
||||
if (-not $SkipServiceInstall) {
|
||||
Write-Host "Creating Windows service..." -ForegroundColor Yellow
|
||||
|
||||
Reference in New Issue
Block a user