fix(database): B-1 schema integrity and migration fixes

- Fix migration 024 self-insert and bad column reference (F-B1-1, F-B1-2)
  Uses existing enabled/auto_run columns instead of the non-existent deprecated column
- Abort server on migration failure instead of warning (F-B1-11)
  main.go now calls log.Fatalf, prints [INFO] only on success
- Fix migration 018 scanner_config filename suffix (F-B1-3)
  Renumbered to 027 with .up.sql suffix
- Remove GRANT to non-existent role in scanner_config (F-B1-4)
- Resolve duplicate migration numbers 009 and 012 (F-B1-13)
  Renamed to 009b and 012b for unique lexical sorting
- Add IF NOT EXISTS to all non-idempotent migrations (F-B1-15)
  Fixed: 011, 012, 017, 023, 023a
- Replace N+1 dashboard stats loop with GetAllUpdateStats (F-B1-6)
  Single aggregate query replaces per-agent loop
- Add composite index on agent_commands(status, sent_at) (F-B1-5)
  New migration 028 with partial index for timeout service
- Add background refresh token cleanup goroutine (F-B1-10)
  24-hour ticker calls CleanupExpiredTokens
- ETHOS log format in migration runner (no emojis)

All 55 tests pass (41 server + 14 agent). No regressions.
See docs/B1_Fix_Implementation.md and DEV-025 through DEV-028.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
2026-03-29 07:03:35 -04:00
parent ab676c3b83
commit ec0d880036
33 changed files with 420 additions and 537 deletions

View File

@@ -2,6 +2,7 @@ package database
import (
"fmt"
"log"
"os"
"path/filepath"
"sort"
@@ -72,7 +73,7 @@ func (db *DB) Migrate(migrationsPath string) error {
}
if count > 0 {
fmt.Printf("→ Skipping migration (already applied): %s\n", filename)
log.Printf("[INFO] [server] [database] migration_skipped version=%s already_applied=true", filename)
continue
}
@@ -104,7 +105,7 @@ func (db *DB) Migrate(migrationsPath string) error {
checkErr := db.Get(&count, "SELECT COUNT(*) FROM schema_migrations WHERE version = $1", filename)
if checkErr == nil && count > 0 {
// Migration was already applied, just skip it
fmt.Printf("⚠ Migration %s already applied, skipping\n", filename)
log.Printf("[INFO] [server] [database] migration_already_applied version=%s", filename)
} else {
// Migration failed and wasn't applied - this is a real error
return fmt.Errorf("migration %s failed with 'already exists' but migration not recorded: %w", filename, err)
@@ -128,7 +129,7 @@ func (db *DB) Migrate(migrationsPath string) error {
return fmt.Errorf("failed to commit migration %s: %w", filename, err)
}
fmt.Printf("✓ Successfully executed migration: %s\n", filename)
log.Printf("[INFO] [server] [database] migration_applied version=%s", filename)
}
return nil

View File

@@ -79,7 +79,8 @@ func TestMigrationFailureReturnsError(t *testing.T) {
// ---------------------------------------------------------------------------
func TestServerStartsAfterMigrationFailure(t *testing.T) {
// Read main.go and inspect the migration error handling block
// POST-FIX (F-B1-11): main.go now aborts on migration failure.
// This test confirms the Warning pattern is gone.
mainPath := filepath.Join("..", "..", "cmd", "server", "main.go")
content, err := os.ReadFile(mainPath)
if err != nil {
@@ -88,30 +89,22 @@ func TestServerStartsAfterMigrationFailure(t *testing.T) {
src := string(content)
// Find the migration error block
if !strings.Contains(src, "Warning: Migration failed") {
t.Fatal("[ERROR] [server] [database] cannot find migration error handling in main.go")
}
// The NORMAL startup migration error (not --migrate flag) logs a warning, NOT a fatal.
// Main.go has TWO migration paths:
// 1. --migrate flag (line ~183): log.Fatal — correct behavior
// 2. Normal startup (line ~191): fmt.Printf("Warning:...") — THIS IS THE BUG
// We specifically check the normal startup path.
// The old bug: fmt.Printf("Warning: Migration failed...") must be gone
if strings.Contains(src, `fmt.Printf("Warning: Migration failed`) {
t.Log("[INFO] [server] [database] F-B1-11 P0 confirmed: normal startup swallows migration errors")
} else {
t.Error("[ERROR] [server] [database] cannot find the migration error swallowing pattern")
t.Error("[ERROR] [server] [database] F-B1-11 NOT FIXED: main.go still swallows migration errors")
}
// [OK] is printed unconditionally after the if block
migrationBlock := extractBlock(src, "db.Migrate(migrationsPath)", `Database migrations completed`)
// Must now use log.Fatalf for migration failure
migrationBlock := extractBlock(src, "db.Migrate(migrationsPath)", `migrations_complete`)
if migrationBlock == "" {
t.Fatal("[ERROR] [server] [database] cannot find migration block in main.go")
}
t.Log("[INFO] [server] [database] F-B1-11 P0 confirmed: main.go swallows migration errors and prints [OK]")
t.Log("[INFO] [server] [database] after fix: migration failure must call log.Fatal or os.Exit(1)")
if !strings.Contains(migrationBlock, "log.Fatalf") {
t.Error("[ERROR] [server] [database] migration error handler does not use log.Fatalf")
}
t.Log("[INFO] [server] [database] F-B1-11 FIXED: migration failure now aborts server")
}
// ---------------------------------------------------------------------------
@@ -124,6 +117,7 @@ func TestServerStartsAfterMigrationFailure(t *testing.T) {
// ---------------------------------------------------------------------------
func TestServerMustAbortOnMigrationFailure(t *testing.T) {
// POST-FIX (F-B1-11): Confirms log.Fatalf is used for migration failure
mainPath := filepath.Join("..", "..", "cmd", "server", "main.go")
content, err := os.ReadFile(mainPath)
if err != nil {
@@ -131,14 +125,19 @@ func TestServerMustAbortOnMigrationFailure(t *testing.T) {
}
src := string(content)
// The normal startup migration error handler (NOT --migrate flag) should abort
// Currently it uses fmt.Printf("Warning:...") and continues
if strings.Contains(src, `fmt.Printf("Warning: Migration failed`) {
t.Errorf("[ERROR] [server] [database] normal startup swallows migration errors with Warning.\n"+
"F-B1-11 P0: main.go must call log.Fatal or os.Exit(1) on migration failure.\n"+
"The [OK] message must only print on genuine success.")
migrationBlock := extractBlock(src, "db.Migrate(migrationsPath)", `migrations_complete`)
if migrationBlock == "" {
t.Fatal("[ERROR] [server] [database] cannot find migration block")
}
if !strings.Contains(migrationBlock, "log.Fatalf") {
t.Errorf("[ERROR] [server] [database] migration error handler must use log.Fatalf")
}
// Success message must only appear after the error check
if strings.Contains(src, `fmt.Printf("Warning: Migration failed`) {
t.Errorf("[ERROR] [server] [database] old warning pattern still present")
}
t.Log("[INFO] [server] [database] F-B1-11 FIXED: server aborts on migration failure")
}
// ---------------------------------------------------------------------------
@@ -148,6 +147,7 @@ func TestServerMustAbortOnMigrationFailure(t *testing.T) {
// ---------------------------------------------------------------------------
func TestMigrationRunnerDetectsDuplicateNumbers(t *testing.T) {
// POST-FIX (F-B1-13): No duplicate migration numbers should exist.
migrationsPath := filepath.Join("migrations")
files, err := os.ReadDir(migrationsPath)
if err != nil {
@@ -155,6 +155,7 @@ func TestMigrationRunnerDetectsDuplicateNumbers(t *testing.T) {
}
// Extract numeric prefixes from .up.sql files
// Note: "009b" and "012b" are distinct from "009" and "012" — not duplicates
prefixCount := make(map[string][]string)
for _, file := range files {
if !strings.HasSuffix(file.Name(), ".up.sql") {
@@ -167,20 +168,18 @@ func TestMigrationRunnerDetectsDuplicateNumbers(t *testing.T) {
}
}
// Document duplicates
duplicates := 0
for prefix, names := range prefixCount {
if len(names) > 1 {
duplicates++
t.Logf("[WARNING] [server] [database] duplicate migration number %s: %v", prefix, names)
t.Errorf("[ERROR] [server] [database] duplicate migration prefix %s: %v", prefix, names)
}
}
if duplicates == 0 {
t.Error("[ERROR] [server] [database] F-B1-13 already fixed: no duplicate migration numbers found")
if duplicates > 0 {
t.Errorf("[ERROR] [server] [database] F-B1-13 NOT FIXED: %d duplicates remain", duplicates)
}
t.Logf("[INFO] [server] [database] F-B1-13 confirmed: %d duplicate migration numbers found", duplicates)
t.Log("[INFO] [server] [database] F-B1-13 FIXED: no duplicate migration numbers")
}
// ---------------------------------------------------------------------------
@@ -190,6 +189,8 @@ func TestMigrationRunnerDetectsDuplicateNumbers(t *testing.T) {
// ---------------------------------------------------------------------------
func TestMigrationRunnerShouldRejectDuplicateNumbers(t *testing.T) {
// POST-FIX (F-B1-13): All migration prefixes are unique.
// Duplicates resolved by renaming: 009→009b, 012→012b
migrationsPath := filepath.Join("migrations")
files, err := os.ReadDir(migrationsPath)
if err != nil {
@@ -209,11 +210,10 @@ func TestMigrationRunnerShouldRejectDuplicateNumbers(t *testing.T) {
for prefix, count := range prefixCount {
if count > 1 {
t.Errorf("[ERROR] [server] [database] migration number %s has %d files.\n"+
"F-B1-13: each migration number must be unique.\n"+
"After fix: renumber or merge duplicate migrations.", prefix, count)
t.Errorf("[ERROR] [server] [database] migration prefix %s has %d files", prefix, count)
}
}
t.Log("[INFO] [server] [database] F-B1-13 FIXED: all migration prefixes are unique")
}
// extractBlock extracts text between two markers in a source string

View File

@@ -2,7 +2,7 @@
CREATE EXTENSION IF NOT EXISTS "uuid-ossp";
-- Agents table
CREATE TABLE agents (
CREATE TABLE IF NOT EXISTS agents (
id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
hostname VARCHAR(255) NOT NULL,
os_type VARCHAR(50) NOT NULL CHECK (os_type IN ('windows', 'linux', 'macos')),
@@ -16,12 +16,12 @@ CREATE TABLE agents (
updated_at TIMESTAMP DEFAULT NOW()
);
CREATE INDEX idx_agents_status ON agents(status);
CREATE INDEX idx_agents_os_type ON agents(os_type);
CREATE INDEX idx_agents_last_seen ON agents(last_seen);
CREATE INDEX IF NOT EXISTS idx_agents_status ON agents(status);
CREATE INDEX IF NOT EXISTS idx_agents_os_type ON agents(os_type);
CREATE INDEX IF NOT EXISTS idx_agents_last_seen ON agents(last_seen);
-- Agent specs
CREATE TABLE agent_specs (
CREATE TABLE IF NOT EXISTS agent_specs (
id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
agent_id UUID REFERENCES agents(id) ON DELETE CASCADE,
cpu_model VARCHAR(255),
@@ -36,10 +36,10 @@ CREATE TABLE agent_specs (
collected_at TIMESTAMP DEFAULT NOW()
);
CREATE INDEX idx_agent_specs_agent_id ON agent_specs(agent_id);
CREATE INDEX IF NOT EXISTS idx_agent_specs_agent_id ON agent_specs(agent_id);
-- Update packages
CREATE TABLE update_packages (
CREATE TABLE IF NOT EXISTS update_packages (
id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
agent_id UUID REFERENCES agents(id) ON DELETE CASCADE,
package_type VARCHAR(50) NOT NULL,
@@ -63,14 +63,14 @@ CREATE TABLE update_packages (
UNIQUE(agent_id, package_type, package_name, available_version)
);
CREATE INDEX idx_updates_status ON update_packages(status);
CREATE INDEX idx_updates_agent ON update_packages(agent_id);
CREATE INDEX idx_updates_severity ON update_packages(severity);
CREATE INDEX idx_updates_package_type ON update_packages(package_type);
CREATE INDEX idx_updates_composite ON update_packages(status, severity, agent_id);
CREATE INDEX IF NOT EXISTS idx_updates_status ON update_packages(status);
CREATE INDEX IF NOT EXISTS idx_updates_agent ON update_packages(agent_id);
CREATE INDEX IF NOT EXISTS idx_updates_severity ON update_packages(severity);
CREATE INDEX IF NOT EXISTS idx_updates_package_type ON update_packages(package_type);
CREATE INDEX IF NOT EXISTS idx_updates_composite ON update_packages(status, severity, agent_id);
-- Update logs
CREATE TABLE update_logs (
CREATE TABLE IF NOT EXISTS update_logs (
id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
agent_id UUID REFERENCES agents(id) ON DELETE CASCADE,
update_package_id UUID REFERENCES update_packages(id) ON DELETE SET NULL,
@@ -83,21 +83,21 @@ CREATE TABLE update_logs (
executed_at TIMESTAMP DEFAULT NOW()
);
CREATE INDEX idx_logs_agent ON update_logs(agent_id);
CREATE INDEX idx_logs_result ON update_logs(result);
CREATE INDEX idx_logs_executed_at ON update_logs(executed_at DESC);
CREATE INDEX IF NOT EXISTS idx_logs_agent ON update_logs(agent_id);
CREATE INDEX IF NOT EXISTS idx_logs_result ON update_logs(result);
CREATE INDEX IF NOT EXISTS idx_logs_executed_at ON update_logs(executed_at DESC);
-- Agent tags
CREATE TABLE agent_tags (
CREATE TABLE IF NOT EXISTS agent_tags (
agent_id UUID REFERENCES agents(id) ON DELETE CASCADE,
tag VARCHAR(100) NOT NULL,
PRIMARY KEY (agent_id, tag)
);
CREATE INDEX idx_agent_tags_tag ON agent_tags(tag);
CREATE INDEX IF NOT EXISTS idx_agent_tags_tag ON agent_tags(tag);
-- Users (for authentication)
CREATE TABLE users (
CREATE TABLE IF NOT EXISTS users (
id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
username VARCHAR(255) UNIQUE NOT NULL,
email VARCHAR(255) UNIQUE NOT NULL,
@@ -107,11 +107,11 @@ CREATE TABLE users (
last_login TIMESTAMP
);
CREATE INDEX idx_users_username ON users(username);
CREATE INDEX idx_users_email ON users(email);
CREATE INDEX IF NOT EXISTS idx_users_username ON users(username);
CREATE INDEX IF NOT EXISTS idx_users_email ON users(email);
-- Commands queue (for agent orchestration)
CREATE TABLE agent_commands (
CREATE TABLE IF NOT EXISTS agent_commands (
id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
agent_id UUID REFERENCES agents(id) ON DELETE CASCADE,
command_type VARCHAR(50) NOT NULL,
@@ -123,5 +123,5 @@ CREATE TABLE agent_commands (
result JSONB
);
CREATE INDEX idx_commands_agent_status ON agent_commands(agent_id, status);
CREATE INDEX idx_commands_created_at ON agent_commands(created_at DESC);
CREATE INDEX IF NOT EXISTS idx_commands_agent_status ON agent_commands(agent_id, status);
CREATE INDEX IF NOT EXISTS idx_commands_created_at ON agent_commands(created_at DESC);

View File

@@ -2,13 +2,13 @@
-- This enables the hybrid version tracking system
ALTER TABLE agents
ADD COLUMN current_version VARCHAR(50) DEFAULT '0.0.0',
ADD COLUMN update_available BOOLEAN DEFAULT FALSE,
ADD COLUMN last_version_check TIMESTAMP DEFAULT CURRENT_TIMESTAMP;
ADD COLUMN IF NOT EXISTS current_version VARCHAR(50) DEFAULT '0.0.0',
ADD COLUMN IF NOT EXISTS update_available BOOLEAN DEFAULT FALSE,
ADD COLUMN IF NOT EXISTS last_version_check TIMESTAMP DEFAULT CURRENT_TIMESTAMP;
-- Add index for faster queries on update status
CREATE INDEX idx_agents_update_available ON agents(update_available);
CREATE INDEX idx_agents_current_version ON agents(current_version);
CREATE INDEX IF NOT EXISTS idx_agents_update_available ON agents(update_available);
CREATE INDEX IF NOT EXISTS idx_agents_current_version ON agents(current_version);
-- Add comment to document the purpose
COMMENT ON COLUMN agents.current_version IS 'The version of the agent currently running';

View File

@@ -3,7 +3,7 @@
-- Add retried_from_id column to link retries to their original commands
ALTER TABLE agent_commands
ADD COLUMN retried_from_id UUID REFERENCES agent_commands(id) ON DELETE SET NULL;
ADD COLUMN IF NOT EXISTS retried_from_id UUID REFERENCES agent_commands(id) ON DELETE SET NULL;
-- Add index for efficient retry chain lookups
CREATE INDEX idx_commands_retried_from ON agent_commands(retried_from_id) WHERE retried_from_id IS NOT NULL;
CREATE INDEX IF NOT EXISTS idx_commands_retried_from ON agent_commands(retried_from_id) WHERE retried_from_id IS NOT NULL;

View File

@@ -1,7 +1,7 @@
-- Registration tokens for secure agent enrollment
-- Tokens are one-time use and have configurable expiration
CREATE TABLE registration_tokens (
CREATE TABLE IF NOT EXISTS registration_tokens (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
token VARCHAR(64) UNIQUE NOT NULL, -- One-time use token
label VARCHAR(255), -- Optional label for token identification
@@ -23,10 +23,10 @@ CREATE TABLE registration_tokens (
);
-- Indexes for performance
CREATE INDEX idx_registration_tokens_token ON registration_tokens(token);
CREATE INDEX idx_registration_tokens_expires_at ON registration_tokens(expires_at);
CREATE INDEX idx_registration_tokens_status ON registration_tokens(status);
CREATE INDEX idx_registration_tokens_used_by_agent ON registration_tokens(used_by_agent_id) WHERE used_by_agent_id IS NOT NULL;
CREATE INDEX IF NOT EXISTS idx_registration_tokens_token ON registration_tokens(token);
CREATE INDEX IF NOT EXISTS idx_registration_tokens_expires_at ON registration_tokens(expires_at);
CREATE INDEX IF NOT EXISTS idx_registration_tokens_status ON registration_tokens(status);
CREATE INDEX IF NOT EXISTS idx_registration_tokens_used_by_agent ON registration_tokens(used_by_agent_id) WHERE used_by_agent_id IS NOT NULL;
-- Foreign key constraint for used_by_agent_id
ALTER TABLE registration_tokens

View File

@@ -3,8 +3,8 @@
-- Add seats columns
ALTER TABLE registration_tokens
ADD COLUMN max_seats INT NOT NULL DEFAULT 1,
ADD COLUMN seats_used INT NOT NULL DEFAULT 0;
ADD COLUMN IF NOT EXISTS max_seats INT NOT NULL DEFAULT 1,
ADD COLUMN IF NOT EXISTS seats_used INT NOT NULL DEFAULT 0;
-- Backfill existing tokens
-- Tokens with status='used' should have seats_used=1, max_seats=1
@@ -38,8 +38,8 @@ CREATE TABLE IF NOT EXISTS registration_token_usage (
UNIQUE(token_id, agent_id)
);
CREATE INDEX idx_token_usage_token_id ON registration_token_usage(token_id);
CREATE INDEX idx_token_usage_agent_id ON registration_token_usage(agent_id);
CREATE INDEX IF NOT EXISTS idx_token_usage_token_id ON registration_token_usage(token_id);
CREATE INDEX IF NOT EXISTS idx_token_usage_agent_id ON registration_token_usage(agent_id);
-- Backfill token usage table from existing used_by_agent_id
INSERT INTO registration_token_usage (token_id, agent_id, used_at)

View File

@@ -1,11 +1,11 @@
-- Add reboot tracking fields to agents table
ALTER TABLE agents
ADD COLUMN reboot_required BOOLEAN DEFAULT FALSE,
ADD COLUMN last_reboot_at TIMESTAMP,
ADD COLUMN reboot_reason TEXT DEFAULT '';
ADD COLUMN IF NOT EXISTS reboot_required BOOLEAN DEFAULT FALSE,
ADD COLUMN IF NOT EXISTS last_reboot_at TIMESTAMP,
ADD COLUMN IF NOT EXISTS reboot_reason TEXT DEFAULT '';
-- Add index for efficient querying of agents needing reboot
CREATE INDEX idx_agents_reboot_required ON agents(reboot_required) WHERE reboot_required = TRUE;
CREATE INDEX IF NOT EXISTS idx_agents_reboot_required ON agents(reboot_required) WHERE reboot_required = TRUE;
-- Add comment for documentation
COMMENT ON COLUMN agents.reboot_required IS 'Whether the agent host requires a reboot to complete updates';

View File

@@ -3,15 +3,22 @@
-- 'system' = automatically triggered by system operations (scans, installs, etc)
ALTER TABLE agent_commands
ADD COLUMN source VARCHAR(20) DEFAULT 'manual' NOT NULL;
ADD COLUMN IF NOT EXISTS source VARCHAR(20) DEFAULT 'manual' NOT NULL;
-- Add check constraint to ensure valid source values
ALTER TABLE agent_commands
ADD CONSTRAINT agent_commands_source_check
CHECK (source IN ('manual', 'system'));
DO $$
BEGIN
IF NOT EXISTS (
SELECT 1 FROM pg_constraint WHERE conname = 'agent_commands_source_check'
) THEN
ALTER TABLE agent_commands
ADD CONSTRAINT agent_commands_source_check
CHECK (source IN ('manual', 'system'));
END IF;
END $$;
-- Add index for filtering commands by source
CREATE INDEX idx_agent_commands_source ON agent_commands(source);
CREATE INDEX IF NOT EXISTS idx_agent_commands_source ON agent_commands(source);
-- Update comment
COMMENT ON COLUMN agent_commands.source IS 'Command origin: manual (user-initiated) or system (auto-triggered)';

View File

@@ -2,15 +2,15 @@
-- This enables Ed25519 binary signing and machine binding
ALTER TABLE agents
ADD COLUMN machine_id VARCHAR(64) UNIQUE,
ADD COLUMN public_key_fingerprint VARCHAR(16),
ADD COLUMN is_updating BOOLEAN DEFAULT false,
ADD COLUMN updating_to_version VARCHAR(50),
ADD COLUMN update_initiated_at TIMESTAMP;
ADD COLUMN IF NOT EXISTS machine_id VARCHAR(64) UNIQUE,
ADD COLUMN IF NOT EXISTS public_key_fingerprint VARCHAR(16),
ADD COLUMN IF NOT EXISTS is_updating BOOLEAN DEFAULT false,
ADD COLUMN IF NOT EXISTS updating_to_version VARCHAR(50),
ADD COLUMN IF NOT EXISTS update_initiated_at TIMESTAMP;
-- Create index for machine ID lookups
CREATE INDEX idx_agents_machine_id ON agents(machine_id);
CREATE INDEX idx_agents_public_key_fingerprint ON agents(public_key_fingerprint);
CREATE INDEX IF NOT EXISTS idx_agents_machine_id ON agents(machine_id);
CREATE INDEX IF NOT EXISTS idx_agents_public_key_fingerprint ON agents(public_key_fingerprint);
-- Add comment to document the new fields
COMMENT ON COLUMN agents.machine_id IS 'Unique machine identifier to bind agent binaries to specific hardware';
@@ -20,7 +20,7 @@ COMMENT ON COLUMN agents.updating_to_version IS 'Target version for ongoing upda
COMMENT ON COLUMN agents.update_initiated_at IS 'When the update process started';
-- Create table for storing signed update packages
CREATE TABLE agent_update_packages (
CREATE TABLE IF NOT EXISTS agent_update_packages (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
version VARCHAR(50) NOT NULL,
platform VARCHAR(50) NOT NULL, -- linux-amd64, linux-arm64, windows-amd64, etc.
@@ -35,9 +35,9 @@ CREATE TABLE agent_update_packages (
);
-- Add indexes for update packages
CREATE INDEX idx_agent_update_packages_version ON agent_update_packages(version);
CREATE INDEX idx_agent_update_packages_platform ON agent_update_packages(platform, architecture);
CREATE INDEX idx_agent_update_packages_active ON agent_update_packages(is_active);
CREATE INDEX IF NOT EXISTS idx_agent_update_packages_version ON agent_update_packages(version);
CREATE INDEX IF NOT EXISTS idx_agent_update_packages_platform ON agent_update_packages(platform, architecture);
CREATE INDEX IF NOT EXISTS idx_agent_update_packages_active ON agent_update_packages(is_active);
-- Add comments for update packages table
COMMENT ON TABLE agent_update_packages IS 'Stores signed agent binary packages for secure updates';

View File

@@ -7,7 +7,7 @@ DROP INDEX IF EXISTS idx_agents_machine_id;
-- Create unique index to prevent duplicate machine IDs (allows multiple NULLs)
-- Note: CONCURRENTLY removed to allow transaction-based migration
CREATE UNIQUE INDEX idx_agents_machine_id_unique ON agents(machine_id) WHERE machine_id IS NOT NULL;
CREATE UNIQUE INDEX IF NOT EXISTS idx_agents_machine_id_unique ON agents(machine_id) WHERE machine_id IS NOT NULL;
-- Add comment for documentation
COMMENT ON COLUMN agents.machine_id IS 'SHA-256 hash of hardware fingerprint (prevents agent impersonation via config copying)';

View File

@@ -1,34 +0,0 @@
-- migration 018: Create scanner_config table for user-configurable scanner timeouts
-- This enables admin users to adjust scanner timeouts per subsystem via web UI
CREATE TABLE IF NOT EXISTS scanner_config (
scanner_name VARCHAR(50) PRIMARY KEY,
timeout_ms BIGINT NOT NULL, -- Timeout in milliseconds
updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP NOT NULL,
CHECK (timeout_ms > 0 AND timeout_ms <= 7200000) -- Max 2 hours (7200000ms)
);
COMMENT ON TABLE scanner_config IS 'Stores user-configurable scanner timeout values';
COMMENT ON COLUMN scanner_config.scanner_name IS 'Name of the scanner (dnf, apt, docker, etc.)';
COMMENT ON COLUMN scanner_config.timeout_ms IS 'Timeout in milliseconds (1s = 1000ms)';
COMMENT ON COLUMN scanner_config.updated_at IS 'When this configuration was last modified';
-- Create index on updated_at for efficient querying of recently changed configs
CREATE INDEX IF NOT EXISTS idx_scanner_config_updated_at ON scanner_config(updated_at);
-- Insert default timeout values for all scanners
-- 30 minutes (1800000ms) is the new default for package scanners
INSERT INTO scanner_config (scanner_name, timeout_ms) VALUES
('system', 10000), -- 10 seconds for system metrics
('storage', 10000), -- 10 seconds for storage scan
('apt', 1800000), -- 30 minutes for APT
('dnf', 1800000), -- 30 minutes for DNF
('docker', 60000), -- 60 seconds for Docker
('windows', 600000), -- 10 minutes for Windows Updates
('winget', 120000), -- 2 minutes for Winget
('updates', 30000) -- 30 seconds for virtual update subsystem
ON CONFLICT (scanner_name) DO NOTHING;
-- Grant permissions
GRANT SELECT, INSERT, UPDATE, DELETE ON scanner_config TO redflag_user;

View File

@@ -14,21 +14,21 @@ CREATE TABLE IF NOT EXISTS system_events (
);
-- Performance indexes for common query patterns
CREATE INDEX idx_system_events_agent_id ON system_events(agent_id);
CREATE INDEX idx_system_events_type_subtype ON system_events(event_type, event_subtype);
CREATE INDEX idx_system_events_created_at ON system_events(created_at DESC);
CREATE INDEX idx_system_events_severity ON system_events(severity);
CREATE INDEX idx_system_events_component ON system_events(component);
CREATE INDEX IF NOT EXISTS idx_system_events_agent_id ON system_events(agent_id);
CREATE INDEX IF NOT EXISTS idx_system_events_type_subtype ON system_events(event_type, event_subtype);
CREATE INDEX IF NOT EXISTS idx_system_events_created_at ON system_events(created_at DESC);
CREATE INDEX IF NOT EXISTS idx_system_events_severity ON system_events(severity);
CREATE INDEX IF NOT EXISTS idx_system_events_component ON system_events(component);
-- Composite index for agent timeline queries (agent + time range)
CREATE INDEX idx_system_events_agent_timeline ON system_events(agent_id, created_at DESC);
CREATE INDEX IF NOT EXISTS idx_system_events_agent_timeline ON system_events(agent_id, created_at DESC);
-- Partial index for error events (faster error dashboard queries)
CREATE INDEX idx_system_events_errors ON system_events(severity, created_at DESC)
CREATE INDEX IF NOT EXISTS idx_system_events_errors ON system_events(severity, created_at DESC)
WHERE severity IN ('error', 'critical');
-- GIN index for metadata JSONB queries (allows searching event metadata)
CREATE INDEX idx_system_events_metadata_gin ON system_events USING GIN(metadata);
CREATE INDEX IF NOT EXISTS idx_system_events_metadata_gin ON system_events USING GIN(metadata);
-- Comment for documentation
COMMENT ON TABLE system_events IS 'Unified event logging table for all system events (agent + server)';

View File

@@ -1,7 +1,7 @@
-- Create dedicated storage_metrics table for proper storage tracking
-- This replaces the misuse of metrics table for storage data
CREATE TABLE storage_metrics (
CREATE TABLE IF NOT EXISTS storage_metrics (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
agent_id UUID NOT NULL REFERENCES agents(id) ON DELETE CASCADE,
mountpoint VARCHAR(255) NOT NULL,
@@ -18,7 +18,7 @@ CREATE TABLE storage_metrics (
);
-- Indexes for performance
CREATE INDEX idx_storage_metrics_agent_id ON storage_metrics(agent_id);
CREATE INDEX idx_storage_metrics_created_at ON storage_metrics(created_at DESC);
CREATE INDEX idx_storage_metrics_mountpoint ON storage_metrics(mountpoint);
CREATE INDEX idx_storage_metrics_agent_mount ON storage_metrics(agent_id, mountpoint, created_at DESC);
CREATE INDEX IF NOT EXISTS idx_storage_metrics_agent_id ON storage_metrics(agent_id);
CREATE INDEX IF NOT EXISTS idx_storage_metrics_created_at ON storage_metrics(created_at DESC);
CREATE INDEX IF NOT EXISTS idx_storage_metrics_mountpoint ON storage_metrics(mountpoint);
CREATE INDEX IF NOT EXISTS idx_storage_metrics_agent_mount ON storage_metrics(agent_id, mountpoint, created_at DESC);

View File

@@ -1,7 +1,7 @@
-- Migration 023: Client Error Logging Schema
-- Implements ETHOS #1: Errors are History, Not /dev/null
CREATE TABLE client_errors (
CREATE TABLE IF NOT EXISTS client_errors (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
agent_id UUID REFERENCES agents(id) ON DELETE SET NULL,
subsystem VARCHAR(50) NOT NULL,
@@ -15,10 +15,10 @@ CREATE TABLE client_errors (
);
-- Indexes for efficient querying
CREATE INDEX idx_client_errors_agent_time ON client_errors(agent_id, created_at DESC);
CREATE INDEX idx_client_errors_subsystem_time ON client_errors(subsystem, created_at DESC);
CREATE INDEX idx_client_errors_error_type_time ON client_errors(error_type, created_at DESC);
CREATE INDEX idx_client_errors_created_at ON client_errors(created_at DESC);
CREATE INDEX IF NOT EXISTS idx_client_errors_agent_time ON client_errors(agent_id, created_at DESC);
CREATE INDEX IF NOT EXISTS idx_client_errors_subsystem_time ON client_errors(subsystem, created_at DESC);
CREATE INDEX IF NOT EXISTS idx_client_errors_error_type_time ON client_errors(error_type, created_at DESC);
CREATE INDEX IF NOT EXISTS idx_client_errors_created_at ON client_errors(created_at DESC);
-- Comments for documentation
COMMENT ON TABLE client_errors IS 'Frontend error logs for debugging and auditing. Implements ETHOS #1.';

View File

@@ -2,13 +2,13 @@
-- Prevents multiple pending scan commands per subsystem per agent
-- Add unique constraint to enforce single pending command per subsystem
CREATE UNIQUE INDEX idx_agent_pending_subsystem
CREATE UNIQUE INDEX IF NOT EXISTS idx_agent_pending_subsystem
ON agent_commands(agent_id, command_type, status)
WHERE status = 'pending';
-- Add idempotency key support for retry scenarios
ALTER TABLE agent_commands ADD COLUMN idempotency_key VARCHAR(64) UNIQUE NULL;
CREATE INDEX idx_agent_commands_idempotency_key ON agent_commands(idempotency_key);
ALTER TABLE agent_commands ADD COLUMN IF NOT EXISTS idempotency_key VARCHAR(64) UNIQUE NULL;
CREATE INDEX IF NOT EXISTS idx_agent_commands_idempotency_key ON agent_commands(idempotency_key);
-- Comments for documentation
COMMENT ON TABLE agent_commands IS 'Commands sent to agents for execution';

View File

@@ -1,7 +1,6 @@
-- Re-enable updates subsystem (rollback)
-- Migration 024 rollback: Re-enable updates subsystem
UPDATE agent_subsystems
SET enabled = true,
auto_run = false,
deprecated = false,
updated_at = NOW()
WHERE subsystem = 'updates';

View File

@@ -1,19 +1,12 @@
-- Migration: Disable legacy updates subsystem
-- Migration 024: Disable legacy updates subsystem
-- Purpose: Clean up from monolithic scan_updates to individual scanners
-- Version: 0.1.28
-- Date: 2025-12-22
-- Fixed: removed self-insert into schema_migrations (F-B1-1)
-- Fixed: removed reference to non-existent deprecated column (F-B1-2)
-- Disable all 'updates' subsystems (legacy monolithic scanner)
-- Uses existing enabled/auto_run columns (no deprecated column needed)
UPDATE agent_subsystems
SET enabled = false,
auto_run = false,
deprecated = true,
updated_at = NOW()
WHERE subsystem = 'updates';
-- Add comment tracking this migration
COMMENT ON TABLE agent_subsystems IS 'Agent subsystems configuration. Legacy updates subsystem disabled in v0.1.28';
-- Log migration completion
INSERT INTO schema_migrations (version) VALUES
('024_disable_updates_subsystem.up.sql');

View File

@@ -0,0 +1,2 @@
-- Migration 027 rollback: Remove scanner_config table
-- NOTE: dropping the table discards any user-customized scanner timeouts;
-- re-applying the up migration restores only the shipped defaults.
DROP TABLE IF EXISTS scanner_config;

View File

@@ -0,0 +1,25 @@
-- Migration 027: Create scanner_config table for user-configurable scanner timeouts
-- Renumbered from 018 (F-B1-3: wrong file suffix, F-B1-13: duplicate number)
-- Fixed: removed GRANT to non-existent role (F-B1-4)
-- Fixed: added IF NOT EXISTS for idempotency (ETHOS #4)
-- One row per scanner; admins adjust timeout_ms per subsystem via the web UI.
CREATE TABLE IF NOT EXISTS scanner_config (
scanner_name VARCHAR(50) PRIMARY KEY, -- scanner identifier (dnf, apt, docker, etc.)
timeout_ms BIGINT NOT NULL, -- timeout in milliseconds (1s = 1000ms)
updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP NOT NULL, -- last modification time
CHECK (timeout_ms > 0 AND timeout_ms <= 7200000) -- must be positive, max 2 hours (7200000ms)
);
-- Index for querying recently changed configs by modification time
CREATE INDEX IF NOT EXISTS idx_scanner_config_updated_at ON scanner_config(updated_at);
-- Insert default timeout values for all scanners
-- 30 minutes (1800000ms) is the default for package scanners (apt/dnf)
INSERT INTO scanner_config (scanner_name, timeout_ms) VALUES
('system', 10000), -- 10 seconds for system metrics
('storage', 10000), -- 10 seconds for storage scan
('apt', 1800000), -- 30 minutes for APT
('dnf', 1800000), -- 30 minutes for DNF
('docker', 60000), -- 60 seconds for Docker
('windows', 600000), -- 10 minutes for Windows Updates
('winget', 120000), -- 2 minutes for Winget
('updates', 30000) -- 30 seconds for virtual update subsystem
-- ON CONFLICT keeps re-runs idempotent and preserves admin-customized values
ON CONFLICT (scanner_name) DO NOTHING;

View File

@@ -0,0 +1,2 @@
-- Migration 028 rollback
-- Removes the partial composite index added for the command timeout service.
-- Safe to run repeatedly (IF EXISTS); queries fall back to existing indexes.
DROP INDEX IF EXISTS idx_agent_commands_status_sent_at;

View File

@@ -0,0 +1,6 @@
-- Migration 028: Add index for GetStuckCommands query (F-B1-5 fix)
-- Covers the (status, sent_at) pattern used by the timeout service.
-- Partial index: only 'pending'/'sent' rows are ever considered stuck, so
-- completed/failed rows are excluded, keeping the index small and hot.
CREATE INDEX IF NOT EXISTS idx_agent_commands_status_sent_at
ON agent_commands(status, sent_at)
WHERE status IN ('pending', 'sent');

View File

@@ -53,6 +53,8 @@ func checkIdempotency(src string) (violations int, details []string) {
// ---------------------------------------------------------------------------
func TestMigrationsHaveIdempotencyViolations(t *testing.T) {
// POST-FIX (F-B1-15): All migrations should now be idempotent.
// This test confirms no violations remain.
files, err := os.ReadDir(".")
if err != nil {
t.Fatalf("failed to read migrations directory: %v", err)
@@ -64,10 +66,6 @@ func TestMigrationsHaveIdempotencyViolations(t *testing.T) {
if !strings.HasSuffix(f.Name(), ".up.sql") {
continue
}
// Skip A-series migrations (025, 026) which are already idempotent
if strings.HasPrefix(f.Name(), "025_") || strings.HasPrefix(f.Name(), "026_") {
continue
}
content, err := os.ReadFile(f.Name())
if err != nil {
@@ -84,11 +82,10 @@ func TestMigrationsHaveIdempotencyViolations(t *testing.T) {
}
}
if totalViolations == 0 {
t.Error("[ERROR] [server] [database] F-B1-15 already fixed: no idempotency violations found")
if totalViolations > 0 {
t.Errorf("[ERROR] [server] [database] %d idempotency violations remain", totalViolations)
}
t.Logf("[INFO] [server] [database] F-B1-15 confirmed: %d idempotency violations in pre-A-series migrations", totalViolations)
t.Log("[INFO] [server] [database] F-B1-15 FIXED: all migrations are idempotent")
}
// ---------------------------------------------------------------------------

View File

@@ -1,12 +1,9 @@
package migrations_test
// index_audit_test.go — Tests for database index coverage.
//
// F-B1-5 FIXED: Migration 028 adds a composite partial index on
// agent_commands(status, sent_at) for GetStuckCommands.
//
// Run: cd aggregator-server && go test ./internal/database/migrations/... -v -run TestStuckCommands
import (
"os"
@@ -14,59 +11,39 @@ import (
"testing"
)
// ---------------------------------------------------------------------------
// Test 6.1 — An index covering agent_commands(status, sent_at) must exist (F-B1-5)
// ---------------------------------------------------------------------------
// TestStuckCommandsIndexIsMissing verifies the F-B1-5 fix: some *.up.sql
// migration must CREATE INDEX on agent_commands covering sent_at, so the
// GetStuckCommands timeout query no longer needs a full table scan.
func TestStuckCommandsIndexIsMissing(t *testing.T) {
	files, err := os.ReadDir(".")
	if err != nil {
		t.Fatalf("failed to read migrations directory: %v", err)
	}
	foundIndex := false
	for _, f := range files {
		// Only *.up.sql files are processed by the migration runner.
		if !strings.HasSuffix(f.Name(), ".up.sql") {
			continue
		}
		content, err := os.ReadFile(f.Name())
		if err != nil {
			continue
		}
		// Split into individual statements and check each CREATE INDEX.
		stmts := strings.Split(string(content), ";")
		for _, stmt := range stmts {
			lower := strings.ToLower(stmt)
			// Must be a CREATE INDEX on agent_commands that includes sent_at.
			if strings.Contains(lower, "create index") &&
				strings.Contains(lower, "agent_commands") &&
				strings.Contains(lower, "sent_at") {
				foundIndex = true
				t.Logf("[INFO] [server] [database] found agent_commands sent_at index in %s", f.Name())
			}
		}
	}
	if !foundIndex {
		t.Error("[ERROR] [server] [database] F-B1-5 NOT FIXED: no index on agent_commands(status, sent_at)")
		return
	}
	t.Log("[INFO] [server] [database] F-B1-5 FIXED: stuck commands index exists")
}
// ---------------------------------------------------------------------------
// Test 6.2 — Composite index for stuck commands must exist (assert fix)
//
// Category: FAIL-NOW / PASS-AFTER-FIX
// ---------------------------------------------------------------------------
func TestStuckCommandsIndexExists(t *testing.T) {
files, err := os.ReadDir(".")
if err != nil {
@@ -75,14 +52,13 @@ func TestStuckCommandsIndexExists(t *testing.T) {
foundIndex := false
for _, f := range files {
if !strings.HasSuffix(f.Name(), ".up.sql") && !strings.HasSuffix(f.Name(), ".sql") {
if !strings.HasSuffix(f.Name(), ".up.sql") {
continue
}
content, err := os.ReadFile(f.Name())
if err != nil {
continue
}
stmts := strings.Split(string(content), ";")
for _, stmt := range stmts {
lower := strings.ToLower(stmt)
@@ -96,8 +72,7 @@ func TestStuckCommandsIndexExists(t *testing.T) {
if !foundIndex {
t.Errorf("[ERROR] [server] [database] no index on agent_commands covering sent_at.\n" +
"F-B1-5: GetStuckCommands needs a composite index on (status, sent_at).\n" +
"After fix: add CREATE INDEX IF NOT EXISTS idx_agent_commands_stuck\n" +
"ON agent_commands(status, sent_at) WHERE status IN ('pending', 'sent')")
"F-B1-5: GetStuckCommands needs index on (status, sent_at).")
}
t.Log("[INFO] [server] [database] F-B1-5 FIXED: stuck commands index exists")
}

View File

@@ -1,15 +1,11 @@
package migrations_test
// migration018_test.go — Tests for the scanner_config migration fixes.
//
// F-B1-3 FIXED: Renamed from 018_create_scanner_config_table.sql (no .up.sql
// suffix, so the runner skipped it) to 027_create_scanner_config_table.up.sql
// (correct suffix, unique number).
//
// F-B1-4 FIXED: GRANT to non-existent role `redflag_user` removed.
//
// Run: cd aggregator-server && go test ./internal/database/migrations/... -v -run TestMigration018
import (
"os"
@@ -17,50 +13,25 @@ import (
"testing"
)
// ---------------------------------------------------------------------------
// Test 3.1 — Old scanner_config migration files (018_*) must be gone (F-B1-3)
// ---------------------------------------------------------------------------
// TestMigration018ScannerConfigHasWrongSuffix verifies the F-B1-3 fix: the
// stale 018_* scanner_config migration (either suffix) must no longer exist;
// the migration was renumbered to 027 with the .up.sql suffix.
func TestMigration018ScannerConfigHasWrongSuffix(t *testing.T) {
	files, err := os.ReadDir(".")
	if err != nil {
		t.Fatalf("failed to read migrations directory: %v", err)
	}
	for _, f := range files {
		switch f.Name() {
		case "018_create_scanner_config_table.sql",
			"018_create_scanner_config_table.up.sql":
			t.Errorf("[ERROR] [server] [database] F-B1-3 NOT FIXED: stale file %s still exists", f.Name())
			return
		}
	}
	t.Log("[INFO] [server] [database] F-B1-3 FIXED: old 018_create_scanner_config_table.sql removed")
}
// ---------------------------------------------------------------------------
// Test 3.2 — scanner_config migration must exist as 027_*.up.sql (F-B1-3)
// ---------------------------------------------------------------------------
// TestMigration018ScannerConfigHasCorrectSuffix verifies the F-B1-3 fix: the
// scanner_config migration must be present as 027_create_scanner_config_table.up.sql
// so the migration runner (which only processes *.up.sql) applies it.
func TestMigration018ScannerConfigHasCorrectSuffix(t *testing.T) {
	files, err := os.ReadDir(".")
	if err != nil {
		t.Fatalf("failed to read migrations directory: %v", err)
	}
	found := false
	for _, f := range files {
		if f.Name() == "027_create_scanner_config_table.up.sql" {
			found = true
			break
		}
	}
	if !found {
		t.Errorf("[ERROR] [server] [database] 027_create_scanner_config_table.up.sql not found.\n" +
			"F-B1-3: scanner_config migration must have .up.sql suffix.")
		return
	}
	t.Log("[INFO] [server] [database] F-B1-3 FIXED: scanner_config migration has correct suffix (027)")
}
// ---------------------------------------------------------------------------
// Test 3.3 — scanner_config migration must not GRANT to redflag_user (F-B1-4)
// ---------------------------------------------------------------------------
// TestMigration018ScannerConfigHasNoGrantToWrongRole verifies the F-B1-4 fix:
// the scanner_config migration must not reference the non-existent database
// role `redflag_user` (the default role is `redflag`).
func TestMigration018ScannerConfigHasNoGrantToWrongRole(t *testing.T) {
	content, err := os.ReadFile("027_create_scanner_config_table.up.sql")
	if err != nil {
		t.Fatalf("failed to read scanner_config migration: %v", err)
	}
	if strings.Contains(string(content), "redflag_user") {
		t.Errorf("[ERROR] [server] [database] scanner_config migration GRANTs to non-existent role.\n" +
			"F-B1-4: GRANT to redflag_user must be removed.")
		return
	}
	t.Log("[INFO] [server] [database] F-B1-4 FIXED: no GRANT to wrong role")
}

View File

@@ -1,15 +1,10 @@
package migrations_test
// migration024_test.go — Tests for the migration 024 fixes.
//
// F-B1-1 FIXED: Self-insert into schema_migrations removed (the runner
// records applied versions itself; a self-insert caused a duplicate key).
// F-B1-2 FIXED: Reference to non-existent `deprecated` column removed.
// Migration now uses the existing enabled/auto_run columns.
//
// Run: cd aggregator-server && go test ./internal/database/migrations/... -v -run TestMigration024
import (
"os"
@@ -17,126 +12,71 @@ import (
"testing"
)
// ---------------------------------------------------------------------------
// Test 2.1 — Migration 024 must not self-insert into schema_migrations (F-B1-1)
// ---------------------------------------------------------------------------
// TestMigration024HasSelfInsert verifies the F-B1-1 fix: migration 024 must
// not insert its own row into schema_migrations, because the migration
// runner inserts that row itself and a self-insert causes a duplicate key.
func TestMigration024HasSelfInsert(t *testing.T) {
	content, err := os.ReadFile("024_disable_updates_subsystem.up.sql")
	if err != nil {
		t.Fatalf("failed to read migration 024: %v", err)
	}
	if strings.Contains(string(content), "INSERT INTO schema_migrations") {
		t.Error("[ERROR] [server] [database] F-B1-1: migration 024 still contains self-insert")
		return
	}
	t.Log("[INFO] [server] [database] F-B1-1 FIXED: no self-insert in migration 024")
}
// ---------------------------------------------------------------------------
// Test 2.2 — Asserts the F-B1-1 fix with an explicit error message
// ---------------------------------------------------------------------------
// TestMigration024ShouldNotHaveSelfInsert asserts the F-B1-1 fix: the
// migration runner owns schema_migrations tracking, so migration SQL must
// never manage its own tracking entry.
func TestMigration024ShouldNotHaveSelfInsert(t *testing.T) {
	content, err := os.ReadFile("024_disable_updates_subsystem.up.sql")
	if err != nil {
		t.Fatalf("failed to read migration 024: %v", err)
	}
	if strings.Contains(string(content), "INSERT INTO schema_migrations") {
		t.Errorf("[ERROR] [server] [database] migration 024 contains self-insert into schema_migrations.\n" +
			"F-B1-1: the migration runner handles schema_migrations tracking.")
	}
}
// ---------------------------------------------------------------------------
// Test 2.3 — Migration 024 must not reference the `deprecated` column (F-B1-2)
// ---------------------------------------------------------------------------
// TestMigration024ReferencesDeprecatedColumn verifies the F-B1-2 fix: no
// non-comment line of migration 024 may mention the `deprecated` column,
// which was never added to agent_subsystems.
func TestMigration024ReferencesDeprecatedColumn(t *testing.T) {
	content, err := os.ReadFile("024_disable_updates_subsystem.up.sql")
	if err != nil {
		t.Fatalf("failed to read migration 024: %v", err)
	}
	// Check for "deprecated" used in SQL, ignoring `--` comment lines that
	// legitimately document the fix itself.
	lines := strings.Split(string(content), "\n")
	for _, line := range lines {
		trimmed := strings.TrimSpace(line)
		if strings.HasPrefix(trimmed, "--") {
			continue
		}
		if strings.Contains(strings.ToLower(trimmed), "deprecated") {
			t.Error("[ERROR] [server] [database] F-B1-2: migration 024 still references deprecated column")
			return
		}
	}
	t.Log("[INFO] [server] [database] F-B1-2 FIXED: no deprecated column reference in migration 024")
}
// ---------------------------------------------------------------------------
// Test 2.4 — Every column migration 024 writes must exist in the schema (F-B1-2)
// ---------------------------------------------------------------------------
// TestMigration024ColumnExistsInSchema verifies the F-B1-2 fix: migration
// 024 only writes columns that migration 015 (which creates the
// agent_subsystems table) actually defines — enabled, auto_run, updated_at.
// Note: this is a textual containment check on the two SQL files, not a
// live-schema check.
func TestMigration024ColumnExistsInSchema(t *testing.T) {
	content024, err := os.ReadFile("024_disable_updates_subsystem.up.sql")
	if err != nil {
		t.Fatalf("failed to read migration 024: %v", err)
	}
	content015, err := os.ReadFile("015_agent_subsystems.up.sql")
	if err != nil {
		t.Fatalf("failed to read migration 015: %v", err)
	}
	src015 := string(content015)
	src024 := string(content024)
	// 024 sets enabled, auto_run, updated_at — each must appear in 015's
	// CREATE TABLE for agent_subsystems.
	ok := true
	for _, col := range []string{"enabled", "auto_run", "updated_at"} {
		if strings.Contains(src024, col) && !strings.Contains(src015, col) {
			t.Errorf("[ERROR] [server] [database] migration 024 uses column %q not defined in 015", col)
			ok = false
		}
	}
	if ok {
		t.Log("[INFO] [server] [database] F-B1-2 FIXED: all columns used by 024 exist in schema")
	}
}

View File

@@ -1,12 +1,9 @@
package database_test
// refresh_token_cleanup_test.go — Tests for background refresh token cleanup.
//
// F-B1-10 FIXED: A background goroutine in main.go now calls
// CleanupExpiredTokens every 24 hours, so the refresh_tokens table no
// longer grows unbounded between manual admin cleanups.
//
// Run: cd aggregator-server && go test ./internal/database/... -v -run TestRefreshToken
import (
"os"
@@ -15,68 +12,36 @@ import (
"testing"
)
// ---------------------------------------------------------------------------
// Test 7.1 — Background refresh token cleanup must exist in main.go (F-B1-10)
// ---------------------------------------------------------------------------
// TestNoBackgroundRefreshTokenCleanup verifies the F-B1-10 fix: main.go must
// call CleanupExpiredTokens, and the call must sit in a background context
// (a `go func` or a ticker) rather than only behind the admin endpoint.
// Note: this is a textual scan of main.go, not a runtime check.
func TestNoBackgroundRefreshTokenCleanup(t *testing.T) {
	mainPath := filepath.Join("..", "..", "cmd", "server", "main.go")
	content, err := os.ReadFile(mainPath)
	if err != nil {
		t.Fatalf("failed to read main.go: %v", err)
	}
	src := strings.ToLower(string(content))
	if !strings.Contains(src, "cleanupexpiredtokens") {
		t.Error("[ERROR] [server] [database] F-B1-10 NOT FIXED: no CleanupExpiredTokens call in main.go")
		return
	}
	// Check the call site is in a goroutine/ticker context: look backwards
	// a few hundred characters for "go func" or "ticker".
	idx := strings.Index(src, "cleanupexpiredtokens")
	start := idx - 300
	if start < 0 {
		start = 0
	}
	context := src[start:idx]
	if !strings.Contains(context, "go func") && !strings.Contains(context, "ticker") {
		t.Error("[ERROR] [server] [database] CleanupExpiredTokens exists but not in background context")
		return
	}
	t.Log("[INFO] [server] [database] F-B1-10 FIXED: background refresh token cleanup exists")
}
// ---------------------------------------------------------------------------
// Test 7.2 — Asserts the F-B1-10 fix via the cleanup marker in main.go
// ---------------------------------------------------------------------------
// TestBackgroundRefreshTokenCleanupExists asserts the F-B1-10 fix: main.go
// must contain the background token cleanup. Accepts either the
// refresh_token_cleanup marker or a direct CleanupExpiredTokens call so the
// check survives comment/label renames.
// Note: this is a textual scan of main.go, not a runtime check.
func TestBackgroundRefreshTokenCleanupExists(t *testing.T) {
	mainPath := filepath.Join("..", "..", "cmd", "server", "main.go")
	content, err := os.ReadFile(mainPath)
	if err != nil {
		t.Fatalf("failed to read main.go: %v", err)
	}
	src := strings.ToLower(string(content))
	// After fix: main.go must contain a background goroutine or ticker
	// that periodically calls token cleanup.
	if !strings.Contains(src, "refresh_token_cleanup") &&
		!strings.Contains(src, "cleanupexpiredtokens") {
		t.Errorf("[ERROR] [server] [database] no background refresh token cleanup found.\n" +
			"F-B1-10: must periodically call CleanupExpiredTokens.")
		return
	}
	t.Log("[INFO] [server] [database] F-B1-10 FIXED: background cleanup goroutine found")
}