WIP: Save current state - security subsystems, migrations, logging
This commit is contained in:
135
aggregator-agent/internal/event/buffer.go
Normal file
135
aggregator-agent/internal/event/buffer.go
Normal file
@@ -0,0 +1,135 @@
|
||||
package event
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/google/uuid"
|
||||
|
||||
"sync"
|
||||
|
||||
"github.com/Fimeg/RedFlag/aggregator-agent/internal/models"
|
||||
)
|
||||
|
||||
const (
	// defaultMaxBufferSize caps how many events are retained in the
	// on-disk buffer before the oldest entries are dropped.
	defaultMaxBufferSize = 1000 // Max events to buffer
)
|
||||
|
||||
// Buffer handles local event buffering for offline resilience.
// Events are persisted as a single JSON array in one file; the mutex
// serializes the read-modify-write cycles so concurrent goroutines
// cannot interleave file updates.
type Buffer struct {
	filePath string     // path of the JSON buffer file on disk
	maxSize  int        // maximum number of buffered events (oldest dropped first)
	mu       sync.Mutex // guards all access to the buffer file
}
|
||||
|
||||
// NewBuffer creates a new event buffer with the specified file path
|
||||
func NewBuffer(filePath string) *Buffer {
|
||||
return &Buffer{
|
||||
filePath: filePath,
|
||||
maxSize: defaultMaxBufferSize,
|
||||
}
|
||||
}
|
||||
|
||||
// BufferEvent saves an event to the local buffer file
|
||||
func (b *Buffer) BufferEvent(event *models.SystemEvent) error {
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
|
||||
// Ensure event has an ID
|
||||
if event.ID == uuid.Nil {
|
||||
return fmt.Errorf("event ID cannot be nil")
|
||||
}
|
||||
|
||||
// Create directory if needed
|
||||
dir := filepath.Dir(b.filePath)
|
||||
if err := os.MkdirAll(dir, 0755); err != nil {
|
||||
return fmt.Errorf("failed to create buffer directory: %w", err)
|
||||
}
|
||||
|
||||
// Read existing buffer
|
||||
var events []*models.SystemEvent
|
||||
if data, err := os.ReadFile(b.filePath); err == nil {
|
||||
if err := json.Unmarshal(data, &events); err != nil {
|
||||
// If we can't unmarshal, start fresh
|
||||
events = []*models.SystemEvent{}
|
||||
}
|
||||
}
|
||||
|
||||
// Append new event
|
||||
events = append(events, event)
|
||||
|
||||
// Keep only last N events if buffer too large (circular buffer)
|
||||
if len(events) > b.maxSize {
|
||||
events = events[len(events)-b.maxSize:]
|
||||
}
|
||||
|
||||
// Write back to file
|
||||
data, err := json.Marshal(events)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to marshal events: %w", err)
|
||||
}
|
||||
|
||||
if err := os.WriteFile(b.filePath, data, 0644); err != nil {
|
||||
return fmt.Errorf("failed to write buffer file: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetBufferedEvents retrieves and clears the buffer
|
||||
func (b *Buffer) GetBufferedEvents() ([]*models.SystemEvent, error) {
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
|
||||
// Read buffer file
|
||||
var events []*models.SystemEvent
|
||||
data, err := os.ReadFile(b.filePath)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return nil, nil // No buffer file means no events
|
||||
}
|
||||
return nil, fmt.Errorf("failed to read buffer file: %w", err)
|
||||
}
|
||||
|
||||
if err := json.Unmarshal(data, &events); err != nil {
|
||||
return nil, fmt.Errorf("failed to unmarshal events: %w", err)
|
||||
}
|
||||
|
||||
// Clear buffer file after reading
|
||||
if err := os.Remove(b.filePath); err != nil && !os.IsNotExist(err) {
|
||||
// Log warning but don't fail - events were still retrieved
|
||||
fmt.Printf("Warning: Failed to clear buffer file: %v\n", err)
|
||||
}
|
||||
|
||||
return events, nil
|
||||
}
|
||||
|
||||
// SetMaxSize sets the maximum number of events to buffer
|
||||
func (b *Buffer) SetMaxSize(size int) {
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
b.maxSize = size
|
||||
}
|
||||
|
||||
// GetStats returns buffer statistics
|
||||
func (b *Buffer) GetStats() (int, error) {
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
|
||||
data, err := os.ReadFile(b.filePath)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return 0, nil
|
||||
}
|
||||
return 0, err
|
||||
}
|
||||
|
||||
var events []*models.SystemEvent
|
||||
if err := json.Unmarshal(data, &events); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
return len(events), nil
|
||||
}
|
||||
Reference in New Issue
Block a user