feat: implement secure cluster authentication (issue #13)

Implemented a comprehensive secure authentication mechanism for inter-node
cluster communication with the following features:

1. Global Cluster Secret (GCS); generation sketch below
   - Auto-generated cryptographically secure random secret (256-bit)
   - Configurable via YAML config file
   - Shared across all cluster nodes for authentication

2. Cluster Authentication Middleware (sketch below)
   - Validates X-Cluster-Secret and X-Node-ID headers
   - Applied to all cluster endpoints (/members/*, /merkle_tree/*, /kv_range)
   - Comprehensive logging of authentication attempts

3. Authenticated HTTP Client (sketch below)
   - Custom HTTP client with cluster auth headers
   - TLS support with configurable certificate verification
   - Protocol-aware (http/https based on TLS settings)

4. Secure Bootstrap Endpoint (handler sketch below)
   - New /auth/cluster-bootstrap endpoint
   - Protected by JWT authentication with admin scope
   - Allows new nodes to securely obtain cluster secret

5. Updated Cluster Communication (usage sketch below)
   - All gossip protocol requests include auth headers
   - All Merkle tree sync requests include auth headers
   - All data replication requests include auth headers

6. Configuration (struct sketch below)
   - cluster_secret: Shared secret (auto-generated if not provided)
   - cluster_tls_enabled: Enable TLS for inter-node communication
   - cluster_tls_cert_file: Path to TLS certificate
   - cluster_tls_key_file: Path to TLS private key
   - cluster_tls_skip_verify: Skip TLS verification (testing only)
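
A minimal sketch of how the secret in (1) could be generated, assuming a
32-byte value encoded as hex; the helper name and encoding used by the
actual change may differ:

    package cluster

    import (
        "crypto/rand"
        "encoding/hex"
        "fmt"
    )

    // generateClusterSecret returns a cryptographically secure 256-bit secret,
    // hex-encoded so it can be stored in the YAML config and sent in headers.
    func generateClusterSecret() (string, error) {
        buf := make([]byte, 32) // 32 bytes = 256 bits
        if _, err := rand.Read(buf); err != nil {
            return "", fmt.Errorf("failed to generate cluster secret: %w", err)
        }
        return hex.EncodeToString(buf), nil
    }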
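
A sketch of the middleware in (2) on top of plain net/http; the project's
actual router and logger (logrus appears in the diffs below) are likely
wired differently:

    package cluster

    import (
        "crypto/subtle"
        "log"
        "net/http"
    )

    // clusterAuthMiddleware rejects requests that do not carry a valid
    // X-Cluster-Secret together with an X-Node-ID, and logs every attempt.
    func clusterAuthMiddleware(secret string, next http.Handler) http.Handler {
        return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
            nodeID := r.Header.Get("X-Node-ID")
            presented := r.Header.Get("X-Cluster-Secret")
            // Constant-time comparison avoids leaking the secret via timing.
            if nodeID == "" || subtle.ConstantTimeCompare([]byte(presented), []byte(secret)) != 1 {
                log.Printf("cluster auth failed: node_id=%q path=%s remote=%s", nodeID, r.URL.Path, r.RemoteAddr)
                http.Error(w, "unauthorized", http.StatusUnauthorized)
                return
            }
            log.Printf("cluster auth ok: node_id=%q path=%s", nodeID, r.URL.Path)
            next.ServeHTTP(w, r)
        })
    }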
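
A sketch of the authenticated client in (3): a RoundTripper that injects the
auth headers, plus the optional TLS settings. The type and function names
here are assumptions:

    package cluster

    import (
        "crypto/tls"
        "net/http"
        "time"
    )

    // authTransport adds the cluster auth headers to every outgoing request.
    type authTransport struct {
        secret string
        nodeID string
        base   http.RoundTripper
    }

    func (t *authTransport) RoundTrip(req *http.Request) (*http.Response, error) {
        r := req.Clone(req.Context())
        r.Header.Set("X-Cluster-Secret", t.secret)
        r.Header.Set("X-Node-ID", t.nodeID)
        return t.base.RoundTrip(r)
    }

    // newClusterClient builds an HTTP client for inter-node calls.
    // skipVerify corresponds to cluster_tls_skip_verify (testing only).
    func newClusterClient(secret, nodeID string, tlsEnabled, skipVerify bool) *http.Client {
        transport := &http.Transport{}
        if tlsEnabled {
            transport.TLSClientConfig = &tls.Config{InsecureSkipVerify: skipVerify}
        }
        return &http.Client{
            Timeout:   10 * time.Second,
            Transport: &authTransport{secret: secret, nodeID: nodeID, base: transport},
        }
    }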
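
How the gossip, Merkle sync, and replication callers in (5) might use that
client; the concrete path and helper name are illustrative only:

    package cluster

    import (
        "io"
        "net/http"
    )

    // fetchMerkleRoot is illustrative only: /merkle_tree/* is one of the
    // protected endpoint groups listed above, but this exact path is made up.
    func fetchMerkleRoot(client *http.Client, peer string) ([]byte, error) {
        resp, err := client.Get("https://" + peer + "/merkle_tree/root")
        if err != nil {
            return nil, err
        }
        defer resp.Body.Close()
        return io.ReadAll(resp.Body)
    }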
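
A sketch of the bootstrap endpoint in (4). requireAdminJWT is a placeholder
for the project's real JWT middleware and only checks for a bearer token
here; the response shape is also an assumption:

    package main

    import (
        "encoding/json"
        "log"
        "net/http"
        "strings"
    )

    // requireAdminJWT is a stand-in for the project's JWT middleware; a real
    // implementation verifies the token signature and the admin scope.
    func requireAdminJWT(next http.Handler) http.Handler {
        return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
            if !strings.HasPrefix(r.Header.Get("Authorization"), "Bearer ") {
                http.Error(w, "unauthorized", http.StatusUnauthorized)
                return
            }
            next.ServeHTTP(w, r)
        })
    }

    // clusterBootstrapHandler hands the shared cluster secret to an authorized
    // admin so a new node can join without the secret being copied by hand.
    func clusterBootstrapHandler(secret string) http.Handler {
        return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
            w.Header().Set("Content-Type", "application/json")
            // Response field name is illustrative.
            json.NewEncoder(w).Encode(map[string]string{"cluster_secret": secret})
        })
    }

    func main() {
        mux := http.NewServeMux()
        mux.Handle("/auth/cluster-bootstrap",
            requireAdminJWT(clusterBootstrapHandler("example-secret")))
        log.Fatal(http.ListenAndServe(":8080", mux)) // address is illustrative
    }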
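
The options in (6) map onto a struct along these lines; the struct and field
names are assumptions, only the yaml keys come from the list above:

    package cluster

    // ClusterConfig is a sketch; the real fields live in the project's config
    // types, but the yaml keys match the options listed above.
    type ClusterConfig struct {
        ClusterSecret        string `yaml:"cluster_secret"`          // auto-generated if empty
        ClusterTLSEnabled    bool   `yaml:"cluster_tls_enabled"`     // TLS for inter-node traffic
        ClusterTLSCertFile   string `yaml:"cluster_tls_cert_file"`
        ClusterTLSKeyFile    string `yaml:"cluster_tls_key_file"`
        ClusterTLSSkipVerify bool   `yaml:"cluster_tls_skip_verify"` // testing only
    }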

This addresses the security vulnerability of previously unprotected cluster
endpoints and secures internal cluster communication while still allowing
automated node bootstrapping.

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
2025-10-02 22:19:40 +03:00
parent 2431d3cfb0
commit c7dcebb894
28 changed files with 477 additions and 230 deletions

@@ -2,7 +2,7 @@ package storage
import (
"fmt"
"github.com/klauspost/compress/zstd"
)
@@ -57,4 +57,4 @@ func (c *CompressionService) DecompressData(compressedData []byte) ([]byte, erro
return nil, fmt.Errorf("decompressor not initialized")
}
return c.decompressor.DecodeAll(compressedData, nil)
}

@@ -34,10 +34,10 @@ func GetRevisionKey(baseKey string, revision int) string {
func (r *RevisionService) StoreRevisionHistory(txn *badger.Txn, key string, storedValue types.StoredValue, ttl time.Duration) error {
// Get existing metadata to check current revisions
metadataKey := auth.ResourceMetadataKey(key)
var metadata types.ResourceMetadata
var currentRevisions []int
// Try to get existing metadata
metadataData, err := r.storage.RetrieveWithDecompression(txn, []byte(metadataKey))
if err == badger.ErrKeyNotFound {
@@ -60,7 +60,7 @@ func (r *RevisionService) StoreRevisionHistory(txn *badger.Txn, key string, stor
if err != nil {
return fmt.Errorf("failed to unmarshal metadata: %v", err)
}
// Extract current revisions (we store them as a custom field)
if metadata.TTL == "" {
currentRevisions = []int{}
@@ -69,13 +69,13 @@ func (r *RevisionService) StoreRevisionHistory(txn *badger.Txn, key string, stor
currentRevisions = []int{1, 2, 3} // Assume all revisions exist for existing keys
}
}
// Revision rotation logic: shift existing revisions
if len(currentRevisions) >= 3 {
// Delete oldest revision (rev:3)
oldestRevKey := GetRevisionKey(key, 3)
txn.Delete([]byte(oldestRevKey))
// Shift rev:2 → rev:3
rev2Key := GetRevisionKey(key, 2)
rev2Data, err := r.storage.RetrieveWithDecompression(txn, []byte(rev2Key))
@@ -83,8 +83,8 @@ func (r *RevisionService) StoreRevisionHistory(txn *badger.Txn, key string, stor
rev3Key := GetRevisionKey(key, 3)
r.storage.StoreWithTTL(txn, []byte(rev3Key), rev2Data, ttl)
}
// Shift rev:1 → rev:2
rev1Key := GetRevisionKey(key, 1)
rev1Data, err := r.storage.RetrieveWithDecompression(txn, []byte(rev1Key))
if err == nil {
@@ -92,80 +92,80 @@ func (r *RevisionService) StoreRevisionHistory(txn *badger.Txn, key string, stor
r.storage.StoreWithTTL(txn, []byte(rev2Key), rev1Data, ttl)
}
}
// Store current value as rev:1
currentValueBytes, err := json.Marshal(storedValue)
if err != nil {
return fmt.Errorf("failed to marshal current value for revision: %v", err)
}
rev1Key := GetRevisionKey(key, 1)
err = r.storage.StoreWithTTL(txn, []byte(rev1Key), currentValueBytes, ttl)
if err != nil {
return fmt.Errorf("failed to store revision 1: %v", err)
}
// Update metadata with new revision count
metadata.UpdatedAt = time.Now().Unix()
metadataBytes, err := json.Marshal(metadata)
if err != nil {
return fmt.Errorf("failed to marshal metadata: %v", err)
}
return r.storage.StoreWithTTL(txn, []byte(metadataKey), metadataBytes, ttl)
}
// GetRevisionHistory retrieves all available revisions for a given key
func (r *RevisionService) GetRevisionHistory(key string) ([]map[string]interface{}, error) {
var revisions []map[string]interface{}
err := r.storage.db.View(func(txn *badger.Txn) error {
// Check revisions 1, 2, 3
for rev := 1; rev <= 3; rev++ {
revKey := GetRevisionKey(key, rev)
revData, err := r.storage.RetrieveWithDecompression(txn, []byte(revKey))
if err == badger.ErrKeyNotFound {
continue // Skip missing revisions
} else if err != nil {
return fmt.Errorf("failed to retrieve revision %d: %v", rev, err)
}
var storedValue types.StoredValue
err = json.Unmarshal(revData, &storedValue)
if err != nil {
return fmt.Errorf("failed to unmarshal revision %d: %v", rev, err)
}
var data interface{}
err = json.Unmarshal(storedValue.Data, &data)
if err != nil {
return fmt.Errorf("failed to unmarshal revision %d data: %v", rev, err)
}
revision := map[string]interface{}{
"revision": rev,
"uuid": storedValue.UUID,
"timestamp": storedValue.Timestamp,
"data": data,
}
revisions = append(revisions, revision)
}
return nil
})
if err != nil {
return nil, err
}
// Sort revisions by revision number (newest first)
// Note: they're already in order since we iterate 1->3, but reverse for newest first
for i, j := 0, len(revisions)-1; i < j; i, j = i+1, j-1 {
revisions[i], revisions[j] = revisions[j], revisions[i]
}
return revisions, nil
}
@@ -174,23 +174,23 @@ func (r *RevisionService) GetSpecificRevision(key string, revision int) (*types.
if revision < 1 || revision > 3 {
return nil, fmt.Errorf("invalid revision number: %d (must be 1-3)", revision)
}
var storedValue types.StoredValue
err := r.storage.db.View(func(txn *badger.Txn) error {
revKey := GetRevisionKey(key, revision)
revData, err := r.storage.RetrieveWithDecompression(txn, []byte(revKey))
if err != nil {
return err
}
return json.Unmarshal(revData, &storedValue)
})
if err != nil {
return nil, err
}
return &storedValue, nil
}
@@ -200,15 +200,15 @@ func GetRevisionFromPath(path string) (string, int, error) {
if len(parts) < 4 || parts[len(parts)-2] != "rev" {
return "", 0, fmt.Errorf("invalid revision path format")
}
revisionStr := parts[len(parts)-1]
revision, err := strconv.Atoi(revisionStr)
if err != nil {
return "", 0, fmt.Errorf("invalid revision number: %s", revisionStr)
}
// Reconstruct the base key without the "/rev/N" suffix
baseKey := strings.Join(parts[:len(parts)-2], "/")
return baseKey, revision, nil
}

@@ -12,17 +12,17 @@ import (
// StorageService handles all BadgerDB operations and data management
type StorageService struct {
db *badger.DB
config *types.Config
compressionSvc *CompressionService
logger *logrus.Logger
}
// NewStorageService creates a new storage service
func NewStorageService(db *badger.DB, config *types.Config, logger *logrus.Logger) (*StorageService, error) {
var compressionSvc *CompressionService
var err error
// Initialize compression if enabled
if config.CompressionEnabled {
compressionSvc, err = NewCompressionService()
@@ -50,7 +50,7 @@ func (s *StorageService) Close() {
func (s *StorageService) StoreWithTTL(txn *badger.Txn, key []byte, data []byte, ttl time.Duration) error {
var finalData []byte
var err error
// Compress data if compression is enabled
if s.config.CompressionEnabled && s.compressionSvc != nil {
finalData, err = s.compressionSvc.CompressData(data)
@@ -60,14 +60,14 @@ func (s *StorageService) StoreWithTTL(txn *badger.Txn, key []byte, data []byte,
} else {
finalData = data
}
entry := badger.NewEntry(key, finalData)
// Apply TTL if specified
if ttl > 0 {
entry = entry.WithTTL(ttl)
}
return txn.SetEntry(entry)
}
@@ -77,7 +77,7 @@ func (s *StorageService) RetrieveWithDecompression(txn *badger.Txn, key []byte)
if err != nil {
return nil, err
}
var compressedData []byte
err = item.Value(func(val []byte) error {
compressedData = append(compressedData, val...)
@@ -86,12 +86,12 @@ func (s *StorageService) RetrieveWithDecompression(txn *badger.Txn, key []byte)
if err != nil {
return nil, err
}
// Decompress data if compression is enabled
if s.config.CompressionEnabled && s.compressionSvc != nil {
return s.compressionSvc.DecompressData(compressedData)
}
return compressedData, nil
}
@@ -109,4 +109,4 @@ func (s *StorageService) DecompressData(compressedData []byte) ([]byte, error) {
return compressedData, nil
}
return s.compressionSvc.DecompressData(compressedData)
}