forked from ryyst/kalzu-value-store
refactor: extract core server package with handlers, routes, and lifecycle
Created server package with:

- server.go: Server struct and core methods
- handlers.go: HTTP handlers for health, KV operations, cluster management
- routes.go: HTTP route setup
- lifecycle.go: Server startup/shutdown logic

This moves ~400 lines of server-related code from main.go to a dedicated server package for better organization.

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
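After this refactor, main.go reduces to wiring: load a config, construct the server, and hand control to the lifecycle code. Here is a minimal sketch of what that call site might look like; only `server.NewServer` and the `DataDir`/`LogLevel` config fields are confirmed by the diff below, while the config literal and the `Run` entry point (presumably in lifecycle.go) are illustrative assumptions:

```go
package main

import (
	"log"

	"github.com/kalzu/kvs/server"
	"github.com/kalzu/kvs/types"
)

func main() {
	// Illustrative config; real values would come from flags or a config file.
	cfg := &types.Config{
		DataDir:  "./data",
		LogLevel: "info",
	}

	srv, err := server.NewServer(cfg)
	if err != nil {
		log.Fatalf("failed to initialize server: %v", err)
	}

	// Run is an assumed lifecycle.go entry point (startup + graceful shutdown).
	if err := srv.Run(); err != nil {
		log.Fatalf("server error: %v", err)
	}
}
```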
server/server.go (new file, 183 lines)
@@ -0,0 +1,183 @@
```go
package server

import (
	"context"
	"fmt"
	"net/http"
	"os"
	"path/filepath"
	"sync"
	"time" // used by getJoinedTimestamp

	"github.com/dgraph-io/badger/v3"
	"github.com/robfig/cron/v3"
	"github.com/sirupsen/logrus"

	"github.com/kalzu/kvs/auth"
	"github.com/kalzu/kvs/cluster"
	"github.com/kalzu/kvs/storage"
	"github.com/kalzu/kvs/types"
)

// Server represents the KVS node
type Server struct {
	config     *types.Config
	db         *badger.DB
	mode       string // "normal", "read-only", "syncing"
	modeMu     sync.RWMutex
	logger     *logrus.Logger
	httpServer *http.Server
	ctx        context.Context
	cancel     context.CancelFunc
	wg         sync.WaitGroup

	// Cluster services
	gossipService    *cluster.GossipService
	syncService      *cluster.SyncService
	merkleService    *cluster.MerkleService
	bootstrapService *cluster.BootstrapService

	// Storage services
	storageService  *storage.StorageService
	revisionService *storage.RevisionService

	// Phase 2: Backup system
	cronScheduler *cron.Cron         // Cron scheduler for backups
	backupStatus  types.BackupStatus // Current backup status
	backupMu      sync.RWMutex       // Protects backup status

	// Authentication service
	authService *auth.AuthService
}

// NewServer initializes and returns a new Server instance
func NewServer(config *types.Config) (*Server, error) {
	logger := logrus.New()
	logger.SetFormatter(&logrus.JSONFormatter{})

	level, err := logrus.ParseLevel(config.LogLevel)
	if err != nil {
		level = logrus.InfoLevel
	}
	logger.SetLevel(level)

	// Create data directory
	if err := os.MkdirAll(config.DataDir, 0755); err != nil {
		return nil, fmt.Errorf("failed to create data directory: %v", err)
	}

	// Open BadgerDB
	opts := badger.DefaultOptions(filepath.Join(config.DataDir, "badger"))
	opts.Logger = nil // Disable badger's internal logging
	db, err := badger.Open(opts)
	if err != nil {
		return nil, fmt.Errorf("failed to open BadgerDB: %v", err)
	}

	ctx, cancel := context.WithCancel(context.Background())

	// Initialize cluster services
	merkleService := cluster.NewMerkleService(db, logger)
	gossipService := cluster.NewGossipService(config, logger)
	syncService := cluster.NewSyncService(db, config, gossipService, merkleService, logger)
	var server *Server // Forward declaration
	bootstrapService := cluster.NewBootstrapService(config, gossipService, syncService, logger, func(mode string) {
		if server != nil {
			server.setMode(mode)
		}
	})

	server = &Server{
		config:           config,
		db:               db,
		mode:             "normal",
		logger:           logger,
		ctx:              ctx,
		cancel:           cancel,
		gossipService:    gossipService,
		syncService:      syncService,
		merkleService:    merkleService,
		bootstrapService: bootstrapService,
	}

	if config.ReadOnly {
		server.setMode("read-only")
	}

	// Initialize storage services
	storageService, err := storage.NewStorageService(db, config, logger)
	if err != nil {
		return nil, fmt.Errorf("failed to initialize storage service: %v", err)
	}
	server.storageService = storageService

	// Initialize revision service
	server.revisionService = storage.NewRevisionService(storageService)

	// Initialize authentication service
	server.authService = auth.NewAuthService(db, logger)

	// Initialize Merkle tree using cluster service
	if err := server.syncService.InitializeMerkleTree(); err != nil {
		return nil, fmt.Errorf("failed to initialize Merkle tree: %v", err)
	}

	return server, nil
}
```
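Note the `var server *Server` forward declaration above: the bootstrap callback closes over the variable before it is assigned, so the `server != nil` guard protects against the callback firing during construction. A stripped-down, self-contained illustration of the same closure pattern (all names here are invented for the example):

```go
package main

import "fmt"

type node struct{ mode string }

func main() {
	var n *node // declared first so the closure below can capture it

	// The closure captures the variable n, not its (currently nil) value,
	// so it observes the assignment that happens afterwards.
	setMode := func(mode string) {
		if n != nil { // guards against invocation before n is assigned
			n.mode = mode
			fmt.Println("mode:", n.mode)
		}
	}

	n = &node{mode: "normal"}
	setMode("read-only") // prints "mode: read-only"
}
```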
```go
// getMode returns the current server mode
func (s *Server) getMode() string {
	s.modeMu.RLock()
	defer s.modeMu.RUnlock()
	return s.mode
}

// setMode sets the server mode
func (s *Server) setMode(mode string) {
	s.modeMu.Lock()
	defer s.modeMu.Unlock()
	oldMode := s.mode
	s.mode = mode
	s.logger.WithFields(logrus.Fields{
		"old_mode": oldMode,
		"new_mode": mode,
	}).Info("Mode changed")
}

// addMember adds a member using cluster service
func (s *Server) addMember(member *types.Member) {
	s.gossipService.AddMember(member)
}

// removeMember removes a member using cluster service
func (s *Server) removeMember(nodeID string) {
	s.gossipService.RemoveMember(nodeID)
}

// getMembers returns all cluster members
func (s *Server) getMembers() []*types.Member {
	return s.gossipService.GetMembers()
}

// getJoinedTimestamp returns this node's joined timestamp (startup time)
func (s *Server) getJoinedTimestamp() int64 {
	// For now, use a simple approach - this should be stored persistently
	return time.Now().UnixMilli()
}
```
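The comment in getJoinedTimestamp flags that the value should be persisted rather than recomputed on every call. One way to do that with the BadgerDB handle the server already holds is sketched below; the `_meta/joined_ts` key and the helper name are assumptions, not code from this commit:

```go
package server

import (
	"strconv"
	"time"

	"github.com/dgraph-io/badger/v3"
)

// loadOrStoreJoinedTimestamp returns the persisted join time in milliseconds,
// writing time.Now() on first startup so later boots return the same value.
func loadOrStoreJoinedTimestamp(db *badger.DB) (int64, error) {
	key := []byte("_meta/joined_ts") // illustrative key name
	var ts int64
	err := db.Update(func(txn *badger.Txn) error {
		item, err := txn.Get(key)
		if err == badger.ErrKeyNotFound {
			// First startup: record the current time.
			ts = time.Now().UnixMilli()
			return txn.Set(key, []byte(strconv.FormatInt(ts, 10)))
		}
		if err != nil {
			return err
		}
		// Subsequent startups: read back the stored value.
		return item.Value(func(val []byte) error {
			parsed, perr := strconv.ParseInt(string(val), 10, 64)
			if perr != nil {
				return perr
			}
			ts = parsed
			return nil
		})
	})
	return ts, err
}
```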
```go
// getBackupStatus returns the current backup status
func (s *Server) getBackupStatus() types.BackupStatus {
	s.backupMu.RLock()
	defer s.backupMu.RUnlock()

	status := s.backupStatus

	// Calculate next backup time if scheduler is running
	if s.cronScheduler != nil && len(s.cronScheduler.Entries()) > 0 {
		nextRun := s.cronScheduler.Entries()[0].Next
		if !nextRun.IsZero() {
			status.NextBackupTime = nextRun.Unix()
		}
	}

	return status
}
```
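getBackupStatus reads `Entries()[0].Next`, which presumes a single backup job was registered on the scheduler. A sketch of what that registration might look like, in the same package as the file above; the schedule string, method name, and job body are assumptions (the real wiring presumably belongs to lifecycle.go):

```go
package server

import "github.com/robfig/cron/v3"

// startBackupScheduler registers a single backup job; sketch only.
func (s *Server) startBackupScheduler() error {
	s.cronScheduler = cron.New()
	// "0 3 * * *" fires daily at 03:00; a real schedule would come from config.
	if _, err := s.cronScheduler.AddFunc("0 3 * * *", func() {
		s.logger.Info("running scheduled backup")
		// backup logic elided
	}); err != nil {
		return err
	}
	s.cronScheduler.Start()
	return nil
}
```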