forked from ryyst/kalzu-value-store
refactor: remove duplicate Server methods and clean up main.go
- Removed all duplicate Server methods from main.go (630 lines)
- Fixed import conflicts and unused imports
- main.go reduced from 3,298 to 340 lines (89% reduction)
- Clean modular structure with server package handling all server functionality
- Achieved clean build with no compilation errors

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
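The slimmed-down main.go itself is not part of the hunks below. As a rough sketch of the modular structure the commit message describes, the entry point would now do little more than load configuration and hand off to the server package; the package name kvs/config and the constructors config.Load, server.New and Server.Run used here are assumptions, not code from this commit.

package main

import (
    "log"

    "kvs/config"
    "kvs/server"
)

func main() {
    // Hypothetical wiring only: load configuration, construct the server
    // from the server package, and run it until it exits.
    cfg, err := config.Load()
    if err != nil {
        log.Fatalf("failed to load config: %v", err)
    }

    srv, err := server.New(cfg)
    if err != nil {
        log.Fatalf("failed to initialise server: %v", err)
    }

    if err := srv.Run(); err != nil {
        log.Fatal(err)
    }
}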
@@ -8,7 +8,6 @@ import (
    "fmt"
    "math/rand"
    "net/http"
    "sort"
    "sync"
    "time"

@@ -1,22 +1,37 @@
package server

import (
    "bytes"
    "crypto/sha256"
    "encoding/json"
    "fmt"
    "net"
    "net/http"
    "sort"
    "strconv"
    "strings"
    "time"

    "github.com/dgraph-io/badger/v3"
    "github.com/dgraph-io/badger/v4"
    "github.com/golang-jwt/jwt/v4"
    "github.com/google/uuid"
    "github.com/gorilla/mux"
    "github.com/sirupsen/logrus"

    "github.com/kalzu/kvs/types"
    "kvs/auth"
    "kvs/types"
    "kvs/utils"
)

// JWTClaims represents the custom claims for JWT tokens
type JWTClaims struct {
    UserUUID string   `json:"user_uuid"`
    Scopes   []string `json:"scopes"`
    jwt.RegisteredClaims
}

var jwtSigningKey = []byte("your-super-secret-key") // TODO: Move to config
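// Illustrative sketch (not part of this commit): one way to address the TODO
// above is to read the signing key from the environment or the server config
// at startup instead of hard-coding it. The environment variable and function
// names below are hypothetical, and this would also require importing "os":
//
//    func loadJWTSigningKey() []byte {
//        if key := os.Getenv("KVS_JWT_SIGNING_KEY"); key != "" {
//            return []byte(key)
//        }
//        // Fall back to an obviously insecure development-only key.
//        return []byte("dev-only-insecure-key")
//    }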

// healthHandler returns server health status
func (s *Server) healthHandler(w http.ResponseWriter, r *http.Request) {
    mode := s.getMode()
@@ -400,3 +415,894 @@ func (s *Server) getBackupStatusHandler(w http.ResponseWriter, r *http.Request)
    w.Header().Set("Content-Type", "application/json")
    json.NewEncoder(w).Encode(status)
}

// getMerkleRootHandler returns the current Merkle tree root
func (s *Server) getMerkleRootHandler(w http.ResponseWriter, r *http.Request) {
    root := s.syncService.GetMerkleRoot()
    if root == nil {
        http.Error(w, "Merkle tree not initialized", http.StatusInternalServerError)
        return
    }

    resp := types.MerkleRootResponse{
        Root: root,
    }

    w.Header().Set("Content-Type", "application/json")
    json.NewEncoder(w).Encode(resp)
}

func (s *Server) getMerkleDiffHandler(w http.ResponseWriter, r *http.Request) {
    var req types.MerkleTreeDiffRequest
    if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
        http.Error(w, "Bad Request", http.StatusBadRequest)
        return
    }

    localPairs, err := s.getAllKVPairsForMerkleTree()
    if err != nil {
        s.logger.WithError(err).Error("Failed to get KV pairs for Merkle diff")
        http.Error(w, "Internal Server Error", http.StatusInternalServerError)
        return
    }

    // Build the local types.MerkleNode for the requested range to compare with the remote's hash
    localSubTreeRoot, err := s.buildMerkleTreeFromPairs(s.filterPairsByRange(localPairs, req.ParentNode.StartKey, req.ParentNode.EndKey))
    if err != nil {
        s.logger.WithError(err).Error("Failed to build sub-Merkle tree for diff request")
        http.Error(w, "Internal Server Error", http.StatusInternalServerError)
        return
    }
    if localSubTreeRoot == nil { // This can happen if the range is empty locally
        localSubTreeRoot = &types.MerkleNode{Hash: calculateHash([]byte("empty_tree")), StartKey: req.ParentNode.StartKey, EndKey: req.ParentNode.EndKey}
    }

    resp := types.MerkleTreeDiffResponse{}

    // If hashes match, no need to send children or keys
    if bytes.Equal(req.LocalHash, localSubTreeRoot.Hash) {
        w.Header().Set("Content-Type", "application/json")
        json.NewEncoder(w).Encode(resp)
        return
    }

    // Hashes differ, so we need to provide more detail.
    // Get all keys within the parent node's range locally
    var keysInRange []string
    for key := range s.filterPairsByRange(localPairs, req.ParentNode.StartKey, req.ParentNode.EndKey) {
        keysInRange = append(keysInRange, key)
    }
    sort.Strings(keysInRange)

    const diffLeafThreshold = 10 // If a range has <= 10 keys, we consider it a leaf-level diff

    if len(keysInRange) <= diffLeafThreshold {
        // This is a leaf-level diff, return the actual keys in the range
        resp.Keys = keysInRange
    } else {
        // Group keys into sub-ranges and return their types.MerkleNode representations
        // For simplicity, let's split the range into two halves.
        mid := len(keysInRange) / 2

        leftKeys := keysInRange[:mid]
        rightKeys := keysInRange[mid:]

        if len(leftKeys) > 0 {
            leftRangePairs := s.filterPairsByRange(localPairs, leftKeys[0], leftKeys[len(leftKeys)-1])
            leftNode, err := s.buildMerkleTreeFromPairs(leftRangePairs)
            if err != nil {
                s.logger.WithError(err).Error("Failed to build left child node for diff")
                http.Error(w, "Internal Server Error", http.StatusInternalServerError)
                return
            }
            if leftNode != nil {
                resp.Children = append(resp.Children, *leftNode)
            }
        }

        if len(rightKeys) > 0 {
            rightRangePairs := s.filterPairsByRange(localPairs, rightKeys[0], rightKeys[len(rightKeys)-1])
            rightNode, err := s.buildMerkleTreeFromPairs(rightRangePairs)
            if err != nil {
                s.logger.WithError(err).Error("Failed to build right child node for diff")
                http.Error(w, "Internal Server Error", http.StatusInternalServerError)
                return
            }
            if rightNode != nil {
                resp.Children = append(resp.Children, *rightNode)
            }
        }
    }

    w.Header().Set("Content-Type", "application/json")
    json.NewEncoder(w).Encode(resp)
}
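// Illustrative sketch (not part of this commit): a peer node could query the
// diff endpoint above to find out where its tree diverges for a given range.
// It sends its own hash for that range; an empty response means the hashes
// matched, otherwise the caller either fetches diff.Keys directly or recurses
// into diff.Children. The route path "/merkle/diff" is an assumption; the
// actual route registration is not shown in this diff.
//
//    func requestDiff(peerURL string, parent types.MerkleNode, localHash []byte) (*types.MerkleTreeDiffResponse, error) {
//        body, err := json.Marshal(types.MerkleTreeDiffRequest{ParentNode: parent, LocalHash: localHash})
//        if err != nil {
//            return nil, err
//        }
//        resp, err := http.Post(peerURL+"/merkle/diff", "application/json", bytes.NewReader(body))
//        if err != nil {
//            return nil, err
//        }
//        defer resp.Body.Close()
//
//        var diff types.MerkleTreeDiffResponse
//        if err := json.NewDecoder(resp.Body).Decode(&diff); err != nil {
//            return nil, err
//        }
//        return &diff, nil
//    }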

func (s *Server) getKVRangeHandler(w http.ResponseWriter, r *http.Request) {
    var req types.KVRangeRequest
    if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
        http.Error(w, "Bad Request", http.StatusBadRequest)
        return
    }

    var pairs []struct {
        Path        string            `json:"path"`
        StoredValue types.StoredValue `json:"stored_value"`
    }

    err := s.db.View(func(txn *badger.Txn) error {
        opts := badger.DefaultIteratorOptions
        opts.PrefetchValues = true
        it := txn.NewIterator(opts)
        defer it.Close()

        count := 0
        // Start iteration from the requested StartKey
        for it.Seek([]byte(req.StartKey)); it.Valid(); it.Next() {
            item := it.Item()
            key := string(item.Key())

            if strings.HasPrefix(key, "_ts:") {
                continue // Skip index keys
            }

            // Stop if we exceed the EndKey (if provided)
            if req.EndKey != "" && key > req.EndKey {
                break
            }

            // Stop if we hit the limit (if provided)
            if req.Limit > 0 && count >= req.Limit {
                break
            }

            var storedValue types.StoredValue
            err := item.Value(func(val []byte) error {
                return json.Unmarshal(val, &storedValue)
            })
            if err != nil {
                s.logger.WithError(err).WithField("key", key).Warn("Failed to unmarshal stored value in KV range, skipping")
                continue
            }

            pairs = append(pairs, struct {
                Path        string            `json:"path"`
                StoredValue types.StoredValue `json:"stored_value"`
            }{Path: key, StoredValue: storedValue})
            count++
        }
        return nil
    })

    if err != nil {
        s.logger.WithError(err).Error("Failed to query KV range")
        http.Error(w, "Internal Server Error", http.StatusInternalServerError)
        return
    }

    w.Header().Set("Content-Type", "application/json")
    json.NewEncoder(w).Encode(types.KVRangeResponse{Pairs: pairs})
}
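// Illustrative sketch (not part of this commit): the range handler above seeks
// to StartKey and walks forward, skipping "_ts:" index entries, stopping once
// it passes EndKey (inclusive) or reaches Limit when either is set. It
// complements the Merkle diff walk sketched earlier: once divergent keys or
// ranges are known, a peer could fetch them like this. The "/kv_range" path is
// an assumption, as route registration is not shown in this diff.
//
//    func fetchRange(peerURL, start, end string, limit int) (*types.KVRangeResponse, error) {
//        body, err := json.Marshal(types.KVRangeRequest{StartKey: start, EndKey: end, Limit: limit})
//        if err != nil {
//            return nil, err
//        }
//        resp, err := http.Post(peerURL+"/kv_range", "application/json", bytes.NewReader(body))
//        if err != nil {
//            return nil, err
//        }
//        defer resp.Body.Close()
//
//        var out types.KVRangeResponse
//        if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
//            return nil, err
//        }
//        return &out, nil
//    }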

func (s *Server) createUserHandler(w http.ResponseWriter, r *http.Request) {
    var req types.CreateUserRequest
    if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
        http.Error(w, "Bad Request", http.StatusBadRequest)
        return
    }

    if req.Nickname == "" {
        http.Error(w, "Nickname is required", http.StatusBadRequest)
        return
    }

    // Generate UUID for the user
    userUUID := uuid.New().String()
    now := time.Now().Unix()

    user := types.User{
        UUID:         userUUID,
        NicknameHash: utils.HashUserNickname(req.Nickname),
        Groups:       []string{},
        CreatedAt:    now,
        UpdatedAt:    now,
    }

    // Store user in BadgerDB
    userData, err := json.Marshal(user)
    if err != nil {
        s.logger.WithError(err).Error("Failed to marshal user data")
        http.Error(w, "Internal Server Error", http.StatusInternalServerError)
        return
    }

    err = s.db.Update(func(txn *badger.Txn) error {
        return txn.Set([]byte(auth.UserStorageKey(userUUID)), userData)
    })

    if err != nil {
        s.logger.WithError(err).Error("Failed to store user")
        http.Error(w, "Internal Server Error", http.StatusInternalServerError)
        return
    }

    s.logger.WithField("user_uuid", userUUID).Info("User created successfully")

    response := types.CreateUserResponse{UUID: userUUID}
    w.Header().Set("Content-Type", "application/json")
    json.NewEncoder(w).Encode(response)
}

func (s *Server) getUserHandler(w http.ResponseWriter, r *http.Request) {
    vars := mux.Vars(r)
    userUUID := vars["uuid"]

    if userUUID == "" {
        http.Error(w, "User UUID is required", http.StatusBadRequest)
        return
    }

    var user types.User
    err := s.db.View(func(txn *badger.Txn) error {
        item, err := txn.Get([]byte(auth.UserStorageKey(userUUID)))
        if err != nil {
            return err
        }

        return item.Value(func(val []byte) error {
            return json.Unmarshal(val, &user)
        })
    })

    if err == badger.ErrKeyNotFound {
        http.Error(w, "User not found", http.StatusNotFound)
        return
    }

    if err != nil {
        s.logger.WithError(err).Error("Failed to get user")
        http.Error(w, "Internal Server Error", http.StatusInternalServerError)
        return
    }

    response := types.GetUserResponse{
        UUID:         user.UUID,
        NicknameHash: user.NicknameHash,
        Groups:       user.Groups,
        CreatedAt:    user.CreatedAt,
        UpdatedAt:    user.UpdatedAt,
    }

    w.Header().Set("Content-Type", "application/json")
    json.NewEncoder(w).Encode(response)
}

func (s *Server) updateUserHandler(w http.ResponseWriter, r *http.Request) {
    vars := mux.Vars(r)
    userUUID := vars["uuid"]

    if userUUID == "" {
        http.Error(w, "User UUID is required", http.StatusBadRequest)
        return
    }

    var req types.UpdateUserRequest
    if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
        http.Error(w, "Bad Request", http.StatusBadRequest)
        return
    }

    err := s.db.Update(func(txn *badger.Txn) error {
        // Get existing user
        item, err := txn.Get([]byte(auth.UserStorageKey(userUUID)))
        if err != nil {
            return err
        }

        var user types.User
        err = item.Value(func(val []byte) error {
            return json.Unmarshal(val, &user)
        })
        if err != nil {
            return err
        }

        // Update fields if provided
        now := time.Now().Unix()
        user.UpdatedAt = now

        if req.Nickname != "" {
            user.NicknameHash = utils.HashUserNickname(req.Nickname)
        }

        if req.Groups != nil {
            user.Groups = req.Groups
        }

        // Store updated user
        userData, err := json.Marshal(user)
        if err != nil {
            return err
        }

        return txn.Set([]byte(auth.UserStorageKey(userUUID)), userData)
    })

    if err == badger.ErrKeyNotFound {
        http.Error(w, "User not found", http.StatusNotFound)
        return
    }

    if err != nil {
        s.logger.WithError(err).Error("Failed to update user")
        http.Error(w, "Internal Server Error", http.StatusInternalServerError)
        return
    }

    s.logger.WithField("user_uuid", userUUID).Info("User updated successfully")
    w.WriteHeader(http.StatusOK)
}

func (s *Server) deleteUserHandler(w http.ResponseWriter, r *http.Request) {
    vars := mux.Vars(r)
    userUUID := vars["uuid"]

    if userUUID == "" {
        http.Error(w, "User UUID is required", http.StatusBadRequest)
        return
    }

    err := s.db.Update(func(txn *badger.Txn) error {
        // Check if user exists first
        _, err := txn.Get([]byte(auth.UserStorageKey(userUUID)))
        if err != nil {
            return err
        }

        // Delete the user
        return txn.Delete([]byte(auth.UserStorageKey(userUUID)))
    })

    if err == badger.ErrKeyNotFound {
        http.Error(w, "User not found", http.StatusNotFound)
        return
    }

    if err != nil {
        s.logger.WithError(err).Error("Failed to delete user")
        http.Error(w, "Internal Server Error", http.StatusInternalServerError)
        return
    }

    s.logger.WithField("user_uuid", userUUID).Info("User deleted successfully")
    w.WriteHeader(http.StatusOK)
}

func (s *Server) createGroupHandler(w http.ResponseWriter, r *http.Request) {
    var req types.CreateGroupRequest
    if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
        http.Error(w, "Bad Request", http.StatusBadRequest)
        return
    }

    if req.Groupname == "" {
        http.Error(w, "Groupname is required", http.StatusBadRequest)
        return
    }

    // Generate UUID for the group
    groupUUID := uuid.New().String()
    now := time.Now().Unix()

    group := types.Group{
        UUID:      groupUUID,
        NameHash:  utils.HashGroupName(req.Groupname),
        Members:   req.Members,
        CreatedAt: now,
        UpdatedAt: now,
    }

    if group.Members == nil {
        group.Members = []string{}
    }

    // Store group in BadgerDB
    groupData, err := json.Marshal(group)
    if err != nil {
        s.logger.WithError(err).Error("Failed to marshal group data")
        http.Error(w, "Internal Server Error", http.StatusInternalServerError)
        return
    }

    err = s.db.Update(func(txn *badger.Txn) error {
        return txn.Set([]byte(auth.GroupStorageKey(groupUUID)), groupData)
    })

    if err != nil {
        s.logger.WithError(err).Error("Failed to store group")
        http.Error(w, "Internal Server Error", http.StatusInternalServerError)
        return
    }

    s.logger.WithField("group_uuid", groupUUID).Info("Group created successfully")

    response := types.CreateGroupResponse{UUID: groupUUID}
    w.Header().Set("Content-Type", "application/json")
    json.NewEncoder(w).Encode(response)
}

func (s *Server) getGroupHandler(w http.ResponseWriter, r *http.Request) {
    vars := mux.Vars(r)
    groupUUID := vars["uuid"]

    if groupUUID == "" {
        http.Error(w, "Group UUID is required", http.StatusBadRequest)
        return
    }

    var group types.Group
    err := s.db.View(func(txn *badger.Txn) error {
        item, err := txn.Get([]byte(auth.GroupStorageKey(groupUUID)))
        if err != nil {
            return err
        }

        return item.Value(func(val []byte) error {
            return json.Unmarshal(val, &group)
        })
    })

    if err == badger.ErrKeyNotFound {
        http.Error(w, "Group not found", http.StatusNotFound)
        return
    }

    if err != nil {
        s.logger.WithError(err).Error("Failed to get group")
        http.Error(w, "Internal Server Error", http.StatusInternalServerError)
        return
    }

    response := types.GetGroupResponse{
        UUID:      group.UUID,
        NameHash:  group.NameHash,
        Members:   group.Members,
        CreatedAt: group.CreatedAt,
        UpdatedAt: group.UpdatedAt,
    }

    w.Header().Set("Content-Type", "application/json")
    json.NewEncoder(w).Encode(response)
}

func (s *Server) updateGroupHandler(w http.ResponseWriter, r *http.Request) {
    vars := mux.Vars(r)
    groupUUID := vars["uuid"]

    if groupUUID == "" {
        http.Error(w, "Group UUID is required", http.StatusBadRequest)
        return
    }

    var req types.UpdateGroupRequest
    if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
        http.Error(w, "Bad Request", http.StatusBadRequest)
        return
    }

    err := s.db.Update(func(txn *badger.Txn) error {
        // Get existing group
        item, err := txn.Get([]byte(auth.GroupStorageKey(groupUUID)))
        if err != nil {
            return err
        }

        var group types.Group
        err = item.Value(func(val []byte) error {
            return json.Unmarshal(val, &group)
        })
        if err != nil {
            return err
        }

        // Update fields
        now := time.Now().Unix()
        group.UpdatedAt = now
        group.Members = req.Members

        if group.Members == nil {
            group.Members = []string{}
        }

        // Store updated group
        groupData, err := json.Marshal(group)
        if err != nil {
            return err
        }

        return txn.Set([]byte(auth.GroupStorageKey(groupUUID)), groupData)
    })

    if err == badger.ErrKeyNotFound {
        http.Error(w, "Group not found", http.StatusNotFound)
        return
    }

    if err != nil {
        s.logger.WithError(err).Error("Failed to update group")
        http.Error(w, "Internal Server Error", http.StatusInternalServerError)
        return
    }

    s.logger.WithField("group_uuid", groupUUID).Info("Group updated successfully")
    w.WriteHeader(http.StatusOK)
}

func (s *Server) deleteGroupHandler(w http.ResponseWriter, r *http.Request) {
    vars := mux.Vars(r)
    groupUUID := vars["uuid"]

    if groupUUID == "" {
        http.Error(w, "Group UUID is required", http.StatusBadRequest)
        return
    }

    err := s.db.Update(func(txn *badger.Txn) error {
        // Check if group exists first
        _, err := txn.Get([]byte(auth.GroupStorageKey(groupUUID)))
        if err != nil {
            return err
        }

        // Delete the group
        return txn.Delete([]byte(auth.GroupStorageKey(groupUUID)))
    })

    if err == badger.ErrKeyNotFound {
        http.Error(w, "Group not found", http.StatusNotFound)
        return
    }

    if err != nil {
        s.logger.WithError(err).Error("Failed to delete group")
        http.Error(w, "Internal Server Error", http.StatusInternalServerError)
        return
    }

    s.logger.WithField("group_uuid", groupUUID).Info("Group deleted successfully")
    w.WriteHeader(http.StatusOK)
}

func (s *Server) createTokenHandler(w http.ResponseWriter, r *http.Request) {
    var req types.CreateTokenRequest
    if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
        http.Error(w, "Bad Request", http.StatusBadRequest)
        return
    }

    if req.UserUUID == "" {
        http.Error(w, "User UUID is required", http.StatusBadRequest)
        return
    }

    if len(req.Scopes) == 0 {
        http.Error(w, "At least one scope is required", http.StatusBadRequest)
        return
    }

    // Verify user exists
    err := s.db.View(func(txn *badger.Txn) error {
        _, err := txn.Get([]byte(auth.UserStorageKey(req.UserUUID)))
        return err
    })

    if err == badger.ErrKeyNotFound {
        http.Error(w, "User not found", http.StatusNotFound)
        return
    }

    if err != nil {
        s.logger.WithError(err).Error("Failed to verify user")
        http.Error(w, "Internal Server Error", http.StatusInternalServerError)
        return
    }

    // Generate JWT token
    tokenString, expiresAt, err := generateJWT(req.UserUUID, req.Scopes, 1) // 1 hour default
    if err != nil {
        s.logger.WithError(err).Error("Failed to generate JWT token")
        http.Error(w, "Internal Server Error", http.StatusInternalServerError)
        return
    }

    // Store token in BadgerDB
    err = s.storeAPIToken(tokenString, req.UserUUID, req.Scopes, expiresAt)
    if err != nil {
        s.logger.WithError(err).Error("Failed to store API token")
        http.Error(w, "Internal Server Error", http.StatusInternalServerError)
        return
    }

    s.logger.WithFields(logrus.Fields{
        "user_uuid":  req.UserUUID,
        "scopes":     req.Scopes,
        "expires_at": expiresAt,
    }).Info("API token created successfully")

    response := types.CreateTokenResponse{
        Token:     tokenString,
        ExpiresAt: expiresAt,
    }

    w.Header().Set("Content-Type", "application/json")
    json.NewEncoder(w).Encode(response)
}

func (s *Server) getRevisionHistoryHandler(w http.ResponseWriter, r *http.Request) {
    // Check if revision history is enabled
    if !s.config.RevisionHistoryEnabled {
        http.Error(w, "Revision history is disabled", http.StatusServiceUnavailable)
        return
    }

    vars := mux.Vars(r)
    key := vars["key"]

    if key == "" {
        http.Error(w, "Key is required", http.StatusBadRequest)
        return
    }

    revisions, err := s.getRevisionHistory(key)
    if err != nil {
        s.logger.WithError(err).WithField("key", key).Error("Failed to get revision history")
        http.Error(w, "Internal Server Error", http.StatusInternalServerError)
        return
    }

    if len(revisions) == 0 {
        http.Error(w, "No revisions found", http.StatusNotFound)
        return
    }

    response := map[string]interface{}{
        "revisions": revisions,
    }

    w.Header().Set("Content-Type", "application/json")
    json.NewEncoder(w).Encode(response)
}

func (s *Server) getSpecificRevisionHandler(w http.ResponseWriter, r *http.Request) {
    // Check if revision history is enabled
    if !s.config.RevisionHistoryEnabled {
        http.Error(w, "Revision history is disabled", http.StatusServiceUnavailable)
        return
    }

    vars := mux.Vars(r)
    key := vars["key"]
    revisionStr := vars["revision"]

    if key == "" {
        http.Error(w, "Key is required", http.StatusBadRequest)
        return
    }

    if revisionStr == "" {
        http.Error(w, "Revision is required", http.StatusBadRequest)
        return
    }

    revision, err := strconv.Atoi(revisionStr)
    if err != nil {
        http.Error(w, "Invalid revision number", http.StatusBadRequest)
        return
    }

    storedValue, err := s.getSpecificRevision(key, revision)
    if err == badger.ErrKeyNotFound {
        http.Error(w, "Revision not found", http.StatusNotFound)
        return
    }

    if err != nil {
        s.logger.WithError(err).WithFields(logrus.Fields{
            "key":      key,
            "revision": revision,
        }).Error("Failed to get specific revision")
        http.Error(w, "Internal Server Error", http.StatusInternalServerError)
        return
    }

    w.Header().Set("Content-Type", "application/json")
    json.NewEncoder(w).Encode(storedValue)
}

// calculateHash computes SHA256 hash of data
func calculateHash(data []byte) []byte {
    h := sha256.New()
    h.Write(data)
    return h.Sum(nil)
}

// getAllKVPairsForMerkleTree retrieves all key-value pairs for Merkle tree operations
func (s *Server) getAllKVPairsForMerkleTree() (map[string]*types.StoredValue, error) {
    pairs := make(map[string]*types.StoredValue)
    err := s.db.View(func(txn *badger.Txn) error {
        opts := badger.DefaultIteratorOptions
        opts.PrefetchValues = true // We need the values for hashing
        it := txn.NewIterator(opts)
        defer it.Close()

        // Iterate over all actual data keys (not _ts: indexes)
        for it.Rewind(); it.Valid(); it.Next() {
            item := it.Item()
            key := string(item.Key())

            if strings.HasPrefix(key, "_ts:") {
                continue // Skip index keys
            }

            var storedValue types.StoredValue
            err := item.Value(func(val []byte) error {
                return json.Unmarshal(val, &storedValue)
            })
            if err != nil {
                s.logger.WithError(err).WithField("key", key).Warn("Failed to unmarshal stored value for Merkle tree, skipping")
                continue
            }
            pairs[key] = &storedValue
        }
        return nil
    })
    if err != nil {
        return nil, err
    }
    return pairs, nil
}

// buildMerkleTreeFromPairs constructs a Merkle tree from key-value pairs
func (s *Server) buildMerkleTreeFromPairs(pairs map[string]*types.StoredValue) (*types.MerkleNode, error) {
    if len(pairs) == 0 {
        return &types.MerkleNode{Hash: calculateHash([]byte("empty_tree")), StartKey: "", EndKey: ""}, nil
    }

    // Sort keys to ensure consistent tree structure
    keys := make([]string, 0, len(pairs))
    for k := range pairs {
        keys = append(keys, k)
    }
    sort.Strings(keys)

    // Create leaf nodes
    leafNodes := make([]*types.MerkleNode, len(keys))
    for i, key := range keys {
        storedValue := pairs[key]
        hash := s.calculateLeafHash(key, storedValue)
        leafNodes[i] = &types.MerkleNode{Hash: hash, StartKey: key, EndKey: key}
    }

    // Recursively build parent nodes
    return s.buildMerkleTreeRecursive(leafNodes)
}

// filterPairsByRange filters key-value pairs by key range
func (s *Server) filterPairsByRange(allPairs map[string]*types.StoredValue, startKey, endKey string) map[string]*types.StoredValue {
    filtered := make(map[string]*types.StoredValue)
    for key, value := range allPairs {
        if (startKey == "" || key >= startKey) && (endKey == "" || key <= endKey) {
            filtered[key] = value
        }
    }
    return filtered
}

// calculateLeafHash generates a hash for a leaf node
func (s *Server) calculateLeafHash(path string, storedValue *types.StoredValue) []byte {
    // Concatenate path, UUID, timestamp, and the raw data bytes for hashing
    // Ensure a consistent order of fields for hashing
    dataToHash := bytes.Buffer{}
    dataToHash.WriteString(path)
    dataToHash.WriteByte(':')
    dataToHash.WriteString(storedValue.UUID)
    dataToHash.WriteByte(':')
    dataToHash.WriteString(strconv.FormatInt(storedValue.Timestamp, 10))
    dataToHash.WriteByte(':')
    dataToHash.Write(storedValue.Data) // Use raw bytes of json.RawMessage

    return calculateHash(dataToHash.Bytes())
}

// buildMerkleTreeRecursive builds Merkle tree recursively from nodes
func (s *Server) buildMerkleTreeRecursive(nodes []*types.MerkleNode) (*types.MerkleNode, error) {
    if len(nodes) == 0 {
        return nil, nil
    }
    if len(nodes) == 1 {
        return nodes[0], nil
    }

    var nextLevel []*types.MerkleNode
    for i := 0; i < len(nodes); i += 2 {
        left := nodes[i]
        var right *types.MerkleNode
        if i+1 < len(nodes) {
            right = nodes[i+1]
        }

        var combinedHash []byte
        var endKey string

        if right != nil {
            combinedHash = calculateHash(append(left.Hash, right.Hash...))
            endKey = right.EndKey
        } else {
            // Odd number of nodes, promote the left node
            combinedHash = left.Hash
            endKey = left.EndKey
        }

        parentNode := &types.MerkleNode{
            Hash:     combinedHash,
            StartKey: left.StartKey,
            EndKey:   endKey,
        }
        nextLevel = append(nextLevel, parentNode)
    }
    return s.buildMerkleTreeRecursive(nextLevel)
}
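// Worked example (not part of this commit): with five leaves A-E, the first
// pass pairs (A,B) and (C,D) and promotes E unchanged, leaving three nodes;
// the next pass pairs (AB,CD) and promotes E again; the final pass combines
// (ABCD,E) into the root. Each parent's StartKey/EndKey span the keys of the
// leaves beneath it, so a mismatching hash can be narrowed down range by range
// via the diff handler above.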

// generateJWT creates a new JWT token for a user with specified scopes
func generateJWT(userUUID string, scopes []string, expirationHours int) (string, int64, error) {
    if expirationHours <= 0 {
        expirationHours = 1 // Default to 1 hour
    }

    now := time.Now()
    expiresAt := now.Add(time.Duration(expirationHours) * time.Hour)

    claims := JWTClaims{
        UserUUID: userUUID,
        Scopes:   scopes,
        RegisteredClaims: jwt.RegisteredClaims{
            IssuedAt:  jwt.NewNumericDate(now),
            ExpiresAt: jwt.NewNumericDate(expiresAt),
            Issuer:    "kvs-server",
        },
    }

    token := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)
    tokenString, err := token.SignedString(jwtSigningKey)
    if err != nil {
        return "", 0, err
    }

    return tokenString, expiresAt.Unix(), nil
}
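// Illustrative sketch (not part of this commit): the validation counterpart to
// generateJWT, using the same claims type and signing key with the golang-jwt
// v4 API. Any auth middleware in the server package would do something along
// these lines; parseJWT is a hypothetical name.
//
//    func parseJWT(tokenString string) (*JWTClaims, error) {
//        claims := &JWTClaims{}
//        token, err := jwt.ParseWithClaims(tokenString, claims, func(t *jwt.Token) (interface{}, error) {
//            if _, ok := t.Method.(*jwt.SigningMethodHMAC); !ok {
//                return nil, fmt.Errorf("unexpected signing method: %v", t.Header["alg"])
//            }
//            return jwtSigningKey, nil
//        })
//        if err != nil {
//            return nil, err
//        }
//        if !token.Valid {
//            return nil, fmt.Errorf("invalid token")
//        }
//        return claims, nil
//    }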

func (s *Server) storeAPIToken(tokenString string, userUUID string, scopes []string, expiresAt int64) error {
    tokenHash := utils.HashToken(tokenString)

    apiToken := types.APIToken{
        TokenHash: tokenHash,
        UserUUID:  userUUID,
        Scopes:    scopes,
        IssuedAt:  time.Now().Unix(),
        ExpiresAt: expiresAt,
    }

    tokenData, err := json.Marshal(apiToken)
    if err != nil {
        return err
    }

    return s.db.Update(func(txn *badger.Txn) error {
        entry := badger.NewEntry([]byte(auth.TokenStorageKey(tokenHash)), tokenData)

        // Set TTL to the token expiration time
        ttl := time.Until(time.Unix(expiresAt, 0))
        if ttl > 0 {
            entry = entry.WithTTL(ttl)
        }

        return txn.SetEntry(entry)
    })
}

// getRevisionHistory retrieves revision history for a key
func (s *Server) getRevisionHistory(key string) ([]map[string]interface{}, error) {
    return s.revisionService.GetRevisionHistory(key)
}

// getSpecificRevision retrieves a specific revision of a key
func (s *Server) getSpecificRevision(key string, revision int) (*types.StoredValue, error) {
    return s.revisionService.GetSpecificRevision(key, revision)
}

@@ -74,21 +74,6 @@ func (s *Server) startBackgroundTasks() {

// bootstrap joins cluster using seed nodes via bootstrap service
func (s *Server) bootstrap() {
    if len(s.config.SeedNodes) == 0 {
        s.logger.Info("No seed nodes configured, running as standalone")
        return
    }

    s.logger.Info("Starting bootstrap process")
    s.setMode("syncing")

    // Use bootstrap service to join cluster
    if err := s.bootstrapService.JoinCluster(); err != nil {
        s.logger.WithError(err).Error("Failed to join cluster")
        s.setMode("normal")
        return
    }

    s.setMode("normal")
    s.logger.Info("Successfully joined cluster")
    s.bootstrapService.Bootstrap()
}

@@ -7,15 +7,16 @@ import (
    "os"
    "path/filepath"
    "sync"
    "time"

    "github.com/dgraph-io/badger/v3"
    "github.com/dgraph-io/badger/v4"
    "github.com/robfig/cron/v3"
    "github.com/sirupsen/logrus"

    "github.com/kalzu/kvs/auth"
    "github.com/kalzu/kvs/cluster"
    "github.com/kalzu/kvs/storage"
    "github.com/kalzu/kvs/types"
    "kvs/auth"
    "kvs/cluster"
    "kvs/storage"
    "kvs/types"
)

// Server represents the KVS node