Move basic grid rendering from JS to Jinja2 server-side templates. Gemini's fix for the logs display seems to work.

Kalzu Rekku
2025-06-13 23:10:39 +03:00
parent a1f4fc556b
commit 44c13c16df
7 changed files with 1300 additions and 187 deletions


@@ -8,7 +8,7 @@ from fastapi.responses import HTMLResponse, JSONResponse
from fastapi.templating import Jinja2Templates
from fastapi.staticfiles import StaticFiles
from pydantic import BaseModel, Field, validator
-from typing import Dict, List, Annotated
+from typing import Dict, List, Annotated, Optional
import uuid as uuid_lib
from collections import deque
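
The timestamp handling below also leans on a few imports that this hunk does not show; presumably they already exist elsewhere in main.py. For reference, a sketch of what the module needs (not part of the visible diff):

```python
# Assumed to be imported elsewhere in main.py; listed here for reference only.
import time                              # time.gmtime, used as the formatters' converter
from datetime import datetime, timezone  # timezone-aware UTC timestamps
from dateutil.parser import isoparse     # tolerant ISO 8601 parsing in LogBuffer
```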
@@ -32,14 +32,17 @@ class BufferHandler(logging.Handler):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Use the same format string as the StreamHandler for consistency.
+        # Format asctime as ISO 8601 UTC with a trailing 'Z'. Note that datefmt is
+        # rendered via time.strftime, which has no %f directive, so milliseconds
+        # cannot come from datefmt; see the formatTime sketch below this hunk.
        self.formatter = jsonlogger.JsonFormatter(
-            "%(asctime)s %(name)s %(levelname)s %(message)s"
+            "%(asctime)s %(name)s %(levelname)s %(message)s",
+            datefmt="%Y-%m-%dT%H:%M:%SZ",  # ISO 8601, seconds precision, 'Z' for UTC
        )
+        self.formatter.converter = time.gmtime  # render asctime in UTC so the 'Z' is truthful

    def emit(self, record):
        try:
            log_entry_str = self.formatter.format(record)
            log_entry = json.loads(log_entry_str)
+            # 'asctime' in log_entry is now ISO 8601 UTC
            log_buffer.add_log(log_entry)
        except Exception as e:
            print(
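
If real millisecond precision is wanted in asctime itself, the cleanest route is to bypass datefmt entirely and override formatTime. A minimal sketch (not part of this commit) on the same python-json-logger base class:

```python
# Sketch: emit asctime as ISO 8601 UTC with milliseconds, without datefmt.
from datetime import datetime, timezone

from pythonjsonlogger import jsonlogger


class UtcIsoJsonFormatter(jsonlogger.JsonFormatter):
    def formatTime(self, record, datefmt=None):
        # record.created is the POSIX timestamp set by the logging machinery.
        dt = datetime.fromtimestamp(record.created, tz=timezone.utc)
        return dt.isoformat(timespec="milliseconds").replace("+00:00", "Z")
```

With a formatter like this, the datefmt/converter juggling above becomes unnecessary.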
@@ -53,14 +56,30 @@ class LogBuffer:
        self.buffer = deque(maxlen=maxlen)

    def add_log(self, record):
-        # Assuming 'record' here is already a dictionary parsed from the JSON log string
-        timestamp = record.get("asctime") or datetime.utcnow().isoformat()
+        # 'record' is a dictionary parsed from the JSON log string.
+        # 'asctime' should now be ISO 8601 thanks to BufferHandler's formatter.
+        timestamp_str = record.get("asctime")
+        if timestamp_str:
+            try:
+                # Parse robustly with isoparse, then normalize to UTC ISO 8601 with 'Z'
+                dt_obj = isoparse(timestamp_str)
+                if dt_obj.tzinfo is None:
+                    # Assume UTC if naive (common for logs without explicit timezone info)
+                    dt_obj = dt_obj.replace(tzinfo=timezone.utc)
+                else:
+                    # Convert to UTC for consistent storage
+                    dt_obj = dt_obj.astimezone(timezone.utc)
+                timestamp_to_store = dt_obj.isoformat(timespec="milliseconds").replace("+00:00", "Z")
+            except ValueError:
+                logger.warning(f"Failed to parse log timestamp '{timestamp_str}' from formatter; using current UTC time.")
+                timestamp_to_store = datetime.now(timezone.utc).isoformat(timespec="milliseconds").replace("+00:00", "Z")
+        else:
+            timestamp_to_store = datetime.now(timezone.utc).isoformat(timespec="milliseconds").replace("+00:00", "Z")

        self.buffer.append(
            {
-                "timestamp": timestamp,
-                "level": record.get(
-                    "levelname"
-                ),  # This should now correctly get 'levelname'
+                "timestamp": timestamp_to_store,
+                "level": record.get("levelname"),
                "message": record.get("message"),
                "extra": {
                    k: v
@@ -111,9 +130,8 @@ class LogBuffer:
            logs = [
                log
                for log in logs
-                if datetime.fromisoformat(
-                    log["timestamp"].replace("Z", "+00:00")
-                ).astimezone(timezone.utc)
+                # log["timestamp"] is now guaranteed to be ISO 8601 with 'Z'
+                if isoparse(log["timestamp"]).astimezone(timezone.utc)
                >= since_dt
            ]
        except ValueError:
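
For illustration, the normalization that add_log performs maps a few representative inputs (values assumed) to the same stored shape:

```python
# Three assumed inputs: already-UTC, zoned, and naive (treated as UTC).
from datetime import timezone
from dateutil.parser import isoparse

for raw in ("2025-06-13T20:10:39.123Z",
            "2025-06-13T23:10:39+03:00",
            "2025-06-13T20:10:39"):
    dt = isoparse(raw)
    dt = dt.replace(tzinfo=timezone.utc) if dt.tzinfo is None else dt.astimezone(timezone.utc)
    print(dt.isoformat(timespec="milliseconds").replace("+00:00", "Z"))

# Output:
#   2025-06-13T20:10:39.123Z
#   2025-06-13T20:10:39.000Z
#   2025-06-13T20:10:39.000Z
```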
@@ -125,7 +143,11 @@
log_buffer = LogBuffer()

logHandler = logging.StreamHandler()
-formatter = jsonlogger.JsonFormatter("%(asctime)s %(name)s %(levelname)s %(message)s")
+# Keep the StreamHandler's asctime in ISO 8601 UTC as well (same datefmt caveat as above)
+formatter = jsonlogger.JsonFormatter(
+    "%(asctime)s %(name)s %(levelname)s %(message)s",
+    datefmt="%Y-%m-%dT%H:%M:%SZ",  # ISO 8601, seconds precision, 'Z' for UTC
+)
+formatter.converter = time.gmtime  # render asctime in UTC
logHandler.setFormatter(formatter)

if not logger.handlers:
@@ -148,6 +170,10 @@ app = FastAPI(
templates = Jinja2Templates(directory="app/web/templates")
app.mount("/static", StaticFiles(directory="app/web/static"), name="static")

+# To handle HTTPS correctly behind a reverse proxy, run the Uvicorn server with
+# --proxy-headers and --forwarded-allow-ips, e.g.:
+#   uvicorn main:app --host 0.0.0.0 --port 8000 --proxy-headers --forwarded-allow-ips '*'
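
The same flags can also be set when launching Uvicorn programmatically; a sketch using the host/port values from the comment above:

```python
# Programmatic equivalent of the CLI flags mentioned above.
import uvicorn

if __name__ == "__main__":
    uvicorn.run(
        "main:app",
        host="0.0.0.0",
        port=8000,
        proxy_headers=True,       # trust X-Forwarded-Proto / X-Forwarded-For
        forwarded_allow_ips="*",  # accept these headers from any upstream proxy
    )
```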
# --- Data Models ---
class NodeStatusModel(BaseModel):
@@ -238,6 +264,30 @@ def get_node_health(node_data: Dict) -> str:
    return "healthy"

+def format_uptime(seconds: Optional[int]) -> str:
+    """Format an uptime in seconds as a human-readable string (e.g. "1d 2h 3m 4s")."""
+    if seconds is None:
+        return "N/A"
+    days = seconds // (3600 * 24)
+    seconds %= 3600 * 24
+    hours = seconds // 3600
+    seconds %= 3600
+    minutes = seconds // 60
+    remaining_seconds = seconds % 60
+
+    parts = []
+    if days > 0:
+        parts.append(f"{days}d")
+    if hours > 0:
+        parts.append(f"{hours}h")
+    if minutes > 0:
+        parts.append(f"{minutes}m")
+    # Include seconds when there is a remainder, or when no other part was added
+    if remaining_seconds > 0 or not parts:
+        parts.append(f"{remaining_seconds}s")
+    return " ".join(parts)
# --- API Endpoints ---
@app.get("/", response_class=HTMLResponse)
async def read_root(request: Request):
@@ -247,15 +297,71 @@ async def read_root(request: Request):
        "Web root accessed",
        extra={"client_ip": client_ip, "service_uuid": SERVICE_UUID},
    )

+    # --- Prepare initial node data for server-side rendering ---
+    current_time_utc = datetime.now(timezone.utc)
+    nodes_to_remove = []
+    for node_uuid, data in known_nodes_db.items():
+        last_seen_dt = datetime.fromisoformat(data["last_seen"]).replace(
+            tzinfo=timezone.utc
+        )
+        if (
+            current_time_utc - last_seen_dt
+        ).total_seconds() > NODE_INACTIVE_REMOVAL_THRESHOLD_SECONDS:
+            nodes_to_remove.append(node_uuid)
+            logger.info(f"Node {node_uuid} inactive for >{NODE_INACTIVE_REMOVAL_THRESHOLD_SECONDS}s; it will not be rendered initially.")
+
+    # Filter out inactive nodes for the initial render
+    active_known_nodes_db = {
+        k: v for k, v in known_nodes_db.items() if k not in nodes_to_remove
+    }
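
The staleness cutoff is plain wall-clock arithmetic; a tiny illustration (the threshold value here is assumed, the real constant is defined elsewhere in main.py):

```python
from datetime import datetime, timedelta, timezone

NODE_INACTIVE_REMOVAL_THRESHOLD_SECONDS = 300  # assumed value for illustration

now = datetime.now(timezone.utc)
last_seen = now - timedelta(seconds=301)
stale = (now - last_seen).total_seconds() > NODE_INACTIVE_REMOVAL_THRESHOLD_SECONDS
print(stale)  # True: this node would be dropped from the initial render
```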
+    initial_nodes_data = []
+    for node_uuid, data in active_known_nodes_db.items():
+        current_health = get_node_health(data)
+
+        connections = {}
+        for target_uuid in active_known_nodes_db:  # only iterate over currently active nodes
+            if target_uuid != node_uuid:
+                ping_data = database.get_ping_data(
+                    node_uuid, target_uuid, start_time="-300s"
+                )
+                latency_ms = None
+                if ping_data and ping_data["data"]["latency"]:
+                    # Take the most recent non-None, non-zero latency sample
+                    for latency in reversed(ping_data["data"]["latency"]):
+                        if latency is not None and not (isinstance(latency, float) and latency == 0.0):
+                            latency_ms = float(latency)
+                            break
+                connections[target_uuid] = latency_ms
+
+        initial_nodes_data.append(
+            {
+                "uuid": node_uuid,
+                "last_seen": data["last_seen"],  # keep the raw value for JS
+                "formatted_last_seen": datetime.fromisoformat(data["last_seen"]).strftime("%Y-%m-%d %H:%M:%S UTC"),
+                "ip": data["ip"],
+                "health_status": current_health,
+                "uptime_seconds": data.get("uptime_seconds"),
+                "formatted_uptime": format_uptime(data.get("uptime_seconds")),  # pre-formatted for the HTML
+                "load_avg": data.get("load_avg"),
+                "memory_usage_percent": data.get("memory_usage_percent"),
+                "connections": connections,
+            }
+        )
+    # --- End initial node data preparation ---
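
This list is what the commit title refers to: the grid is now rendered server-side from `nodes` instead of being assembled in JS. The real markup presumably lives in app/web/templates/index.html among the seven changed files; a hypothetical stand-alone sketch of how a template can consume the list:

```python
# Hypothetical sketch only; the actual template is index.html, not this string.
from jinja2 import Template

grid_sketch = Template(
    "{% for node in nodes %}"
    "<div class=\"node {{ node.health_status }}\" data-uuid=\"{{ node.uuid }}\">"
    "{{ node.uuid }} · up {{ node.formatted_uptime }} · seen {{ node.formatted_last_seen }}"
    "</div>"
    "{% endfor %}"
)
html = grid_sketch.render(nodes=initial_nodes_data)  # initial_nodes_data as built above
```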
    return templates.TemplateResponse(
        "index.html",
        {
            "request": request,
            "service_uuid": SERVICE_UUID,
-            "url_for": request.url_for,  # Pass url_for for dynamic URL generation
+            "url_for": request.url_for,
            "root_path": request.scope.get(
                "root_path", ""
            ),  # Pass root_path for the JS base URL
+            "nodes": initial_nodes_data,  # initial node data for server-side rendering
        },
    )
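
The latency scan in the connections loop above follows one rule: walk the series backwards and keep the first usable sample. A small stand-alone sketch of the same rule, with assumed sample data:

```python
# Same selection rule as the loop in read_root: newest non-None, non-zero sample wins.
def latest_latency(samples):
    for value in reversed(samples):
        if value is not None and not (isinstance(value, float) and value == 0.0):
            return float(value)
    return None

print(latest_latency([18.2, 17.9, None, 0.0]))  # 17.9: trailing gap and zero are skipped
print(latest_latency([None, 0.0]))              # None: no usable sample
```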