It works now
client.py (new file, 163 lines)

@@ -0,0 +1,163 @@
import os
import uuid
import time
import requests
import random
import json
import logging
from datetime import datetime, timezone


# --- Client Configuration ---
# The UUID of THIS client node. Generated on startup.
# Can be overridden by an environment variable for persistent client identity.
NODE_UUID = os.environ.get("NODE_UUID", str(uuid.uuid4()))

# The UUID of the target monitoring service (the main.py server).
# IMPORTANT: This MUST match the SERVICE_UUID of your running FastAPI server.
# You can get this from the server's initial console output or by accessing its root endpoint ('/').
# Replace the placeholder string below with your actual server's SERVICE_UUID.
# For example: TARGET_SERVICE_UUID = "a1b2c3d4-e5f6-7890-1234-567890abcdef"
TARGET_SERVICE_UUID = os.environ.get(
    "TARGET_SERVICE_UUID", "REPLACE_ME_WITH_YOUR_SERVER_SERVICE_UUID"
)

# The base URL of the FastAPI monitoring service
SERVER_BASE_URL = os.environ.get("SERVER_URL", "http://localhost:8000")

# How often to send status updates (in seconds)
UPDATE_INTERVAL_SECONDS = int(os.environ.get("UPDATE_INTERVAL_SECONDS", 5))
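
# Example launch with these settings overridden via environment variables
# (illustrative; the UUID is the placeholder example from above -- use your
# server's real SERVICE_UUID):
#   TARGET_SERVICE_UUID=a1b2c3d4-e5f6-7890-1234-567890abcdef \
#   SERVER_URL=http://localhost:8000 UPDATE_INTERVAL_SECONDS=10 python client.py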

# --- Logging Configuration ---
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger("NodeClient")

# --- Global state for simulation ---
uptime_seconds = 0
# Dictionary to store UUIDs of other nodes received from the server
# Format: { "node_uuid_str": { "last_seen": "iso_timestamp", "ip": "..." } }
known_peers = {}


# --- Data Generation Functions ---

def generate_node_status_data():
    """Generates simulated node status metrics."""
    global uptime_seconds
    uptime_seconds += UPDATE_INTERVAL_SECONDS + random.randint(0, 2)  # Simulate slight variation

    # Simulate load average (3 values: 1-min, 5-min, 15-min)
    # Load averages will fluctuate.
    load_avg = [
        round(random.uniform(0.1, 2.0), 2),
        round(random.uniform(0.1, 1.8), 2),
        round(random.uniform(0.1, 1.5), 2)
    ]

    # Simulate memory usage percentage
    memory_usage_percent = round(random.uniform(30.0, 90.0), 2)

    return {
        "uptime_seconds": uptime_seconds,
        "load_avg": load_avg,
        "memory_usage_percent": memory_usage_percent
    }
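
# Example of the dict generate_node_status_data() returns (values are illustrative):
#   {"uptime_seconds": 35, "load_avg": [0.42, 0.37, 0.29], "memory_usage_percent": 61.3}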

def generate_ping_data():
    """Generates simulated ping latencies to known peers."""
    pings = {}

    # Simulate ping to self (loopback) - always very low latency
    pings[str(NODE_UUID)] = round(random.uniform(0.1, 1.0), 2)

    # Simulate pings to other known peers
    for peer_uuid in known_peers.keys():
        if peer_uuid != str(NODE_UUID):  # Don't ping self twice
            # Varying latency for external peers
            pings[peer_uuid] = round(random.uniform(10.0, 200.0), 2)
    return pings


# --- Main Client Logic ---

def run_client():
    global known_peers
    logger.info(f"Starting Node Client {NODE_UUID}")
    logger.info(f"Target Service UUID: {TARGET_SERVICE_UUID}")
    logger.info(f"Server URL: {SERVER_BASE_URL}")
    logger.info(f"Update Interval: {UPDATE_INTERVAL_SECONDS} seconds")

    if TARGET_SERVICE_UUID == "REPLACE_ME_WITH_YOUR_SERVER_SERVICE_UUID":
        logger.error("-" * 50)
        logger.error("ERROR: TARGET_SERVICE_UUID is not set correctly!")
        logger.error("Please replace 'REPLACE_ME_WITH_YOUR_SERVER_SERVICE_UUID' in client.py")
        logger.error("or set the environment variable TARGET_SERVICE_UUID.")
        logger.error("You can find the server's UUID by running main.py and checking its console output")
        logger.error("or by visiting 'http://localhost:8000/' in your browser.")
        logger.error("-" * 50)
        return

    while True:
        try:
            # 1. Generate status data
            status_data = generate_node_status_data()
            ping_data = generate_ping_data()

            # 2. Construct the payload matching the StatusUpdate model
            # Use datetime.now(timezone.utc) for timezone-aware UTC timestamp
            payload = {
                "node": str(NODE_UUID),
                "timestamp": datetime.now(timezone.utc).isoformat(),
                "status": status_data,
                "pings": ping_data
            }
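            # The serialized JSON body looks roughly like this (values illustrative):
            #   {"node": "<node-uuid>", "timestamp": "2024-01-01T12:00:00+00:00",
            #    "status": {...}, "pings": {"<peer-uuid>": 42.0}}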

            # 3. Define the endpoint URL
            endpoint_url = f"{SERVER_BASE_URL}/{TARGET_SERVICE_UUID}/{NODE_UUID}/"
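            # e.g. http://localhost:8000/<service-uuid>/<node-uuid>/ with the defaults above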

            # 4. Send the PUT request
            logger.info(f"Sending update to {endpoint_url}. Uptime: {status_data['uptime_seconds']}s, Load: {status_data['load_avg']}, Pings: {len(ping_data)}")

            response = requests.put(endpoint_url, json=payload, timeout=10)  # 10-second timeout

            # 5. Process the response
            if response.status_code == 200:
                response_data = response.json()
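                # Assumed response shape (defined by the server in main.py, which is not
                # part of this commit; values illustrative):
                #   {"message": "...", "peers": {"<node-uuid>": {"last_seen": "...", "ip": "..."}}}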
logger.info(f"Successfully sent update. Server message: '{response_data.get('message')}'")
|
||||
|
||||
if "peers" in response_data and isinstance(response_data["peers"], dict):
|
||||
# Update known_peers, converting keys to strings from JSON
|
||||
new_peers = {k: v for k, v in response_data["peers"].items()}
|
||||
|
||||
# Log if new peers are discovered
|
||||
newly_discovered = set(new_peers.keys()) - set(known_peers.keys())
|
||||
if newly_discovered:
|
||||
logger.info(f"Discovered new peer(s): {', '.join(newly_discovered)}")
|
||||
|
||||
known_peers = new_peers
|
||||
logger.info(f"Total known peers (including self if returned by server): {len(known_peers)}")
|
||||
else:
|
||||
logger.warning("Server response did not contain a valid 'peers' field or it was empty.")
|
||||
else:
|
||||
logger.error(f"Failed to send update. Status code: {response.status_code}, Response: {response.text}")
|
||||
if response.status_code == 404:
|
||||
logger.error("Hint: The TARGET_SERVICE_UUID might be incorrect, or the server isn't running at this endpoint.")
|
||||
elif response.status_code == 422: # Pydantic validation error
|
||||
logger.error(f"Server validation error (422 Unprocessable Entity): {response.json()}")
|
||||
|
||||
except requests.exceptions.Timeout:
|
||||
logger.error(f"Request timed out after {10} seconds. Is the server running and responsive?")
|
||||
except requests.exceptions.ConnectionError as e:
|
||||
logger.error(f"Connection error: {e}. Is the server running at {SERVER_BASE_URL}?")
|
||||
except requests.exceptions.RequestException as e:
|
||||
logger.error(f"An unexpected request error occurred: {e}", exc_info=True)
|
||||
except json.JSONDecodeError:
|
||||
logger.error(f"Failed to decode JSON response: {response.text}. Is the server returning valid JSON?")
|
||||
except Exception as e:
|
||||
logger.error(f"An unexpected error occurred in the client loop: {e}", exc_info=True)
|
||||
|
||||
# 6. Wait for the next update
|
||||
time.sleep(UPDATE_INTERVAL_SECONDS)
|
||||
|
||||
if __name__ == "__main__":
|
||||
run_client()
|