pipeline works from pi simulation to control output and strategy generation.
This commit is contained in:
@@ -1,13 +1,14 @@
|
||||
"""
|
||||
Raspberry Pi Telemetry Stream Simulator
|
||||
Raspberry Pi Telemetry Stream Simulator - Lap-Level Data
|
||||
|
||||
Reads the ALONSO_2023_MONZA_RACE CSV file row by row and simulates
|
||||
Reads the ALONSO_2023_MONZA_LAPS.csv file lap by lap and simulates
|
||||
live telemetry streaming from a Raspberry Pi sensor.
|
||||
Sends data to the HPC simulation layer via HTTP POST at intervals
|
||||
determined by the time differences between consecutive rows.
|
||||
Sends data to the HPC simulation layer via HTTP POST at fixed
|
||||
1-minute intervals between laps.
|
||||
|
||||
Usage:
|
||||
python simulate_pi_stream.py --data ALONSO_2023_MONZA_RACE --speed 1.0
|
||||
python simulate_pi_stream.py
|
||||
python simulate_pi_stream.py --interval 30 # 30 seconds between laps
|
||||
"""
|
||||
|
||||
import argparse
|
||||
@@ -19,39 +20,32 @@ import pandas as pd
|
||||
import requests
|
||||
|
||||
|
||||
def load_lap_csv(filepath: Path) -> pd.DataFrame:
    """Load lap-level telemetry data from CSV file.

    Args:
        filepath: Path to the lap-level CSV (e.g. ALONSO_2023_MONZA_LAPS.csv).

    Returns:
        DataFrame with one row per lap; the 'lap_time' column is parsed to
        timedelta when it arrives as strings.
    """
    df = pd.read_csv(filepath)

    # Convert lap_time to timedelta if it's not already
    if 'lap_time' in df.columns and df['lap_time'].dtype == 'object':
        df['lap_time'] = pd.to_timedelta(df['lap_time'])

    print(f"✓ Loaded {len(df)} laps from {filepath}")
    print(f" Laps: {df['lap_number'].min():.0f} → {df['lap_number'].max():.0f}")

    return df
||||
|
||||
def lap_to_json(row: pd.Series) -> Dict[str, Any]:
    """Convert a lap DataFrame row to a JSON-compatible dictionary.

    NaN values are mapped to safe fallbacks (None / 0.0 / 'UNKNOWN') so the
    resulting dict is always JSON-serializable.

    Note: the diff-mangled source carried 'rainfall' twice and both a float
    and an int form of 'tire_life_laps'; this version keeps a single
    'rainfall' key and the integer tire age.
    """
    data = {
        'lap_number': int(row['lap_number']) if pd.notna(row['lap_number']) else None,
        'total_laps': int(row['total_laps']) if pd.notna(row['total_laps']) else None,
        'speed': float(row['speed']) if pd.notna(row['speed']) else 0.0,
        'throttle': float(row['throttle']) if pd.notna(row['throttle']) else 0.0,
        'brake': bool(row['brake']),
        'lap_time': str(row['lap_time']) if pd.notna(row['lap_time']) else None,
        'average_speed': float(row['average_speed']) if pd.notna(row['average_speed']) else 0.0,
        'max_speed': float(row['max_speed']) if pd.notna(row['max_speed']) else 0.0,
        'tire_compound': str(row['tire_compound']) if pd.notna(row['tire_compound']) else 'UNKNOWN',
        'tire_life_laps': int(row['tire_life_laps']) if pd.notna(row['tire_life_laps']) else 0,
        'track_temperature': float(row['track_temperature']) if pd.notna(row['track_temperature']) else 0.0,

        # Additional race context fields
        'track_name': 'Monza',  # From ALONSO_2023_MONZA_RACE
        'driver_name': 'Alonso',
        'current_position': 5,  # Mock position, could be varied
        # Linear burn model: full tank at lap 0, ~20% remaining at the final lap.
        'fuel_level': max(0.1, 1.0 - (float(row['lap_number']) / float(row['total_laps']) * 0.8)) if pd.notna(row['lap_number']) and pd.notna(row['total_laps']) else 0.5,
        'rainfall': bool(row['rainfall'])
    }
    return data
||||
@@ -59,17 +53,17 @@ def row_to_json(row: pd.Series) -> Dict[str, Any]:
|
||||
def simulate_stream(
|
||||
df: pd.DataFrame,
|
||||
endpoint: str,
|
||||
speed: float = 1.0,
|
||||
interval: int = 60,
|
||||
start_lap: int = 1,
|
||||
end_lap: int = None
|
||||
):
|
||||
"""
|
||||
Simulate live telemetry streaming based on actual time intervals in the data.
|
||||
Simulate live telemetry streaming with fixed interval between laps.
|
||||
|
||||
Args:
|
||||
df: DataFrame with telemetry data
|
||||
df: DataFrame with lap-level telemetry data
|
||||
endpoint: HPC API endpoint URL
|
||||
speed: Playback speed multiplier (1.0 = real-time, 2.0 = 2x speed)
|
||||
interval: Fixed interval in seconds between laps (default: 60 seconds)
|
||||
start_lap: Starting lap number
|
||||
end_lap: Ending lap number (None = all laps)
|
||||
"""
|
||||
@@ -79,95 +73,84 @@ def simulate_stream(
|
||||
filtered_df = filtered_df[filtered_df['lap_number'] <= end_lap].copy()
|
||||
|
||||
if len(filtered_df) == 0:
|
||||
print("❌ No telemetry points in specified lap range")
|
||||
print("❌ No laps in specified lap range")
|
||||
return
|
||||
|
||||
# Reset index for easier iteration
|
||||
filtered_df = filtered_df.reset_index(drop=True)
|
||||
|
||||
print(f"\n🏁 Starting telemetry stream simulation")
|
||||
print(f"\n🏁 Starting lap-level telemetry stream simulation")
|
||||
print(f" Endpoint: {endpoint}")
|
||||
print(f" Laps: {start_lap} → {end_lap or 'end'}")
|
||||
print(f" Speed: {speed}x")
|
||||
print(f" Points: {len(filtered_df)}")
|
||||
|
||||
total_duration = (filtered_df['overall_time'].iloc[-1] - filtered_df['overall_time'].iloc[0]).total_seconds()
|
||||
print(f" Duration: {total_duration:.1f}s (real-time) → {total_duration / speed:.1f}s (playback)\n")
|
||||
print(f" Interval: {interval} seconds between laps")
|
||||
print(f" Total laps: {len(filtered_df)}")
|
||||
print(f" Est. duration: {len(filtered_df) * interval / 60:.1f} minutes\n")
|
||||
|
||||
sent_count = 0
|
||||
error_count = 0
|
||||
current_lap = start_lap
|
||||
|
||||
try:
|
||||
for i in range(len(filtered_df)):
|
||||
row = filtered_df.iloc[i]
|
||||
lap_num = int(row['lap_number'])
|
||||
|
||||
# Calculate sleep time based on time difference to next row
|
||||
if i < len(filtered_df) - 1:
|
||||
next_row = filtered_df.iloc[i + 1]
|
||||
time_diff = (next_row['overall_time'] - row['overall_time']).total_seconds()
|
||||
sleep_time = time_diff / speed
|
||||
|
||||
# Ensure positive sleep time
|
||||
if sleep_time < 0:
|
||||
sleep_time = 0
|
||||
else:
|
||||
sleep_time = 0
|
||||
# Convert lap to JSON
|
||||
lap_data = lap_to_json(row)
|
||||
|
||||
# Convert row to JSON
|
||||
telemetry_point = row_to_json(row)
|
||||
|
||||
# Send telemetry point
|
||||
# Send lap data
|
||||
try:
|
||||
response = requests.post(
|
||||
endpoint,
|
||||
json=telemetry_point,
|
||||
timeout=2.0
|
||||
json=lap_data,
|
||||
timeout=5.0
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
sent_count += 1
|
||||
progress = (i + 1) / len(filtered_df) * 100
|
||||
|
||||
# Print progress updates
|
||||
if row['lap_number'] > current_lap:
|
||||
current_lap = row['lap_number']
|
||||
progress = (i + 1) / len(filtered_df) * 100
|
||||
print(f" 📡 Lap {int(current_lap)}: {sent_count} points sent "
|
||||
f"({progress:.1f}% complete)")
|
||||
elif sent_count % 500 == 0:
|
||||
progress = (i + 1) / len(filtered_df) * 100
|
||||
print(f" 📡 Lap {int(row['lap_number'])}: {sent_count} points sent "
|
||||
f"({progress:.1f}% complete)")
|
||||
# Print lap info
|
||||
print(f" 📡 Lap {lap_num}/{int(row['total_laps'])}: "
|
||||
f"Avg Speed: {row['average_speed']:.1f} km/h, "
|
||||
f"Tire: {row['tire_compound']} (age: {int(row['tire_life_laps'])} laps) "
|
||||
f"[{progress:.0f}%]")
|
||||
|
||||
# Show response if it contains strategies
|
||||
try:
|
||||
response_data = response.json()
|
||||
if 'strategies_generated' in response_data:
|
||||
print(f" ✓ Generated {response_data['strategies_generated']} strategies")
|
||||
except:
|
||||
pass
|
||||
else:
|
||||
error_count += 1
|
||||
print(f" ⚠ HTTP {response.status_code}: {response.text[:50]}")
|
||||
print(f" ⚠ Lap {lap_num}: HTTP {response.status_code}: {response.text[:100]}")
|
||||
|
||||
except requests.RequestException as e:
|
||||
error_count += 1
|
||||
if error_count % 10 == 0:
|
||||
print(f" ⚠ Connection error ({error_count} total): {str(e)[:50]}")
|
||||
print(f" ⚠ Lap {lap_num}: Connection error: {str(e)[:100]}")
|
||||
|
||||
# Sleep until next point should be sent
|
||||
if sleep_time > 0:
|
||||
time.sleep(sleep_time)
|
||||
# Sleep for fixed interval before next lap (except for last lap)
|
||||
if i < len(filtered_df) - 1:
|
||||
time.sleep(interval)
|
||||
|
||||
print(f"\n✅ Stream complete!")
|
||||
print(f" Sent: {sent_count} points")
|
||||
print(f" Sent: {sent_count} laps")
|
||||
print(f" Errors: {error_count}")
|
||||
|
||||
except KeyboardInterrupt:
|
||||
print(f"\n⏸ Stream interrupted by user")
|
||||
print(f" Sent: {sent_count}/{len(filtered_df)} points")
|
||||
print(f" Sent: {sent_count}/{len(filtered_df)} laps")
|
||||
|
||||
|
||||
def main():
|
||||
parser = argparse.ArgumentParser(
|
||||
description="Simulate Raspberry Pi telemetry streaming from CSV data"
|
||||
description="Simulate Raspberry Pi lap-level telemetry streaming"
|
||||
)
|
||||
parser.add_argument("--endpoint", type=str, default="http://localhost:8000/telemetry",
|
||||
help="HPC API endpoint")
|
||||
parser.add_argument("--speed", type=float, default=1.0,
|
||||
help="Playback speed (1.0 = real-time, 10.0 = 10x speed)")
|
||||
parser.add_argument("--endpoint", type=str, default="http://localhost:8000/ingest/telemetry",
|
||||
help="HPC API endpoint (default: http://localhost:8000/ingest/telemetry)")
|
||||
parser.add_argument("--interval", type=int, default=60,
|
||||
help="Fixed interval in seconds between laps (default: 60)")
|
||||
parser.add_argument("--start-lap", type=int, default=1, help="Starting lap number")
|
||||
parser.add_argument("--end-lap", type=int, default=None, help="Ending lap number")
|
||||
|
||||
@@ -176,19 +159,19 @@ def main():
|
||||
try:
|
||||
# Hardcoded CSV file location in the same folder as this script
|
||||
script_dir = Path(__file__).parent
|
||||
data_path = script_dir / "ALONSO_2023_MONZA_RACE.csv"
|
||||
data_path = script_dir / "ALONSO_2023_MONZA_LAPS.csv"
|
||||
|
||||
df = load_telemetry_csv(data_path)
|
||||
df = load_lap_csv(data_path)
|
||||
simulate_stream(
|
||||
df,
|
||||
args.endpoint,
|
||||
args.speed,
|
||||
args.interval,
|
||||
args.start_lap,
|
||||
args.end_lap
|
||||
)
|
||||
except FileNotFoundError:
|
||||
print(f"❌ File not found: {data_path}")
|
||||
print(f" Make sure ALONSO_2023_MONZA_RACE.csv is in the scripts/ folder")
|
||||
print(f" Make sure ALONSO_2023_MONZA_LAPS.csv is in the scripts/ folder")
|
||||
sys.exit(1)
|
||||
except Exception as e:
|
||||
print(f"❌ Error: {e}")
|
||||
|
||||
403
scripts/simulate_pi_websocket.py
Normal file
403
scripts/simulate_pi_websocket.py
Normal file
@@ -0,0 +1,403 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
WebSocket-based Raspberry Pi Telemetry Simulator.
|
||||
|
||||
Connects to AI Intelligence Layer via WebSocket and:
|
||||
1. Streams lap telemetry to AI layer
|
||||
2. Receives control commands (brake_bias, differential_slip) from AI layer
|
||||
3. Applies control adjustments in real-time
|
||||
|
||||
Usage:
|
||||
python simulate_pi_websocket.py --interval 5 --ws-url ws://localhost:9000/ws/pi
|
||||
"""
|
||||
from __future__ import annotations
|
||||
|
||||
import argparse
|
||||
import asyncio
|
||||
import json
|
||||
import logging
|
||||
from pathlib import Path
|
||||
from typing import Dict, Any, Optional
|
||||
import sys
|
||||
|
||||
try:
|
||||
import pandas as pd
|
||||
import websockets
|
||||
from websockets.client import WebSocketClientProtocol
|
||||
except ImportError:
|
||||
print("Error: Required packages not installed.")
|
||||
print("Run: pip install pandas websockets")
|
||||
sys.exit(1)
|
||||
|
||||
# Configure logging
|
||||
logging.basicConfig(
|
||||
level=logging.INFO,
|
||||
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
|
||||
)
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class PiSimulator:
    """WebSocket-based Pi simulator with control feedback.

    Simulates the on-car Raspberry Pi: loads lap-level telemetry from a CSV,
    has each lap enriched by the HTTP enrichment service, streams the
    enriched payload to the AI layer over a WebSocket, and applies the
    control commands (brake_bias, differential_slip) received in return.
    """

    def __init__(self, csv_path: Path, ws_url: str, interval: float = 60.0, enrichment_url: str = "http://localhost:8000"):
        # csv_path: lap-level telemetry CSV to replay.
        # ws_url: AI Intelligence Layer WebSocket endpoint.
        # interval: seconds to wait between laps.
        # enrichment_url: base URL of the HTTP enrichment service.
        self.csv_path = csv_path
        self.ws_url = ws_url
        self.enrichment_url = enrichment_url
        self.interval = interval
        self.df: Optional[pd.DataFrame] = None
        # Most recent control values received from the AI layer
        # (1-10 scale; 5 = neutral starting point).
        self.current_controls = {
            "brake_bias": 5,
            "differential_slip": 5
        }

    def load_lap_csv(self) -> pd.DataFrame:
        """Load lap-level CSV data."""
        logger.info(f"Loading CSV from {self.csv_path}")
        df = pd.read_csv(self.csv_path)
        logger.info(f"Loaded {len(df)} laps")
        return df

    def lap_to_raw_payload(self, row: pd.Series) -> Dict[str, Any]:
        """
        Convert CSV row to raw lap telemetry (for enrichment service).
        This is what the real Pi would send.
        """
        return {
            "lap_number": int(row["lap_number"]),
            "total_laps": int(row["total_laps"]),
            "lap_time": str(row["lap_time"]),
            "average_speed": float(row["average_speed"]),
            "max_speed": float(row["max_speed"]),
            "tire_compound": str(row["tire_compound"]),
            "tire_life_laps": int(row["tire_life_laps"]),
            "track_temperature": float(row["track_temperature"]),
            # rainfall may be absent from older CSVs; default to dry.
            "rainfall": bool(row.get("rainfall", False))
        }

    async def enrich_telemetry(self, raw_telemetry: Dict[str, Any]) -> Optional[Dict[str, Any]]:
        """
        Send raw telemetry to enrichment service and get back enriched data.
        This simulates the Pi → Enrichment → AI flow.

        Returns:
            The enrichment service's JSON response, or None on any HTTP or
            connection failure (callers skip the lap in that case).
        """
        import aiohttp  # local import: only needed when enrichment is exercised

        try:
            async with aiohttp.ClientSession() as session:
                async with session.post(
                    f"{self.enrichment_url}/ingest/telemetry",
                    json=raw_telemetry,
                    timeout=aiohttp.ClientTimeout(total=5.0)
                ) as response:
                    if response.status == 200:
                        result = await response.json()
                        logger.info(f" ✓ Enrichment service processed lap {raw_telemetry['lap_number']}")
                        return result
                    else:
                        logger.error(f" ✗ Enrichment service error: {response.status}")
                        return None
        except Exception as e:
            logger.error(f" ✗ Failed to connect to enrichment service: {e}")
            logger.error(f" Make sure enrichment service is running: python scripts/serve.py")
            return None

    def lap_to_enriched_payload(self, row: pd.Series) -> Dict[str, Any]:
        """
        Convert CSV row to enriched telemetry payload.
        Simulates the enrichment layer output (local fallback; the live path
        in stream_telemetry uses enrich_telemetry() instead).
        """
        # Basic enrichment simulation (would normally come from enrichment service)
        lap_number = int(row["lap_number"])
        tire_age = int(row["tire_life_laps"])

        # Simple tire degradation simulation: linear wear, cliff after ~20 laps.
        tire_deg_rate = min(1.0, 0.02 * tire_age)
        tire_cliff_risk = max(0.0, min(1.0, (tire_age - 20) / 10.0))

        # Pace trend (simplified)
        pace_trend = "stable"
        if tire_age > 25:
            pace_trend = "declining"
        elif tire_age < 5:
            pace_trend = "improving"

        # Optimal pit window: imminent when tires are old, otherwise deferred.
        if tire_age > 20:
            pit_window = [lap_number + 1, lap_number + 3]
        else:
            pit_window = [lap_number + 10, lap_number + 15]

        # Performance delta (random for simulation)
        import random
        performance_delta = random.uniform(-1.5, 1.0)

        enriched_telemetry = {
            "lap": lap_number,
            "tire_degradation_rate": round(tire_deg_rate, 3),
            "pace_trend": pace_trend,
            "tire_cliff_risk": round(tire_cliff_risk, 3),
            "optimal_pit_window": pit_window,
            "performance_delta": round(performance_delta, 2)
        }

        race_context = {
            "race_info": {
                "track_name": "Monza",
                "total_laps": int(row["total_laps"]),
                "current_lap": lap_number,
                "weather_condition": "Wet" if row.get("rainfall", False) else "Dry",
                "track_temp_celsius": float(row["track_temperature"])
            },
            "driver_state": {
                "driver_name": "Alonso",
                "current_position": 5,
                "current_tire_compound": str(row["tire_compound"]).lower(),
                "tire_age_laps": tire_age,
                # Linear fuel burn over the race distance.
                "fuel_remaining_percent": max(0.0, 100.0 * (1.0 - (lap_number / int(row["total_laps"]))))
            },
            "competitors": []
        }

        return {
            "type": "telemetry",
            "lap_number": lap_number,
            "enriched_telemetry": enriched_telemetry,
            "race_context": race_context
        }

    async def stream_telemetry(self):
        """Main WebSocket streaming loop: one enriched payload per lap."""
        self.df = self.load_lap_csv()

        # Reset enrichment service state for fresh session
        logger.info(f"Resetting enrichment service state...")
        try:
            import aiohttp
            async with aiohttp.ClientSession() as session:
                async with session.post(
                    f"{self.enrichment_url}/reset",
                    timeout=aiohttp.ClientTimeout(total=5.0)
                ) as response:
                    if response.status == 200:
                        logger.info("✓ Enrichment service reset successfully")
                    else:
                        logger.warning(f"⚠ Enrichment reset returned status {response.status}")
        except Exception as e:
            # Reset is best-effort: a stale enricher degrades output but is not fatal.
            logger.warning(f"⚠ Could not reset enrichment service: {e}")
            logger.warning(" Continuing anyway (enricher may have stale state)")

        logger.info(f"Connecting to WebSocket: {self.ws_url}")

        try:
            async with websockets.connect(self.ws_url) as websocket:
                logger.info("WebSocket connected!")

                # Wait for welcome message
                welcome = await websocket.recv()
                logger.info(f"Received: {welcome}")

                # Stream each lap
                for idx, row in self.df.iterrows():
                    lap_number = int(row["lap_number"])

                    logger.info(f"\n{'='*60}")
                    logger.info(f"Lap {lap_number}/{int(row['total_laps'])}")
                    logger.info(f"{'='*60}")

                    # Build raw telemetry payload (what real Pi would send)
                    raw_telemetry = self.lap_to_raw_payload(row)
                    logger.info(f"[RAW] Lap {lap_number} telemetry prepared")

                    # Send to enrichment service for processing
                    enriched_data = await self.enrich_telemetry(raw_telemetry)

                    if not enriched_data:
                        # Still honor the lap cadence so timing stays realistic.
                        logger.error("Failed to get enrichment, skipping lap")
                        await asyncio.sleep(self.interval)
                        continue

                    # Extract enriched telemetry and race context from enrichment service
                    enriched_telemetry = enriched_data.get("enriched_telemetry")
                    race_context = enriched_data.get("race_context")

                    if not enriched_telemetry or not race_context:
                        logger.error("Invalid enrichment response, skipping lap")
                        await asyncio.sleep(self.interval)
                        continue

                    # Build WebSocket payload for AI layer
                    ws_payload = {
                        "type": "telemetry",
                        "lap_number": lap_number,
                        "enriched_telemetry": enriched_telemetry,
                        "race_context": race_context
                    }

                    # Send enriched telemetry to AI layer via WebSocket
                    await websocket.send(json.dumps(ws_payload))
                    logger.info(f"[SENT] Lap {lap_number} enriched telemetry to AI layer")

                    # Wait for control command response(s)
                    try:
                        response = await asyncio.wait_for(websocket.recv(), timeout=5.0)
                        response_data = json.loads(response)

                        if response_data.get("type") == "control_command":
                            brake_bias = response_data.get("brake_bias", 5)
                            diff_slip = response_data.get("differential_slip", 5)
                            strategy_name = response_data.get("strategy_name", "N/A")
                            message = response_data.get("message")

                            self.current_controls["brake_bias"] = brake_bias
                            self.current_controls["differential_slip"] = diff_slip

                            logger.info(f"[RECEIVED] Control Command:")
                            logger.info(f" ├─ Brake Bias: {brake_bias}/10")
                            logger.info(f" ├─ Differential Slip: {diff_slip}/10")
                            if strategy_name != "N/A":
                                logger.info(f" └─ Strategy: {strategy_name}")
                            if message:
                                logger.info(f" └─ {message}")

                            # Apply controls (in real Pi, this would adjust hardware)
                            self.apply_controls(brake_bias, diff_slip)

                            # If message indicates processing, wait for update
                            if message and "Processing" in message:
                                logger.info(" AI is generating strategies, waiting for update...")
                                try:
                                    # Strategy generation (LLM call) can be slow — long timeout.
                                    update = await asyncio.wait_for(websocket.recv(), timeout=45.0)
                                    update_data = json.loads(update)

                                    if update_data.get("type") == "control_command_update":
                                        brake_bias = update_data.get("brake_bias", 5)
                                        diff_slip = update_data.get("differential_slip", 5)
                                        strategy_name = update_data.get("strategy_name", "N/A")

                                        self.current_controls["brake_bias"] = brake_bias
                                        self.current_controls["differential_slip"] = diff_slip

                                        logger.info(f"[UPDATED] Strategy-Based Control:")
                                        logger.info(f" ├─ Brake Bias: {brake_bias}/10")
                                        logger.info(f" ├─ Differential Slip: {diff_slip}/10")
                                        logger.info(f" └─ Strategy: {strategy_name}")

                                        self.apply_controls(brake_bias, diff_slip)
                                except asyncio.TimeoutError:
                                    logger.warning("[TIMEOUT] Strategy generation took too long")

                        elif response_data.get("type") == "error":
                            logger.error(f"[ERROR] {response_data.get('message')}")

                    except asyncio.TimeoutError:
                        logger.warning("[TIMEOUT] No control command received within 5s")

                    # Wait before next lap
                    logger.info(f"Waiting {self.interval}s before next lap...")
                    await asyncio.sleep(self.interval)

                # All laps complete
                logger.info("\n" + "="*60)
                logger.info("RACE COMPLETE - All laps streamed")
                logger.info("="*60)

                # Send disconnect message
                await websocket.send(json.dumps({"type": "disconnect"}))

        except websockets.exceptions.WebSocketException as e:
            logger.error(f"WebSocket error: {e}")
            logger.error("Is the AI Intelligence Layer running on port 9000?")
        except Exception as e:
            logger.error(f"Unexpected error: {e}")

    def apply_controls(self, brake_bias: int, differential_slip: int):
        """
        Apply control adjustments to the car.
        In real Pi, this would interface with hardware controllers.
        """
        logger.info(f"[APPLYING] Setting brake_bias={brake_bias}, diff_slip={differential_slip}")

        # Simulate applying controls (in real implementation, this would:
        # - Adjust brake bias actuator
        # - Modify differential slip controller
        # - Send CAN bus messages to ECU
        # - Update dashboard display)

        # For simulation, just log the change
        if brake_bias > 6:
            logger.info(" → Brake bias shifted REAR (protecting front tires)")
        elif brake_bias < 5:
            logger.info(" → Brake bias shifted FRONT (aggressive turn-in)")
        else:
            logger.info(" → Brake bias NEUTRAL")

        if differential_slip > 6:
            logger.info(" → Differential slip INCREASED (gentler on tires)")
        elif differential_slip < 5:
            logger.info(" → Differential slip DECREASED (aggressive cornering)")
        else:
            logger.info(" → Differential slip NEUTRAL")
|
||||
|
||||
|
||||
async def main():
    """Parse CLI arguments, resolve the lap CSV, and run the WebSocket simulator."""
    parser = argparse.ArgumentParser(
        description="WebSocket-based Raspberry Pi Telemetry Simulator"
    )
    parser.add_argument(
        "--interval", type=float, default=60.0,
        help="Seconds between laps (default: 60s)"
    )
    parser.add_argument(
        "--ws-url", type=str, default="ws://localhost:9000/ws/pi",
        help="WebSocket URL for AI layer (default: ws://localhost:9000/ws/pi)"
    )
    parser.add_argument(
        "--enrichment-url", type=str, default="http://localhost:8000",
        help="Enrichment service URL (default: http://localhost:8000)"
    )
    parser.add_argument(
        "--csv", type=str, default=None,
        help="Path to lap CSV file (default: scripts/ALONSO_2023_MONZA_LAPS.csv)"
    )

    opts = parser.parse_args()

    # Explicit --csv wins; otherwise use the file bundled next to this script.
    csv_file = Path(opts.csv) if opts.csv else Path(__file__).parent / "ALONSO_2023_MONZA_LAPS.csv"

    if not csv_file.exists():
        logger.error(f"CSV file not found: {csv_file}")
        sys.exit(1)

    # Create simulator and run
    sim = PiSimulator(
        csv_path=csv_file,
        ws_url=opts.ws_url,
        enrichment_url=opts.enrichment_url,
        interval=opts.interval
    )

    logger.info("Starting WebSocket Pi Simulator")
    logger.info(f"CSV: {csv_file}")
    logger.info(f"Enrichment Service: {opts.enrichment_url}")
    logger.info(f"WebSocket URL: {opts.ws_url}")
    logger.info(f"Interval: {opts.interval}s per lap")
    logger.info("-" * 60)

    await sim.stream_telemetry()


if __name__ == "__main__":
    asyncio.run(main())
|
||||
157
scripts/test_websocket.py
Normal file
157
scripts/test_websocket.py
Normal file
@@ -0,0 +1,157 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Quick test to verify WebSocket control system.
|
||||
Tests the complete flow: Pi → AI → Control Commands
|
||||
"""
|
||||
import asyncio
|
||||
import json
|
||||
import sys
|
||||
|
||||
try:
|
||||
import websockets
|
||||
except ImportError:
|
||||
print("Error: websockets not installed")
|
||||
print("Run: pip install websockets")
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
async def test_websocket():
    """Drive one end-to-end check of the Pi → AI → control-command flow."""

    ws_url = "ws://localhost:9000/ws/pi"
    print(f"Testing WebSocket connection to {ws_url}")
    print("-" * 60)

    try:
        async with websockets.connect(ws_url) as websocket:
            print("✓ WebSocket connected!")

            # 1. Receive welcome message
            greeting = json.loads(await websocket.recv())
            print(f"✓ Welcome message: {greeting.get('message')}")

            # 2. Send test telemetry (lap 1)
            test_payload = {
                "type": "telemetry",
                "lap_number": 1,
                "enriched_telemetry": {
                    "lap": 1,
                    "tire_degradation_rate": 0.15,
                    "pace_trend": "stable",
                    "tire_cliff_risk": 0.05,
                    "optimal_pit_window": [25, 30],
                    "performance_delta": 0.0
                },
                "race_context": {
                    "race_info": {
                        "track_name": "Monza",
                        "total_laps": 51,
                        "current_lap": 1,
                        "weather_condition": "Dry",
                        "track_temp_celsius": 28.0
                    },
                    "driver_state": {
                        "driver_name": "Test Driver",
                        "current_position": 5,
                        "current_tire_compound": "medium",
                        "tire_age_laps": 1,
                        "fuel_remaining_percent": 98.0
                    },
                    "competitors": []
                }
            }

            print("\n→ Sending lap 1 telemetry...")
            await websocket.send(json.dumps(test_payload))

            # 3. Wait for response (short timeout for first laps)
            reply = json.loads(await asyncio.wait_for(websocket.recv(), timeout=5.0))

            if reply.get("type") == "control_command":
                print("✓ Received control command!")
                print(f" Brake Bias: {reply.get('brake_bias')}/10")
                print(f" Differential Slip: {reply.get('differential_slip')}/10")
                print(f" Message: {reply.get('message', 'N/A')}")
            else:
                print(f"✗ Unexpected response: {reply}")

            # 4. Send two more laps to trigger strategy generation
            for lap_num in (2, 3):
                # Reuse the same payload, bumping every lap counter in it.
                test_payload["lap_number"] = lap_num
                test_payload["enriched_telemetry"]["lap"] = lap_num
                test_payload["race_context"]["race_info"]["current_lap"] = lap_num

                print(f"\n→ Sending lap {lap_num} telemetry...")
                await websocket.send(json.dumps(test_payload))

                if lap_num == 3:
                    # Lap 3 triggers Gemini, so expect two responses
                    print(f" (lap 3 will trigger strategy generation - may take 10-30s)")

                    # First response: immediate acknowledgment
                    ack = json.loads(await asyncio.wait_for(websocket.recv(), timeout=5.0))
                    print(f"✓ Immediate response: {ack.get('message', 'Processing...')}")

                    # Second response: strategy-based controls
                    print(" Waiting for strategy generation to complete...")
                    update = json.loads(await asyncio.wait_for(websocket.recv(), timeout=45.0))

                    if update.get("type") == "control_command_update":
                        print(f"✓ Lap {lap_num} strategy-based control received!")
                        print(f" Brake Bias: {update.get('brake_bias')}/10")
                        print(f" Differential Slip: {update.get('differential_slip')}/10")

                        strategy = update.get('strategy_name')
                        if strategy and strategy != "N/A":
                            print(f" Strategy: {strategy}")
                            print(f" Total Strategies: {update.get('total_strategies')}")
                            print("✓ Strategy generation successful!")
                    else:
                        print(f"✗ Unexpected response: {update}")
                else:
                    # Laps 1-2: just one response
                    lap_reply = json.loads(await asyncio.wait_for(websocket.recv(), timeout=5.0))

                    if lap_reply.get("type") == "control_command":
                        print(f"✓ Lap {lap_num} control command received!")
                        print(f" Brake Bias: {lap_reply.get('brake_bias')}/10")
                        print(f" Differential Slip: {lap_reply.get('differential_slip')}/10")

            # 5. Disconnect
            print("\n→ Sending disconnect...")
            await websocket.send(json.dumps({"type": "disconnect"}))

            print("\n" + "=" * 60)
            print("✓ ALL TESTS PASSED!")
            print("=" * 60)
            print("\nWebSocket control system is working correctly.")
            print("Ready to run: python scripts/simulate_pi_websocket.py")

    except websockets.exceptions.WebSocketException as e:
        print(f"\n✗ WebSocket error: {e}")
        print("\nMake sure the AI Intelligence Layer is running:")
        print(" cd ai_intelligence_layer && python main.py")
        sys.exit(1)

    except asyncio.TimeoutError:
        print("\n✗ Timeout waiting for response")
        print("AI layer may be processing (Gemini API can be slow)")
        print("Check ai_intelligence_layer logs for details")
        sys.exit(1)

    except Exception as e:
        print(f"\n✗ Unexpected error: {e}")
        import traceback
        traceback.print_exc()
        sys.exit(1)


if __name__ == "__main__":
    print("WebSocket Control System Test")
    print("=" * 60)
    asyncio.run(test_websocket())
|
||||
Reference in New Issue
Block a user