250 lines
11 KiB
Rust
250 lines
11 KiB
Rust
use std::sync::Arc;
|
|
use futures::{SinkExt, StreamExt};
|
|
use tokio::sync::mpsc;
|
|
use tokio_tungstenite::{connect_async, tungstenite::Message};
|
|
|
|
use crate::agent::{self, AgentEvent};
|
|
use crate::exec::LocalExecutor;
|
|
use crate::llm::LlmClient;
|
|
use crate::sink::{AgentUpdate, ServiceManager};
|
|
use crate::worker::{ServerToWorker, WorkerInfo, WorkerToServer};
|
|
|
|
fn collect_worker_info(name: &str) -> WorkerInfo {
|
|
let cpu = std::fs::read_to_string("/proc/cpuinfo").ok()
|
|
.and_then(|s| s.lines().find(|l| l.starts_with("model name"))
|
|
.map(|l| l.split(':').nth(1).unwrap_or("").trim().to_string()))
|
|
.unwrap_or_else(|| "unknown".into());
|
|
let memory = std::fs::read_to_string("/proc/meminfo").ok()
|
|
.and_then(|s| s.lines().find(|l| l.starts_with("MemTotal"))
|
|
.and_then(|l| l.split_whitespace().nth(1))
|
|
.and_then(|kb| kb.parse::<u64>().ok())
|
|
.map(|kb| format!("{:.1} GB", kb as f64 / 1_048_576.0)))
|
|
.unwrap_or_else(|| "unknown".into());
|
|
let gpu = std::process::Command::new("nvidia-smi")
|
|
.arg("--query-gpu=name").arg("--format=csv,noheader").output().ok()
|
|
.and_then(|o| if o.status.success() { Some(String::from_utf8_lossy(&o.stdout).trim().to_string()) } else { None })
|
|
.unwrap_or_else(|| "none".into());
|
|
WorkerInfo {
|
|
name: name.to_string(), cpu, memory, gpu,
|
|
os: std::env::consts::OS.to_string(),
|
|
kernel: std::process::Command::new("uname").arg("-r").output().ok()
|
|
.map(|o| String::from_utf8_lossy(&o.stdout).trim().to_string())
|
|
.unwrap_or_else(|| "unknown".into()),
|
|
}
|
|
}
|
|
|
|
/// Shared WebSocket sender that can be swapped on reconnect.
///
/// `None` means the connection is currently down: `run` clears the slot when
/// `connect_and_run` returns, and `connect_and_run` installs a fresh sink
/// after registration. Long-lived relay and keepalive tasks lock this slot
/// per send, so they survive across reconnects instead of holding a stale
/// sink half.
type SharedWsTx = Arc<tokio::sync::Mutex<Option<futures::stream::SplitSink<
    tokio_tungstenite::WebSocketStream<tokio_tungstenite::MaybeTlsStream<tokio::net::TcpStream>>,
    Message
>>>>;
|
|
|
|
pub async fn run(server_url: &str, worker_name: &str, llm_config: &crate::LlmConfig) -> anyhow::Result<()> {
|
|
tracing::info!("Tori worker '{}' connecting to {} (model={})", worker_name, server_url, llm_config.model);
|
|
|
|
let svc_mgr = ServiceManager::new(9100);
|
|
let ws_tx: SharedWsTx = Arc::new(tokio::sync::Mutex::new(None));
|
|
let comment_tx: Arc<tokio::sync::Mutex<Option<mpsc::Sender<AgentEvent>>>> =
|
|
Arc::new(tokio::sync::Mutex::new(None));
|
|
|
|
loop {
|
|
match connect_and_run(server_url, worker_name, llm_config, &svc_mgr, &ws_tx, &comment_tx).await {
|
|
Ok(()) => tracing::info!("Connection closed, reconnecting in 5s..."),
|
|
Err(e) => tracing::error!("Worker error: {}, reconnecting in 5s...", e),
|
|
}
|
|
// Clear ws_tx so relay tasks know the connection is gone
|
|
*ws_tx.lock().await = None;
|
|
tokio::time::sleep(std::time::Duration::from_secs(5)).await;
|
|
}
|
|
}
|
|
|
|
/// One full connection lifecycle: dial the server, register, then service
/// messages until the socket closes or errors.
///
/// The sink half of the socket is parked in `shared_ws_tx` so that the
/// keepalive task and per-workflow relay tasks (which may outlive this
/// connection) can send through whatever connection is currently live.
/// Returns `Ok(())` on a clean close; the caller (`run`) clears
/// `shared_ws_tx` and reconnects either way.
async fn connect_and_run(
    server_url: &str,
    worker_name: &str,
    llm_config: &crate::LlmConfig,
    svc_mgr: &ServiceManager,
    shared_ws_tx: &SharedWsTx,
    comment_tx: &Arc<tokio::sync::Mutex<Option<mpsc::Sender<AgentEvent>>>>,
) -> anyhow::Result<()> {
    let (ws_stream, _) = connect_async(server_url).await?;
    let (mut ws_tx, mut ws_rx) = ws_stream.split();

    // Register: send our hardware/OS profile as the first frame.
    let info = collect_worker_info(worker_name);
    let register_msg = serde_json::to_string(&WorkerToServer::Register { info })?;
    ws_tx.send(Message::Text(register_msg.into())).await?;

    // Wait for registration ack. Non-"registered" text frames are skipped.
    // NOTE(review): if the stream ends (None) before an ack arrives, this loop
    // falls through and we proceed as if registered; the main loop below will
    // then exit immediately, so the net effect is a clean-close retry — but an
    // explicit bail here would be clearer. Confirm intended.
    while let Some(msg) = ws_rx.next().await {
        match msg? {
            Message::Text(text) => {
                let v: serde_json::Value = serde_json::from_str(&text)?;
                if v["type"] == "registered" {
                    tracing::info!("Registered as '{}'", v["name"]);
                    break;
                }
            }
            Message::Close(_) => anyhow::bail!("Connection closed during registration"),
            _ => {}
        }
    }

    // Store the new ws_tx so relay tasks can use it
    *shared_ws_tx.lock().await = Some(ws_tx);

    // Ping keepalive: every 30s, send a Ping through the shared sink.
    // Exits when the slot is empty (connection torn down) or a send fails.
    let ping_tx = shared_ws_tx.clone();
    let ping_task = tokio::spawn(async move {
        let mut interval = tokio::time::interval(std::time::Duration::from_secs(30));
        loop {
            interval.tick().await;
            let guard = ping_tx.lock().await;
            if guard.is_none() { break; }
            // Can't send while holding mutex with Option, drop and re-acquire
            // NOTE(review): the drop/re-acquire makes the first check advisory
            // only — another task may clear the slot in between, which is why
            // the `else` branch below re-checks. The first lock is redundant
            // (the second `if let` suffices) but harmless.
            drop(guard);
            let mut guard = ping_tx.lock().await;
            if let Some(ref mut tx) = *guard {
                if tx.send(Message::Ping(vec![].into())).await.is_err() {
                    // Send failure: mark the connection dead so relay tasks
                    // stop trying, and stop pinging.
                    *guard = None;
                    break;
                }
            } else {
                break;
            }
        }
    });

    // Main message loop: decode server frames and dispatch.
    while let Some(msg) = ws_rx.next().await {
        let text = match msg? {
            Message::Text(t) => t,
            Message::Close(_) => break,
            Message::Pong(_) => continue,
            _ => continue,
        };

        // Malformed frames are logged and skipped, never fatal.
        let server_msg: ServerToWorker = match serde_json::from_str(&text) {
            Ok(m) => m,
            Err(e) => { tracing::warn!("Bad server message: {}", e); continue; }
        };

        match server_msg {
            ServerToWorker::WorkflowAssign {
                workflow_id, project_id, requirement,
                template_id: _, initial_state, require_plan_approval,
            } => {
                tracing::info!("Received workflow: {} (project {})", workflow_id, project_id);

                let llm = LlmClient::new(llm_config);
                let exec = LocalExecutor::new(None);
                let workdir = format!("workspaces/{}", project_id);
                let instructions = String::new();

                // Ensure workspace has a venv (created once per project dir).
                let _ = tokio::fs::create_dir_all(&workdir).await;
                let venv_path = format!("{}/.venv", workdir);
                if !std::path::Path::new(&venv_path).exists() {
                    tracing::info!("Setting up venv in {}", workdir);
                    let _ = exec.execute("uv venv .venv", &workdir).await;
                }

                // update channel → relay to shared ws_tx. The relay task owns
                // the receiving half and forwards each AgentUpdate as a
                // WorkerToServer::Update frame.
                let (update_tx, mut update_rx) = mpsc::channel::<AgentUpdate>(64);
                let relay_ws_tx = shared_ws_tx.clone();
                let wf_id_clone = workflow_id.clone();
                tokio::spawn(async move {
                    while let Some(update) = update_rx.recv().await {
                        let msg = WorkerToServer::Update {
                            workflow_id: wf_id_clone.clone(),
                            update,
                        };
                        let json = match serde_json::to_string(&msg) {
                            Ok(j) => j,
                            Err(_) => continue,
                        };
                        let mut guard = relay_ws_tx.lock().await;
                        if let Some(ref mut tx) = *guard {
                            if tx.send(Message::Text(json.into())).await.is_err() {
                                tracing::warn!("WebSocket send failed, buffering...");
                                *guard = None;
                                // Don't break — keep draining update_rx so agent doesn't block
                            }
                        }
                        // If ws_tx is None, updates are lost (reconnect will happen)
                    }
                });

                // event channel for comments: the server's Comment frames are
                // routed to the agent through this sender.
                // NOTE(review): the slot holds only the latest workflow's
                // sender, so a second concurrent WorkflowAssign would steal
                // comment routing from the first — presumably one workflow at
                // a time is expected; confirm against the server's scheduler.
                let (evt_tx, mut evt_rx) = mpsc::channel::<AgentEvent>(32);
                *comment_tx.lock().await = Some(evt_tx);

                let svc = svc_mgr.clone();
                let wf_id = workflow_id.clone();
                let pid = project_id.clone();
                tokio::spawn(async move {
                    let result = agent::run_agent_loop(
                        &llm, &exec, &update_tx, &mut evt_rx,
                        &pid, &wf_id, &requirement, &workdir, &svc,
                        &instructions, initial_state, None, require_plan_approval,
                    ).await;

                    let final_status = if result.is_ok() { "done" } else { "failed" };
                    let reason = if let Err(ref e) = result { format!("{}", e) } else { String::new() };
                    if let Err(ref e) = result {
                        tracing::error!("Workflow {} failed: {}", wf_id, e);
                        let _ = update_tx.send(AgentUpdate::Error { message: format!("{}", e) }).await;
                    }

                    // Sync workspace files to server before reporting completion.
                    sync_workspace(&update_tx, &pid, &workdir).await;

                    let _ = update_tx.send(AgentUpdate::WorkflowComplete {
                        workflow_id: wf_id.clone(), status: final_status.into(), reason,
                    }).await;
                    tracing::info!("Workflow {} completed: {}", wf_id, final_status);
                });
            }

            ServerToWorker::Comment { workflow_id, content } => {
                // Forward to whichever workflow currently owns the comment slot;
                // dropped silently if no agent is listening.
                if let Some(ref tx) = *comment_tx.lock().await {
                    let _ = tx.send(AgentEvent::Comment { workflow_id, content }).await;
                }
            }
        }
    }

    ping_task.abort();
    Ok(())
}
|
|
|
|
/// Sync all workspace files to server via FileSync updates.
|
|
async fn sync_workspace(update_tx: &mpsc::Sender<AgentUpdate>, project_id: &str, workdir: &str) {
|
|
use base64::Engine;
|
|
let base = std::path::Path::new(workdir);
|
|
if !base.exists() { return; }
|
|
let mut stack = vec![base.to_path_buf()];
|
|
let mut count = 0u32;
|
|
while let Some(dir) = stack.pop() {
|
|
let mut entries = match tokio::fs::read_dir(&dir).await { Ok(e) => e, Err(_) => continue };
|
|
while let Ok(Some(entry)) = entries.next_entry().await {
|
|
let name = entry.file_name().to_string_lossy().to_string();
|
|
if matches!(name.as_str(), ".venv" | "__pycache__" | ".git" | "node_modules" | ".mypy_cache") { continue; }
|
|
let path = entry.path();
|
|
if path.is_dir() { stack.push(path); }
|
|
else if let Ok(meta) = entry.metadata().await {
|
|
if meta.len() > 1_048_576 { continue; }
|
|
if let Ok(bytes) = tokio::fs::read(&path).await {
|
|
let rel = path.strip_prefix(base).unwrap_or(&path);
|
|
let _ = update_tx.send(AgentUpdate::FileSync {
|
|
project_id: project_id.to_string(),
|
|
path: rel.to_string_lossy().to_string(),
|
|
data_b64: base64::engine::general_purpose::STANDARD.encode(&bytes),
|
|
}).await;
|
|
count += 1;
|
|
}
|
|
}
|
|
}
|
|
}
|
|
tracing::info!("Synced {} files from {}", count, workdir);
|
|
}
|