initial integration with telem viz

This commit is contained in:
2026-01-02 15:36:50 -05:00
parent 59b5679dda
commit 275cb07c4c
16 changed files with 1408 additions and 190 deletions

121
ground/src/command.rs Normal file
View File

@@ -0,0 +1,121 @@
use api::client::Client;
use api::client::command::CommandRegistry;
use api::macros::IntoCommandDefinition;
use chrono::TimeDelta;
use itertools::Itertools;
use log::{error, trace};
use nautilus_common::command::{Command, OwnedCommandHeader, SetPin, ValidPriorityCommand};
use nautilus_common::udp::tokio::AsyncUdpSocketExt;
use std::fmt::Debug;
use std::net::SocketAddr;
use std::sync::Arc;
use tokio::net::UdpSocket;
use tokio::select;
use tokio::sync::RwLock;
use tokio::sync::mpsc::{Sender, channel};
use tokio_util::sync::CancellationToken;
/// Forwards UI-originated switch commands to the flight vehicle over UDP.
///
/// Handlers registered with the backend `CommandRegistry` serialize each
/// command and queue it; the run loop sends queued commands to the address
/// currently stored in `flight_addr`.
pub struct CommandHandler<'a> {
    /// Backend registry where per-switch command handlers are registered.
    cmd: CommandRegistry,
    /// Last-known flight address, shared with the telemetry handler
    /// (which updates it from received packets).
    flight_addr: &'a RwLock<SocketAddr>,
    /// Shared socket used to transmit serialized commands.
    udp: &'a UdpSocket,
    /// Cooperative shutdown signal for the run loop.
    cancel: CancellationToken,
}
/// Payload accepted by a `switch.<bank>.<pin>.set` command handler.
#[derive(IntoCommandDefinition)]
struct SetPinCommand {
    // Requested boolean state for the pin; forwarded as `SetPin::value`.
    state: bool,
}
impl<'a> CommandHandler<'a> {
    /// Create a command handler.
    ///
    /// `client` connects to the backend command registry; `flight_addr`
    /// supplies the current flight address (shared with the telemetry
    /// handler); `udp` is the socket used to transmit; `cancel` stops the
    /// run loop.
    pub fn new(
        client: Arc<Client>,
        flight_addr: &'a RwLock<SocketAddr>,
        udp: &'a UdpSocket,
        cancel: CancellationToken,
    ) -> Self {
        Self {
            cmd: CommandRegistry::new(client),
            flight_addr,
            udp,
            cancel,
        }
    }

    /// Run the command-forwarding loop until cancellation.
    ///
    /// Registers one `switch.<bank>.<pin>.set` handler per pin (banks "a"
    /// and "b", pins 0..16). Each handler serializes a `SetPin` command and
    /// queues it; this loop drains the queue and sends each command to the
    /// current flight address over UDP.
    ///
    /// Blocks on a dedicated current-thread Tokio runtime, so it is expected
    /// to run on its own thread.
    ///
    /// # Errors
    /// Returns an error only if the Tokio runtime fails to build; UDP send
    /// failures are logged and the loop continues.
    pub fn run(self) -> anyhow::Result<()> {
        let runtime = tokio::runtime::Builder::new_current_thread()
            .enable_all()
            .build()?;
        runtime.block_on(async move {
            // Bounded queue between the command handlers and the send loop.
            let (outgoing_commands_tx, mut outgoing_commands_rx) = channel::<OwnedCommandHeader>(16);
            // Register a handler for every (bank, pin) pair.
            let commands = ["a", "b"].iter().cartesian_product(0..16)
                .map(|(bank, i)| {
                    // Flight-side endpoint for this bank's pin writes.
                    let command_name = format!("/mcp23017{bank}/set");
                    let outgoing_commands_tx = outgoing_commands_tx.clone();
                    self.cmd.register_handler(
                        format!("switch.{bank}.{i}.set"),
                        move |header, cmd: SetPinCommand| -> anyhow::Result<_> {
                            trace!("Setting Switch {bank} {i}");
                            // Non-blocking enqueue: errors (e.g. full queue)
                            // are returned to the command's issuer.
                            outgoing_commands_tx.try_send_command(
                                &command_name,
                                &ValidPriorityCommand {
                                    inner: SetPin {
                                        pin: i,
                                        value: cmd.state,
                                    },
                                    // Command goes stale 5 s after it was
                                    // issued (header timestamp, not send time).
                                    valid_until: header.timestamp + TimeDelta::seconds(5),
                                    priority: 0,
                                }
                            )?;
                            Ok("Command Executed Successfully".to_string())
                        }
                    )
                })
                .collect::<Vec<_>>();
            let mut buffer = [0u8; 512];
            while !self.cancel.is_cancelled() {
                select! {
                    () = self.cancel.cancelled() => { break; }
                    outgoing = outgoing_commands_rx.recv() => {
                        match outgoing {
                            // Channel closed: every sender was dropped.
                            None => break,
                            Some(outgoing) => {
                                // Send failures are logged, not fatal.
                                match self.udp.send_postcard(&outgoing, &mut buffer, *self.flight_addr.read().await).await {
                                    Ok(()) => {},
                                    Err(err) => error!("Failed to Send Outgoing {err}"),
                                }
                            }
                        }
                    }
                }
            }
            // Explicit Drops: keep the registration handles (and self) alive
            // for the whole loop above. NOTE(review): presumably dropping a
            // handle deregisters its handler — TODO confirm against the api
            // crate.
            drop(commands);
            drop(self);
        });
        Ok(())
    }
}
/// Convenience extension for queueing serialized commands on an mpsc sender.
trait SenderExt {
    /// Serialize `data` (postcard, per the impl below) under `name` and
    /// enqueue it without blocking.
    ///
    /// # Errors
    /// Fails if serialization fails or the channel is full or closed.
    fn try_send_command<T: Command + Debug>(&self, name: &str, data: &T) -> anyhow::Result<()>;
}
impl SenderExt for Sender<OwnedCommandHeader> {
    /// Serialize `data` with postcard, wrap it in an `OwnedCommandHeader`
    /// named `name`, and enqueue it without blocking.
    fn try_send_command<T: Command + Debug>(&self, name: &str, data: &T) -> anyhow::Result<()> {
        trace!("{data:?}");
        // Build the header first so serialization errors surface before we
        // touch the channel.
        let header = OwnedCommandHeader {
            name: name.to_string(),
            data: postcard::to_allocvec(data)?,
        };
        self.try_send(header)?;
        Ok(())
    }
}

View File

@@ -1,15 +1,24 @@
#![warn(clippy::all, clippy::pedantic)]
use anyhow::Result;
use chrono::{TimeDelta, Utc};
use log::{error, info};
use nautilus_common::add_ctrlc_handler;
use nautilus_common::command::{SetPin, ValidPriorityCommand};
use nautilus_common::telemetry::Telemetry;
use nautilus_common::udp::{UdpRecvPostcardError, UdpSocketExt};
use std::net::{IpAddr, Ipv4Addr, SocketAddr, UdpSocket};
use nautilus_common::udp::tokio::AsyncUdpSocketExt;
use std::net::{IpAddr, Ipv4Addr, SocketAddr};
mod command;
mod telemetry;
use crate::command::CommandHandler;
use crate::telemetry::TelemetryHandler;
use anyhow::{Result, anyhow, bail};
use api::client::Client;
use futures::executor::block_on;
use futures::future::{Either, select};
use log::{info, warn};
use nautilus_common::add_ctrlc_handler_cancel;
use std::sync::Arc;
use std::sync::atomic::{AtomicBool, Ordering};
use std::time::Duration;
use std::thread::{Builder, scope};
use tokio::net::UdpSocket;
use tokio::pin;
use tokio::sync::RwLock;
use tokio_util::sync::CancellationToken;
/// Run the Ground Software
///
@@ -21,86 +30,78 @@ pub fn run() -> Result<()> {
env!("CARGO_PKG_VERSION")
);
let running = Arc::new(AtomicBool::new(true));
let udp_shutdown = CancellationToken::new();
let cancel = udp_shutdown.child_token();
add_ctrlc_handler_cancel(cancel.clone())?;
add_ctrlc_handler(running.clone())?;
let (udp_thread, udp) = {
let udp_shutdown = udp_shutdown.clone();
let (udp_tx, udp) = tokio::sync::oneshot::channel();
let mut flight_addr = None;
let bind_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 14000);
let udp = UdpSocket::bind(bind_addr)?;
udp.set_read_timeout(Some(Duration::from_millis(100)))?;
let udp_thread = Builder::new()
.name("flight-udp-connection-handler".to_string())
.spawn(move || {
let runtime = tokio::runtime::Builder::new_current_thread()
.enable_all()
.build()?;
let mut buffer = [0u8; 512];
runtime.block_on(async {
let udp = UdpSocket::bind("0.0.0.0:14000").await?;
udp_tx
.send(udp)
.map_err(|_| anyhow!("Couldn't complete UDP establish"))?;
Ok(()) as Result<()>
})?;
let mut do_once = true;
runtime.block_on(udp_shutdown.cancelled());
while running.load(Ordering::Relaxed) {
match udp.recv_postcard::<Telemetry>(&mut buffer) {
Ok((tlm, addr)) => {
flight_addr = Some(addr);
info!("{tlm:?}");
Ok(()) as Result<()>
})?;
if do_once {
do_once = false;
udp.send_command(
"/mcp23017a/set",
&ValidPriorityCommand {
inner: SetPin {
pin: 0,
value: true,
},
valid_until: Utc::now() + TimeDelta::seconds(5),
priority: 0,
},
addr,
)?;
}
}
Err(UdpRecvPostcardError::NoData) => {
// NoOp
}
Err(err) => {
error!("Rx error: {err}");
}
}
let f1 = cancel.cancelled();
pin!(f1);
let udp = block_on(async {
let f2 = udp;
select(f1, f2).await
});
let udp = match udp {
Either::Left(_) => bail!("Cancelled Before UDP established"),
Either::Right(x) => x.0?,
};
(udp_thread, udp)
};
let flight_addr = RwLock::new(SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0));
let lock = flight_addr.blocking_write();
scope(|scope| {
let client = Arc::new(Client::connect("ws://localhost:8080/backend")?);
let tlm = TelemetryHandler::new(client.clone(), &flight_addr, lock, &udp, cancel.clone());
let cmd = CommandHandler::new(client, &flight_addr, &udp, cancel.clone());
let tlm_thread = Builder::new()
.name("telemetry-handler".to_string())
.spawn_scoped(scope, move || tlm.run())?; // move to take ownership (drop when exited)
let cmd_thread = Builder::new()
.name("command-handler".to_string())
.spawn_scoped(scope, move || cmd.run())?;
// Force the thread panics into anyhow errors
tlm_thread.join().map_err(|e| anyhow!("{e:?}"))??;
cmd_thread.join().map_err(|e| anyhow!("{e:?}"))??;
Ok(()) as Result<()>
})?;
info!("Sending Shutdown Command");
if let Ok(flight_addr) = flight_addr.try_read() {
block_on(async { udp.send_command("/shutdown", &(), *flight_addr).await })?;
}
if let Some(flight_addr) = flight_addr {
udp.send_command("/shutdown", &(), flight_addr)?;
// let cmd_data = CommandData::Shutdown;
// udp.send_postcard(&cmd_data, &mut buffer, flight_addr)?;
// let cmd_data = SetPin {
// pin: 4,
// value: true,
// valid_until: Utc::now(),
// priority: 120,
// };
// let cmd_data = postcard::to_allocvec(&cmd_data)?;
//
// let cmd = CommandHeader {
// name: "/shutdown",
// data: &cmd_data,
// };
//
// let encoded = postcard::to_allocvec(&cmd)?;
// println!("{}", hex::encode(&encoded));
//
// let decoded = postcard::from_bytes::<CommandHeader>(&encoded)?;
// println!("{decoded:?}");
//
// let (decoded, remainder) = postcard::take_from_bytes::<SetPin>(decoded.data)?;
// ensure!(remainder.is_empty(), "Not all command bytes consumed");
// println!("{decoded:?}");
// let mut another_buffer = Cursor::new([0u8; 512]);
// // ciborium::into_writer(&cmd, &mut another_buffer)?;
// let _ = Serializer::writer(&cmd, &mut another_buffer)?;
// let size_encoded = usize::try_from(another_buffer.position())?;
// let _ = test(&another_buffer.get_ref()[0..size_encoded])?;
}
udp_shutdown.cancel();
udp_thread.join().map_err(|e| anyhow!("{e:?}"))??;
Ok(())
}

142
ground/src/telemetry.rs Normal file
View File

@@ -0,0 +1,142 @@
use api::client::Client;
use api::client::telemetry::{TelemetryHandle, TelemetryRegistry};
use futures::TryFutureExt;
use futures::future::join_all;
use log::{error, trace};
use nautilus_common::on_drop::on_drop;
use nautilus_common::telemetry::{SwitchBank, Telemetry, TelemetryMessage};
use nautilus_common::udp::UdpRecvPostcardError;
use nautilus_common::udp::tokio::AsyncUdpSocketExt;
use std::net::SocketAddr;
use std::sync::Arc;
use tokio::net::UdpSocket;
use tokio::sync::{RwLock, RwLockWriteGuard};
use tokio::task::yield_now;
use tokio::{join, pin, select};
use tokio_util::sync::CancellationToken;
/// Receives postcard-encoded telemetry from the flight vehicle over UDP and
/// republishes the decoded values to the backend.
pub struct TelemetryHandler<'a> {
    /// Backend registry used to publish decoded telemetry values.
    tlm: TelemetryRegistry,
    /// Shared last-known flight address; updated from each received packet.
    flight_addr: &'a RwLock<SocketAddr>,
    /// Write guard on `flight_addr`, pre-acquired by the caller; consumed
    /// (taken and released) on the first received packet once a real
    /// address is known.
    lock: Option<RwLockWriteGuard<'a, SocketAddr>>,
    /// Shared socket telemetry is received on.
    udp: &'a UdpSocket,
    /// Cooperative shutdown signal for the run loop.
    cancel: CancellationToken,
}
impl<'a> TelemetryHandler<'a> {
    /// Create a telemetry handler.
    ///
    /// `lock` is a write guard on `flight_addr` acquired by the caller
    /// before any packet is received; holding it keeps readers blocked
    /// until the first packet provides a real flight address.
    pub fn new(
        client: Arc<Client>,
        flight_addr: &'a RwLock<SocketAddr>,
        lock: RwLockWriteGuard<'a, SocketAddr>,
        udp: &'a UdpSocket,
        cancel: CancellationToken,
    ) -> Self {
        Self {
            tlm: TelemetryRegistry::new(client),
            flight_addr,
            lock: Some(lock),
            udp,
            cancel,
        }
    }

    /// Run this telemetry handler.
    /// Note: this method is expected to block so should run in its own thread
    ///
    /// Receives `Telemetry` packets over UDP, records the sender as the
    /// current flight address, and publishes the decoded values to the
    /// backend until `cancel` fires. Cancellation is also triggered when
    /// this method exits for any reason (including panic) so siblings shut
    /// down with us.
    ///
    /// # Errors
    /// Returns an error only if the Tokio runtime fails to build; receive
    /// errors are logged and the loop continues.
    pub fn run(mut self) -> anyhow::Result<()> {
        let cancel = self.cancel.clone();
        // Trigger a shutdown if we exit the telemetry process for some reason (including panic)
        let _shutdown_when_closed = on_drop(move || cancel.cancel());
        let runtime = tokio::runtime::Builder::new_current_thread()
            .enable_all()
            .build()?;
        runtime.block_on(async {
            let mut context = TelemetryContext::new(&self).await;
            let mut buffer = [0u8; 512];
            while !self.cancel.is_cancelled() {
                select! {
                    // Empty arm on purpose: the `while` condition observes
                    // the cancellation on the next pass and exits the loop.
                    () = self.cancel.cancelled() => {},
                    incoming = self.udp.recv_postcard::<Telemetry>(&mut buffer) => {
                        match incoming {
                            Ok((tlm, addr)) => {
                                trace!("{tlm:?}");
                                let flight_addr_update = async {
                                    // The first time around we will use the lock given to us
                                    // Other times we will grab a new lock
                                    let mut lock = match self.lock.take() {
                                        None => self.flight_addr.write().await,
                                        Some(lock) => lock,
                                    };
                                    // Update the value in the lock; the guard
                                    // drops at the end of this async block.
                                    *lock = addr;
                                };
                                pin!(flight_addr_update);
                                // We can do these two operations concurrently
                                join!(
                                    flight_addr_update,
                                    context.step(tlm),
                                );
                            },
                            Err(UdpRecvPostcardError::NoData) => {
                                // This shouldn't be possible when using a tokio socket I don't think
                                // But let's just yield our time anyways
                                yield_now().await;
                            },
                            Err(err) => {
                                error!("Rx error: {err}");
                            },
                        }
                    }
                }
            }
            // Explicit Drop
            drop(self);
            Ok(())
        })
    }
}
/// Per-run publishing context: one backend `bool` telemetry handle per
/// switch, for each of the two 16-switch banks.
struct TelemetryContext {
    // Handles for `switch.bank_a.0` .. `switch.bank_a.15`.
    bank_a: Vec<TelemetryHandle<bool>>,
    // Handles for `switch.bank_b.0` .. `switch.bank_b.15`.
    bank_b: Vec<TelemetryHandle<bool>>,
}
impl TelemetryContext {
    /// Register a `bool` telemetry channel per switch for both banks
    /// (`switch.bank_a.{0..16}` and `switch.bank_b.{0..16}`).
    async fn new(tlm: &TelemetryHandler<'_>) -> Self {
        let bank_a =
            join_all((0..16).map(|i| tlm.tlm.register::<bool>(format!("switch.bank_a.{i}")))).await;
        let bank_b =
            join_all((0..16).map(|i| tlm.tlm.register::<bool>(format!("switch.bank_b.{i}")))).await;
        Self { bank_a, bank_b }
    }

    /// Publish one decoded telemetry message to the backend.
    ///
    /// For a `SwitchState` message, each switch value is published on the
    /// matching bank's handle, stamped with the packet timestamp. Publish
    /// failures are logged and not propagated.
    async fn step(&mut self, tlm: Telemetry) {
        match tlm.message {
            TelemetryMessage::SwitchState { bank, switches } => {
                let bank_handles = match bank {
                    SwitchBank::A => &self.bank_a,
                    SwitchBank::B => &self.bank_b,
                };
                // Guard the indexing below: panic (invariant violation)
                // rather than silently drop if a packet ever carries more
                // switches than we registered handles for.
                assert!(bank_handles.len() >= switches.iter().len());
                join_all(switches.into_iter().enumerate().map(|(i, state)| {
                    bank_handles[i]
                        .publish(state, tlm.timestamp)
                        .unwrap_or_else(move |err| {
                            // We don't need to bubble this error up, just report it
                            error!("Failed to publish telemetry for switch {bank} {i}: {err}");
                        })
                }))
                .await;
            }
        }
    }
}