fix overflow on disk sub

Author: ghe0
Date: 2025-03-07 01:46:04 +02:00
parent 8e8e1d1a99
commit 3cca48680b
Signed by: ghe0 (GPG Key ID: 451028EE56A0FBB4)
2 changed files with 70 additions and 18 deletions
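
The overflow in question is unsigned-subtraction underflow: when reservations on a volume exceed its configured maximum, an expression like `max_reservation_gb - reservation` panics in debug builds and wraps around to a huge value in release builds, making the node advertise absurd free capacity. A minimal standalone sketch of the failure mode and of the `saturating_sub` pattern this commit applies (variable names are illustrative, not from the codebase):

```rust
fn main() {
    let max_reservation_gb: usize = 10;
    let reserved_gb: usize = 12; // more reserved than the configured maximum

    // `max_reservation_gb - reserved_gb` would panic in debug builds and
    // wrap to an enormous value in release builds.
    let available = max_reservation_gb.saturating_sub(reserved_gb);
    assert_eq!(available, 0); // clamps at zero instead of underflowing
}
```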

@@ -34,9 +34,8 @@ impl VMHandler {
         let res = match state::Resources::load_from_disk() {
             Ok(res) => res,
             Err(e) => {
-                warn!("Could not load resources from disk: {e:?}");
-                info!("Creating new resource calculator.");
-                state::Resources::new(&config.volumes)
+                log::error!("Could not calculate resources: {e:?}");
+                std::process::exit(1);
             }
         };
         Self { receiver, sender, config, res }
@@ -61,14 +60,18 @@ impl VMHandler {
     async fn send_node_resources(&mut self) {
         let (avail_ipv4, avail_ipv6) = self.get_available_ips();
-        let mut avail_storage_gb = 0;
+        let mut total_gb_available = 0;
         for volume in self.config.volumes.iter() {
-            avail_storage_gb += volume.max_reservation_gb;
-            if let Some(reservation) = self.res.reserved_storage.get(&volume.path) {
-                avail_storage_gb -= reservation;
+            let reservation: usize = match self.res.reserved_storage.get(&volume.path) {
+                Some(reserved) => *reserved,
+                None => 0,
+            };
+            let volume_gb_available = volume.max_reservation_gb.saturating_sub(reservation);
+            if total_gb_available < volume_gb_available {
+                total_gb_available = volume_gb_available;
             }
         }
-        let avail_storage_gb = avail_storage_gb as u32;
+        let avail_storage_gb = total_gb_available as u32;
         let res = snp_proto::VmNodeResources {
             node_pubkey: PUBLIC_KEY.clone(),
             avail_ports: (self.config.public_port_range.len() - self.res.reserved_ports.len())
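
Note the semantic change above: advertised storage is now the largest free space on any single volume rather than the sum across volumes, since one VM disk must fit on one volume. A sketch of the same computation in iterator form, assuming the field names from the diff and a hypothetical `Volume` type:

```rust
use std::collections::HashMap;

struct Volume {
    path: String,
    max_reservation_gb: usize,
}

// Largest per-volume free capacity; returns 0 when there are no volumes.
fn max_available_gb(volumes: &[Volume], reserved: &HashMap<String, usize>) -> usize {
    volumes
        .iter()
        .map(|v| {
            let r = reserved.get(&v.path).copied().unwrap_or(0);
            v.max_reservation_gb.saturating_sub(r)
        })
        .max()
        .unwrap_or(0)
}
```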
@@ -237,10 +240,13 @@ async fn main() {
     env_logger::builder().filter_level(log::LevelFilter::Debug).init();
     loop {
-        // This upgrade procedure will get replaced in prod. We need this for the testnet.
-        if let Err(e) = download_and_replace_binary() {
-            log::error!("Failed to upgrade detee-snp-daemon to newer version: {e}");
-        }
+        if std::env::var("DAEMON_AUTO_UPGRADE") != Ok("OFF".to_string()) {
+            // This upgrade procedure will get replaced in prod. We need this for the testnet.
+            if let Err(e) = download_and_replace_binary() {
+                log::error!("Failed to upgrade detee-snp-daemon to newer version: {e}");
+            }
+        }
         let (brain_msg_tx, brain_msg_rx) = tokio::sync::mpsc::channel(6);
         let (daemon_msg_tx, daemon_msg_rx) = tokio::sync::mpsc::channel(6);
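
The guard reads the `DAEMON_AUTO_UPGRADE` environment variable directly; only the exact, case-sensitive value `OFF` skips the upgrade, while an unset variable or any other value keeps it on. A minimal sketch of the predicate (the helper name is ours, not from the codebase):

```rust
// Mirrors the guard in main(): anything other than the literal "OFF"
// (including an unset variable) leaves auto-upgrade enabled.
fn upgrades_enabled() -> bool {
    std::env::var("DAEMON_AUTO_UPGRADE") != Ok("OFF".to_string())
}

fn main() {
    println!("auto-upgrade enabled: {}", upgrades_enabled());
}
```

Operators would start the daemon with `DAEMON_AUTO_UPGRADE=OFF` in the environment to skip the testnet upgrade on launch.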

@@ -11,7 +11,7 @@ use std::{
     process::Command,
 };
-#[derive(Serialize, Deserialize, Debug)]
+#[derive(Serialize, Deserialize, Debug, Default)]
 pub struct Resources {
     pub existing_vms: HashSet<String>,
     // QEMU does not support MHz limitation
@@ -34,9 +34,49 @@ impl Resources {
     }
     pub fn load_from_disk() -> Result<Self> {
-        let content = std::fs::read_to_string(USED_RESOURCES)?;
-        let mut res: Self = serde_yaml::from_str(&content)?;
+        let mut res = Self::default();
+        log::debug!("Reading VMs saved to disk to calculate used resources...");
+        for entry in fs::read_dir(VM_CONFIG_DIR)? {
+            let entry = entry?;
+            let path = entry.path();
+            if path.is_file() && path.to_string_lossy().ends_with(".yaml") {
+                log::info!("Found VM config: {:?}", path.to_str());
+                let content = std::fs::read_to_string(path)?;
+                let vm: VM = serde_yaml::from_str(&content)?;
+                res.existing_vms.insert(vm.uuid);
+                res.reserved_vcpus = res.reserved_vcpus.saturating_add(vm.vcpus);
+                res.reserved_memory = res.reserved_memory.saturating_add(vm.memory_mb);
+                for (port, _) in vm.fw_ports.iter() {
+                    res.reserved_ports.insert(*port);
+                }
+                res.reserved_storage
+                    .entry(vm.storage_dir.clone())
+                    .and_modify(|gb| {
+                        *gb = gb.saturating_add(vm.disk_size_gb);
+                    })
+                    .or_insert(vm.disk_size_gb);
+                for nic in vm.nics {
+                    for ip in nic.ips {
+                        if let Ok(ip_address) = ip.address.parse::<std::net::IpAddr>() {
+                            if ip_address.is_ipv4() {
+                                res.reserved_ipv4.insert(ip.address.clone());
+                            }
+                            if ip_address.is_ipv6() {
+                                res.reserved_ipv6.insert(ip.address);
+                            }
+                        }
+                    }
+                    if let Some(vtap_name) = nic.if_config.vtap_name() {
+                        res.reserved_if_names.insert(vtap_name);
+                    }
+                }
+            }
+        }
         res.scan_boot_files().unwrap();
+        res.save_to_disk()?;
         Ok(res)
     }
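
With this rewrite, `load_from_disk` no longer trusts a previously serialized `USED_RESOURCES` snapshot; it recomputes every counter from the VM configs in `VM_CONFIG_DIR` and then persists the fresh state, so a stale or corrupt snapshot cannot poison the accounting. The per-volume accumulation uses the map entry API; a self-contained sketch of that pattern (names are illustrative):

```rust
use std::collections::HashMap;

// Add `gb` to the running total for `dir`, starting the entry at `gb`
// if the volume has not been seen yet; saturating_add guards against
// a corrupt config overflowing the counter.
fn add_reservation(reserved: &mut HashMap<String, usize>, dir: &str, gb: usize) {
    reserved
        .entry(dir.to_string())
        .and_modify(|total| *total = total.saturating_add(gb))
        .or_insert(gb);
}

fn main() {
    let mut reserved = HashMap::new();
    add_reservation(&mut reserved, "/var/storage", 20);
    add_reservation(&mut reserved, "/var/storage", 15);
    assert_eq!(reserved["/var/storage"], 35);
}
```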
@@ -69,7 +109,7 @@ impl Resources {
         let mut volumes = config.volumes.clone();
         for volume in volumes.iter_mut() {
             if let Some(reservation) = self.reserved_storage.get(&volume.path) {
-                volume.max_reservation_gb -= reservation;
+                volume.max_reservation_gb = volume.max_reservation_gb.saturating_sub(*reservation);
             }
         }
         volumes.sort_by_key(|v| v.max_reservation_gb);
@@ -249,7 +289,9 @@ impl Resources {
     }
     fn free_vm_resources(&mut self, vm: &VM) {
-        self.existing_vms.remove(&vm.uuid);
+        if !self.existing_vms.remove(&vm.uuid) {
+            return;
+        }
         self.reserved_vcpus = self.reserved_vcpus.saturating_sub(vm.vcpus);
         self.reserved_memory = self.reserved_memory.saturating_sub(vm.memory_mb);
         for nic in vm.nics.iter() {
@@ -270,8 +312,12 @@ impl Resources {
         for (host_port, _) in vm.fw_ports.iter() {
             self.reserved_ports.remove(host_port);
         }
-        self.reserved_storage.entry(vm.storage_dir.clone()).and_modify(|gb| *gb -= vm.disk_size_gb);
-        let _ = self.save_to_disk();
+        self.reserved_storage
+            .entry(vm.storage_dir.clone())
+            .and_modify(|gb| *gb = gb.saturating_sub(vm.disk_size_gb));
+        if let Err(e) = self.save_to_disk() {
+            log::error!("Could not save resources to disk: {e}");
+        }
     }
 }
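
`HashSet::remove` returns `true` only when the value was present, so the negated guard in `free_vm_resources` turns a repeated call for the same VM into a no-op instead of decrementing the counters twice. A minimal sketch of that idempotent-free pattern, with illustrative names:

```rust
use std::collections::HashSet;

struct Tracker {
    existing: HashSet<String>,
    reserved_gb: usize,
}

impl Tracker {
    // Release a VM's reservation exactly once; later calls change nothing.
    fn free(&mut self, uuid: &str, gb: usize) {
        if !self.existing.remove(uuid) {
            return; // unknown or already freed: nothing to release
        }
        self.reserved_gb = self.reserved_gb.saturating_sub(gb);
    }
}

fn main() {
    let mut t = Tracker {
        existing: HashSet::from(["vm-1".to_string()]),
        reserved_gb: 10,
    };
    t.free("vm-1", 10);
    t.free("vm-1", 10); // second call is a no-op
    assert_eq!(t.reserved_gb, 0);
}
```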