switching from LP to credits

ghe0 2025-06-21 23:55:06 +03:00
parent 44e560fcc0
commit bf18def461
Signed by: ghe0
GPG Key ID: 451028EE56A0FBB4
6 changed files with 202 additions and 75 deletions

Cargo.lock (generated)

@@ -396,7 +396,7 @@ dependencies = [
 [[package]]
 name = "detee-shared"
 version = "0.1.0"
-source = "git+ssh://git@gitea.detee.cloud/testnet/proto?branch=surreal_brain#d6ca058d2de78b5257517034bca2b2c7d5929db8"
+source = "git+ssh://git@gitea.detee.cloud/testnet/proto?branch=credits#6e25437c9a2bb6fd92304bb9f983843ca02165c2"
 dependencies = [
  "bincode",
  "prost",

@@ -27,7 +27,7 @@ bs58 = "0.5.1"
 chrono = "0.4.39"
 # TODO: switch this back to main after the upgrade
-detee-shared = { git = "ssh://git@gitea.detee.cloud/testnet/proto", branch = "surreal_brain" }
+detee-shared = { git = "ssh://git@gitea.detee.cloud/testnet/proto", branch = "credits" }
 # detee-shared = { path = "../detee-shared" }

 [build-dependencies]

@@ -10,7 +10,7 @@ use std::{
 #[derive(Deserialize, Debug, Clone)]
 pub struct Volume {
     pub path: String,
-    pub max_reservation_gb: usize,
+    pub max_reservation_mib: usize,
 }

 #[derive(Deserialize, Debug)]
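Since Volume is deserialized straight from the node config, operators now declare capacity in MiB rather than GB. A quick sketch of the impact (path and size hypothetical):

    // before the rename: max_reservation_gb: 500
    // after: a 500 GiB volume is declared as 500 * 1024 MiB
    let volume = Volume { path: "/var/lib/vms".to_string(), max_reservation_mib: 512_000 };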
@@ -50,14 +50,14 @@ pub struct Config {
     pub network: String,
     pub max_cores_per_vm: usize,
     pub max_vcpu_reservation: usize,
-    pub max_mem_reservation_mb: usize,
+    pub max_mem_reservation_mib: usize,
     pub network_interfaces: Vec<Interface>,
     pub volumes: Vec<Volume>,
     #[serde(with = "range_format")]
     pub public_port_range: Range<u16>,
     pub max_ports_per_vm: u16,
-    // price per unit per minute
-    pub price: u64,
+    // price per slot per month
+    pub price: f64,
 }

 mod range_format {
@@ -131,6 +131,11 @@ impl Config {
         }
         Ok(config)
     }
+
+    pub fn nanoprice_u64(&self) -> u64 {
+        (self.price * 1_000_000_000_f64) as u64
+    }
 }

 fn calc_ipv4_netmask(ip: Ipv4Addr, gateway: Ipv4Addr) -> u32 {
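The config now keeps the operator-facing price as a small fractional number, and nanoprice_u64 converts it to integer nano-credits before it is compared or sent anywhere. A worked example (price value hypothetical):

    // 0.25 credits per slot per month -> 250_000_000 nano-credits
    assert_eq!((0.25_f64 * 1_000_000_000_f64) as u64, 250_000_000);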

@@ -50,7 +50,7 @@ pub async fn register_node(config: &crate::config::Config) -> Result<Vec<DeleteV
         country: ip_info.country,
         region: ip_info.region,
         city: ip_info.city,
-        price: config.price,
+        price: config.nanoprice_u64(),
     };
     let pubkey = PUBLIC_KEY.clone();

@@ -42,6 +42,39 @@ impl VMHandler {
         Self { receiver, sender, config, res }
     }

+    // returns storage available per VM and total storage available
+    fn storage_available(&self) -> (usize, usize) {
+        let mut total_storage_available = 0_usize;
+        let mut avail_storage_mib = 0_usize;
+        for volume in self.config.volumes.iter() {
+            let reservation: usize = match self.res.reserved_storage.get(&volume.path) {
+                Some(reserved) => *reserved,
+                None => 0 as usize,
+            };
+            let volume_mib_available = volume.max_reservation_mib.saturating_sub(reservation);
+            total_storage_available += volume_mib_available;
+            if avail_storage_mib < volume_mib_available {
+                avail_storage_mib = volume_mib_available;
+            }
+        }
+        (avail_storage_mib, total_storage_available)
+    }
+
+    /// returns the Memory per vCPU and Disk per vCPU ratios
+    fn slot_ratios(&self) -> (usize, usize) {
+        let (_, total_storage_mib) = self.storage_available();
+        let available_cpus: usize =
+            self.config.max_vcpu_reservation.saturating_sub(self.res.reserved_vcpus);
+        let available_mem: usize =
+            self.config.max_mem_reservation_mib.saturating_sub(self.res.reserved_memory_mib);
+        let memory_per_cpu = available_mem / available_cpus;
+        let disk_per_cpu = total_storage_mib / available_cpus;
+        (memory_per_cpu, disk_per_cpu)
+    }
+
     fn get_available_ips(&self) -> (u32, u32) {
         let mut avail_ipv4 = 0;
         let mut avail_ipv6 = 0;
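A "slot" here is one vCPU plus a proportional share of the node's free memory and disk, so slot_ratios simply divides what is left. A toy illustration (numbers hypothetical):

    // 8 vCPUs, 16_000 MiB of memory and 200_000 MiB of disk unreserved:
    let memory_per_cpu = 16_000 / 8; // 2_000 MiB of memory per slot
    let disk_per_cpu = 200_000 / 8;  // 25_000 MiB of disk per slot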
@@ -61,35 +94,70 @@
     async fn send_node_resources(&mut self) {
         let (avail_ipv4, avail_ipv6) = self.get_available_ips();
-        let mut total_gb_available = 0;
-        for volume in self.config.volumes.iter() {
-            let reservation: usize = match self.res.reserved_storage.get(&volume.path) {
-                Some(reserved) => *reserved,
-                None => 0 as usize,
-            };
-            let volume_gb_available = volume.max_reservation_gb - reservation;
-            if total_gb_available < volume_gb_available {
-                total_gb_available = volume_gb_available;
-            }
-        }
-        let avail_storage_gb = total_gb_available as u32;
+        let (avail_storage_mib, total_storage_available) = self.storage_available();
+        let avail_vcpus = self.config.max_vcpu_reservation.saturating_sub(self.res.reserved_vcpus);
+        let avail_memory_mib =
+            self.config.max_mem_reservation_mib.saturating_sub(self.res.reserved_memory_mib);
+
+        // If storage is separated into multiple volumes, that limits the maximum VM size.
+        // Due to this, we have to limit the maximum amount of vCPUs and Memory per VM, based on
+        // the maximum possible disk size per VM.
+        let avail_vcpus = avail_vcpus * avail_storage_mib / total_storage_available;
+        let avail_memory_mib = avail_memory_mib * avail_storage_mib / total_storage_available;
+
         let res = snp_proto::VmNodeResources {
             node_pubkey: PUBLIC_KEY.clone(),
             avail_ports: (self.config.public_port_range.len() - self.res.reserved_ports.len())
                 as u32,
             avail_ipv4,
             avail_ipv6,
-            avail_vcpus: (self.config.max_vcpu_reservation - self.res.reserved_vcpus) as u32,
-            avail_memory_mb: (self.config.max_mem_reservation_mb - self.res.reserved_memory) as u32,
-            avail_storage_gb,
+            avail_vcpus: avail_vcpus as u32,
+            avail_memory_mib: avail_memory_mib as u32,
+            avail_storage_mib: avail_storage_mib as u32,
             max_ports_per_vm: self.config.max_ports_per_vm as u32,
         };
         debug!("sending node resources on brain: {res:?}");
         let _ = self.sender.send(res.into()).await;
     }

+    fn balance_new_vm_req(&self, new_vm_req: snp_proto::NewVmReq) -> snp_proto::NewVmReq {
+        let (memory_per_cpu, disk_per_cpu) = self.slot_ratios();
+        let mut vcpus = new_vm_req.vcpus;
+        if vcpus < (new_vm_req.memory_mib).div_ceil(memory_per_cpu as u32) {
+            vcpus = (new_vm_req.memory_mib).div_ceil(memory_per_cpu as u32);
+        }
+        if vcpus < (new_vm_req.disk_mib).div_ceil(disk_per_cpu as u32) {
+            vcpus = (new_vm_req.disk_mib).div_ceil(disk_per_cpu as u32);
+        }
+        let memory_mib = vcpus * memory_per_cpu as u32;
+        let disk_mib = vcpus * disk_per_cpu as u32;
+        snp_proto::NewVmReq {
+            uuid: new_vm_req.uuid,
+            hostname: new_vm_req.hostname,
+            admin_pubkey: new_vm_req.admin_pubkey,
+            node_pubkey: new_vm_req.node_pubkey,
+            extra_ports: new_vm_req.extra_ports,
+            public_ipv4: new_vm_req.public_ipv4,
+            public_ipv6: new_vm_req.public_ipv6,
+            vcpus,
+            memory_mib,
+            disk_mib,
+            kernel_url: new_vm_req.kernel_url,
+            kernel_sha: new_vm_req.kernel_sha,
+            dtrfs_url: new_vm_req.dtrfs_url,
+            dtrfs_sha: new_vm_req.dtrfs_sha,
+            price_per_slot: new_vm_req.price_per_slot,
+            locked_nano: new_vm_req.locked_nano,
+        }
+    }
+
     async fn handle_new_vm_req(&mut self, new_vm_req: snp_proto::NewVmReq) {
         debug!("Processing new vm request: {new_vm_req:?}");
+        let new_vm_req = self.balance_new_vm_req(new_vm_req);
+        debug!("New VM Request balanced to: {new_vm_req:?}");
         let uuid = new_vm_req.uuid.clone();
         match state::VM::new(new_vm_req.into(), &self.config, &mut self.res) {
             Ok(vm) => match vm.start() {
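balance_new_vm_req rounds a request up to whole slots: vcpus is raised until the per-slot memory and disk ratios cover what was asked for, then memory and disk are pinned to vcpus times the ratio. Continuing the hypothetical ratios from above:

    // memory_per_cpu = 2_000 MiB, disk_per_cpu = 25_000 MiB
    // request: 1 vCPU, 6_000 MiB of memory, 10_000 MiB of disk
    let vcpus = 1_u32
        .max(6_000_u32.div_ceil(2_000))    // memory needs 3 slots
        .max(10_000_u32.div_ceil(25_000)); // disk needs 1 slot
    assert_eq!(vcpus, 3);
    assert_eq!(vcpus * 2_000, 6_000);   // memory_mib stays at 6_000
    assert_eq!(vcpus * 25_000, 75_000); // disk_mib grows to 75_000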
@@ -142,8 +210,38 @@
         }
     }

+    fn balance_update_vm_req(&self, new_vm_req: snp_proto::UpdateVmReq) -> snp_proto::UpdateVmReq {
+        let (memory_per_cpu, disk_per_cpu) = self.slot_ratios();
+        let mut vcpus = new_vm_req.vcpus;
+        if vcpus < (new_vm_req.memory_mib).div_ceil(memory_per_cpu as u32) {
+            vcpus = (new_vm_req.memory_mib).div_ceil(memory_per_cpu as u32);
+        }
+        if vcpus < (new_vm_req.disk_mib).div_ceil(disk_per_cpu as u32) {
+            vcpus = (new_vm_req.disk_mib).div_ceil(disk_per_cpu as u32);
+        }
+        let memory_mib = vcpus * memory_per_cpu as u32;
+        let disk_mib = vcpus * disk_per_cpu as u32;
+        snp_proto::UpdateVmReq {
+            uuid: new_vm_req.uuid,
+            hostname: new_vm_req.hostname,
+            admin_pubkey: new_vm_req.admin_pubkey,
+            vcpus,
+            memory_mib,
+            disk_mib,
+            kernel_url: new_vm_req.kernel_url,
+            kernel_sha: new_vm_req.kernel_sha,
+            dtrfs_url: new_vm_req.dtrfs_url,
+            dtrfs_sha: new_vm_req.dtrfs_sha,
+        }
+    }
+
     async fn handle_update_vm_req(&mut self, update_vm_req: snp_proto::UpdateVmReq) -> Result<()> {
-        debug!("Processing update vm request: {update_vm_req:?}");
+        debug!("Processing Update VM Request: {update_vm_req:?}");
+        let update_vm_req = self.balance_update_vm_req(update_vm_req);
+        debug!("Update VM Request balanced to: {update_vm_req:?}");
         let vm_id = update_vm_req.uuid.clone();
         let content = std::fs::read_to_string(VM_CONFIG_DIR.to_string() + &vm_id + ".yaml")?;
         let mut vm: state::VM = serde_yaml::from_str(&content)?;

@@ -8,8 +8,7 @@ use log::info;
 use serde::{Deserialize, Serialize};
 use std::{
     collections::{HashMap, HashSet},
-    fs,
-    fs::{remove_file, File},
+    fs::{self, remove_file, File},
     io::Write,
     process::Command,
 };
@@ -19,7 +18,7 @@ pub struct Resources {
     pub existing_vms: HashSet<String>,
     // QEMU does not support MHz limitation
     pub reserved_vcpus: usize,
-    pub reserved_memory: usize,
+    pub reserved_memory_mib: usize,
     pub reserved_ports: HashSet<u16>,
     pub reserved_storage: HashMap<String, usize>,
     pub reserved_ipv4: HashSet<String>,
@@ -49,16 +48,16 @@ impl Resources {
             let vm: VM = serde_yaml::from_str(&content)?;
             res.existing_vms.insert(vm.uuid);
             res.reserved_vcpus = res.reserved_vcpus.saturating_add(vm.vcpus);
-            res.reserved_memory = res.reserved_memory.saturating_add(vm.memory_mb);
+            res.reserved_memory_mib = res.reserved_memory_mib.saturating_add(vm.memory_mib);
             for (port, _) in vm.fw_ports.iter() {
                 res.reserved_ports.insert(*port);
             }
             res.reserved_storage
                 .entry(vm.storage_dir.clone())
                 .and_modify(|gb| {
-                    *gb = gb.saturating_add(vm.disk_size_gb);
+                    *gb = gb.saturating_add(vm.disk_mib);
                 })
-                .or_insert(vm.disk_size_gb);
+                .or_insert(vm.disk_mib);
             for nic in vm.nics {
                 for ip in nic.ips {
                     if let Ok(ip_address) = ip.address.parse::<std::net::IpAddr>() {
@@ -89,13 +88,13 @@ impl Resources {
             storage_pools.push(StoragePool {
                 path: config_vol.path.clone(),
                 // TODO: check if the storage is actually available at that path
-                available_gb: config_vol.max_reservation_gb,
+                available_gb: config_vol.max_reservation_mib,
             });
         }
         let mut res = Resources {
             existing_vms: HashSet::new(),
             reserved_vcpus: 0,
-            reserved_memory: 0,
+            reserved_memory_mib: 0,
             reserved_ports: HashSet::new(),
             reserved_storage: HashMap::new(),
             reserved_ipv4: HashSet::new(),
@@ -112,12 +111,13 @@ impl Resources {
         let mut volumes = config.volumes.clone();
         for volume in volumes.iter_mut() {
             if let Some(reservation) = self.reserved_storage.get(&volume.path) {
-                volume.max_reservation_gb = volume.max_reservation_gb.saturating_sub(*reservation);
+                volume.max_reservation_mib =
+                    volume.max_reservation_mib.saturating_sub(*reservation);
             }
         }
-        volumes.sort_by_key(|v| v.max_reservation_gb);
+        volumes.sort_by_key(|v| v.max_reservation_mib);
         if let Some(biggest_volume) = volumes.last() {
-            if biggest_volume.max_reservation_gb >= required_gb {
+            if biggest_volume.max_reservation_mib >= required_gb {
                 return Some(biggest_volume.path.clone());
             }
         }
@@ -264,7 +264,7 @@
     fn reserve_vm_resources(&mut self, vm: &VM) {
         self.existing_vms.insert(vm.uuid.clone());
         self.reserved_vcpus += vm.vcpus;
-        self.reserved_memory += vm.memory_mb;
+        self.reserved_memory_mib += vm.memory_mib;
         for nic in vm.nics.iter() {
             if let Some(vtap) = nic.if_config.vtap_name() {
                 self.reserved_if_names.insert(vtap);
@@ -286,8 +286,8 @@
         self.reserved_storage
             .entry(vm.storage_dir.clone())
-            .and_modify(|gb| *gb = gb.saturating_add(vm.disk_size_gb))
-            .or_insert(vm.disk_size_gb);
+            .and_modify(|gb| *gb = gb.saturating_add(vm.disk_mib))
+            .or_insert(vm.disk_mib);
         let _ = self.save_to_disk();
     }
@@ -296,7 +296,7 @@
             return;
         }
         self.reserved_vcpus = self.reserved_vcpus.saturating_sub(vm.vcpus);
-        self.reserved_memory = self.reserved_memory.saturating_sub(vm.memory_mb);
+        self.reserved_memory_mib = self.reserved_memory_mib.saturating_sub(vm.memory_mib);
         for nic in vm.nics.iter() {
             if let Some(vtap) = nic.if_config.vtap_name() {
                 self.reserved_if_names.remove(&vtap);
@@ -317,7 +317,7 @@
         }
         self.reserved_storage
             .entry(vm.storage_dir.clone())
-            .and_modify(|gb| *gb = gb.saturating_sub(vm.disk_size_gb));
+            .and_modify(|gb| *gb = gb.saturating_sub(vm.disk_mib));
         if let Err(e) = self.save_to_disk() {
             log::error!("Could not save resources to disk: {e}");
         }
@@ -400,8 +400,8 @@ pub struct VM {
     // currently hardcoded to EPYC-v4
     // cpu_type: String,
     vcpus: usize,
-    memory_mb: usize,
-    disk_size_gb: usize,
+    memory_mib: usize,
+    disk_mib: usize,
     kernel_sha: String,
     dtrfs_sha: String,
     storage_dir: String,
@@ -447,14 +447,28 @@ impl From<VM> for snp_proto::MeasurementArgs {
 impl From<VM> for snp_proto::NewVmResp {
     fn from(vm: VM) -> Self {
         let uuid = vm.uuid.clone();
-        snp_proto::NewVmResp { uuid, args: Some(vm.into()), error: "".to_string() }
+        snp_proto::NewVmResp {
+            uuid,
+            vcpus: vm.vcpus as u32,
+            memory_mib: vm.memory_mib as u32,
+            disk_mib: vm.disk_mib as u32,
+            args: Some(vm.into()),
+            error: "".to_string(),
+        }
     }
 }

 impl From<VM> for snp_proto::UpdateVmResp {
     fn from(vm: VM) -> Self {
         let uuid = vm.uuid.clone();
-        snp_proto::UpdateVmResp { uuid, args: Some(vm.into()), error: "".to_string() }
+        snp_proto::UpdateVmResp {
+            uuid,
+            vcpus: vm.vcpus as u32,
+            memory_mib: vm.memory_mib as u32,
+            disk_mib: vm.disk_mib as u32,
+            args: Some(vm.into()),
+            error: "".to_string(),
+        }
     }
 }
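Both From impls now echo the final vcpus, memory_mib and disk_mib alongside the measurement args, presumably so the caller sees the slot-aligned sizes the node actually provisioned rather than the values originally requested.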
@@ -465,14 +479,14 @@ pub struct NewVMRequest {
     extra_ports: Vec<u16>,
     public_ipv4: bool,
     public_ipv6: bool,
-    disk_size_gb: usize,
+    disk_size_mib: usize,
     vcpus: usize,
-    memory_mb: usize,
+    memory_mib: usize,
     kernel_url: String,
     kernel_sha: String,
     dtrfs_url: String,
     dtrfs_sha: String,
-    price: u64,
+    price_per_slot: u64,
 }

 impl From<snp_proto::NewVmReq> for NewVMRequest {
@@ -483,14 +497,14 @@ impl From<snp_proto::NewVmReq> for NewVMRequest {
             extra_ports: req.extra_ports.iter().map(|&port| port as u16).collect(),
             public_ipv4: req.public_ipv4,
             public_ipv6: req.public_ipv6,
-            disk_size_gb: req.disk_size_gb as usize,
+            disk_size_mib: req.disk_mib as usize,
             vcpus: req.vcpus as usize,
-            memory_mb: req.memory_mb as usize,
+            memory_mib: req.memory_mib as usize * 1000,
             kernel_url: req.kernel_url,
             kernel_sha: req.kernel_sha,
             dtrfs_url: req.dtrfs_url,
             dtrfs_sha: req.dtrfs_sha,
-            price: req.price_per_unit,
+            price_per_slot: req.price_per_slot,
         }
     }
 }
@@ -499,8 +513,8 @@ impl From<snp_proto::NewVmReq> for NewVMRequest {
 pub struct UpdateVMReq {
     pub uuid: String,
     vcpus: usize,
-    memory_mb: usize,
-    disk_size_gb: usize,
+    memory_mib: usize,
+    disk_size_mib: usize,
     // we are not using Option<String>, as these will be passed from gRPC
     kernel_url: String,
     kernel_sha: String,
@@ -513,8 +527,8 @@ impl From<snp_proto::UpdateVmReq> for UpdateVMReq {
         Self {
             uuid: req.uuid,
             vcpus: req.vcpus as usize,
-            memory_mb: req.memory_mb as usize,
-            disk_size_gb: req.disk_size_gb as usize,
+            memory_mib: req.memory_mib as usize,
+            disk_size_mib: req.disk_mib as usize,
             kernel_url: req.kernel_url,
             kernel_sha: req.kernel_sha,
             dtrfs_url: req.dtrfs_url,
@@ -547,7 +561,7 @@ impl VM {
         config: &Config,
         res: &mut Resources,
     ) -> Result<Self, VMCreationErrors> {
-        if req.price < config.price {
+        if req.price_per_slot < config.nanoprice_u64() {
             return Err(VMCreationErrors::PriceIsTooLow);
         }
         if res.existing_vms.contains(&req.uuid) {
@@ -566,10 +580,13 @@ impl VM {
         if config.max_vcpu_reservation < res.reserved_vcpus.saturating_add(req.vcpus) {
             return Err(VMCreationErrors::NotEnoughCPU);
         }
-        if config.max_mem_reservation_mb < res.reserved_memory.saturating_add(req.memory_mb) {
+        if config.max_mem_reservation_mib
+            < ((res.reserved_memory_mib.saturating_add(req.memory_mib) as f64 / 1000_f64).ceil()
+                as usize)
+        {
             return Err(VMCreationErrors::NotEnoughMemory);
         }
-        if req.disk_size_gb < 4 {
+        if req.disk_size_mib < 4 {
             return Err(VMCreationErrors::DiskTooSmall);
         }
@@ -632,7 +649,7 @@ impl VM {
             }
         }
-        let storage_pool_path = match res.available_storage_pool(req.disk_size_gb, config) {
+        let storage_pool_path = match res.available_storage_pool(req.disk_size_mib, config) {
             Some(path) => path,
             None => return Err(VMCreationErrors::NotEnoughStorage),
         };
@@ -642,8 +659,8 @@
             admin_key: req.admin_key,
             nics: vm_nics,
             vcpus: req.vcpus,
-            memory_mb: req.memory_mb,
-            disk_size_gb: req.disk_size_gb,
+            memory_mib: req.memory_mib,
+            disk_mib: req.disk_size_mib,
             kernel_sha: req.kernel_sha,
             dtrfs_sha: req.dtrfs_sha,
             fw_ports: port_pairs,
@@ -672,13 +689,20 @@ impl VM {
         {
             return Err(VMCreationErrors::NotEnoughCPU);
         }
-        if req.memory_mb > 0
-            && config.max_mem_reservation_mb
-                < res.reserved_memory.saturating_sub(self.memory_mb).saturating_add(req.memory_mb)
+
+        if req.memory_mib > 0
+            && config.max_mem_reservation_mib
+                < ((res
+                    .reserved_memory_mib
+                    .saturating_add(req.memory_mib)
+                    .saturating_add(req.memory_mib) as f64
+                    / 1000_f64)
+                    .ceil() as usize)
         {
             return Err(VMCreationErrors::NotEnoughMemory);
         }
-        if req.disk_size_gb > 0 && req.disk_size_gb < self.disk_size_gb {
+
+        if req.disk_size_mib > 0 && req.disk_size_mib < self.disk_mib {
             return Err(VMCreationErrors::DiskTooSmall);
         }
@@ -710,24 +734,24 @@ impl VM {
         }

         // Update the resources
-        res.reserved_memory = res.reserved_memory.saturating_add(req.memory_mb);
-        res.reserved_memory = res.reserved_memory.saturating_sub(self.memory_mb);
+        res.reserved_memory_mib = res.reserved_memory_mib.saturating_add(req.memory_mib);
+        res.reserved_memory_mib = res.reserved_memory_mib.saturating_sub(self.memory_mib);
         res.reserved_vcpus = res.reserved_vcpus.saturating_add(req.vcpus);
         res.reserved_vcpus = res.reserved_vcpus.saturating_sub(self.vcpus);
         res.reserved_storage.entry(self.storage_dir.clone()).and_modify(|gb| {
-            *gb = gb.saturating_add(req.disk_size_gb);
-            *gb = gb.saturating_sub(self.disk_size_gb);
+            *gb = gb.saturating_add(req.disk_size_mib);
+            *gb = gb.saturating_sub(self.disk_mib);
         });
         let _ = res.save_to_disk();

-        if req.memory_mb != 0 {
-            self.memory_mb = req.memory_mb;
+        if req.memory_mib != 0 {
+            self.memory_mib = req.memory_mib;
         }
         if req.vcpus != 0 {
             self.vcpus = req.vcpus;
         }
-        if req.disk_size_gb != 0 {
-            self.disk_size_gb = req.disk_size_gb;
+        if req.disk_size_mib != 0 {
+            self.disk_mib = req.disk_size_mib;
         }

         if let Err(e) = systemctl_stop_and_disable(&self.uuid) {
@@ -854,9 +878,9 @@ impl VM {
         vars += "\n";
         vars += &format!(r#"export VCPUS="{}""#, self.vcpus);
         vars += "\n";
-        vars += &format!(r#"export MEMORY="{}M""#, (self.memory_mb / 2 * 2));
+        vars += &format!(r#"export MEMORY="{}M""#, (self.memory_mib / 2 * 2));
         vars += "\n";
-        vars += &format!(r#"export MAX_MEMORY="{}M""#, (self.memory_mb / 2 * 2) + 256);
+        vars += &format!(r#"export MAX_MEMORY="{}M""#, (self.memory_mib / 2 * 2) + 256);
         vars += "\n";
         vars += &format!(r#"export DISK="{}""#, self.disk_path());
         vars += "\n";
@@ -925,7 +949,7 @@ impl VM {
             .arg("-f")
             .arg("qcow2")
             .arg(self.disk_path())
-            .arg(self.disk_size_gb.to_string() + "G")
+            .arg(self.disk_mib.to_string() + "M")
             .output()?;
         if !result.status.success() {
             return Err(anyhow!(
@@ -943,7 +967,7 @@ impl VM {
         let result = Command::new("qemu-img")
             .arg("resize")
             .arg(self.disk_path())
-            .arg(self.disk_size_gb.to_string() + "G")
+            .arg(self.disk_mib.to_string() + "M")
             .output()?;
         if !result.status.success() {
             return Err(anyhow!(
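For reference, qemu-img parses the M suffix as mebibytes, so a 20480 MiB image would be created as roughly "qemu-img create -f qcow2 <path> 20480M" (path hypothetical). Note that qemu-img resize only grows an image unless --shrink is passed, which lines up with the DiskTooSmall guard on update requests above.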