Compare commits


4 Commits

SHA1        Message                                    Date
bdf5bf947e  switching from LP to credits               2025-07-02 03:42:04 +03:00
44e560fcc0  switch to new staging brain                2025-06-20 02:29:05 +03:00
d6a8f14124  Improves brain connection reliability      2025-06-20 02:29:05 +03:00
            Updates the brain connection logic to randomly select from a list
            of available URLs for staging and testnet environments.
217ab03164  new brain proto and new staging address    2025-06-20 02:29:01 +03:00
7 changed files with 185 additions and 114 deletions

Cargo.lock (generated): 5 lines changed

@@ -1,7 +1,6 @@
# SPDX-License-Identifier: Apache-2.0
# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
# SPDX-License-Identifier: Apache-2.0
version = 4
[[package]]
@@ -397,7 +396,7 @@ dependencies = [
[[package]]
name = "detee-shared"
version = "0.1.0"
source = "git+ssh://git@gitea.detee.cloud/testnet/proto?branch=main#b5289f1f5ba3ddae2ee066d6deb073ce92436b71"
source = "git+ssh://git@gitea.detee.cloud/testnet/proto?branch=credits-v2#f344c171c5a8d7ae8cad1628396e6b3a1af0f1ba"
dependencies = [
"bincode",
"prost",

@@ -26,7 +26,8 @@ serde_json = "1.0.135"
bs58 = "0.5.1"
chrono = "0.4.39"
detee-shared = { git = "ssh://git@gitea.detee.cloud/testnet/proto", branch = "main" }
# TODO: switch this back to main after the upgrade
detee-shared = { git = "ssh://git@gitea.detee.cloud/testnet/proto", branch = "credits-v2" }
# detee-shared = { path = "../detee-shared" }
[build-dependencies]
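Since credits-v2 is a mutable branch, the dependency above can drift until the TODO is resolved; a safer sketch is to pin the exact commit that the Cargo.lock hunk above already resolved (rev copied from that hunk):

# Sketch: pin the resolved commit instead of the mutable branch
detee-shared = { git = "ssh://git@gitea.detee.cloud/testnet/proto", rev = "f344c171c5a8d7ae8cad1628396e6b3a1af0f1ba" }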

@@ -10,7 +10,7 @@ use std::{
#[derive(Deserialize, Debug, Clone)]
pub struct Volume {
pub path: String,
pub max_reservation_gb: usize,
pub max_reservation_mib: usize,
}
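With max_reservation_gb renamed to max_reservation_mib, existing volume entries written in GB have to be rescaled. A minimal sketch of the new expectation, assuming the config is YAML like the daemon's other state files (the path below is illustrative):

// Sketch: a volume that used to say 100 (GB) must now say 102_400 (MiB)
let yaml = "path: /var/lib/detee/storage\nmax_reservation_mib: 102400";
let vol: Volume = serde_yaml::from_str(yaml).unwrap();
assert_eq!(vol.max_reservation_mib, 100 * 1024);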
#[derive(Deserialize, Debug)]
@@ -48,9 +48,8 @@ pub enum InterfaceType {
pub struct Config {
pub owner_wallet: String,
pub network: String,
pub max_cores_per_vm: usize,
pub max_vcpu_reservation: usize,
pub max_mem_reservation_mb: usize,
pub max_mem_reservation_mib: usize,
pub network_interfaces: Vec<Interface>,
pub volumes: Vec<Volume>,
#[serde(with = "range_format")]

@@ -4,12 +4,21 @@ use anyhow::Result;
use ed25519_dalek::SigningKey;
use lazy_static::lazy_static;
use log::{info, warn};
use rand::Rng;
use sha2::{Digest, Sha256};
use std::{fs::File, io::Read, io::Write};
use std::{
fs::File,
io::{Read, Write},
};
pub(crate) const DETEE_ROOT_CA: &str = "/etc/detee/root_ca.pem";
pub(crate) const BRAIN_STAGING: (&str, &str) = ("https://159.65.58.38:31337", "staging-brain");
pub(crate) const BRAIN_TESTING: (&str, &str) = ("https://164.92.249.180:31337", "testnet-brain");
pub(crate) const BRAIN_STAGING_URLS: [&str; 3] = [
"https://156.146.63.216:31337",
"https://156.146.63.216:31337",
"https://156.146.63.216:31337",
];
pub(crate) const BRAIN_TESTING_URLS: [&str; 3] =
["https://156.146.63.218:31337", "https://173.234.17.2:8050", "https://156.146.63.218:31337"];
pub(crate) const VM_BOOT_DIR: &str = "/var/lib/detee/boot/";
pub(crate) const USED_RESOURCES: &str = "/etc/detee/daemon/used_resources.yaml";
pub(crate) const VM_CONFIG_DIR: &str = "/etc/detee/daemon/vms/";
@@ -24,6 +33,14 @@ pub(crate) const OVMF_HASH: &str =
pub(crate) const OVMF_URL: &str =
"https://drive.google.com/uc?export=download&id=1V-vLkaiLaGmFSjrN84Z6nELQOxKNAoSJ";
pub fn brain_staging() -> (&'static str, &'static str) {
(BRAIN_STAGING_URLS[rand::thread_rng().gen_range(0..BRAIN_STAGING_URLS.len())], "staging-brain")
}
pub fn brain_testing() -> (&'static str, &'static str) {
(BRAIN_TESTING_URLS[rand::thread_rng().gen_range(0..BRAIN_TESTING_URLS.len())], "testnet-brain")
}
lazy_static! {
pub static ref PUBLIC_KEY: String = get_public_key();
pub static ref IP_INFO: IPInfo = get_ip_info().unwrap();
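brain_staging() and brain_testing() above pick a URL by random index; an equivalent sketch using SliceRandom from the same rand crate avoids the manual index arithmetic:

use rand::seq::SliceRandom;

pub fn brain_staging() -> (&'static str, &'static str) {
    // choose() returns None only for an empty slice, so unwrap is safe on a [&str; 3]
    (*BRAIN_STAGING_URLS.choose(&mut rand::thread_rng()).unwrap(), "staging-brain")
}

Note that under uniform sampling, duplicate entries act as weights: the three identical staging entries make the random pick a no-op for now, while BRAIN_TESTING_URLS selects 156.146.63.218 two-thirds of the time.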

@@ -1,12 +1,10 @@
// SPDX-License-Identifier: Apache-2.0
use crate::global::*;
use crate::snp_proto::VmDaemonMessage;
use crate::{global::*, snp_proto::VmDaemonMessage};
use anyhow::Result;
use detee_shared::vm_proto::DeleteVmReq;
use log::{debug, info, warn};
use snp_proto::{
brain_vm_daemon_client::BrainVmDaemonClient, BrainVmMessage, RegisterVmNodeReq, VmContract,
};
use snp_proto::{brain_vm_daemon_client::BrainVmDaemonClient, BrainVmMessage, RegisterVmNodeReq};
use tokio::{
sync::mpsc::{Receiver, Sender},
task::JoinSet,
@@ -20,14 +18,16 @@ pub mod snp_proto {
async fn client(network: &str) -> Result<BrainVmDaemonClient<Channel>> {
let (brain_url, brain_san) = match network {
"staging" => BRAIN_STAGING,
"testnet" => BRAIN_TESTING,
"staging" => brain_staging(),
"testnet" => brain_testing(),
_ => {
return Err(anyhow::anyhow!(
"The only networks currently supported are staging and testnet."
))
}
};
info!("brain_url: {brain_url}, brain_san: {brain_san}");
let pem = std::fs::read_to_string(DETEE_ROOT_CA)?;
let ca = Certificate::from_pem(pem);
@@ -38,9 +38,8 @@ async fn client(network: &str) -> Result<BrainVmDaemonClient<Channel>> {
Ok(BrainVmDaemonClient::new(channel))
}
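The hunks elide the middle of client(), so the TLS wiring is not shown; presumably brain_url and brain_san feed tonic's client TLS config roughly as in this sketch (the connect helper and its shape are assumptions, not part of the diff):

use tonic::transport::{Certificate, Channel, ClientTlsConfig};

// Sketch (assumption): the CA pins the DeTEE root, and the SAN override is needed
// because the brain is dialed by raw IP rather than by hostname.
async fn connect(brain_url: &str, brain_san: &str, ca: Certificate) -> anyhow::Result<Channel> {
    let tls = ClientTlsConfig::new().ca_certificate(ca).domain_name(brain_san);
    Ok(Channel::from_shared(brain_url.to_string())?.tls_config(tls)?.connect().await?)
}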
pub async fn register_node(config: &crate::config::Config) -> Result<Vec<VmContract>> {
use tonic::metadata::AsciiMetadataValue;
use tonic::Request;
pub async fn register_node(config: &crate::config::Config) -> Result<Vec<DeleteVmReq>> {
use tonic::{metadata::AsciiMetadataValue, Request};
let mut client = client(&config.network).await?;
debug!("Starting node registration...");
let ip_info = IP_INFO.clone();
@@ -70,7 +69,7 @@ pub async fn register_node(config: &crate::config::Config) -> Result<Vec<VmContr
while let Some(stream_update) = grpc_stream.next().await {
match stream_update {
Ok(node) => {
debug!("Received contract from brain: {node:?}");
debug!("Received deleted VM from brain: {node:?}");
contracts.push(node);
}
Err(e) => {

@@ -5,8 +5,7 @@ mod global;
mod grpc;
mod state;
use crate::global::*;
use crate::{config::Config, grpc::snp_proto};
use crate::{config::Config, global::*, grpc::snp_proto};
use anyhow::{anyhow, Result};
use log::{debug, info, warn};
use std::{fs::File, path::Path};
@@ -60,29 +59,62 @@ impl VMHandler {
)
}
async fn send_node_resources(&mut self) {
let (avail_ipv4, avail_ipv6) = self.get_available_ips();
let mut total_gb_available = 0;
/// Returns the storage available for a single VM and the total storage available, in MiB.
fn storage_available(&self) -> (usize, usize) {
let mut total_storage_available = 0_usize;
let mut avail_storage_mib = 0_usize;
for volume in self.config.volumes.iter() {
let reservation: usize = match self.res.reserved_storage.get(&volume.path) {
Some(reserved) => *reserved,
None => 0 as usize,
};
let volume_gb_available = volume.max_reservation_gb - reservation;
if total_gb_available < volume_gb_available {
total_gb_available = volume_gb_available;
let volume_mib_available = volume.max_reservation_mib.saturating_sub(reservation);
total_storage_available += volume_mib_available;
if avail_storage_mib < volume_mib_available {
avail_storage_mib = volume_mib_available;
}
}
let avail_storage_gb = total_gb_available as u32;
(avail_storage_mib, total_storage_available)
}
/// Returns the memory-per-vCPU and disk-per-vCPU ratios
fn slot_ratios(&self) -> (usize, usize) {
let (_, total_storage_mib) = self.storage_available();
let available_cpus: usize =
self.config.max_vcpu_reservation.saturating_sub(self.res.reserved_vcpus);
let available_mem: usize =
self.config.max_mem_reservation_mib.saturating_sub(self.res.reserved_memory_mib);
let memory_per_cpu = available_mem / available_cpus;
let disk_per_cpu = total_storage_mib / available_cpus;
(memory_per_cpu, disk_per_cpu)
}
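slot_ratios() divides by available_cpus, which reaches zero once all vCPUs are reserved; a defensive variant could return Option instead (slot_ratios_checked is a hypothetical name, not part of the diff):

// Sketch: same ratios, but None instead of a divide-by-zero panic on a full node
fn slot_ratios_checked(&self) -> Option<(usize, usize)> {
    let (_, total_storage_mib) = self.storage_available();
    let available_cpus =
        self.config.max_vcpu_reservation.saturating_sub(self.res.reserved_vcpus);
    if available_cpus == 0 {
        return None;
    }
    let available_mem =
        self.config.max_mem_reservation_mib.saturating_sub(self.res.reserved_memory_mib);
    Some((available_mem / available_cpus, total_storage_mib / available_cpus))
}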
async fn send_node_resources(&mut self) {
let (avail_ipv4, avail_ipv6) = self.get_available_ips();
let (avail_storage_mib, total_storage_available) = self.storage_available();
let avail_vcpus = self.config.max_vcpu_reservation.saturating_sub(self.res.reserved_vcpus);
let avail_memory_mib =
self.config.max_mem_reservation_mib.saturating_sub(self.res.reserved_memory_mib);
// If storage is separated into multiple volumes, that limits the maximum VM size.
// Due to this, we have to limit the maximum number of vCPUs and memory per VM,
// based on the maximum possible disk size per VM.
let avail_vcpus = avail_vcpus * avail_storage_mib / total_storage_available;
let avail_memory_mib = avail_memory_mib * avail_storage_mib / total_storage_available;
let res = snp_proto::VmNodeResources {
node_pubkey: PUBLIC_KEY.clone(),
avail_ports: (self.config.public_port_range.len() - self.res.reserved_ports.len())
as u32,
avail_ipv4,
avail_ipv6,
avail_vcpus: (self.config.max_vcpu_reservation - self.res.reserved_vcpus) as u32,
avail_memory_mb: (self.config.max_mem_reservation_mb - self.res.reserved_memory) as u32,
avail_storage_gb,
avail_vcpus: avail_vcpus as u32,
avail_memory_mib: avail_memory_mib as u32,
avail_storage_mib: avail_storage_mib as u32,
max_ports_per_vm: self.config.max_ports_per_vm as u32,
};
debug!("sending node resources on brain: {res:?}");
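A worked example of the proportional limit, with hypothetical numbers: on a node with two equal 51_200 MiB volumes, the largest single VM disk is half of the total, so the advertised vCPUs and memory are halved as well:

// Hypothetical: two 51_200 MiB volumes, 32 free vCPUs, 65_536 MiB free memory
let (avail_storage_mib, total_storage_available) = (51_200_usize, 102_400_usize);
assert_eq!(32 * avail_storage_mib / total_storage_available, 16);
assert_eq!(65_536 * avail_storage_mib / total_storage_available, 32_768);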
@@ -90,6 +122,30 @@ impl VMHandler {
}
async fn handle_new_vm_req(&mut self, new_vm_req: snp_proto::NewVmReq) {
// Currently the daemon allows a deviation of 10% for newly created VMs, so that the
// process doesn't get interrupted by small bugs caused by human error in coding.
// Over time, we will probably allow deviation based on server utilization; however,
// that needs to be implemented for both VM creation and VM upgrade.
let (memory_per_cpu, disk_per_cpu) = self.slot_ratios();
let vm_memory_per_cpu = new_vm_req.memory_mib / new_vm_req.vcpus;
let vm_disk_per_cpu = new_vm_req.disk_size_mib / new_vm_req.vcpus;
if !within_10_percent(memory_per_cpu, vm_memory_per_cpu as usize)
|| !within_10_percent(disk_per_cpu, vm_disk_per_cpu as usize)
{
warn!("Refusing to create vm due to unbalanced resources: {new_vm_req:?}");
let _ = self
.sender
.send(
snp_proto::NewVmResp {
uuid: new_vm_req.uuid,
error: format!("Unbalanced hardware resources."),
..Default::default()
}
.into(),
)
.await;
return;
};
debug!("Processing new vm request: {new_vm_req:?}");
let uuid = new_vm_req.uuid.clone();
match state::VM::new(new_vm_req.into(), &self.config, &mut self.res) {
@@ -207,17 +263,14 @@ impl VMHandler {
}
}
fn clear_deleted_contracts(&mut self, contracts: Vec<snp_proto::VmContract>) {
for uuid in self.res.existing_vms.clone() {
if contracts.iter().find(|c| c.uuid == uuid).is_none() {
info!("VM {uuid} exists locally but not found in brain. Deleting...");
let content =
match std::fs::read_to_string(VM_CONFIG_DIR.to_string() + &uuid + ".yaml") {
fn clear_deleted_contracts(&mut self, deleted_vms: Vec<snp_proto::DeleteVmReq>) {
for deleted_vm in deleted_vms {
let uuid = deleted_vm.uuid;
let content = match std::fs::read_to_string(VM_CONFIG_DIR.to_string() + &uuid + ".yaml")
{
Ok(content) => content,
Err(e) => {
log::error!(
"Could not find VM config for {uuid}. Cannot delete VM: {e:?}"
);
log::debug!("Could not find VM config for {uuid}. Maybe it already got deleted? Error: {e:?}");
continue;
}
};
@@ -234,7 +287,6 @@ impl VMHandler {
}
}
}
}
}
#[tokio::main]
@@ -254,14 +306,11 @@ async fn main() {
let mut vm_handler = VMHandler::new(brain_msg_rx, daemon_msg_tx.clone());
let network = vm_handler.config.network.clone();
let contracts: Vec<String> = vm_handler.res.existing_vms.clone().into_iter().collect();
info!("Registering with the brain and getting back VM Contracts (if they exist).");
let mut contracts: Vec<String> = Vec::new();
info!("Registering with the brain and getting back deleted VMs.");
match grpc::register_node(&vm_handler.config).await {
Ok(c) => {
contracts.append(&mut c.iter().map(|c| c.uuid.clone()).collect());
vm_handler.clear_deleted_contracts(c)
}
Ok(deleted_vms) => vm_handler.clear_deleted_contracts(deleted_vms),
Err(e) => log::error!("Could not get contracts from brain: {e:?}"),
};
@@ -307,3 +356,9 @@ fn download_and_replace_binary() -> Result<()> {
}
Ok(())
}
fn within_10_percent(a: usize, b: usize) -> bool {
let diff = a.abs_diff(b); // absolute difference, usize
let reference = a.max(b); // the larger of the two
diff * 10 <= reference
}
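A quick boundary check of within_10_percent as a sketch test; because the deviation is measured against the larger value, exactly 10% off still passes:

#[cfg(test)]
mod within_10_percent_tests {
    use super::within_10_percent;

    #[test]
    fn boundary() {
        assert!(within_10_percent(1000, 900)); // 100 * 10 <= 1000: exactly 10% passes
        assert!(!within_10_percent(1000, 899)); // 101 * 10 > 1000: rejected
        assert!(within_10_percent(0, 0)); // degenerate case: 0 <= 0 holds
    }
}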

@@ -19,7 +19,7 @@ pub struct Resources {
pub existing_vms: HashSet<String>,
// QEMU does not support MHz limitation
pub reserved_vcpus: usize,
pub reserved_memory: usize,
pub reserved_memory_mib: usize,
pub reserved_ports: HashSet<u16>,
pub reserved_storage: HashMap<String, usize>,
pub reserved_ipv4: HashSet<String>,
@@ -49,16 +49,16 @@ impl Resources {
let vm: VM = serde_yaml::from_str(&content)?;
res.existing_vms.insert(vm.uuid);
res.reserved_vcpus = res.reserved_vcpus.saturating_add(vm.vcpus);
res.reserved_memory = res.reserved_memory.saturating_add(vm.memory_mb);
res.reserved_memory_mib = res.reserved_memory_mib.saturating_add(vm.memory_mib);
for (port, _) in vm.fw_ports.iter() {
res.reserved_ports.insert(*port);
}
res.reserved_storage
.entry(vm.storage_dir.clone())
.and_modify(|gb| {
*gb = gb.saturating_add(vm.disk_size_gb);
*gb = gb.saturating_add(vm.disk_size_mib);
})
.or_insert(vm.disk_size_gb);
.or_insert(vm.disk_size_mib);
for nic in vm.nics {
for ip in nic.ips {
if let Ok(ip_address) = ip.address.parse::<std::net::IpAddr>() {
@@ -89,13 +89,13 @@ impl Resources {
storage_pools.push(StoragePool {
path: config_vol.path.clone(),
// TODO: check if the storage is actually available at that path
available_gb: config_vol.max_reservation_gb,
available_gb: config_vol.max_reservation_mib,
});
}
let mut res = Resources {
existing_vms: HashSet::new(),
reserved_vcpus: 0,
reserved_memory: 0,
reserved_memory_mib: 0,
reserved_ports: HashSet::new(),
reserved_storage: HashMap::new(),
reserved_ipv4: HashSet::new(),
@@ -112,12 +112,12 @@ impl Resources {
let mut volumes = config.volumes.clone();
for volume in volumes.iter_mut() {
if let Some(reservation) = self.reserved_storage.get(&volume.path) {
volume.max_reservation_gb = volume.max_reservation_gb.saturating_sub(*reservation);
volume.max_reservation_mib = volume.max_reservation_mib.saturating_sub(*reservation);
}
}
volumes.sort_by_key(|v| v.max_reservation_gb);
volumes.sort_by_key(|v| v.max_reservation_mib);
if let Some(biggest_volume) = volumes.last() {
if biggest_volume.max_reservation_gb >= required_gb {
if biggest_volume.max_reservation_mib >= required_gb {
return Some(biggest_volume.path.clone());
}
}
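The parameter here is still named required_gb even though the caller now passes disk_size_mib (see the VM::new hunk further down); a unit-consistent signature would read like this sketch, with behaviour unchanged from the hunk above:

// Sketch: rename only, mirroring the body shown in the hunk above
pub fn available_storage_pool(&self, required_mib: usize, config: &Config) -> Option<String> {
    let mut volumes = config.volumes.clone();
    for volume in volumes.iter_mut() {
        if let Some(reservation) = self.reserved_storage.get(&volume.path) {
            volume.max_reservation_mib = volume.max_reservation_mib.saturating_sub(*reservation);
        }
    }
    volumes.sort_by_key(|v| v.max_reservation_mib);
    volumes.last().filter(|v| v.max_reservation_mib >= required_mib).map(|v| v.path.clone())
}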
@@ -264,7 +264,7 @@ impl Resources {
fn reserve_vm_resources(&mut self, vm: &VM) {
self.existing_vms.insert(vm.uuid.clone());
self.reserved_vcpus += vm.vcpus;
self.reserved_memory += vm.memory_mb;
self.reserved_memory_mib += vm.memory_mib;
for nic in vm.nics.iter() {
if let Some(vtap) = nic.if_config.vtap_name() {
self.reserved_if_names.insert(vtap);
@@ -286,8 +286,8 @@ impl Resources {
self.reserved_storage
.entry(vm.storage_dir.clone())
.and_modify(|gb| *gb = gb.saturating_add(vm.disk_size_gb))
.or_insert(vm.disk_size_gb);
.and_modify(|gb| *gb = gb.saturating_add(vm.disk_size_mib))
.or_insert(vm.disk_size_mib);
let _ = self.save_to_disk();
}
@@ -296,7 +296,7 @@ impl Resources {
return;
}
self.reserved_vcpus = self.reserved_vcpus.saturating_sub(vm.vcpus);
self.reserved_memory = self.reserved_memory.saturating_sub(vm.memory_mb);
self.reserved_memory_mib = self.reserved_memory_mib.saturating_sub(vm.memory_mib);
for nic in vm.nics.iter() {
if let Some(vtap) = nic.if_config.vtap_name() {
self.reserved_if_names.remove(&vtap);
@@ -317,7 +317,7 @@ }
}
self.reserved_storage
.entry(vm.storage_dir.clone())
.and_modify(|gb| *gb = gb.saturating_sub(vm.disk_size_gb));
.and_modify(|gb| *gb = gb.saturating_sub(vm.disk_size_mib));
if let Err(e) = self.save_to_disk() {
log::error!("Could not save resources to disk: {e}");
}
@@ -400,8 +400,8 @@ pub struct VM {
// currently hardcoded to EPYC-v4
// cpu_type: String,
vcpus: usize,
memory_mb: usize,
disk_size_gb: usize,
memory_mib: usize,
disk_size_mib: usize,
kernel_sha: String,
dtrfs_sha: String,
storage_dir: String,
@@ -465,9 +465,9 @@ pub struct NewVMRequest {
extra_ports: Vec<u16>,
public_ipv4: bool,
public_ipv6: bool,
disk_size_gb: usize,
disk_size_mib: usize,
vcpus: usize,
memory_mb: usize,
memory_mib: usize,
kernel_url: String,
kernel_sha: String,
dtrfs_url: String,
@@ -483,9 +483,9 @@ impl From<snp_proto::NewVmReq> for NewVMRequest {
extra_ports: req.extra_ports.iter().map(|&port| port as u16).collect(),
public_ipv4: req.public_ipv4,
public_ipv6: req.public_ipv6,
disk_size_gb: req.disk_size_gb as usize,
disk_size_mib: req.disk_size_mib as usize,
vcpus: req.vcpus as usize,
memory_mb: req.memory_mb as usize,
memory_mib: req.memory_mib as usize,
kernel_url: req.kernel_url,
kernel_sha: req.kernel_sha,
dtrfs_url: req.dtrfs_url,
@@ -499,8 +499,8 @@ impl From<snp_proto::NewVmReq> for NewVMRequest {
pub struct UpdateVMReq {
pub uuid: String,
vcpus: usize,
memory_mb: usize,
disk_size_gb: usize,
memory_mib: usize,
disk_size_mib: usize,
// we are not using Option<String>, as these will be passed from gRPC
kernel_url: String,
kernel_sha: String,
@@ -513,8 +513,8 @@ impl From<snp_proto::UpdateVmReq> for UpdateVMReq {
Self {
uuid: req.uuid,
vcpus: req.vcpus as usize,
memory_mb: req.memory_mb as usize,
disk_size_gb: req.disk_size_gb as usize,
memory_mib: req.memory_mib as usize,
disk_size_mib: req.disk_size_mib as usize,
kernel_url: req.kernel_url,
kernel_sha: req.kernel_sha,
dtrfs_url: req.dtrfs_url,
@@ -528,7 +528,6 @@ pub enum VMCreationErrors {
PriceIsTooLow,
VMAlreadyExists(VM),
NATandIPv4Conflict,
TooManyCores,
NotEnoughPorts,
NotEnoughCPU,
NotEnoughMemory,
@@ -536,6 +535,7 @@
IPv4NotAvailable,
IPv6NotAvailable,
DiskTooSmall,
DowngradeNotSupported,
ServerDiskError(String),
BootFileError(String),
HypervizorError(String),
@@ -560,16 +560,13 @@ impl VM {
if req.extra_ports.len() > 0 && req.public_ipv4 {
return Err(VMCreationErrors::NATandIPv4Conflict);
}
if config.max_cores_per_vm < req.vcpus {
return Err(VMCreationErrors::TooManyCores);
}
if config.max_vcpu_reservation < res.reserved_vcpus.saturating_add(req.vcpus) {
return Err(VMCreationErrors::NotEnoughCPU);
}
if config.max_mem_reservation_mb < res.reserved_memory.saturating_add(req.memory_mb) {
if config.max_mem_reservation_mib < res.reserved_memory_mib.saturating_add(req.memory_mib) {
return Err(VMCreationErrors::NotEnoughMemory);
}
if req.disk_size_gb < 4 {
if req.disk_size_mib < 4 {
return Err(VMCreationErrors::DiskTooSmall);
}
@@ -632,7 +629,7 @@ }
}
}
let storage_pool_path = match res.available_storage_pool(req.disk_size_gb, config) {
let storage_pool_path = match res.available_storage_pool(req.disk_size_mib, config) {
Some(path) => path,
None => return Err(VMCreationErrors::NotEnoughStorage),
};
@@ -642,8 +639,8 @@
admin_key: req.admin_key,
nics: vm_nics,
vcpus: req.vcpus,
memory_mb: req.memory_mb,
disk_size_gb: req.disk_size_gb,
memory_mib: req.memory_mib,
disk_size_mib: req.disk_size_mib,
kernel_sha: req.kernel_sha,
dtrfs_sha: req.dtrfs_sha,
fw_ports: port_pairs,
@@ -663,8 +660,12 @@
config: &Config,
res: &mut Resources,
) -> Result<(), VMCreationErrors> {
if req.vcpus > 0 && config.max_cores_per_vm < req.vcpus {
return Err(VMCreationErrors::TooManyCores);
if req.vcpus < self.vcpus {
// Downgrades will be supported only after we implement deviation for VMs.
// (Deviation from the slot size allows managing VMs with unbalanced resources
// without fully saturating a node. We are disabling downgrades to avoid complexity
// at this stage of the product.)
return Err(VMCreationErrors::DowngradeNotSupported);
}
if req.vcpus > 0
&& config.max_vcpu_reservation
@@ -672,13 +673,13 @@
{
return Err(VMCreationErrors::NotEnoughCPU);
}
if req.memory_mb > 0
&& config.max_mem_reservation_mb
< res.reserved_memory.saturating_sub(self.memory_mb).saturating_add(req.memory_mb)
if req.memory_mib > 0
&& config.max_mem_reservation_mib
< res.reserved_memory_mib.saturating_sub(self.memory_mib).saturating_add(req.memory_mib)
{
return Err(VMCreationErrors::NotEnoughMemory);
}
if req.disk_size_gb > 0 && req.disk_size_gb < self.disk_size_gb {
if req.disk_size_mib > 0 && req.disk_size_mib < self.disk_size_mib {
return Err(VMCreationErrors::DiskTooSmall);
}
@@ -710,24 +711,24 @@ }
}
// Update the resources
res.reserved_memory = res.reserved_memory.saturating_add(req.memory_mb);
res.reserved_memory = res.reserved_memory.saturating_sub(self.memory_mb);
res.reserved_memory_mib = res.reserved_memory_mib.saturating_add(req.memory_mib);
res.reserved_memory_mib = res.reserved_memory_mib.saturating_sub(self.memory_mib);
res.reserved_vcpus = res.reserved_vcpus.saturating_add(req.vcpus);
res.reserved_vcpus = res.reserved_vcpus.saturating_sub(self.vcpus);
res.reserved_storage.entry(self.storage_dir.clone()).and_modify(|gb| {
*gb = gb.saturating_add(req.disk_size_gb);
*gb = gb.saturating_sub(self.disk_size_gb);
*gb = gb.saturating_add(req.disk_size_mib);
*gb = gb.saturating_sub(self.disk_size_mib);
});
let _ = res.save_to_disk();
if req.memory_mb != 0 {
self.memory_mb = req.memory_mb;
if req.memory_mib != 0 {
self.memory_mib = req.memory_mib;
}
if req.vcpus != 0 {
self.vcpus = req.vcpus;
}
if req.disk_size_gb != 0 {
self.disk_size_gb = req.disk_size_gb;
if req.disk_size_mib != 0 {
self.disk_size_mib = req.disk_size_mib;
}
if let Err(e) = systemctl_stop_and_disable(&self.uuid) {
@@ -854,9 +855,9 @@ impl VM {
vars += "\n";
vars += &format!(r#"export VCPUS="{}""#, self.vcpus);
vars += "\n";
vars += &format!(r#"export MEMORY="{}M""#, (self.memory_mb / 2 * 2));
vars += &format!(r#"export MEMORY="{}M""#, (self.memory_mib / 2 * 2));
vars += "\n";
vars += &format!(r#"export MAX_MEMORY="{}M""#, (self.memory_mb / 2 * 2) + 256);
vars += &format!(r#"export MAX_MEMORY="{}M""#, (self.memory_mib / 2 * 2) + 256);
vars += "\n";
vars += &format!(r#"export DISK="{}""#, self.disk_path());
vars += "\n";
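The /2*2 in the MEMORY export rounds an odd MiB count down to an even value, and MAX_MEMORY adds 256 MiB of headroom on top; for example:

// Hypothetical request of 2049 MiB
assert_eq!(2049 / 2 * 2, 2048); // MEMORY="2048M"
assert_eq!(2049 / 2 * 2 + 256, 2304); // MAX_MEMORY="2304M"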
@@ -925,7 +926,7 @@ impl VM {
.arg("-f")
.arg("qcow2")
.arg(self.disk_path())
.arg(self.disk_size_gb.to_string() + "G")
.arg(self.disk_size_mib.to_string() + "M")
.output()?;
if !result.status.success() {
return Err(anyhow!(
@@ -943,7 +944,7 @@
let result = Command::new("qemu-img")
.arg("resize")
.arg(self.disk_path())
.arg(self.disk_size_gb.to_string() + "G")
.arg(self.disk_size_mib.to_string() + "M")
.output()?;
if !result.status.success() {
return Err(anyhow!(