diff --git a/Cargo.lock b/Cargo.lock index bc28727..5567e19 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1,7 +1,6 @@ -# SPDX-License-Identifier: Apache-2.0 - # This file is automatically @generated by Cargo. # It is not intended for manual editing. +# SPDX-License-Identifier: Apache-2.0 version = 4 [[package]] @@ -435,7 +434,7 @@ dependencies = [ [[package]] name = "detee-shared" version = "0.1.0" -source = "git+ssh://git@gitea.detee.cloud/testnet/proto?branch=surreal_brain_app#0b195b4589e4ec689af7ddca27dc051716ecee78" +source = "git+ssh://git@gitea.detee.cloud/testnet/proto?branch=credits_app#01e93d3a2e4502c0e8e72026e8a1c55810961815" dependencies = [ "bincode", "prost", diff --git a/Cargo.toml b/Cargo.toml index 7100174..8d6bbc7 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -27,7 +27,7 @@ bs58 = "0.5.1" chrono = "0.4.39" sha2 = "0.10.8" -detee-shared = { git = "ssh://git@gitea.detee.cloud/testnet/proto", branch = "surreal_brain_app" } +detee-shared = { git = "ssh://git@gitea.detee.cloud/testnet/proto", branch = "credits_app" } # detee-shared = { path = "../detee-shared" } [build-dependencies] diff --git a/src/config.rs b/src/config.rs index 2c6a0e1..97d06ee 100644 --- a/src/config.rs +++ b/src/config.rs @@ -13,10 +13,10 @@ pub struct HostConfig { pub host_ip_address: String, pub operator_wallet: String, pub max_cores_per_app: u32, - pub max_memory_mb_per_app: u32, + pub max_memory_mib_per_app: u32, pub max_vcpu_reservation: u32, - pub max_mem_reservation_mb: u32, - pub max_disk_reservation_gb: u32, + pub max_mem_reservation_mib: u32, + pub max_disk_reservation_mib: u32, pub max_ports_per_app: u32, // price per unit per minute pub price: u64, diff --git a/src/container.rs b/src/container.rs index ffc4ded..3288faf 100644 --- a/src/container.rs +++ b/src/container.rs @@ -23,14 +23,14 @@ pub fn deploy_enclave( enclave_path, hratls_pubkey ); - let memory_mb = app_resource.memory_mb; + let memory_mib = app_resource.memory_mib; let vcpus = app_resource.vcpus; // TODO: 
docker limit disk space - // let disk_mb = app_resource.disk_mb; - // --storage-opt size={disk_mb}m + // let disk_mib = app_resource.disk_mib; + // --storage-opt size={disk_mib}m let docker_deploy_str = format!( - "docker run -d --restart unless-stopped --name {container_name_uuid} --memory={memory_mb}m --cpus={vcpus} \ + "docker run -d --restart unless-stopped --name {container_name_uuid} --memory={memory_mib}m --cpus={vcpus} \ -v {enclave_path}:/enclave_package --device /dev/sgx/enclave --device /dev/sgx/provision \ {port_maping_string} noormohammedb/occlum-enclave:v1 {hratls_pubkey}" ); diff --git a/src/data.rs b/src/data.rs index 2973ae6..73e2c14 100644 --- a/src/data.rs +++ b/src/data.rs @@ -23,8 +23,8 @@ use crate::global::USED_RESOURCES_PATH; pub struct HostResources { pub existing_apps: HashSet, pub reserved_vcpus: u32, - pub reserved_memory_mb: u32, - pub reserved_disk_gb: u32, + pub reserved_memory_mib: u32, + pub reserved_disk_mib: u32, pub reserved_host_ports: HashSet, } @@ -52,9 +52,9 @@ impl HostResources { } pub fn reserve_resources(&mut self, app: &App) -> Result<()> { - self.reserved_memory_mb += app.app_resource.memory_mb; + self.reserved_memory_mib += app.app_resource.memory_mib; self.reserved_vcpus += app.app_resource.vcpus; - self.reserved_disk_gb += app.app_resource.disk_size_gb; + self.reserved_disk_mib += app.app_resource.disk_size_mib; for (port, _) in app.mapped_ports.iter() { self.reserved_host_ports.insert(*port); @@ -65,9 +65,9 @@ impl HostResources { } pub fn un_reserve_resources(&mut self, app: &App) -> Result<()> { - self.reserved_memory_mb -= app.app_resource.memory_mb; + self.reserved_memory_mib -= app.app_resource.memory_mib; self.reserved_vcpus -= app.app_resource.vcpus; - self.reserved_disk_gb -= app.app_resource.disk_size_gb; + self.reserved_disk_mib -= app.app_resource.disk_size_mib; for (port, _) in app.mapped_ports.iter() { self.reserved_host_ports.remove(port); @@ -147,7 +147,7 @@ impl App { return Err(anyhow!("too many 
vcpus for app")); } - if host_config.max_memory_mb_per_app < new_app_req.resource.memory_mb { + if host_config.max_memory_mib_per_app < new_app_req.resource.memory_mib { return Err(anyhow!("too much memory for app")); } @@ -159,14 +159,14 @@ impl App { { return Err(anyhow!("vcpus not available")); } - if host_config.max_mem_reservation_mb + if host_config.max_mem_reservation_mib < host_resource - .reserved_memory_mb - .saturating_add(new_app_req.resource.memory_mb) + .reserved_memory_mib + .saturating_add(new_app_req.resource.memory_mib) { return Err(anyhow!("not enough memory available")); } - if new_app_req.resource.disk_size_gb < 1 { + if new_app_req.resource.disk_size_mib < 1 { return Err(anyhow!("disk too small")); } diff --git a/src/main.rs b/src/main.rs index 29ebe0b..a3c734c 100644 --- a/src/main.rs +++ b/src/main.rs @@ -102,6 +102,29 @@ impl AppHandler { } async fn handle_new_app_req(&mut self, new_app_req: AppDeployConfig) { + let (avail_memory_per_cpu, avail_disk_per_cpu) = self.slot_ratios(); + let req_resource = &new_app_req.resource; + let req_memory_per_cpu = req_resource.memory_mib / req_resource.vcpus.max(1); + let req_disk_per_cpu = req_resource.disk_size_mib / req_resource.vcpus.max(1); + if !within_10_percent(avail_memory_per_cpu, req_memory_per_cpu as usize) + || !within_10_percent(avail_disk_per_cpu, req_disk_per_cpu as usize) + { + warn!("Refusing to create app due to unbalanced resources: {new_app_req:?}"); + let _ = self + .sender + .send( + NewAppRes { + uuid: new_app_req.uuid, + error: "Unbalanced hardware resources.".to_string(), + ..Default::default() + } + .into(), + ) + .await; + return; + }; + log::debug!("Processing new app request: {new_app_req:?}"); + let uuid = new_app_req.uuid.clone(); let app_result = App::new(new_app_req, &self.host_config, &mut self.host_resource).await; @@ -146,6 +169,25 @@ impl AppHandler { Ok(()) } + /// Returns the (memory MiB per vCPU, disk MiB per vCPU) ratios of the remaining host capacity + fn slot_ratios(&self) -> (usize, usize) { + let total_storage_mib = 
self.host_config.max_disk_reservation_mib as usize; + let available_cpus = + self.host_config + .max_vcpu_reservation + .saturating_sub(self.host_resource.reserved_vcpus) as usize; + + let available_mem = + self.host_config + .max_mem_reservation_mib + .saturating_sub(self.host_resource.reserved_memory_mib) as usize; + + let memory_per_cpu = available_mem / available_cpus.max(1); // max(1): every vCPU may already be reserved + let disk_per_cpu = total_storage_mib / available_cpus.max(1); + + (memory_per_cpu, disk_per_cpu) + } + async fn send_node_resources(&mut self) { let host_config = self.host_config.clone(); let host_resource = self.host_resource.clone(); @@ -155,16 +197,18 @@ impl AppHandler { (host_config.public_port_range.len() - host_resource.reserved_host_ports.len()) as u32; let avail_vcpus = host_config.max_vcpu_reservation - host_resource.reserved_vcpus; - let avail_memory_mb = host_config.max_mem_reservation_mb - host_resource.reserved_memory_mb; - let avail_storage_gb = host_config.max_disk_reservation_gb - host_resource.reserved_disk_gb; + let avail_memory_mib = + host_config.max_mem_reservation_mib.saturating_sub(host_resource.reserved_memory_mib); + let avail_storage_mib = + host_config.max_disk_reservation_mib.saturating_sub(host_resource.reserved_disk_mib); let max_ports_per_app = host_config.max_ports_per_app; let resource_update = AppNodeResources { node_pubkey, avail_no_of_port, avail_vcpus, - avail_memory_mb, - avail_storage_gb, + avail_memory_mib, + avail_storage_mib, max_ports_per_app, }; @@ -264,3 +308,9 @@ async fn download_and_replace_binary(network: &str) -> Result<()> { } Ok(()) } + +fn within_10_percent(a: usize, b: usize) -> bool { + let diff = a.abs_diff(b); // usize; abs_diff cannot underflow + let reference = a.max(b); // the larger of the two + diff * 10 <= reference +}