switching to slot and credit system

updated proto
change all units to mib
calculating slot ratio when creating a new app and when sending resources
This commit is contained in:
Noor 2025-06-26 21:46:43 +05:30
parent bb33f32b23
commit 202a1a9998
Signed by: noormohammedb
GPG Key ID: D83EFB8B3B967146
6 changed files with 75 additions and 26 deletions

5
Cargo.lock generated

@ -1,7 +1,6 @@
# SPDX-License-Identifier: Apache-2.0
# This file is automatically @generated by Cargo. # This file is automatically @generated by Cargo.
# It is not intended for manual editing. # It is not intended for manual editing.
# SPDX-License-Identifier: Apache-2.0
version = 4 version = 4
[[package]] [[package]]
@ -435,7 +434,7 @@ dependencies = [
[[package]] [[package]]
name = "detee-shared" name = "detee-shared"
version = "0.1.0" version = "0.1.0"
source = "git+ssh://git@gitea.detee.cloud/testnet/proto?branch=surreal_brain_app#0b195b4589e4ec689af7ddca27dc051716ecee78" source = "git+ssh://git@gitea.detee.cloud/testnet/proto?branch=credits_app#01e93d3a2e4502c0e8e72026e8a1c55810961815"
dependencies = [ dependencies = [
"bincode", "bincode",
"prost", "prost",

@ -27,7 +27,7 @@ bs58 = "0.5.1"
chrono = "0.4.39" chrono = "0.4.39"
sha2 = "0.10.8" sha2 = "0.10.8"
detee-shared = { git = "ssh://git@gitea.detee.cloud/testnet/proto", branch = "surreal_brain_app" } detee-shared = { git = "ssh://git@gitea.detee.cloud/testnet/proto", branch = "credits_app" }
# detee-shared = { path = "../detee-shared" } # detee-shared = { path = "../detee-shared" }
[build-dependencies] [build-dependencies]

@ -13,10 +13,10 @@ pub struct HostConfig {
pub host_ip_address: String, pub host_ip_address: String,
pub operator_wallet: String, pub operator_wallet: String,
pub max_cores_per_app: u32, pub max_cores_per_app: u32,
pub max_memory_mb_per_app: u32, pub max_memory_mib_per_app: u32,
pub max_vcpu_reservation: u32, pub max_vcpu_reservation: u32,
pub max_mem_reservation_mb: u32, pub max_mem_reservation_mib: u32,
pub max_disk_reservation_gb: u32, pub max_disk_reservation_mib: u32,
pub max_ports_per_app: u32, pub max_ports_per_app: u32,
// price per unit per minute // price per unit per minute
pub price: u64, pub price: u64,

@ -23,14 +23,14 @@ pub fn deploy_enclave(
enclave_path, hratls_pubkey enclave_path, hratls_pubkey
); );
let memory_mb = app_resource.memory_mb; let memory_mib = app_resource.memory_mib;
let vcpus = app_resource.vcpus; let vcpus = app_resource.vcpus;
// TODO: docker limit disk space // TODO: docker limit disk space
// let disk_mb = app_resource.disk_mb; // let disk_mib = app_resource.disk_mib;
// --storage-opt size={disk_mb}m // --storage-opt size={disk_mib}m
let docker_deploy_str = format!( let docker_deploy_str = format!(
"docker run -d --restart unless-stopped --name {container_name_uuid} --memory={memory_mb}m --cpus={vcpus} \ "docker run -d --restart unless-stopped --name {container_name_uuid} --memory={memory_mib}m --cpus={vcpus} \
-v {enclave_path}:/enclave_package --device /dev/sgx/enclave --device /dev/sgx/provision \ -v {enclave_path}:/enclave_package --device /dev/sgx/enclave --device /dev/sgx/provision \
{port_maping_string} noormohammedb/occlum-enclave:v1 {hratls_pubkey}" {port_maping_string} noormohammedb/occlum-enclave:v1 {hratls_pubkey}"
); );

@ -23,8 +23,8 @@ use crate::global::USED_RESOURCES_PATH;
pub struct HostResources { pub struct HostResources {
pub existing_apps: HashSet<String>, pub existing_apps: HashSet<String>,
pub reserved_vcpus: u32, pub reserved_vcpus: u32,
pub reserved_memory_mb: u32, pub reserved_memory_mib: u32,
pub reserved_disk_gb: u32, pub reserved_disk_mib: u32,
pub reserved_host_ports: HashSet<u16>, pub reserved_host_ports: HashSet<u16>,
} }
@ -52,9 +52,9 @@ impl HostResources {
} }
pub fn reserve_resources(&mut self, app: &App) -> Result<()> { pub fn reserve_resources(&mut self, app: &App) -> Result<()> {
self.reserved_memory_mb += app.app_resource.memory_mb; self.reserved_memory_mib += app.app_resource.memory_mib;
self.reserved_vcpus += app.app_resource.vcpus; self.reserved_vcpus += app.app_resource.vcpus;
self.reserved_disk_gb += app.app_resource.disk_size_gb; self.reserved_disk_mib += app.app_resource.disk_size_mib;
for (port, _) in app.mapped_ports.iter() { for (port, _) in app.mapped_ports.iter() {
self.reserved_host_ports.insert(*port); self.reserved_host_ports.insert(*port);
@ -65,9 +65,9 @@ impl HostResources {
} }
pub fn un_reserve_resources(&mut self, app: &App) -> Result<()> { pub fn un_reserve_resources(&mut self, app: &App) -> Result<()> {
self.reserved_memory_mb -= app.app_resource.memory_mb; self.reserved_memory_mib -= app.app_resource.memory_mib;
self.reserved_vcpus -= app.app_resource.vcpus; self.reserved_vcpus -= app.app_resource.vcpus;
self.reserved_disk_gb -= app.app_resource.disk_size_gb; self.reserved_disk_mib -= app.app_resource.disk_size_mib;
for (port, _) in app.mapped_ports.iter() { for (port, _) in app.mapped_ports.iter() {
self.reserved_host_ports.remove(port); self.reserved_host_ports.remove(port);
@ -147,7 +147,7 @@ impl App {
return Err(anyhow!("too many vcpus for app")); return Err(anyhow!("too many vcpus for app"));
} }
if host_config.max_memory_mb_per_app < new_app_req.resource.memory_mb { if host_config.max_memory_mib_per_app < new_app_req.resource.memory_mib {
return Err(anyhow!("too much memory for app")); return Err(anyhow!("too much memory for app"));
} }
@ -159,14 +159,14 @@ impl App {
{ {
return Err(anyhow!("vcpus not available")); return Err(anyhow!("vcpus not available"));
} }
if host_config.max_mem_reservation_mb if host_config.max_mem_reservation_mib
< host_resource < host_resource
.reserved_memory_mb .reserved_memory_mib
.saturating_add(new_app_req.resource.memory_mb) .saturating_add(new_app_req.resource.memory_mib)
{ {
return Err(anyhow!("not enough memory available")); return Err(anyhow!("not enough memory available"));
} }
if new_app_req.resource.disk_size_gb < 1 { if new_app_req.resource.disk_size_mib < 1 {
return Err(anyhow!("disk too small")); return Err(anyhow!("disk too small"));
} }

@ -102,6 +102,29 @@ impl AppHandler {
} }
async fn handle_new_app_req(&mut self, new_app_req: AppDeployConfig) { async fn handle_new_app_req(&mut self, new_app_req: AppDeployConfig) {
let (avail_memory_per_cpu, avail_disk_per_cpu) = self.slot_ratios();
let req_resource = &new_app_req.resource;
let req_memory_per_cpu = req_resource.memory_mib / req_resource.vcpus;
let req_disk_per_cpu = req_resource.disk_size_mib / req_resource.vcpus;
if !within_10_percent(avail_memory_per_cpu, req_memory_per_cpu as usize)
|| !within_10_percent(avail_disk_per_cpu, req_disk_per_cpu as usize)
{
warn!("Refusing to create app due to unbalanced resources: {new_app_req:?}");
let _ = self
.sender
.send(
NewAppRes {
uuid: new_app_req.uuid,
error: format!("Unbalanced hardware resources."),
..Default::default()
}
.into(),
)
.await;
return;
};
log::debug!("Processing new app request: {new_app_req:?}");
let uuid = new_app_req.uuid.clone(); let uuid = new_app_req.uuid.clone();
let app_result = App::new(new_app_req, &self.host_config, &mut self.host_resource).await; let app_result = App::new(new_app_req, &self.host_config, &mut self.host_resource).await;
@ -146,6 +169,25 @@ impl AppHandler {
Ok(()) Ok(())
} }
/// Returns the (memory-per-vCPU, disk-per-vCPU) ratios, in MiB, computed
/// over the capacity that is still unreserved on this host.
///
/// Disk is divided by available vCPUs against the *total* disk reservation
/// limit (not the unreserved remainder) — NOTE(review): confirm this
/// asymmetry with memory is intentional.
fn slot_ratios(&self) -> (usize, usize) {
    let total_storage_mib = self.host_config.max_disk_reservation_mib as usize;
    let available_cpus = self
        .host_config
        .max_vcpu_reservation
        .saturating_sub(self.host_resource.reserved_vcpus) as usize;
    let available_mem = self
        .host_config
        .max_mem_reservation_mib
        .saturating_sub(self.host_resource.reserved_memory_mib) as usize;
    // Guard against division by zero when every vCPU is already reserved.
    // (0, 0) ratios can never be within 10% of a nonzero request, so the
    // caller refuses the app — the right outcome for a fully booked host.
    if available_cpus == 0 {
        return (0, 0);
    }
    let memory_per_cpu = available_mem / available_cpus;
    let disk_per_cpu = total_storage_mib / available_cpus;
    (memory_per_cpu, disk_per_cpu)
}
async fn send_node_resources(&mut self) { async fn send_node_resources(&mut self) {
let host_config = self.host_config.clone(); let host_config = self.host_config.clone();
let host_resource = self.host_resource.clone(); let host_resource = self.host_resource.clone();
@ -155,16 +197,18 @@ impl AppHandler {
(host_config.public_port_range.len() - host_resource.reserved_host_ports.len()) as u32; (host_config.public_port_range.len() - host_resource.reserved_host_ports.len()) as u32;
let avail_vcpus = host_config.max_vcpu_reservation - host_resource.reserved_vcpus; let avail_vcpus = host_config.max_vcpu_reservation - host_resource.reserved_vcpus;
let avail_memory_mb = host_config.max_mem_reservation_mb - host_resource.reserved_memory_mb; let avail_memory_mib =
let avail_storage_gb = host_config.max_disk_reservation_gb - host_resource.reserved_disk_gb; host_config.max_mem_reservation_mib - host_resource.reserved_memory_mib;
let avail_storage_mib =
host_config.max_disk_reservation_mib - host_resource.reserved_disk_mib;
let max_ports_per_app = host_config.max_ports_per_app; let max_ports_per_app = host_config.max_ports_per_app;
let resource_update = AppNodeResources { let resource_update = AppNodeResources {
node_pubkey, node_pubkey,
avail_no_of_port, avail_no_of_port,
avail_vcpus, avail_vcpus,
avail_memory_mb, avail_memory_mib,
avail_storage_gb, avail_storage_mib,
max_ports_per_app, max_ports_per_app,
}; };
@ -264,3 +308,9 @@ async fn download_and_replace_binary(network: &str) -> Result<()> {
} }
Ok(()) Ok(())
} }
/// Returns true when `a` and `b` differ by at most 10% of the larger value.
///
/// Symmetric in its arguments. `within_10_percent(0, 0)` is true; any
/// nonzero value compared against 0 is false.
fn within_10_percent(a: usize, b: usize) -> bool {
    let diff = a.abs_diff(b);
    // Compare against the larger of the two so the check is symmetric.
    let reference = a.max(b);
    // `diff <= reference / 10` is equivalent to `diff * 10 <= reference`
    // for all integers (diff <= floor(r/10) <=> 10*diff <= r), but cannot
    // overflow the way the multiplication can for very large inputs.
    diff <= reference / 10
}