enable multiple offers per VM node

This commit is contained in:
parent 9e3a39545d
commit 1034b7b09a

Cargo.lock (generated, 2 changes)
@@ -1011,7 +1011,7 @@ dependencies = [
[[package]]
name = "detee-shared"
version = "0.1.0"
source = "git+ssh://git@gitea.detee.cloud/testnet/proto?branch=credits_app#01e93d3a2e4502c0e8e72026e8a1c55810961815"
source = "git+ssh://git@gitea.detee.cloud/testnet/proto?branch=offers#4753a17fa29393b3f99b6dfcdcec48d935e6ebd9"
dependencies = [
 "bincode 2.0.1",
 "prost",
@@ -15,7 +15,7 @@ serde_yaml = "0.9.34"
surrealdb = "2.2.2"
tokio = { version = "1.44.2", features = ["macros", "rt-multi-thread"] }
tonic = { version = "0.12", features = ["tls"] }
detee-shared = { git = "ssh://git@gitea.detee.cloud/testnet/proto", branch = "credits_app" }
detee-shared = { git = "ssh://git@gitea.detee.cloud/testnet/proto", branch = "offers" }
ed25519-dalek = "2.1.1"
bs58 = "0.5.1"
tokio-stream = "0.1.17"
@@ -4,12 +4,12 @@
// and dangling impls from the model
use dotenv::dotenv;
use std::error::Error;
use surreal_brain::{db, old_brain};
use surreal_brain::db;

#[tokio::main]
async fn main() -> Result<(), Box<dyn Error>> {
    dotenv().ok();
    let old_brain_data = old_brain::BrainData::load_from_disk()?;
    // let old_brain_data = old_brain::BrainData::load_from_disk()?;
    // println!("{}", serde_yaml::to_string(&old_brain_data)?);

    let db_url = std::env::var("DB_URL").expect("DB_URL not set in .env");
@@ -21,7 +21,7 @@ async fn main() -> Result<(), Box<dyn Error>> {
    let db = db::db_connection(&db_url, &db_user, &db_pass, &db_ns, &db_name).await.unwrap();
    env_logger::builder().filter_level(log::LevelFilter::Trace);

    db::migration0(&db, &old_brain_data).await?;
    db::migration1_patch(&db).await?;

    Ok(())
}
@@ -8,7 +8,10 @@ pub const CERT_PATH: &str = "/etc/detee/brain/brain-crt.pem";
pub const CERT_KEY_PATH: &str = "/etc/detee/brain/brain-key.pem";
pub const CONFIG_PATH: &str = "/etc/detee/brain/config.ini";

pub const DB_SCHEMA_FILES: [&str; 2] = ["surql/tables.sql", "surql/functions.sql"];
pub const MIGRATION_0_SCHEMA: [&str; 2] = ["surql/migration0/tables.sql", "surql/migration0/functions.sql"];
pub const MIGRATION_0_MOCK_DATA: &str = "surql/migration0/mock_data.sql";
pub const MIGRATION_1_PATCH: &str = "surql/migration1/patch_tables.sql";
pub const MIGRATION_1_SCHEMA: &str = "surql/migration1/final_tables.sql";

pub static ADMIN_ACCOUNTS: LazyLock<Vec<String>> = LazyLock::new(|| {
    let default_admin_keys = vec![
@@ -23,13 +26,12 @@ pub static ADMIN_ACCOUNTS: LazyLock<Vec<String>> = LazyLock::new(|| {
        .unwrap_or(default_admin_keys)
});

pub const OLD_BRAIN_DATA_PATH: &str = "./saved_data.yaml";

pub const ACCOUNT: &str = "account";
pub const KICK: &str = "kick";
pub const BAN: &str = "ban";

pub const VM_NODE: &str = "vm_node";
pub const VM_NODE_OFFER: &str = "vm_node_offer";
pub const ACTIVE_VM: &str = "active_vm";
pub const VM_UPDATE_EVENT: &str = "vm_update_event";
pub const NEW_VM_REQ: &str = "new_vm_req";
@@ -4,10 +4,10 @@ use std::time::Duration;

use super::Error;
use crate::constants::{
    ACCOUNT, ACTIVE_APP, APP_DAEMON_TIMEOUT, APP_NODE, DEFAULT_ENDPOINT, DELETED_APP, NEW_APP_REQ,
    ACCOUNT, ACTIVE_APP, APP_DAEMON_TIMEOUT, APP_NODE, DELETED_APP, NEW_APP_REQ,
};
use crate::db::general::Report;
use crate::{db, old_brain};
use crate::db;
use detee_shared::app_proto::{self, NewAppRes};
use serde::{Deserialize, Serialize};
use surrealdb::engine::remote::ws::Client;
@@ -631,72 +631,6 @@ impl AppNodeResources {
    }
}

impl From<&old_brain::BrainData> for Vec<AppNode> {
    fn from(old_data: &old_brain::BrainData) -> Self {
        let mut nodes = Vec::new();
        for old_node in old_data.app_nodes.iter() {
            nodes.push(AppNode {
                id: RecordId::from((APP_NODE, old_node.node_pubkey.clone())),
                operator: RecordId::from((ACCOUNT, old_node.operator_wallet.clone())),
                pub_sub_node: DEFAULT_ENDPOINT.to_string(),
                country: old_node.country.clone(),
                region: old_node.region.clone(),
                city: old_node.city.clone(),
                ip: old_node.ip.clone(),
                avail_mem_mib: old_node.avail_mem_mb,
                avail_vcpus: old_node.avail_vcpus,
                avail_storage_mib: old_node.avail_storage_mb,
                avail_ports: old_node.avail_no_of_port,
                max_ports_per_app: old_node.max_ports_per_app,
                price: old_node.price,
                disconnected_at: Datetime::default(),
                connected_at: Datetime::default(),
            });
        }
        nodes
    }
}

impl From<&old_brain::BrainData> for Vec<ActiveApp> {
    fn from(old_data: &old_brain::BrainData) -> Self {
        let mut contracts = Vec::new();
        for old_c in old_data.app_contracts.iter() {
            let mut mapped_ports = Vec::new();
            for port in old_c.mapped_ports.clone().into_iter().map(|(b, c)| (b as u32, c as u32)) {
                mapped_ports.push(port);
            }

            let mr_enclave_hex = old_c
                .public_package_mr_enclave
                .clone()
                .unwrap_or_default()
                .iter()
                .map(|byte| format!("{byte:02X}"))
                .collect();

            contracts.push(ActiveApp {
                id: RecordId::from((ACTIVE_APP, old_c.uuid.replace("-", ""))),
                admin: RecordId::from((ACCOUNT, old_c.admin_pubkey.clone())),
                app_node: RecordId::from((APP_NODE, old_c.node_pubkey.clone())),
                mapped_ports,
                host_ipv4: old_c.host_ipv4.clone(),
                disk_size_mib: old_c.disk_size_mb,
                vcpus: old_c.vcpus,
                memory_mib: old_c.memory_mb,
                price_per_unit: old_c.price_per_unit,
                locked_nano: old_c.locked_nano,
                created_at: old_c.created_at.into(),
                collected_at: old_c.collected_at.into(),
                app_name: old_c.app_name.clone(),
                mr_enclave: mr_enclave_hex,
                package_url: old_c.package_url.clone(),
                hratls_pubkey: old_c.hratls_pubkey.clone(),
            });
        }
        contracts
    }
}

#[derive(Debug, Serialize, Deserialize)]
pub struct DeletedApp {
    pub id: RecordId,
@@ -3,7 +3,6 @@
use super::Error;
use crate::constants::{ACCOUNT, BAN, KICK, TOKEN_DECIMAL, VM_NODE};
use crate::db::prelude::*;
use crate::old_brain;
use serde::{Deserialize, Serialize};
use surrealdb::engine::remote::ws::Client;
use surrealdb::sql::Datetime;
@@ -147,27 +146,6 @@ impl Account {
    }
}

impl From<&old_brain::BrainData> for Vec<Account> {
    fn from(old_data: &old_brain::BrainData) -> Self {
        let mut accounts = Vec::new();
        for old_account in old_data.accounts.iter() {
            let mut a = Account {
                id: RecordId::from((ACCOUNT, old_account.key())),
                balance: old_account.value().balance,
                tmp_locked: old_account.value().tmp_locked,
                escrow: 0,
                email: String::new(),
            };
            if let Some(operator) = old_data.operators.get(old_account.key()) {
                a.escrow = operator.escrow;
                a.email = operator.email.clone();
            }
            accounts.push(a);
        }
        accounts
    }
}

#[derive(Debug, Serialize, Deserialize)]
pub struct Ban {
    #[serde(rename = "in")]
@@ -339,8 +317,8 @@ impl Operator {
        let operator = Self::inspect(db, account).await?;
        let mut result = db
            .query(format!(
                "select *, operator, <-report.* as reports from vm_node
                where operator = account:{account};"
                "select *, operator, <-report.* as reports, <-offers.* as offers
                from vm_node where operator = account:{account};"
            ))
            .query(format!(
                "select *, operator, <-report.* as reports from app_node
@@ -5,10 +5,9 @@ pub mod general;
pub mod vm;

use crate::constants::{
    APP_NODE, DB_SCHEMA_FILES, DEFAULT_ENDPOINT, DELETED_APP, DELETED_VM, NEW_APP_REQ, NEW_VM_REQ,
    UPDATE_VM_REQ,
    APP_NODE, DEFAULT_ENDPOINT, DELETED_APP, DELETED_VM, MIGRATION_0_MOCK_DATA, MIGRATION_0_SCHEMA,
    MIGRATION_1_PATCH, MIGRATION_1_SCHEMA, NEW_APP_REQ, NEW_VM_REQ, UPDATE_VM_REQ,
};
use crate::old_brain;
use prelude::*;
use serde::{Deserialize, Serialize};
use surrealdb::engine::remote::ws::{Client, Ws};
@@ -77,34 +76,34 @@ pub async fn db_connection(
    Ok(db_connection)
}

pub async fn migration0(
    db: &Surreal<Client>,
    old_data: &old_brain::BrainData,
) -> Result<(), Error> {
    let accounts: Vec<Account> = old_data.into();
    let vm_nodes: Vec<VmNode> = old_data.into();
    let app_nodes: Vec<AppNode> = old_data.into();
    let active_vm: Vec<ActiveVm> = old_data.into();
    let active_app: Vec<ActiveApp> = old_data.into();
// TODO: figure if we can inspect DB schema in the DB with the schema from migration0,
// and execute migration1_patch only if it is needed.

    for schema_data in DB_SCHEMA_FILES.map(|path| (std::fs::read_to_string(path), path)) {
pub async fn migration0_tables(db: &Surreal<Client>) -> Result<(), Error> {
    for schema_data in MIGRATION_0_SCHEMA.map(|path| (std::fs::read_to_string(path), path)) {
        let schema_file = schema_data.1;
        println!("Loading schema from {schema_file}");
        let schema = schema_data.0?;
        db.query(schema).await?;
    }
    Ok(())
}

    println!("Inserting accounts...");
    let _: Vec<Account> = db.insert(()).content(accounts).await?;
    println!("Inserting vm nodes...");
    let _: Vec<VmNode> = db.insert(()).content(vm_nodes).await?;
    println!("Inserting app nodes...");
    let _: Vec<AppNode> = db.insert(()).content(app_nodes).await.unwrap();
    println!("Inserting active vm contracts...");
    let _: Vec<ActiveVm> = db.insert(()).relation(active_vm).await?;
    println!("Inserting app contracts...");
    let _: Vec<ActiveApp> = db.insert(()).relation(active_app).await?;
pub async fn migration0_mock_data(db: &Surreal<Client>) -> Result<(), Error> {
    let patch_instructions = std::fs::read_to_string(MIGRATION_0_MOCK_DATA)?;
    db.query(patch_instructions).await?;
    Ok(())
}

pub async fn migration1_patch(db: &Surreal<Client>) -> Result<(), Error> {
    let patch_instructions = std::fs::read_to_string(MIGRATION_1_PATCH)?;
    db.query(patch_instructions).await?;
    Ok(())
}

pub async fn migration1_tables(db: &Surreal<Client>) -> Result<(), Error> {
    let patch_instructions = std::fs::read_to_string(MIGRATION_1_SCHEMA)?;
    db.query(patch_instructions).await?;
    Ok(())
}
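Note: migration0 used to load the old brain data and insert everything in one step; it is now split into smaller helpers. Below is a minimal sketch of wiring those helpers together from a binary, following the pattern of the migrate binary earlier in this diff. The connection values are placeholders and the exact call order beyond what the diff shows is an assumption, not part of the commit.

// Sketch of a migration runner using the helpers defined above.
// Only the function names and signatures come from this diff;
// the connection parameters are placeholders.
use std::error::Error;
use surreal_brain::db;

#[tokio::main]
async fn main() -> Result<(), Box<dyn Error>> {
    let db = db::db_connection("localhost:8000", "root", "root", "test", "brain")
        .await
        .unwrap();

    // Fresh database: create the migration0 schema, optionally load mock data,
    // then apply the offers patch (migration1_tables holds the final schema).
    db::migration0_tables(&db).await?;
    db::migration0_mock_data(&db).await?;
    db::migration1_patch(&db).await?;

    Ok(())
}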
src/db/vm.rs (109 changes)
@@ -5,11 +5,10 @@ use std::time::Duration;

use super::Error;
use crate::constants::{
    ACCOUNT, ACTIVE_VM, DEFAULT_ENDPOINT, DELETED_VM, NEW_VM_REQ, UPDATE_VM_REQ, VM_DAEMON_TIMEOUT,
    VM_NODE, VM_UPDATE_EVENT,
    ACCOUNT, ACTIVE_VM, DELETED_VM, NEW_VM_REQ, UPDATE_VM_REQ, VM_DAEMON_TIMEOUT, VM_NODE,
    VM_NODE_OFFER, VM_UPDATE_EVENT,
};
use crate::db::{Account, ErrorFromTable, Report};
use crate::old_brain;
use detee_shared::vm_proto;
use serde::{Deserialize, Serialize};
use surrealdb::engine::remote::ws::Client;
@@ -26,14 +25,10 @@ pub struct VmNode {
    pub region: String,
    pub city: String,
    pub ip: String,
    pub avail_mem_mib: u32,
    pub avail_vcpus: u32,
    pub avail_storage_mib: u32,
    pub avail_ipv4: u32,
    pub avail_ipv6: u32,
    pub avail_ports: u32,
    pub max_ports_per_vm: u32,
    pub price: u64,
    pub connected_at: Datetime,
    pub disconnected_at: Datetime,
}
@@ -59,22 +54,33 @@ impl VmNode {

#[derive(Serialize)]
pub struct VmNodeResources {
    pub avail_mem_mib: u32,
    pub avail_vcpus: u32,
    pub avail_storage_mib: u32,
    pub avail_ipv4: u32,
    pub avail_ipv6: u32,
    pub avail_ports: u32,
    pub max_ports_per_vm: u32,
    pub offers: Vec<VmNodeOffer>,
}

impl VmNodeResources {
    pub async fn merge(self, db: &Surreal<Client>, node_id: &str) -> Result<(), Error> {
        let offers = self.offers.clone();
        let _: Option<VmNode> = db.update((VM_NODE, node_id)).merge(self).await?;
        db.query(format!("delete from {VM_NODE_OFFER} where vm_node = vm_node:{};", node_id))
            .await?;
        let _: Vec<VmNodeOffer> = db.insert(VM_NODE_OFFER).content(offers).await?;
        Ok(())
    }
}

#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct VmNodeOffer {
    pub vm_node: RecordId,
    pub price: u64,
    pub memory_mib: u64,
    pub vcpus: u64,
    pub disk_mib: u64,
}

#[derive(Debug, Serialize, Deserialize)]
pub struct VmNodeWithReports {
    pub id: RecordId,
@@ -83,15 +89,12 @@ pub struct VmNodeWithReports {
    pub region: String,
    pub city: String,
    pub ip: String,
    pub avail_mem_mib: u64,
    pub avail_vcpus: u64,
    pub avail_storage_mib: u64,
    pub avail_ipv4: u32,
    pub avail_ipv6: u32,
    pub avail_ports: u32,
    pub max_ports_per_vm: u32,
    pub price: u64,
    pub reports: Vec<Report>,
    pub offers: Vec<VmNodeOffer>,
}

impl VmNodeWithReports {
@@ -100,7 +103,8 @@ impl VmNodeWithReports {
        filters: vm_proto::VmNodeFilters,
    ) -> Result<Vec<Self>, Error> {
        let mut query = format!(
            "select *, <-report.* as reports from {VM_NODE} where
            "select *, <-report.* as reports, <-offers.* as offers
            from {VM_NODE} where
            avail_ports >= {} &&
            max_ports_per_vm >= {} &&
            avail_ipv4 >= {} &&
@@ -133,6 +137,20 @@ impl VmNodeWithReports {
        let vm_nodes: Vec<Self> = result.take(0)?;
        Ok(vm_nodes)
    }

    pub async fn find_by_daemon_pubkey(
        db: &Surreal<Client>,
        daemon_key: &str,
    ) -> Result<Option<Self>, Error> {
        let vm_node: Option<VmNodeWithReports> = db
            .query(format!(
                "SELECT *, <-report.* as reports, <-offers.* as offers
                FROM {VM_NODE} WHERE id = {VM_NODE}:{daemon_key};"
            ))
            .await?
            .take(0)?;
        Ok(vm_node)
    }
}

pub enum VmDaemonMsg {
@@ -1006,64 +1024,3 @@ impl ActiveVmWithNode {
        Ok(active_vms)
    }
}

// TODO: delete all of these From implementation after migration 0 gets executed

impl From<&old_brain::BrainData> for Vec<VmNode> {
    fn from(old_data: &old_brain::BrainData) -> Self {
        let mut nodes = Vec::new();
        for old_node in old_data.vm_nodes.iter() {
            nodes.push(VmNode {
                id: RecordId::from((VM_NODE, old_node.public_key.clone())),
                operator: RecordId::from((ACCOUNT, old_node.operator_wallet.clone())),
                pub_sub_node: DEFAULT_ENDPOINT.to_string(),
                country: old_node.country.clone(),
                region: old_node.region.clone(),
                city: old_node.city.clone(),
                ip: old_node.ip.clone(),
                avail_mem_mib: old_node.avail_mem_mb,
                avail_vcpus: old_node.avail_vcpus,
                avail_storage_mib: old_node.avail_storage_gbs * 1024,
                avail_ipv4: old_node.avail_ipv4,
                avail_ipv6: old_node.avail_ipv6,
                avail_ports: old_node.avail_ports,
                max_ports_per_vm: old_node.max_ports_per_vm,
                price: old_node.price,
                disconnected_at: Datetime::default(),
                connected_at: Datetime::default(),
            });
        }
        nodes
    }
}

impl From<&old_brain::BrainData> for Vec<ActiveVm> {
    fn from(old_data: &old_brain::BrainData) -> Self {
        let mut contracts = Vec::new();
        for old_c in old_data.vm_contracts.iter() {
            let mut mapped_ports = Vec::new();
            for port in old_c.exposed_ports.iter() {
                mapped_ports.push((*port, 8080u32));
            }
            contracts.push(ActiveVm {
                id: RecordId::from((ACTIVE_VM, old_c.uuid.replace("-", ""))),
                admin: RecordId::from((ACCOUNT, old_c.admin_pubkey.clone())),
                vm_node: RecordId::from((VM_NODE, old_c.node_pubkey.clone())),
                hostname: old_c.hostname.clone(),
                mapped_ports,
                public_ipv4: old_c.public_ipv4.clone(),
                public_ipv6: old_c.public_ipv6.clone(),
                disk_size_mib: old_c.disk_size_gb * 1024,
                vcpus: old_c.vcpus,
                memory_mib: old_c.memory_mb,
                dtrfs_sha: old_c.dtrfs_sha.clone(),
                kernel_sha: old_c.kernel_sha.clone(),
                price_per_unit: old_c.price_per_unit,
                locked_nano: old_c.locked_nano,
                created_at: old_c.created_at.into(),
                collected_at: old_c.collected_at.into(),
            });
        }
        contracts
    }
}
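Note: the new model keeps per-node availability on vm_node and moves pricing into one vm_node_offer row per offer; VmNodeResources::merge replaces a node's whole offer set on every daemon resource report. A rough sketch of driving that flow, using only the types and the merge method defined above; the helper function and the literal values are assumptions for illustration.

// Sketch of storing a daemon resource report that carries two offers.
// Only VmNodeResources, VmNodeOffer, and merge() come from this diff.
use surrealdb::{engine::remote::ws::Client, RecordId, Surreal};
use surreal_brain::db::vm::{VmNodeOffer, VmNodeResources};

async fn store_node_offers(db: &Surreal<Client>, node_pubkey: &str) {
    // Each offer is its own vm_node_offer row pointing back at the same vm_node.
    let offer = |price, vcpus, memory_mib, disk_mib| VmNodeOffer {
        vm_node: RecordId::from(("vm_node", node_pubkey.to_string())),
        price,
        vcpus,
        memory_mib,
        disk_mib,
    };

    let resources = VmNodeResources {
        avail_mem_mib: 8192,
        avail_vcpus: 4,
        avail_storage_mib: 102_400,
        avail_ipv4: 2,
        avail_ipv6: 88,
        avail_ports: 5,
        max_ports_per_vm: 5,
        offers: vec![offer(1200, 4, 8192, 102_400), offer(750, 1, 8192, 1_002_400)],
    };

    // merge() updates the vm_node row, deletes its previous vm_node_offer rows,
    // and inserts the new offer set (see the impl above).
    resources.merge(db, node_pubkey).await.expect("failed to store offers");
}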
@@ -229,12 +229,18 @@ impl From<db::VmNodeWithReports> for VmNodeListResp {
            city: vm_node.city,
            ip: vm_node.ip,
            reports: vm_node.reports.iter().map(|n| n.reason.clone()).collect(),
            price: vm_node.price,
            vcpus: vm_node.avail_vcpus,
            memory_mib: vm_node.avail_mem_mib,
            disk_mib: vm_node.avail_storage_mib,
            public_ipv4: vm_node.avail_ipv4 > 0,
            public_ipv6: vm_node.avail_ipv6 > 0,
            offers: vm_node
                .offers
                .iter()
                .map(|offer| VmNodeOffer {
                    price: offer.price,
                    vcpus: offer.vcpus,
                    memory_mib: offer.memory_mib,
                    disk_mib: offer.disk_mib,
                })
                .collect(),
        }
    }
}
@@ -261,13 +267,21 @@ impl From<db::AppNodeWithReports> for AppNodeListResp {
impl From<VmNodeResources> for db::VmNodeResources {
    fn from(res: VmNodeResources) -> Self {
        Self {
            avail_mem_mib: res.avail_memory_mib,
            avail_vcpus: res.avail_vcpus,
            avail_storage_mib: res.avail_storage_mib,
            avail_ipv4: res.avail_ipv4,
            avail_ipv6: res.avail_ipv6,
            avail_ports: res.avail_ports,
            max_ports_per_vm: res.max_ports_per_vm,
            offers: res
                .offers
                .iter()
                .map(|offer| db::VmNodeOffer {
                    vm_node: RecordId::from((VM_NODE, res.node_pubkey.clone())),
                    price: offer.price,
                    vcpus: offer.vcpus,
                    memory_mib: offer.memory_mib,
                    disk_mib: offer.disk_mib,
                })
                .collect(),
        }
    }
}
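Note: with this change a VM node advertises a list of offers instead of a single price, so a client consuming the list response has to choose among them. The helper below is purely illustrative and not part of this commit or of detee-shared; it assumes VmNodeListResp and VmNodeOffer are the vm_proto types used in the conversion above, with u64 fields as the tests suggest.

// Hypothetical client-side helper: pick the cheapest offer that fits a request.
use detee_shared::vm_proto::{VmNodeListResp, VmNodeOffer};

fn cheapest_matching_offer(
    node: &VmNodeListResp,
    vcpus: u64,
    memory_mib: u64,
    disk_mib: u64,
) -> Option<&VmNodeOffer> {
    node.offers
        .iter()
        .filter(|o| o.vcpus >= vcpus && o.memory_mib >= memory_mib && o.disk_mib >= disk_mib)
        .min_by_key(|o| o.price)
}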
@@ -50,10 +50,6 @@ impl BrainVmDaemon for VmDaemonServer {
            region: req.region,
            city: req.city,
            ip: req.main_ip,
            price: req.price,
            avail_mem_mib: 0,
            avail_vcpus: 0,
            avail_storage_mib: 0,
            avail_ipv4: 0,
            avail_ipv6: 0,
            avail_ports: 0,
@@ -3,4 +3,3 @@
pub mod constants;
pub mod db;
pub mod grpc;
pub mod old_brain;
src/old_brain.rs (135 changes)
@@ -1,135 +0,0 @@
// SPDX-License-Identifier: Apache-2.0

// TODO: delete this file after migration0 gets executed

use chrono::Utc;
use dashmap::DashMap;
use serde::{Deserialize, Serialize};
use std::collections::{HashMap, HashSet};

use crate::constants::OLD_BRAIN_DATA_PATH;

#[derive(Clone, Default, Serialize, Deserialize, Debug)]
pub struct AccountData {
    pub balance: u64,
    pub tmp_locked: u64,
    // holds reasons why VMs of this account got kicked
    pub kicked_for: Vec<String>,
    pub last_kick: chrono::DateTime<Utc>,
    // holds accounts that banned this account
    pub banned_by: HashSet<String>,
}

#[derive(Clone, Default, Serialize, Deserialize)]
pub struct OperatorData {
    pub escrow: u64,
    pub email: String,
    pub banned_users: HashSet<String>,
    pub vm_nodes: HashSet<String>,
    pub app_nodes: HashSet<String>,
}

#[derive(Eq, PartialEq, Clone, Debug, Default, Serialize, Deserialize)]
pub struct VmNode {
    pub public_key: String,
    pub operator_wallet: String,
    pub country: String,
    pub region: String,
    pub city: String,
    pub ip: String,
    pub avail_mem_mb: u32,
    pub avail_vcpus: u32,
    pub avail_storage_gbs: u32,
    pub avail_ipv4: u32,
    pub avail_ipv6: u32,
    pub avail_ports: u32,
    pub max_ports_per_vm: u32,
    // nanoLP per unit per minute
    pub price: u64,
    // 1st String is user wallet and 2nd String is report message
    pub reports: HashMap<String, String>,
    pub offline_minutes: u64,
}

#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct VmContract {
    pub uuid: String,
    pub hostname: String,
    pub admin_pubkey: String,
    pub node_pubkey: String,
    pub exposed_ports: Vec<u32>,
    pub public_ipv4: String,
    pub public_ipv6: String,
    pub disk_size_gb: u32,
    pub vcpus: u32,
    pub memory_mb: u32,
    pub kernel_sha: String,
    pub dtrfs_sha: String,
    pub created_at: chrono::DateTime<Utc>,
    pub updated_at: chrono::DateTime<Utc>,
    // recommended value is 20000
    /// price per unit per minute
    pub price_per_unit: u64,
    pub locked_nano: u64,
    pub collected_at: chrono::DateTime<Utc>,
}

#[derive(Clone, Debug, Default, Serialize, Deserialize)]
pub struct AppContract {
    pub uuid: String,
    pub package_url: String,
    pub admin_pubkey: String,
    pub node_pubkey: String,
    pub mapped_ports: Vec<(u16, u16)>,
    pub host_ipv4: String,
    pub disk_size_mb: u32,
    pub vcpus: u32,
    pub memory_mb: u32,
    pub created_at: chrono::DateTime<Utc>,
    pub updated_at: chrono::DateTime<Utc>,
    // price per unit per minute
    // recommended value is 20000
    pub price_per_unit: u64,
    pub locked_nano: u64,
    pub collected_at: chrono::DateTime<Utc>,
    pub hratls_pubkey: String,
    pub public_package_mr_enclave: Option<Vec<u8>>,
    pub app_name: String,
}

#[derive(Eq, Hash, PartialEq, Clone, Debug, Default, Serialize, Deserialize)]
pub struct AppNode {
    pub node_pubkey: String,
    pub operator_wallet: String,
    pub country: String,
    pub region: String,
    pub city: String,
    pub ip: String,
    pub avail_mem_mb: u32,
    pub avail_vcpus: u32,
    pub avail_storage_mb: u32,
    pub avail_no_of_port: u32,
    pub max_ports_per_app: u32,
    // nanotokens per unit per minute
    pub price: u64,
    pub offline_minutes: u64,
}

#[derive(Default, Serialize, Deserialize)]
pub struct BrainData {
    pub accounts: DashMap<String, AccountData>,
    pub operators: DashMap<String, OperatorData>,
    pub vm_nodes: Vec<VmNode>,
    pub vm_contracts: Vec<VmContract>,

    pub app_nodes: Vec<AppNode>,
    pub app_contracts: Vec<AppContract>,
}

impl BrainData {
    pub fn load_from_disk() -> Result<Self, Box<dyn std::error::Error>> {
        let content = std::fs::read_to_string(OLD_BRAIN_DATA_PATH)?;
        let data: Self = serde_yaml::from_str(&content)?;
        Ok(data)
    }
}
surql/migration0/mock_data.sql (new file, 377 lines)
File diff suppressed because one or more lines are too long

surql/migration1/final_tables.sql (new file, 158 lines)
@@ -0,0 +1,158 @@
-- SPDX-License-Identifier: Apache-2.0

DEFINE TABLE account SCHEMAFULL;
DEFINE FIELD balance ON TABLE account TYPE int DEFAULT 0;
DEFINE FIELD tmp_locked ON TABLE account TYPE int DEFAULT 0;
DEFINE FIELD escrow ON TABLE account TYPE int DEFAULT 0;
DEFINE FIELD email ON TABLE account TYPE string DEFAULT "";

DEFINE TABLE vm_node SCHEMAFULL;
DEFINE FIELD operator ON TABLE vm_node TYPE record<account>;
DEFINE FIELD pub_sub_node ON TABLE vm_node TYPE string DEFAULT "127.0.0.1:31337";
DEFINE FIELD country ON TABLE vm_node TYPE string;
DEFINE FIELD region ON TABLE vm_node TYPE string;
DEFINE FIELD city ON TABLE vm_node TYPE string;
DEFINE FIELD ip ON TABLE vm_node TYPE string;
DEFINE FIELD avail_ipv4 ON TABLE vm_node TYPE int;
DEFINE FIELD avail_ipv6 ON TABLE vm_node TYPE int;
DEFINE FIELD avail_ports ON TABLE vm_node TYPE int;
DEFINE FIELD max_ports_per_vm ON TABLE vm_node TYPE int;
DEFINE FIELD connected_at ON TABLE vm_node TYPE datetime;
DEFINE FIELD disconnected_at ON TABLE vm_node TYPE datetime;

DEFINE TABLE vm_node_offer SCHEMAFULL;
DEFINE FIELD vm_node ON TABLE vm_node_offer TYPE record<vm_node>;
DEFINE FIELD price ON TABLE vm_node_offer TYPE int;
DEFINE FIELD vcpus ON TABLE vm_node_offer TYPE int;
DEFINE FIELD memory_mib ON TABLE vm_node_offer TYPE int;
DEFINE FIELD disk_mib ON TABLE vm_node_offer TYPE int;

DEFINE TABLE new_vm_req TYPE RELATION FROM account TO vm_node SCHEMAFULL;
DEFINE FIELD hostname ON TABLE new_vm_req TYPE string;
DEFINE FIELD extra_ports ON TABLE new_vm_req TYPE array<int>;
DEFINE FIELD public_ipv4 ON TABLE new_vm_req TYPE bool;
DEFINE FIELD public_ipv6 ON TABLE new_vm_req TYPE bool;
DEFINE FIELD disk_size_mib ON TABLE new_vm_req TYPE int;
DEFINE FIELD vcpus ON TABLE new_vm_req TYPE int;
DEFINE FIELD memory_mib ON TABLE new_vm_req TYPE int;
DEFINE FIELD dtrfs_sha ON TABLE new_vm_req TYPE string;
DEFINE FIELD dtrfs_url ON TABLE new_vm_req TYPE string;
DEFINE FIELD kernel_sha ON TABLE new_vm_req TYPE string;
DEFINE FIELD kernel_url ON TABLE new_vm_req TYPE string;
DEFINE FIELD created_at ON TABLE new_vm_req TYPE datetime;
DEFINE FIELD price_per_unit ON TABLE new_vm_req TYPE int;
DEFINE FIELD locked_nano ON TABLE new_vm_req TYPE int;
DEFINE FIELD error ON TABLE new_vm_req TYPE string;

DEFINE TABLE active_vm TYPE RELATION FROM account TO vm_node SCHEMAFULL;
DEFINE FIELD hostname ON TABLE active_vm TYPE string;
DEFINE FIELD mapped_ports ON TABLE active_vm TYPE array<[int, int]>;
DEFINE FIELD public_ipv4 ON TABLE active_vm TYPE string;
DEFINE FIELD public_ipv6 ON TABLE active_vm TYPE string;
DEFINE FIELD disk_size_mib ON TABLE active_vm TYPE int;
DEFINE FIELD vcpus ON TABLE active_vm TYPE int;
DEFINE FIELD memory_mib ON TABLE active_vm TYPE int;
DEFINE FIELD dtrfs_sha ON TABLE active_vm TYPE string;
DEFINE FIELD kernel_sha ON TABLE active_vm TYPE string;
DEFINE FIELD created_at ON TABLE active_vm TYPE datetime;
DEFINE FIELD price_per_unit ON TABLE active_vm TYPE int;
DEFINE FIELD locked_nano ON TABLE active_vm TYPE int;
DEFINE FIELD collected_at ON TABLE active_vm TYPE datetime;

DEFINE TABLE update_vm_req TYPE RELATION FROM account TO vm_node SCHEMAFULL;
DEFINE FIELD vcpus ON TABLE update_vm_req TYPE int;
DEFINE FIELD memory_mib ON TABLE update_vm_req TYPE int;
DEFINE FIELD disk_size_mib ON TABLE update_vm_req TYPE int;
DEFINE FIELD dtrfs_sha ON TABLE update_vm_req TYPE string;
DEFINE FIELD dtrfs_url ON TABLE update_vm_req TYPE string;
DEFINE FIELD kernel_sha ON TABLE update_vm_req TYPE string;
DEFINE FIELD kernel_url ON TABLE update_vm_req TYPE string;
DEFINE FIELD created_at ON TABLE update_vm_req TYPE datetime;
DEFINE FIELD error ON TABLE update_vm_req TYPE string;

DEFINE TABLE deleted_vm TYPE RELATION FROM account TO vm_node SCHEMAFULL;
DEFINE FIELD hostname ON TABLE deleted_vm TYPE string;
DEFINE FIELD mapped_ports ON TABLE deleted_vm TYPE array<[int, int]>;
DEFINE FIELD public_ipv4 ON TABLE deleted_vm TYPE string;
DEFINE FIELD public_ipv6 ON TABLE deleted_vm TYPE string;
DEFINE FIELD disk_size_mib ON TABLE deleted_vm TYPE int;
DEFINE FIELD vcpus ON TABLE deleted_vm TYPE int;
DEFINE FIELD memory_mib ON TABLE deleted_vm TYPE int;
DEFINE FIELD dtrfs_sha ON TABLE deleted_vm TYPE string;
DEFINE FIELD kernel_sha ON TABLE deleted_vm TYPE string;
DEFINE FIELD created_at ON TABLE deleted_vm TYPE datetime;
DEFINE FIELD deleted_at ON TABLE deleted_vm TYPE datetime DEFAULT time::now();
DEFINE FIELD price_per_unit ON TABLE deleted_vm TYPE int;

DEFINE TABLE app_node SCHEMAFULL;
DEFINE FIELD operator ON TABLE app_node TYPE record<account>;
DEFINE FIELD pub_sub_node ON TABLE app_node TYPE string DEFAULT "127.0.0.1:31337";
DEFINE FIELD country ON TABLE app_node TYPE string;
DEFINE FIELD region ON TABLE app_node TYPE string;
DEFINE FIELD city ON TABLE app_node TYPE string;
DEFINE FIELD ip ON TABLE app_node TYPE string;
DEFINE FIELD avail_mem_mib ON TABLE app_node TYPE int;
DEFINE FIELD avail_vcpus ON TABLE app_node TYPE int;
DEFINE FIELD avail_storage_mib ON TABLE app_node TYPE int;
DEFINE FIELD avail_ports ON TABLE app_node TYPE int;
DEFINE FIELD max_ports_per_app ON TABLE app_node TYPE int;
DEFINE FIELD price ON TABLE app_node TYPE int;
DEFINE FIELD connected_at ON TABLE app_node TYPE datetime;
DEFINE FIELD disconnected_at ON TABLE app_node TYPE datetime;

DEFINE TABLE new_app_req TYPE RELATION FROM account TO app_node SCHEMAFULL;
DEFINE FIELD app_name ON TABLE new_app_req TYPE string;
DEFINE FIELD package_url ON TABLE new_app_req TYPE string;
DEFINE FIELD mr_enclave ON TABLE new_app_req TYPE string;
DEFINE FIELD hratls_pubkey ON TABLE new_app_req TYPE string;
DEFINE FIELD ports ON TABLE new_app_req TYPE array<int>;
DEFINE FIELD memory_mib ON TABLE new_app_req TYPE int;
DEFINE FIELD vcpus ON TABLE new_app_req TYPE int;
DEFINE FIELD disk_size_mib ON TABLE new_app_req TYPE int;
DEFINE FIELD locked_nano ON TABLE new_app_req TYPE int;
DEFINE FIELD price_per_unit ON TABLE new_app_req TYPE int;
DEFINE FIELD error ON TABLE new_app_req TYPE string;
DEFINE FIELD created_at ON TABLE new_app_req TYPE datetime;

DEFINE TABLE active_app TYPE RELATION FROM account TO app_node SCHEMAFULL;
DEFINE FIELD app_name ON TABLE active_app TYPE string;
DEFINE FIELD mapped_ports ON TABLE active_app TYPE array<[int, int]>;
DEFINE FIELD host_ipv4 ON TABLE active_app TYPE string;
DEFINE FIELD vcpus ON TABLE active_app TYPE int;
DEFINE FIELD memory_mib ON TABLE active_app TYPE int;
DEFINE FIELD disk_size_mib ON TABLE active_app TYPE int;
DEFINE FIELD created_at ON TABLE active_app TYPE datetime;
DEFINE FIELD price_per_unit ON TABLE active_app TYPE int;
DEFINE FIELD locked_nano ON TABLE active_app TYPE int;
DEFINE FIELD collected_at ON TABLE active_app TYPE datetime;
DEFINE FIELD mr_enclave ON TABLE active_app TYPE string;
DEFINE FIELD package_url ON TABLE active_app TYPE string;
DEFINE FIELD hratls_pubkey ON TABLE active_app TYPE string;

DEFINE TABLE deleted_app TYPE RELATION FROM account TO app_node SCHEMAFULL;
DEFINE FIELD app_name ON TABLE deleted_app TYPE string;
DEFINE FIELD mapped_ports ON TABLE deleted_app TYPE array<[int, int]>;
DEFINE FIELD host_ipv4 ON TABLE deleted_app TYPE string;
DEFINE FIELD vcpus ON TABLE deleted_app TYPE int;
DEFINE FIELD memory_mib ON TABLE deleted_app TYPE int;
DEFINE FIELD disk_size_mib ON TABLE deleted_app TYPE int;
DEFINE FIELD created_at ON TABLE deleted_app TYPE datetime;
DEFINE FIELD deleted_at ON TABLE deleted_app TYPE datetime DEFAULT time::now();
DEFINE FIELD price_per_unit ON TABLE deleted_app TYPE int;
DEFINE FIELD mr_enclave ON TABLE deleted_app TYPE string;
DEFINE FIELD package_url ON TABLE deleted_app TYPE string;
DEFINE FIELD hratls_pubkey ON TABLE deleted_app TYPE string;

DEFINE TABLE ban TYPE RELATION FROM account TO account;
DEFINE FIELD created_at ON TABLE ban TYPE datetime DEFAULT time::now();

DEFINE TABLE kick TYPE RELATION FROM account TO account;
DEFINE FIELD created_at ON TABLE kick TYPE datetime;
DEFINE FIELD reason ON TABLE kick TYPE string;
DEFINE FIELD contract ON TABLE kick TYPE record<deleted_vm|deleted_app>;
DEFINE FIELD node ON TABLE kick TYPE record<vm_node|app_node>;

DEFINE TABLE report TYPE RELATION FROM account TO vm_node|app_node;
DEFINE FIELD created_at ON TABLE report TYPE datetime;
DEFINE FIELD reason ON TABLE report TYPE string;
DEFINE FIELD contract_id ON TABLE report TYPE string;
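Note: in the schema above each offer is its own vm_node_offer row that points back at its vm_node record. A minimal sketch of reading those rows back with the SurrealDB Rust SDK, in the same style as the rest of the codebase; the struct name and the plain WHERE query are illustrative, not part of the commit.

// Illustrative read of vm_node_offer rows for one node.
use serde::Deserialize;
use surrealdb::{engine::remote::ws::Client, RecordId, Surreal};

#[derive(Debug, Deserialize)]
struct OfferRow {
    vm_node: RecordId,
    price: u64,
    vcpus: u64,
    memory_mib: u64,
    disk_mib: u64,
}

async fn offers_for_node(
    db: &Surreal<Client>,
    node_pubkey: &str,
) -> Result<Vec<OfferRow>, surrealdb::Error> {
    // One row per offer, each pointing back at its vm_node record.
    let mut result = db
        .query(format!(
            "SELECT * FROM vm_node_offer WHERE vm_node = vm_node:{node_pubkey};"
        ))
        .await?;
    let offers: Vec<OfferRow> = result.take(0)?;
    Ok(offers)
}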
surql/migration1/patch_tables.sql (new file, 14 lines)
@@ -0,0 +1,14 @@
-- SPDX-License-Identifier: Apache-2.0

REMOVE FIELD price ON TABLE vm_node;
REMOVE FIELD avail_vcpus ON TABLE vm_node;
REMOVE FIELD avail_storage_mib ON TABLE vm_node;
REMOVE FIELD avail_mem_mib ON TABLE vm_node;
UPDATE vm_node;

DEFINE TABLE vm_node_offer SCHEMAFULL;
DEFINE FIELD vm_node ON TABLE vm_node_offer TYPE record<vm_node>;
DEFINE FIELD price ON TABLE vm_node_offer TYPE int;
DEFINE FIELD vcpus ON TABLE vm_node_offer TYPE int;
DEFINE FIELD memory_mib ON TABLE vm_node_offer TYPE int;
DEFINE FIELD disk_mib ON TABLE vm_node_offer TYPE int;
@@ -9,7 +9,6 @@ use detee_shared::vm_proto::brain_vm_daemon_server::BrainVmDaemonServer;
use dotenv::dotenv;
use hyper_util::rt::TokioIo;
use std::sync::Arc;
use surreal_brain::constants::DB_SCHEMA_FILES;
use surreal_brain::grpc::app::{AppCliServer, AppDaemonServer};
use surreal_brain::grpc::general::GeneralCliServer;
use surreal_brain::grpc::vm::{VmCliServer, VmDaemonServer};
@@ -34,15 +33,9 @@ pub async fn prepare_test_db() -> Result<Surreal<Client>> {
    let db = surreal_brain::db::db_connection(&db_url, &db_user, &db_pass, db_ns, db_name).await?;
    DB_STATE
        .get_or_init(|| async {
            let raw_mock_data = std::fs::read_to_string("tests/mock_data.yaml")?;
            let mock_data: surreal_brain::old_brain::BrainData =
                serde_yaml::from_str(&raw_mock_data)?;

            db.query(format!("REMOVE DATABASE {db_name}")).await?;
            for schema in DB_SCHEMA_FILES.map(std::fs::read_to_string) {
                db.query(schema?).await?;
            }
            surreal_brain::db::migration0(&db, &mock_data).await?;
            surreal_brain::db::migration0_mock_data(&db).await?;
            surreal_brain::db::migration1_patch(&db).await?;
            Ok::<(), anyhow::Error>(())
        })
        .await;
@@ -57,7 +57,6 @@ pub async fn register_vm_node(
        city: ip_info.city,
        country: ip_info.country,
        region: ip_info.region,
        price: 1200,
    };

    let mut grpc_stream = client.register_vm_node(key.sign_request(req)?).await?.into_inner();
@@ -12,7 +12,7 @@ use detee_shared::general_proto::brain_general_cli_client::BrainGeneralCliClient
use detee_shared::general_proto::{AirdropReq, BanUserReq, SlashReq};
use detee_shared::vm_proto::brain_vm_daemon_client::BrainVmDaemonClient;
use futures::StreamExt;
use surreal_brain::constants::{ACCOUNT, ACTIVE_APP, ACTIVE_VM, BAN, TOKEN_DECIMAL, VM_NODE};
use surreal_brain::constants::{ACCOUNT, ACTIVE_APP, ACTIVE_VM, BAN, TOKEN_DECIMAL};
use surreal_brain::db::prelude as db;
use surreal_brain::db::vm::VmNodeWithReports;

@@ -145,18 +145,10 @@ async fn test_report_node() {
        .unwrap()
        .into_inner();

    let vm_nodes: Vec<VmNodeWithReports> = db
        .query(format!(
            "SELECT *, <-report.* as reports FROM {VM_NODE} WHERE id = {VM_NODE}:{daemon_key};"
        ))
        .await
        .unwrap()
        .take(0)
        .unwrap();

    let vm_node_with_report = &vm_nodes[0];

    assert!(vm_node_with_report.reports[0].reason == reason);
    let vm_node = VmNodeWithReports::find_by_daemon_pubkey(&db, &daemon_key).await.unwrap();
    assert!(vm_node.is_some());
    let vm_node = vm_node.unwrap();
    assert!(vm_node.reports[0].reason == reason);
}

#[tokio::test]
@@ -8,6 +8,7 @@ use detee_shared::vm_proto::brain_vm_cli_client::BrainVmCliClient;
use detee_shared::vm_proto::brain_vm_daemon_client::BrainVmDaemonClient;
use surreal_brain::constants::VM_NODE;
use surreal_brain::db::prelude as db;
use surreal_brain::db::vm::VmNodeWithReports;
use tokio_stream::wrappers::ReceiverStream;

mod common;
@@ -90,10 +91,11 @@ async fn test_vm_daemon_resource_msg() {
        avail_ports: 5,
        avail_ipv4: 2,
        avail_ipv6: 88,
        avail_vcpus: 4,
        avail_memory_mib: 8192,
        avail_storage_mib: 102400,
        max_ports_per_vm: 5,
        offers: vec![
            vm_proto::VmNodeOffer { price: 1200, vcpus: 4, memory_mib: 8192, disk_mib: 102400 },
            vm_proto::VmNodeOffer { price: 750, vcpus: 1, memory_mib: 8192, disk_mib: 1002400 },
        ],
    };

    let req_data_copy = req_data.clone();
@@ -109,26 +111,26 @@ async fn test_vm_daemon_resource_msg() {

    tokio::time::sleep(tokio::time::Duration::from_millis(300)).await;

    let vm_node_opt: Option<db::VmNode> = db.select((VM_NODE, daemon_key.pubkey)).await.unwrap();
    let vm_node_opt: Option<db::VmNode> =
        db.select((VM_NODE, daemon_key.pubkey.clone())).await.unwrap();

    assert!(vm_node_opt.is_some());

    let db::VmNode {
        avail_mem_mib,
        avail_vcpus,
        avail_storage_mib,
        avail_ports,
        avail_ipv4,
        avail_ipv6,
        max_ports_per_vm,
        ..
    } = vm_node_opt.unwrap();
    let db::VmNode { avail_ports, avail_ipv4, avail_ipv6, max_ports_per_vm, .. } =
        vm_node_opt.unwrap();

    assert_eq!(avail_mem_mib, req_data.avail_memory_mib);
    assert_eq!(avail_vcpus, req_data.avail_vcpus);
    assert_eq!(avail_storage_mib, req_data.avail_storage_mib);
    assert_eq!(avail_ports, req_data.avail_ports);
    assert_eq!(avail_ipv4, req_data.avail_ipv4);
    assert_eq!(avail_ipv6, req_data.avail_ipv6);
    assert_eq!(max_ports_per_vm, req_data.max_ports_per_vm);

    let vm_node_with_offers =
        VmNodeWithReports::find_by_daemon_pubkey(&db, &daemon_key.pubkey).await.unwrap().unwrap();

    assert_eq!(avail_ports, vm_node_with_offers.avail_ports);
    assert_eq!(avail_ipv4, vm_node_with_offers.avail_ipv4);
    assert_eq!(avail_ipv6, vm_node_with_offers.avail_ipv6);
    assert_eq!(max_ports_per_vm, vm_node_with_offers.max_ports_per_vm);
    assert_eq!(1200, vm_node_with_offers.offers[0].price);
    assert_eq!(4, vm_node_with_offers.offers[0].vcpus);
}