enable multiple offers per VM node

Author: ghe0, 2025-07-06 01:02:11 +03:00
commit c1d906fbd8 (parent 9e3a39545d)
Signed by: ghe0, GPG Key ID: 451028EE56A0FBB4
15 changed files with 193 additions and 324 deletions

Cargo.lock (generated)

@@ -1011,7 +1011,7 @@ dependencies = [
[[package]]
name = "detee-shared"
version = "0.1.0"
source = "git+ssh://git@gitea.detee.cloud/testnet/proto?branch=credits_app#01e93d3a2e4502c0e8e72026e8a1c55810961815"
source = "git+ssh://git@gitea.detee.cloud/testnet/proto?branch=offers#4753a17fa29393b3f99b6dfcdcec48d935e6ebd9"
dependencies = [
"bincode 2.0.1",
"prost",

@@ -15,7 +15,7 @@ serde_yaml = "0.9.34"
surrealdb = "2.2.2"
tokio = { version = "1.44.2", features = ["macros", "rt-multi-thread"] }
tonic = { version = "0.12", features = ["tls"] }
detee-shared = { git = "ssh://git@gitea.detee.cloud/testnet/proto", branch = "credits_app" }
detee-shared = { git = "ssh://git@gitea.detee.cloud/testnet/proto", branch = "offers" }
ed25519-dalek = "2.1.1"
bs58 = "0.5.1"
tokio-stream = "0.1.17"

@@ -9,7 +9,7 @@ use surreal_brain::{db, old_brain};
#[tokio::main]
async fn main() -> Result<(), Box<dyn Error>> {
dotenv().ok();
let old_brain_data = old_brain::BrainData::load_from_disk()?;
// let old_brain_data = old_brain::BrainData::load_from_disk()?;
// println!("{}", serde_yaml::to_string(&old_brain_data)?);
let db_url = std::env::var("DB_URL").expect("DB_URL not set in .env");
@@ -21,7 +21,7 @@ async fn main() -> Result<(), Box<dyn Error>> {
let db = db::db_connection(&db_url, &db_user, &db_pass, &db_ns, &db_name).await.unwrap();
env_logger::builder().filter_level(log::LevelFilter::Trace);
db::migration0(&db, &old_brain_data).await?;
db::migration1(&db).await?;
Ok(())
}

@@ -8,7 +8,9 @@ pub const CERT_PATH: &str = "/etc/detee/brain/brain-crt.pem";
pub const CERT_KEY_PATH: &str = "/etc/detee/brain/brain-key.pem";
pub const CONFIG_PATH: &str = "/etc/detee/brain/config.ini";
pub const DB_SCHEMA_FILES: [&str; 2] = ["surql/tables.sql", "surql/functions.sql"];
pub const MIGRATION_0_SCHEMA: [&str; 2] = ["surql/migration0/tables.sql", "surql/migration0/functions.sql"];
pub const MIGRATION_1_PATCH: &str = "surql/migration1/patch_tables.sql";
pub const MIGRATION_1_SCHEMA: &str = "surql/migration1/final_tables.sql";
pub static ADMIN_ACCOUNTS: LazyLock<Vec<String>> = LazyLock::new(|| {
let default_admin_keys = vec![
@@ -23,8 +25,6 @@ pub static ADMIN_ACCOUNTS: LazyLock<Vec<String>> = LazyLock::new(|| {
.unwrap_or(default_admin_keys)
});
pub const OLD_BRAIN_DATA_PATH: &str = "./saved_data.yaml";
pub const ACCOUNT: &str = "account";
pub const KICK: &str = "kick";
pub const BAN: &str = "ban";

@@ -7,7 +7,7 @@ use crate::constants::{
ACCOUNT, ACTIVE_APP, APP_DAEMON_TIMEOUT, APP_NODE, DEFAULT_ENDPOINT, DELETED_APP, NEW_APP_REQ,
};
use crate::db::general::Report;
use crate::{db, old_brain};
use crate::db;
use detee_shared::app_proto::{self, NewAppRes};
use serde::{Deserialize, Serialize};
use surrealdb::engine::remote::ws::Client;
@@ -631,72 +631,6 @@ impl AppNodeResources {
}
}
impl From<&old_brain::BrainData> for Vec<AppNode> {
fn from(old_data: &old_brain::BrainData) -> Self {
let mut nodes = Vec::new();
for old_node in old_data.app_nodes.iter() {
nodes.push(AppNode {
id: RecordId::from((APP_NODE, old_node.node_pubkey.clone())),
operator: RecordId::from((ACCOUNT, old_node.operator_wallet.clone())),
pub_sub_node: DEFAULT_ENDPOINT.to_string(),
country: old_node.country.clone(),
region: old_node.region.clone(),
city: old_node.city.clone(),
ip: old_node.ip.clone(),
avail_mem_mib: old_node.avail_mem_mb,
avail_vcpus: old_node.avail_vcpus,
avail_storage_mib: old_node.avail_storage_mb,
avail_ports: old_node.avail_no_of_port,
max_ports_per_app: old_node.max_ports_per_app,
price: old_node.price,
disconnected_at: Datetime::default(),
connected_at: Datetime::default(),
});
}
nodes
}
}
impl From<&old_brain::BrainData> for Vec<ActiveApp> {
fn from(old_data: &old_brain::BrainData) -> Self {
let mut contracts = Vec::new();
for old_c in old_data.app_contracts.iter() {
let mut mapped_ports = Vec::new();
for port in old_c.mapped_ports.clone().into_iter().map(|(b, c)| (b as u32, c as u32)) {
mapped_ports.push(port);
}
let mr_enclave_hex = old_c
.public_package_mr_enclave
.clone()
.unwrap_or_default()
.iter()
.map(|byte| format!("{byte:02X}"))
.collect();
contracts.push(ActiveApp {
id: RecordId::from((ACTIVE_APP, old_c.uuid.replace("-", ""))),
admin: RecordId::from((ACCOUNT, old_c.admin_pubkey.clone())),
app_node: RecordId::from((APP_NODE, old_c.node_pubkey.clone())),
mapped_ports,
host_ipv4: old_c.host_ipv4.clone(),
disk_size_mib: old_c.disk_size_mb,
vcpus: old_c.vcpus,
memory_mib: old_c.memory_mb,
price_per_unit: old_c.price_per_unit,
locked_nano: old_c.locked_nano,
created_at: old_c.created_at.into(),
collected_at: old_c.collected_at.into(),
app_name: old_c.app_name.clone(),
mr_enclave: mr_enclave_hex,
package_url: old_c.package_url.clone(),
hratls_pubkey: old_c.hratls_pubkey.clone(),
});
}
contracts
}
}
#[derive(Debug, Serialize, Deserialize)]
pub struct DeletedApp {
pub id: RecordId,

@@ -3,7 +3,6 @@
use super::Error;
use crate::constants::{ACCOUNT, BAN, KICK, TOKEN_DECIMAL, VM_NODE};
use crate::db::prelude::*;
use crate::old_brain;
use serde::{Deserialize, Serialize};
use surrealdb::engine::remote::ws::Client;
use surrealdb::sql::Datetime;
@@ -147,27 +146,6 @@ impl Account {
}
}
impl From<&old_brain::BrainData> for Vec<Account> {
fn from(old_data: &old_brain::BrainData) -> Self {
let mut accounts = Vec::new();
for old_account in old_data.accounts.iter() {
let mut a = Account {
id: RecordId::from((ACCOUNT, old_account.key())),
balance: old_account.value().balance,
tmp_locked: old_account.value().tmp_locked,
escrow: 0,
email: String::new(),
};
if let Some(operator) = old_data.operators.get(old_account.key()) {
a.escrow = operator.escrow;
a.email = operator.email.clone();
}
accounts.push(a);
}
accounts
}
}
#[derive(Debug, Serialize, Deserialize)]
pub struct Ban {
#[serde(rename = "in")]

@@ -5,10 +5,9 @@ pub mod general;
pub mod vm;
use crate::constants::{
APP_NODE, DB_SCHEMA_FILES, DEFAULT_ENDPOINT, DELETED_APP, DELETED_VM, NEW_APP_REQ, NEW_VM_REQ,
UPDATE_VM_REQ,
APP_NODE, DEFAULT_ENDPOINT, DELETED_APP, DELETED_VM, MIGRATION_1_PATCH,
MIGRATION_1_SCHEMA, NEW_APP_REQ, NEW_VM_REQ, UPDATE_VM_REQ,
};
use crate::old_brain;
use prelude::*;
use serde::{Deserialize, Serialize};
use surrealdb::engine::remote::ws::{Client, Ws};
@@ -77,34 +76,18 @@ pub async fn db_connection(
Ok(db_connection)
}
pub async fn migration0(
db: &Surreal<Client>,
old_data: &old_brain::BrainData,
) -> Result<(), Error> {
let accounts: Vec<Account> = old_data.into();
let vm_nodes: Vec<VmNode> = old_data.into();
let app_nodes: Vec<AppNode> = old_data.into();
let active_vm: Vec<ActiveVm> = old_data.into();
let active_app: Vec<ActiveApp> = old_data.into();
// TODO: figure out whether we can compare the schema currently in the DB with the schema from migration0,
// and execute migration1_patch only if it is needed.
for schema_data in DB_SCHEMA_FILES.map(|path| (std::fs::read_to_string(path), path)) {
let schema_file = schema_data.1;
println!("Loading schema from {schema_file}");
let schema = schema_data.0?;
db.query(schema).await?;
}
println!("Inserting accounts...");
let _: Vec<Account> = db.insert(()).content(accounts).await?;
println!("Inserting vm nodes...");
let _: Vec<VmNode> = db.insert(()).content(vm_nodes).await?;
println!("Inserting app nodes...");
let _: Vec<AppNode> = db.insert(()).content(app_nodes).await.unwrap();
println!("Inserting active vm contracts...");
let _: Vec<ActiveVm> = db.insert(()).relation(active_vm).await?;
println!("Inserting app contracts...");
let _: Vec<ActiveApp> = db.insert(()).relation(active_app).await?;
pub async fn migration1_patch(db: &Surreal<Client>) -> Result<(), Error> {
let patch_instructions = std::fs::read_to_string(MIGRATION_1_PATCH)?;
db.query(patch_instructions).await?;
Ok(())
}
pub async fn migration1_tables(db: &Surreal<Client>) -> Result<(), Error> {
let patch_instructions = std::fs::read_to_string(MIGRATION_1_SCHEMA)?;
db.query(patch_instructions).await?;
Ok(())
}
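As a sketch of how these two helpers could be sequenced by a caller (the wrapper below is an assumption for illustration and is not part of this commit; it also assumes db::Error is exported): apply the patch first to drop the per-node pricing fields, then load the final table definitions.

use surreal_brain::db::{migration1_patch, migration1_tables, Error};
use surrealdb::{engine::remote::ws::Client, Surreal};

// Hypothetical wrapper; the name run_migration1 does not appear in this commit.
pub async fn run_migration1(conn: &Surreal<Client>) -> Result<(), Error> {
    // surql/migration1/patch_tables.sql: drop price/avail_* from vm_node, define vm_node_offer.
    migration1_patch(conn).await?;
    // surql/migration1/final_tables.sql: load the final schema for the new layout.
    migration1_tables(conn).await?;
    Ok(())
}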

@@ -9,7 +9,6 @@ use crate::constants::{
VM_NODE, VM_UPDATE_EVENT,
};
use crate::db::{Account, ErrorFromTable, Report};
use crate::old_brain;
use detee_shared::vm_proto;
use serde::{Deserialize, Serialize};
use surrealdb::engine::remote::ws::Client;
@@ -1007,63 +1006,3 @@ impl ActiveVmWithNode {
}
}
// TODO: delete all of these From implementations after migration0 gets executed
impl From<&old_brain::BrainData> for Vec<VmNode> {
fn from(old_data: &old_brain::BrainData) -> Self {
let mut nodes = Vec::new();
for old_node in old_data.vm_nodes.iter() {
nodes.push(VmNode {
id: RecordId::from((VM_NODE, old_node.public_key.clone())),
operator: RecordId::from((ACCOUNT, old_node.operator_wallet.clone())),
pub_sub_node: DEFAULT_ENDPOINT.to_string(),
country: old_node.country.clone(),
region: old_node.region.clone(),
city: old_node.city.clone(),
ip: old_node.ip.clone(),
avail_mem_mib: old_node.avail_mem_mb,
avail_vcpus: old_node.avail_vcpus,
avail_storage_mib: old_node.avail_storage_gbs * 1024,
avail_ipv4: old_node.avail_ipv4,
avail_ipv6: old_node.avail_ipv6,
avail_ports: old_node.avail_ports,
max_ports_per_vm: old_node.max_ports_per_vm,
price: old_node.price,
disconnected_at: Datetime::default(),
connected_at: Datetime::default(),
});
}
nodes
}
}
impl From<&old_brain::BrainData> for Vec<ActiveVm> {
fn from(old_data: &old_brain::BrainData) -> Self {
let mut contracts = Vec::new();
for old_c in old_data.vm_contracts.iter() {
let mut mapped_ports = Vec::new();
for port in old_c.exposed_ports.iter() {
mapped_ports.push((*port, 8080u32));
}
contracts.push(ActiveVm {
id: RecordId::from((ACTIVE_VM, old_c.uuid.replace("-", ""))),
admin: RecordId::from((ACCOUNT, old_c.admin_pubkey.clone())),
vm_node: RecordId::from((VM_NODE, old_c.node_pubkey.clone())),
hostname: old_c.hostname.clone(),
mapped_ports,
public_ipv4: old_c.public_ipv4.clone(),
public_ipv6: old_c.public_ipv6.clone(),
disk_size_mib: old_c.disk_size_gb * 1024,
vcpus: old_c.vcpus,
memory_mib: old_c.memory_mb,
dtrfs_sha: old_c.dtrfs_sha.clone(),
kernel_sha: old_c.kernel_sha.clone(),
price_per_unit: old_c.price_per_unit,
locked_nano: old_c.locked_nano,
created_at: old_c.created_at.into(),
collected_at: old_c.collected_at.into(),
});
}
contracts
}
}

@@ -3,4 +3,3 @@
pub mod constants;
pub mod db;
pub mod grpc;
pub mod old_brain;

@@ -1,135 +0,0 @@
// SPDX-License-Identifier: Apache-2.0
// TODO: delete this file after migration0 gets executed
use chrono::Utc;
use dashmap::DashMap;
use serde::{Deserialize, Serialize};
use std::collections::{HashMap, HashSet};
use crate::constants::OLD_BRAIN_DATA_PATH;
#[derive(Clone, Default, Serialize, Deserialize, Debug)]
pub struct AccountData {
pub balance: u64,
pub tmp_locked: u64,
// holds reasons why VMs of this account got kicked
pub kicked_for: Vec<String>,
pub last_kick: chrono::DateTime<Utc>,
// holds accounts that banned this account
pub banned_by: HashSet<String>,
}
#[derive(Clone, Default, Serialize, Deserialize)]
pub struct OperatorData {
pub escrow: u64,
pub email: String,
pub banned_users: HashSet<String>,
pub vm_nodes: HashSet<String>,
pub app_nodes: HashSet<String>,
}
#[derive(Eq, PartialEq, Clone, Debug, Default, Serialize, Deserialize)]
pub struct VmNode {
pub public_key: String,
pub operator_wallet: String,
pub country: String,
pub region: String,
pub city: String,
pub ip: String,
pub avail_mem_mb: u32,
pub avail_vcpus: u32,
pub avail_storage_gbs: u32,
pub avail_ipv4: u32,
pub avail_ipv6: u32,
pub avail_ports: u32,
pub max_ports_per_vm: u32,
// nanoLP per unit per minute
pub price: u64,
// 1st String is user wallet and 2nd String is report message
pub reports: HashMap<String, String>,
pub offline_minutes: u64,
}
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct VmContract {
pub uuid: String,
pub hostname: String,
pub admin_pubkey: String,
pub node_pubkey: String,
pub exposed_ports: Vec<u32>,
pub public_ipv4: String,
pub public_ipv6: String,
pub disk_size_gb: u32,
pub vcpus: u32,
pub memory_mb: u32,
pub kernel_sha: String,
pub dtrfs_sha: String,
pub created_at: chrono::DateTime<Utc>,
pub updated_at: chrono::DateTime<Utc>,
// recommended value is 20000
/// price per unit per minute
pub price_per_unit: u64,
pub locked_nano: u64,
pub collected_at: chrono::DateTime<Utc>,
}
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
pub struct AppContract {
pub uuid: String,
pub package_url: String,
pub admin_pubkey: String,
pub node_pubkey: String,
pub mapped_ports: Vec<(u16, u16)>,
pub host_ipv4: String,
pub disk_size_mb: u32,
pub vcpus: u32,
pub memory_mb: u32,
pub created_at: chrono::DateTime<Utc>,
pub updated_at: chrono::DateTime<Utc>,
// price per unit per minute
// recommended value is 20000
pub price_per_unit: u64,
pub locked_nano: u64,
pub collected_at: chrono::DateTime<Utc>,
pub hratls_pubkey: String,
pub public_package_mr_enclave: Option<Vec<u8>>,
pub app_name: String,
}
#[derive(Eq, Hash, PartialEq, Clone, Debug, Default, Serialize, Deserialize)]
pub struct AppNode {
pub node_pubkey: String,
pub operator_wallet: String,
pub country: String,
pub region: String,
pub city: String,
pub ip: String,
pub avail_mem_mb: u32,
pub avail_vcpus: u32,
pub avail_storage_mb: u32,
pub avail_no_of_port: u32,
pub max_ports_per_app: u32,
// nanotokens per unit per minute
pub price: u64,
pub offline_minutes: u64,
}
#[derive(Default, Serialize, Deserialize)]
pub struct BrainData {
pub accounts: DashMap<String, AccountData>,
pub operators: DashMap<String, OperatorData>,
pub vm_nodes: Vec<VmNode>,
pub vm_contracts: Vec<VmContract>,
pub app_nodes: Vec<AppNode>,
pub app_contracts: Vec<AppContract>,
}
impl BrainData {
pub fn load_from_disk() -> Result<Self, Box<dyn std::error::Error>> {
let content = std::fs::read_to_string(OLD_BRAIN_DATA_PATH)?;
let data: Self = serde_yaml::from_str(&content)?;
Ok(data)
}
}

@@ -0,0 +1,158 @@
-- SPDX-License-Identifier: Apache-2.0
DEFINE TABLE account SCHEMAFULL;
DEFINE FIELD balance ON TABLE account TYPE int DEFAULT 0;
DEFINE FIELD tmp_locked ON TABLE account TYPE int DEFAULT 0;
DEFINE FIELD escrow ON TABLE account TYPE int DEFAULT 0;
DEFINE FIELD email ON TABLE account TYPE string DEFAULT "";
DEFINE TABLE vm_node SCHEMAFULL;
DEFINE FIELD operator ON TABLE vm_node TYPE record<account>;
DEFINE FIELD pub_sub_node ON TABLE vm_node TYPE string DEFAULT "127.0.0.1:31337";
DEFINE FIELD country ON TABLE vm_node TYPE string;
DEFINE FIELD region ON TABLE vm_node TYPE string;
DEFINE FIELD city ON TABLE vm_node TYPE string;
DEFINE FIELD ip ON TABLE vm_node TYPE string;
DEFINE FIELD avail_ipv4 ON TABLE vm_node TYPE int;
DEFINE FIELD avail_ipv6 ON TABLE vm_node TYPE int;
DEFINE FIELD avail_ports ON TABLE vm_node TYPE int;
DEFINE FIELD max_ports_per_vm ON TABLE vm_node TYPE int;
DEFINE FIELD connected_at ON TABLE vm_node TYPE datetime;
DEFINE FIELD disconnected_at ON TABLE vm_node TYPE datetime;
DEFINE TABLE vm_node_offer SCHEMAFULL;
DEFINE FIELD vm_node ON TABLE vm_node_offer TYPE record<vm_node>;
DEFINE FIELD price ON TABLE vm_node_offer TYPE int;
DEFINE FIELD vcpus ON TABLE vm_node_offer TYPE int;
DEFINE FIELD memory_mib ON TABLE vm_node_offer TYPE int;
DEFINE FIELD disk_mib ON TABLE vm_node_offer TYPE int;
DEFINE TABLE new_vm_req TYPE RELATION FROM account TO vm_node SCHEMAFULL;
DEFINE FIELD hostname ON TABLE new_vm_req TYPE string;
DEFINE FIELD extra_ports ON TABLE new_vm_req TYPE array<int>;
DEFINE FIELD public_ipv4 ON TABLE new_vm_req TYPE bool;
DEFINE FIELD public_ipv6 ON TABLE new_vm_req TYPE bool;
DEFINE FIELD disk_size_mib ON TABLE new_vm_req TYPE int;
DEFINE FIELD vcpus ON TABLE new_vm_req TYPE int;
DEFINE FIELD memory_mib ON TABLE new_vm_req TYPE int;
DEFINE FIELD dtrfs_sha ON TABLE new_vm_req TYPE string;
DEFINE FIELD dtrfs_url ON TABLE new_vm_req TYPE string;
DEFINE FIELD kernel_sha ON TABLE new_vm_req TYPE string;
DEFINE FIELD kernel_url ON TABLE new_vm_req TYPE string;
DEFINE FIELD created_at ON TABLE new_vm_req TYPE datetime;
DEFINE FIELD price_per_unit ON TABLE new_vm_req TYPE int;
DEFINE FIELD locked_nano ON TABLE new_vm_req TYPE int;
DEFINE FIELD error ON TABLE new_vm_req TYPE string;
DEFINE TABLE active_vm TYPE RELATION FROM account TO vm_node SCHEMAFULL;
DEFINE FIELD hostname ON TABLE active_vm TYPE string;
DEFINE FIELD mapped_ports ON TABLE active_vm TYPE array<[int, int]>;
DEFINE FIELD public_ipv4 ON TABLE active_vm TYPE string;
DEFINE FIELD public_ipv6 ON TABLE active_vm TYPE string;
DEFINE FIELD disk_size_mib ON TABLE active_vm TYPE int;
DEFINE FIELD vcpus ON TABLE active_vm TYPE int;
DEFINE FIELD memory_mib ON TABLE active_vm TYPE int;
DEFINE FIELD dtrfs_sha ON TABLE active_vm TYPE string;
DEFINE FIELD kernel_sha ON TABLE active_vm TYPE string;
DEFINE FIELD created_at ON TABLE active_vm TYPE datetime;
DEFINE FIELD price_per_unit ON TABLE active_vm TYPE int;
DEFINE FIELD locked_nano ON TABLE active_vm TYPE int;
DEFINE FIELD collected_at ON TABLE active_vm TYPE datetime;
DEFINE TABLE update_vm_req TYPE RELATION FROM account TO vm_node SCHEMAFULL;
DEFINE FIELD vcpus ON TABLE update_vm_req TYPE int;
DEFINE FIELD memory_mib ON TABLE update_vm_req TYPE int;
DEFINE FIELD disk_size_mib ON TABLE update_vm_req TYPE int;
DEFINE FIELD dtrfs_sha ON TABLE update_vm_req TYPE string;
DEFINE FIELD dtrfs_url ON TABLE update_vm_req TYPE string;
DEFINE FIELD kernel_sha ON TABLE update_vm_req TYPE string;
DEFINE FIELD kernel_url ON TABLE update_vm_req TYPE string;
DEFINE FIELD created_at ON TABLE update_vm_req TYPE datetime;
DEFINE FIELD error ON TABLE update_vm_req TYPE string;
DEFINE TABLE deleted_vm TYPE RELATION FROM account TO vm_node SCHEMAFULL;
DEFINE FIELD hostname ON TABLE deleted_vm TYPE string;
DEFINE FIELD mapped_ports ON TABLE deleted_vm TYPE array<[int, int]>;
DEFINE FIELD public_ipv4 ON TABLE deleted_vm TYPE string;
DEFINE FIELD public_ipv6 ON TABLE deleted_vm TYPE string;
DEFINE FIELD disk_size_mib ON TABLE deleted_vm TYPE int;
DEFINE FIELD vcpus ON TABLE deleted_vm TYPE int;
DEFINE FIELD memory_mib ON TABLE deleted_vm TYPE int;
DEFINE FIELD dtrfs_sha ON TABLE deleted_vm TYPE string;
DEFINE FIELD kernel_sha ON TABLE deleted_vm TYPE string;
DEFINE FIELD created_at ON TABLE deleted_vm TYPE datetime;
DEFINE FIELD deleted_at ON TABLE deleted_vm TYPE datetime DEFAULT time::now();
DEFINE FIELD price_per_unit ON TABLE deleted_vm TYPE int;
DEFINE TABLE app_node SCHEMAFULL;
DEFINE FIELD operator ON TABLE app_node TYPE record<account>;
DEFINE FIELD pub_sub_node ON TABLE app_node TYPE string DEFAULT "127.0.0.1:31337";
DEFINE FIELD country ON TABLE app_node TYPE string;
DEFINE FIELD region ON TABLE app_node TYPE string;
DEFINE FIELD city ON TABLE app_node TYPE string;
DEFINE FIELD ip ON TABLE app_node TYPE string;
DEFINE FIELD avail_mem_mib ON TABLE app_node TYPE int;
DEFINE FIELD avail_vcpus ON TABLE app_node TYPE int;
DEFINE FIELD avail_storage_mib ON TABLE app_node TYPE int;
DEFINE FIELD avail_ports ON TABLE app_node TYPE int;
DEFINE FIELD max_ports_per_app ON TABLE app_node TYPE int;
DEFINE FIELD price ON TABLE app_node TYPE int;
DEFINE FIELD connected_at ON TABLE app_node TYPE datetime;
DEFINE FIELD disconnected_at ON TABLE app_node TYPE datetime;
DEFINE TABLE new_app_req TYPE RELATION FROM account TO app_node SCHEMAFULL;
DEFINE FIELD app_name ON TABLE new_app_req TYPE string;
DEFINE FIELD package_url ON TABLE new_app_req TYPE string;
DEFINE FIELD mr_enclave ON TABLE new_app_req TYPE string;
DEFINE FIELD hratls_pubkey ON TABLE new_app_req TYPE string;
DEFINE FIELD ports ON TABLE new_app_req TYPE array<int>;
DEFINE FIELD memory_mib ON TABLE new_app_req TYPE int;
DEFINE FIELD vcpus ON TABLE new_app_req TYPE int;
DEFINE FIELD disk_size_mib ON TABLE new_app_req TYPE int;
DEFINE FIELD locked_nano ON TABLE new_app_req TYPE int;
DEFINE FIELD price_per_unit ON TABLE new_app_req TYPE int;
DEFINE FIELD error ON TABLE new_app_req TYPE string;
DEFINE FIELD created_at ON TABLE new_app_req TYPE datetime;
DEFINE TABLE active_app TYPE RELATION FROM account TO app_node SCHEMAFULL;
DEFINE FIELD app_name ON TABLE active_app TYPE string;
DEFINE FIELD mapped_ports ON TABLE active_app TYPE array<[int, int]>;
DEFINE FIELD host_ipv4 ON TABLE active_app TYPE string;
DEFINE FIELD vcpus ON TABLE active_app TYPE int;
DEFINE FIELD memory_mib ON TABLE active_app TYPE int;
DEFINE FIELD disk_size_mib ON TABLE active_app TYPE int;
DEFINE FIELD created_at ON TABLE active_app TYPE datetime;
DEFINE FIELD price_per_unit ON TABLE active_app TYPE int;
DEFINE FIELD locked_nano ON TABLE active_app TYPE int;
DEFINE FIELD collected_at ON TABLE active_app TYPE datetime;
DEFINE FIELD mr_enclave ON TABLE active_app TYPE string;
DEFINE FIELD package_url ON TABLE active_app TYPE string;
DEFINE FIELD hratls_pubkey ON TABLE active_app TYPE string;
DEFINE TABLE deleted_app TYPE RELATION FROM account TO app_node SCHEMAFULL;
DEFINE FIELD app_name ON TABLE deleted_app TYPE string;
DEFINE FIELD mapped_ports ON TABLE deleted_app TYPE array<[int, int]>;
DEFINE FIELD host_ipv4 ON TABLE deleted_app TYPE string;
DEFINE FIELD vcpus ON TABLE deleted_app TYPE int;
DEFINE FIELD memory_mib ON TABLE deleted_app TYPE int;
DEFINE FIELD disk_size_mib ON TABLE deleted_app TYPE int;
DEFINE FIELD created_at ON TABLE deleted_app TYPE datetime;
DEFINE FIELD deleted_at ON TABLE deleted_app TYPE datetime DEFAULT time::now();
DEFINE FIELD price_per_unit ON TABLE deleted_app TYPE int;
DEFINE FIELD mr_enclave ON TABLE deleted_app TYPE string;
DEFINE FIELD package_url ON TABLE deleted_app TYPE string;
DEFINE FIELD hratls_pubkey ON TABLE deleted_app TYPE string;
DEFINE TABLE ban TYPE RELATION FROM account TO account;
DEFINE FIELD created_at ON TABLE ban TYPE datetime DEFAULT time::now();
DEFINE TABLE kick TYPE RELATION FROM account TO account;
DEFINE FIELD created_at ON TABLE kick TYPE datetime;
DEFINE FIELD reason ON TABLE kick TYPE string;
DEFINE FIELD contract ON TABLE kick TYPE record<deleted_vm|deleted_app>;
DEFINE FIELD node ON TABLE kick TYPE record<vm_node|app_node>;
DEFINE TABLE report TYPE RELATION FROM account TO vm_node|app_node;
DEFINE FIELD created_at ON TABLE report TYPE datetime;
DEFINE FIELD reason ON TABLE report TYPE string;
DEFINE FIELD contract_id ON TABLE report TYPE string;
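The vm_node_offer table above is what enables multiple offers per VM node: pricing and capacity now live in offer records pointing back at their node rather than on vm_node itself. A minimal sketch of seeding offers through the SurrealDB Rust SDK follows; the VmNodeOffer struct and seed_offers helper are assumptions for illustration and do not appear in this commit.

use serde::{Deserialize, Serialize};
use surrealdb::{engine::remote::ws::Client, RecordId, Surreal};

// Assumed struct mirroring the vm_node_offer fields defined above.
#[derive(Debug, Serialize, Deserialize)]
struct VmNodeOffer {
    vm_node: RecordId,
    price: u64,
    vcpus: u32,
    memory_mib: u32,
    disk_mib: u32,
}

// Assumed helper: one node can now advertise several offers with different shapes and prices.
async fn seed_offers(db: &Surreal<Client>, node_pubkey: &str) -> Result<(), surrealdb::Error> {
    let offers = vec![
        VmNodeOffer {
            vm_node: RecordId::from(("vm_node", node_pubkey.to_string())),
            price: 20000,
            vcpus: 2,
            memory_mib: 4096,
            disk_mib: 51200,
        },
        VmNodeOffer {
            vm_node: RecordId::from(("vm_node", node_pubkey.to_string())),
            price: 35000,
            vcpus: 4,
            memory_mib: 8192,
            disk_mib: 102400,
        },
    ];
    let _created: Vec<VmNodeOffer> = db.insert("vm_node_offer").content(offers).await?;
    Ok(())
}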

@@ -0,0 +1,13 @@
-- SPDX-License-Identifier: Apache-2.0
REMOVE FIELD price ON TABLE vm_node;
REMOVE FIELD avail_vcpus ON TABLE vm_node;
REMOVE FIELD avail_storage_mib ON TABLE vm_node;
REMOVE FIELD avail_mem_mib ON TABLE vm_node;
DEFINE TABLE vm_node_offer SCHEMAFULL;
DEFINE FIELD vm_node ON TABLE vm_node_offer TYPE record<vm_node>;
DEFINE FIELD price ON TABLE vm_node_offer TYPE int;
DEFINE FIELD vcpus ON TABLE vm_node_offer TYPE int;
DEFINE FIELD memory_mib ON TABLE vm_node_offer TYPE int;
DEFINE FIELD disk_mib ON TABLE vm_node_offer TYPE int;
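Once this patch has run, per-node price lookups move from vm_node fields to the vm_node_offer table. A hedged sketch of such a lookup through the Rust SDK (the OfferRow struct and offers_for_node helper are illustrative, not part of this commit):

use serde::Deserialize;
use surrealdb::{engine::remote::ws::Client, RecordId, Surreal};

// Assumed projection of the vm_node_offer rows defined by the patch above.
#[derive(Debug, Deserialize)]
struct OfferRow {
    price: u64,
    vcpus: u32,
    memory_mib: u32,
    disk_mib: u32,
}

// Assumed helper: list all offers advertised by a single VM node.
async fn offers_for_node(
    db: &Surreal<Client>,
    node_pubkey: &str,
) -> Result<Vec<OfferRow>, surrealdb::Error> {
    let node = RecordId::from(("vm_node", node_pubkey.to_string()));
    let mut response = db
        .query("SELECT * FROM vm_node_offer WHERE vm_node = $node")
        .bind(("node", node))
        .await?;
    response.take(0)
}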

@@ -9,7 +9,7 @@ use detee_shared::vm_proto::brain_vm_daemon_server::BrainVmDaemonServer;
use dotenv::dotenv;
use hyper_util::rt::TokioIo;
use std::sync::Arc;
use surreal_brain::constants::DB_SCHEMA_FILES;
use surreal_brain::constants::MIGRATION_0_SCHEMA;
use surreal_brain::grpc::app::{AppCliServer, AppDaemonServer};
use surreal_brain::grpc::general::GeneralCliServer;
use surreal_brain::grpc::vm::{VmCliServer, VmDaemonServer};
@@ -39,7 +39,7 @@ pub async fn prepare_test_db() -> Result<Surreal<Client>> {
serde_yaml::from_str(&raw_mock_data)?;
db.query(format!("REMOVE DATABASE {db_name}")).await?;
for schema in DB_SCHEMA_FILES.map(std::fs::read_to_string) {
for schema in MIGRATION_0_SCHEMA.map(std::fs::read_to_string) {
db.query(schema?).await?;
}
surreal_brain::db::migration0(&db, &mock_data).await?;
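Tests that need the offer schema could layer the migration1 helpers on top of prepare_test_db; the helper below is an assumption, not part of this commit, and reuses the Result alias and imports already present in this test module.

// Assumed test helper: extend the test database with the vm_node_offer layout
// by running the migration1 steps after migration0.
async fn prepare_test_db_with_offers() -> Result<Surreal<Client>> {
    let db = prepare_test_db().await?;
    surreal_brain::db::migration1_patch(&db).await?;
    surreal_brain::db::migration1_tables(&db).await?;
    Ok(db)
}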