brain/tests/grpc_vm_daemon_test.rs
ghe0 ddba7a4f95
switch from LP to credits
As part of open sourcing the product, "loyalty points" is no longer the
best terminology. Switching to "credits" makes sense from several points
of view.

At the same time, this change enables an architectural move towards
slots. Slots let daemon resources be booked based on the HW ratio
configured in the daemon config.
2025-06-27 15:47:42 +03:00
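For context on the "HW ratio" mentioned above: a slot can be read as a fixed bundle of hardware resources, with the bundle size coming from the daemon config. The sketch below is only a guess at that idea, not code from this repository; SlotRatio, AvailableHw, and bookable_slots are made up for illustration.

// Illustrative sketch only: SlotRatio, AvailableHw and bookable_slots are
// assumptions, not types from this repository.
#[derive(Clone, Copy, Debug)]
struct SlotRatio {
    vcpus_per_slot: u32,
    memory_mib_per_slot: u32,
    storage_mib_per_slot: u32,
}

#[derive(Clone, Copy, Debug)]
struct AvailableHw {
    vcpus: u32,
    memory_mib: u32,
    storage_mib: u32,
}

// A node can offer as many whole slots as its scarcest resource allows.
fn bookable_slots(hw: AvailableHw, ratio: SlotRatio) -> u32 {
    (hw.vcpus / ratio.vcpus_per_slot)
        .min(hw.memory_mib / ratio.memory_mib_per_slot)
        .min(hw.storage_mib / ratio.storage_mib_per_slot)
}

fn main() {
    // Uses the same numbers the resource test below reports:
    // 4 vCPUs, 8192 MiB RAM, 102400 MiB storage.
    let ratio =
        SlotRatio { vcpus_per_slot: 1, memory_mib_per_slot: 2048, storage_mib_per_slot: 25600 };
    let hw = AvailableHw { vcpus: 4, memory_mib: 8192, storage_mib: 102400 };
    assert_eq!(bookable_slots(hw, ratio), 4);
}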

// SPDX-License-Identifier: Apache-2.0
use crate::common::test_utils::{airdrop, Key};
use common::prepare_test_env::{prepare_test_db, run_service_for_stream};
use common::vm_daemon_utils::{mock_vm_daemon, register_vm_node};
use detee_shared::vm_proto;
use detee_shared::vm_proto::brain_vm_cli_client::BrainVmCliClient;
use detee_shared::vm_proto::brain_vm_daemon_client::BrainVmDaemonClient;
use surreal_brain::constants::VM_NODE;
use surreal_brain::db::prelude as db;
use tokio_stream::wrappers::ReceiverStream;

mod common;

#[tokio::test]
async fn test_reg_vm_node() {
    prepare_test_db().await.unwrap();
    let channel = run_service_for_stream().await.unwrap();
    let mut client = BrainVmDaemonClient::new(channel);
    // A freshly registered node should have no pending VM deletion requests.
    let del_vm_reqs =
        register_vm_node(&mut client, &Key::new(), &Key::new().pubkey).await.unwrap();
    assert!(del_vm_reqs.is_empty());
}

#[tokio::test]
async fn test_brain_message() {
    env_logger::builder().filter_level(log::LevelFilter::Error).init();
    prepare_test_db().await.unwrap();
    let brain_channel = run_service_for_stream().await.unwrap();
    // Bring up a mock daemon so the brain has a node to place the VM on.
    let daemon_key = mock_vm_daemon(&brain_channel, None).await.unwrap();
    let mut cli_client = BrainVmCliClient::new(brain_channel.clone());
    let cli_key = Key::new();
    let req = vm_proto::NewVmReq {
        admin_pubkey: cli_key.pubkey.clone(),
        node_pubkey: daemon_key,
        price_per_unit: 1200,
        extra_ports: vec![8080, 8081],
        locked_nano: 100,
        ..Default::default()
    };
    // Fund the CLI key so it can pay for the new VM.
    airdrop(&brain_channel, &cli_key.pubkey, 10).await.unwrap();
    let new_vm_resp =
        cli_client.new_vm(cli_key.sign_request(req).unwrap()).await.unwrap().into_inner();
    assert!(new_vm_resp.error.is_empty());
    assert_eq!(new_vm_resp.uuid.len(), 40);
    assert!(new_vm_resp.args.is_some());
    assert_eq!(new_vm_resp.args.unwrap().exposed_ports.len(), 3);
}

#[tokio::test]
async fn test_vm_daemon_resource_msg() {
    let db = prepare_test_db().await.unwrap();
    let brain_channel = run_service_for_stream().await.unwrap();
    let mut daemon_client = BrainVmDaemonClient::new(brain_channel);
    let daemon_key = Key::new();
    register_vm_node(&mut daemon_client, &daemon_key, &Key::new().pubkey).await.unwrap();

    // Queue the auth message, then open the daemon message stream with the brain.
    let (tx, rx) = tokio::sync::mpsc::channel(32);
    let tx_01 = tx.clone();
    let daemon_key_01 = daemon_key.clone();
    tokio::spawn(async move {
        tx_01
            .send(vm_proto::VmDaemonMessage {
                msg: Some(vm_proto::vm_daemon_message::Msg::Auth(
                    daemon_key_01.clone().sign_stream_auth_vm(vec![]).unwrap(),
                )),
            })
            .await
            .unwrap();
        let rx_stream = ReceiverStream::new(rx);
        daemon_client.daemon_messages(rx_stream).await.unwrap();
    });

    // Report the node's available resources over the same stream.
    let tx_02 = tx.clone();
    let daemon_pubkey = daemon_key.clone().pubkey;
    let req_data = vm_proto::VmNodeResources {
        node_pubkey: daemon_pubkey,
        avail_ports: 5,
        avail_ipv4: 2,
        avail_ipv6: 88,
        avail_vcpus: 4,
        avail_memory_mib: 8192,
        avail_storage_mib: 102400,
        max_ports_per_vm: 5,
    };
    let req_data_copy = req_data.clone();
    tokio::spawn(async move {
        tx_02
            .send(vm_proto::VmDaemonMessage {
                msg: Some(vm_proto::vm_daemon_message::Msg::VmNodeResources(req_data_copy)),
            })
            .await
            .unwrap();
    });

    // Give the brain time to process the messages, then verify the DB record was updated.
    tokio::time::sleep(tokio::time::Duration::from_millis(300)).await;
    let vm_node_opt: Option<db::VmNode> = db.select((VM_NODE, daemon_key.pubkey)).await.unwrap();
    assert!(vm_node_opt.is_some());
    let db::VmNode {
        avail_mem_mib,
        avail_vcpus,
        avail_storage_mib,
        avail_ports,
        avail_ipv4,
        avail_ipv6,
        max_ports_per_vm,
        ..
    } = vm_node_opt.unwrap();
    assert_eq!(avail_mem_mib, req_data.avail_memory_mib);
    assert_eq!(avail_vcpus, req_data.avail_vcpus);
    assert_eq!(avail_storage_mib, req_data.avail_storage_mib);
    assert_eq!(avail_ports, req_data.avail_ports);
    assert_eq!(avail_ipv4, req_data.avail_ipv4);
    assert_eq!(avail_ipv6, req_data.avail_ipv6);
    assert_eq!(max_ports_per_vm, req_data.max_ports_per_vm);
}