#![allow(dead_code)]
use crate::db;
use detee_shared::app_proto::{AppContract, AppNodeListResp};
use detee_shared::{
    common_proto::{Empty, Pubkey},
    general_proto::{
        brain_general_cli_server::BrainGeneralCli, Account, AccountBalance, AirdropReq, BanUserReq,
        InspectOperatorResp, KickReq, KickResp, ListOperatorsResp, RegOperatorReq, ReportNodeReq,
        SlashReq,
    },
    vm_proto::{
        brain_vm_cli_server::BrainVmCli, brain_vm_daemon_server::BrainVmDaemon, ListVmContractsReq,
        *,
    },
};

use log::info;
use std::pin::Pin;
use tokio::sync::mpsc;
use tokio_stream::wrappers::ReceiverStream;
use tokio_stream::{Stream, StreamExt};
use tonic::{Request, Response, Status, Streaming};

pub struct BrainGeneralCliForReal {}

impl From<db::Account> for AccountBalance {
    fn from(account: db::Account) -> Self {
        AccountBalance { balance: account.balance, tmp_locked: account.tmp_locked }
    }
}

impl From<db::NewVmReq> for NewVmReq {
    fn from(new_vm_req: db::NewVmReq) -> Self {
        Self {
            uuid: new_vm_req.id.key().to_string(),
            hostname: new_vm_req.hostname,
            admin_pubkey: new_vm_req.admin.key().to_string(),
            node_pubkey: new_vm_req.vm_node.key().to_string(),
            extra_ports: new_vm_req.extra_ports,
            public_ipv4: new_vm_req.public_ipv4,
            public_ipv6: new_vm_req.public_ipv6,
            disk_size_gb: new_vm_req.disk_size_gb,
            vcpus: new_vm_req.vcpus,
            memory_mb: new_vm_req.memory_mb,
            kernel_url: new_vm_req.kernel_url,
            kernel_sha: new_vm_req.kernel_sha,
            dtrfs_url: new_vm_req.dtrfs_url,
            dtrfs_sha: new_vm_req.dtrfs_sha,
            price_per_unit: new_vm_req.price_per_unit,
            locked_nano: new_vm_req.locked_nano,
        }
    }
}

impl From<db::UpdateVmReq> for UpdateVmReq {
    fn from(update_vm_req: db::UpdateVmReq) -> Self {
        Self {
            uuid: update_vm_req.id.key().to_string(),
            // daemon does not care about VM hostname
            hostname: String::new(),
            admin_pubkey: update_vm_req.admin.key().to_string(),
            disk_size_gb: update_vm_req.disk_size_gb,
            vcpus: update_vm_req.vcpus,
            memory_mb: update_vm_req.memory_mb,
            kernel_url: update_vm_req.kernel_url,
            kernel_sha: update_vm_req.kernel_sha,
            dtrfs_url: update_vm_req.dtrfs_url,
            dtrfs_sha: update_vm_req.dtrfs_sha,
        }
    }
}

impl From<db::DeletedVm> for DeleteVmReq {
    fn from(delete_vm_req: db::DeletedVm) -> Self {
        Self {
            uuid: delete_vm_req.id.key().to_string(),
            admin_pubkey: delete_vm_req.admin.key().to_string(),
        }
    }
}

impl From<db::DaemonNotification> for BrainVmMessage {
    fn from(notification: db::DaemonNotification) -> Self {
        match notification {
            db::DaemonNotification::Create(new_vm_req) => {
                BrainVmMessage { msg: Some(brain_vm_message::Msg::NewVmReq(new_vm_req.into())) }
            }
            db::DaemonNotification::Update(update_vm_req) => BrainVmMessage {
                msg: Some(brain_vm_message::Msg::UpdateVmReq(update_vm_req.into())),
            },
            db::DaemonNotification::Delete(deleted_vm) => {
                BrainVmMessage { msg: Some(brain_vm_message::Msg::DeleteVm(deleted_vm.into())) }
            }
        }
    }
}

impl From<db::ActiveVmWithNode> for VmContract {
    fn from(db_c: db::ActiveVmWithNode) -> Self {
        VmContract {
            uuid: db_c.id.key().to_string(),
            hostname: db_c.hostname.clone(),
            admin_pubkey: db_c.admin.key().to_string(),
            node_pubkey: db_c.vm_node.id.key().to_string(),
            node_ip: db_c.vm_node.ip.clone(),
            location: format!(
                "{}, {}, {}",
                db_c.vm_node.city, db_c.vm_node.region, db_c.vm_node.country
            ),
            memory_mb: db_c.memory_mb,
            vcpus: db_c.vcpus,
            disk_size_gb: db_c.disk_size_gb,
            mapped_ports: db_c
                .mapped_ports
                .iter()
                .map(|(h, g)| MappedPort { host_port: *h, guest_port: *g })
                .collect(),
            vm_public_ipv6: db_c.public_ipv6.clone(),
            vm_public_ipv4: db_c.public_ipv4.clone(),
            locked_nano: db_c.locked_nano,
            dtrfs_sha: db_c.dtrfs_sha.clone(),
            kernel_sha: db_c.kernel_sha.clone(),
            nano_per_minute: db_c.price_per_minute(),
            created_at: db_c.created_at.to_rfc3339(),
            updated_at: db_c.updated_at.to_rfc3339(),
            collected_at: db_c.collected_at.to_rfc3339(),
        }
    }
}

impl From<db::Error> for tonic::Status {
    fn from(e: db::Error) -> Self {
        Self::internal(format!("Internal error: {e}"))
    }
}

impl From<db::Operator> for ListOperatorsResp {
    fn from(db_o: db::Operator) -> Self {
        ListOperatorsResp {
            pubkey: db_o.account.key().to_string(),
            escrow: db_o.escrow,
            email: db_o.email,
            app_nodes: db_o.app_nodes,
            vm_nodes: db_o.vm_nodes,
            reports: db_o.reports,
        }
    }
}

impl From<db::VmNodeWithReports> for VmNodeListResp {
    fn from(vm_node: db::VmNodeWithReports) -> Self {
        Self {
            operator: vm_node.operator.key().to_string(),
            node_pubkey: vm_node.id.key().to_string(),
            country: vm_node.country,
            region: vm_node.region,
            city: vm_node.city,
            ip: vm_node.ip,
            reports: vm_node.reports.iter().map(|n| n.reason.clone()).collect(),
            price: vm_node.price,
        }
    }
}

impl From<db::AppNodeWithReports> for AppNodeListResp {
    fn from(app_node: db::AppNodeWithReports) -> Self {
        Self {
            operator: app_node.operator.key().to_string(),
            node_pubkey: app_node.id.key().to_string(),
            country: app_node.country,
            region: app_node.region,
            city: app_node.city,
            ip: app_node.ip,
            reports: app_node.reports.iter().map(|n| n.reason.clone()).collect(),
            price: app_node.price,
        }
    }
}

struct BrainVmDaemonForReal {}

#[tonic::async_trait]
impl BrainVmDaemon for BrainVmDaemonForReal {
    type RegisterVmNodeStream = Pin<Box<dyn Stream<Item = Result<VmContract, Status>> + Send>>;
    async fn register_vm_node(
        &self,
        req: Request<RegisterVmNodeReq>,
    ) -> Result<Response<Self::RegisterVmNodeStream>, Status> {
        let req = check_sig_from_req(req)?;
        info!("Starting registration process for {:?}", req);
        // Register the node with zeroed availability figures.
        db::VmNode {
            id: surrealdb::RecordId::from((db::VM_NODE, req.node_pubkey.clone())),
            operator: surrealdb::RecordId::from((db::ACCOUNT, req.operator_wallet)),
            country: req.country,
            region: req.region,
            city: req.city,
            ip: req.main_ip,
            price: req.price,
            avail_mem_mb: 0,
            avail_vcpus: 0,
            avail_storage_gbs: 0,
            avail_ipv4: 0,
            avail_ipv6: 0,
            avail_ports: 0,
            max_ports_per_vm: 0,
            offline_minutes: 0,
        }
        .register()
        .await?;

        info!("Sending existing contracts to {}", req.node_pubkey);
        let contracts = db::ActiveVmWithNode::list_by_node(&req.node_pubkey).await?;
        let (tx, rx) = mpsc::channel(6);
        tokio::spawn(async move {
            for contract in contracts {
                let _ = tx.send(Ok(contract.into())).await;
            }
        });
        let output_stream = ReceiverStream::new(rx);
        Ok(Response::new(Box::pin(output_stream) as Self::RegisterVmNodeStream))
    }

    type BrainMessagesStream = Pin<Box<dyn Stream<Item = Result<BrainVmMessage, Status>> + Send>>;
    async fn brain_messages(
        &self,
        req: Request<DaemonStreamAuth>,
    ) -> Result<Response<Self::BrainMessagesStream>, Status> {
        let auth = req.into_inner();
        let pubkey = auth.pubkey.clone();
        check_sig_from_parts(
            &pubkey,
            &auth.timestamp,
            &format!("{:?}", auth.contracts),
            &auth.signature,
        )?;
        info!("Daemon {} connected to receive brain messages", pubkey);

        // Forward delete, create and update notifications for this node into a single channel.
        let (tx, rx) = mpsc::channel(6);
        {
            let pubkey = pubkey.clone();
            let tx = tx.clone();
            tokio::spawn(async move {
                match db::listen_for_node::<db::DeletedVm>(&pubkey, tx).await {
                    Ok(()) => log::info!("db::VmContract::listen_for_node ended for {pubkey}"),
                    Err(e) => {
                        log::warn!("db::VmContract::listen_for_node errored for {pubkey}: {e}")
                    }
                };
            });
        }
        {
            let pubkey = pubkey.clone();
            let tx = tx.clone();
            tokio::spawn(async move {
                let _ = db::listen_for_node::<db::NewVmReq>(&pubkey, tx.clone()).await;
            });
        }
        {
            let pubkey = pubkey.clone();
            let tx = tx.clone();
            tokio::spawn(async move {
                let _ = db::listen_for_node::<db::UpdateVmReq>(&pubkey, tx.clone()).await;
            });
        }

        let output_stream = ReceiverStream::new(rx).map(|msg| Ok(msg.into()));
        Ok(Response::new(Box::pin(output_stream) as Self::BrainMessagesStream))
    }

    async fn daemon_messages(
        &self,
        _req: Request<Streaming<VmDaemonMessage>>,
    ) -> Result<Response<Empty>, Status> {
        todo!();
        // let mut req_stream = req.into_inner();
        // let pubkey: String;
        // if let Some(Ok(msg)) = req_stream.next().await {
        //     log::debug!("daemon_messages received the following auth message: {:?}", msg.msg);
        //     if let Some(vm_daemon_message::Msg::Auth(auth)) = msg.msg {
        //         pubkey = auth.pubkey.clone();
        //         check_sig_from_parts(
        //             &pubkey,
        //             &auth.timestamp,
        //             &format!("{:?}", auth.contracts),
        //             &auth.signature,
        //         )?;
        //     } else {
        //         return Err(Status::unauthenticated(
        //             "Could not authenticate the daemon: could not extract auth signature",
        //         ));
        //     }
        // } else {
        //     return Err(Status::unauthenticated("Could not authenticate the daemon"));
        // }

        // // info!("Received a message from daemon {pubkey}: {daemon_message:?}");
        // while let Some(daemon_message) = req_stream.next().await {
        //     match daemon_message {
        //         Ok(msg) => match msg.msg {
        //             Some(vm_daemon_message::Msg::NewVmResp(new_vm_resp)) => {
        //                 self.data.submit_newvm_resp(new_vm_resp).await;
        //             }
        //             Some(vm_daemon_message::Msg::UpdateVmResp(update_vm_resp)) => {
        //                 self.data.submit_updatevm_resp(update_vm_resp).await;
        //             }
        //             Some(vm_daemon_message::Msg::VmNodeResources(node_resources)) => {
        //                 self.data.submit_node_resources(node_resources);
        //             }
        //             _ => {}
        //         },
        //         Err(e) => {
        //             log::warn!("Daemon disconnected: {e:?}");
        //             self.data.del_daemon_tx(&pubkey);
        //         }
        //     }
        // }
        // Ok(Response::new(Empty {}))
    }
}

#[tonic::async_trait]
impl BrainGeneralCli for BrainGeneralCliForReal {
    type ListAccountsStream = Pin<Box<dyn Stream<Item = Result<Account, Status>> + Send>>;
    type ListAllAppContractsStream =
        Pin<Box<dyn Stream<Item = Result<AppContract, Status>> + Send>>;
    type ListAllVmContractsStream = Pin<Box<dyn Stream<Item = Result<VmContract, Status>> + Send>>;
    type ListOperatorsStream =
        Pin<Box<dyn Stream<Item = Result<ListOperatorsResp, Status>> + Send>>;

    async fn get_balance(&self, req: Request<Pubkey>) -> Result<Response<AccountBalance>, Status> {
        let req = check_sig_from_req(req)?;
        Ok(Response::new(db::Account::get(&req.pubkey).await?.into()))
    }

    async fn report_node(&self, req: Request<ReportNodeReq>) -> Result<Response<Empty>, Status> {
        let req = check_sig_from_req(req)?;
        let (account, node) = match db::ActiveVmWithNode::get_by_uuid(&req.contract).await? {
            Some(vm_contract)
                if vm_contract.admin.key().to_string() == req.admin_pubkey
                    && vm_contract.vm_node.id.key().to_string() == req.node_pubkey =>
            {
                (vm_contract.admin, vm_contract.vm_node.id)
            }
            _ => {
                // TODO: Hey, Noor! Please add app contract here.
                return Err(Status::unauthenticated("No contract found by this ID."));
            }
        };
        db::Report::create(account, node, req.reason).await?;
        Ok(Response::new(Empty {}))
    }

    async fn list_operators(
        &self,
        req: Request<Empty>,
    ) -> Result<Response<Self::ListOperatorsStream>, Status> {
        let _ = check_sig_from_req(req)?;
        let operators = db::Operator::list().await?;
        let (tx, rx) = mpsc::channel(6);
        tokio::spawn(async move {
            for op in operators {
                let _ = tx.send(Ok(op.into())).await;
            }
        });
        let output_stream = ReceiverStream::new(rx);
        Ok(Response::new(Box::pin(output_stream) as Self::ListOperatorsStream))
    }

    async fn inspect_operator(
        &self,
        req: Request<Pubkey>,
    ) -> Result<Response<InspectOperatorResp>, Status> {
        match db::Operator::inspect_nodes(&req.into_inner().pubkey).await? {
            (Some(op), vm_nodes, app_nodes) => Ok(Response::new(InspectOperatorResp {
                operator: Some(op.into()),
                vm_nodes: vm_nodes.into_iter().map(|n| n.into()).collect(),
                app_nodes: app_nodes.into_iter().map(|n| n.into()).collect(),
            })),
            (None, _, _) => Err(Status::not_found("The wallet you specified is not an operator")),
        }
    }

    async fn register_operator(
        &self,
        _req: Request<RegOperatorReq>,
    ) -> Result<Response<Empty>, Status> {
        todo!();
        // let req = check_sig_from_req(req)?;
        // info!("Registering new operator: {req:?}");
        // match self.data.register_operator(req) {
        //     Ok(()) => Ok(Response::new(Empty {})),
        //     Err(e) => Err(Status::failed_precondition(e.to_string())),
        // }
    }

    async fn kick_contract(&self, _req: Request<KickReq>) -> Result<Response<KickResp>, Status> {
        todo!();
        // let req = check_sig_from_req(req)?;
        // match self.data.kick_contract(&req.operator_wallet, &req.contract_uuid, &req.reason).await {
        //     Ok(nano_lp) => Ok(Response::new(KickResp { nano_lp })),
        //     Err(e) => Err(Status::permission_denied(e.to_string())),
        // }
    }

    async fn ban_user(&self, _req: Request<BanUserReq>) -> Result<Response<Empty>, Status> {
        todo!();
        // let req = check_sig_from_req(req)?;
        // self.data.ban_user(&req.operator_wallet, &req.user_wallet);
        // Ok(Response::new(Empty {}))
    }

    // admin commands

    async fn airdrop(&self, req: Request<AirdropReq>) -> Result<Response<Empty>, Status> {
        check_admin_key(&req)?;
        let req = check_sig_from_req(req)?;
        db::Account::airdrop(&req.pubkey, req.tokens).await?;
        Ok(Response::new(Empty {}))
    }

    async fn slash(&self, _req: Request<SlashReq>) -> Result<Response<Empty>, Status> {
        todo!();
        // check_admin_key(&req)?;
        // let req = check_sig_from_req(req)?;
        // self.data.slash_account(&req.pubkey, req.tokens);
        // Ok(Response::new(Empty {}))
    }

    async fn list_accounts(
        &self,
        _req: Request<Empty>,
    ) -> Result<Response<Self::ListAccountsStream>, Status> {
        todo!();
        // check_admin_key(&req)?;
        // let _ = check_sig_from_req(req)?;
        // let accounts = self.data.list_accounts();
        // let (tx, rx) = mpsc::channel(6);
        // tokio::spawn(async move {
        //     for account in accounts {
        //         let _ = tx.send(Ok(account.into())).await;
        //     }
        // });
        // let output_stream = ReceiverStream::new(rx);
        // Ok(Response::new(Box::pin(output_stream) as Self::ListAccountsStream))
    }

    async fn list_all_vm_contracts(
        &self,
        _req: Request<Empty>,
    ) -> Result<Response<Self::ListAllVmContractsStream>, Status> {
        todo!();
        // check_admin_key(&req)?;
        // let _ = check_sig_from_req(req)?;
        // let contracts = self.data.list_all_contracts();
        // let (tx, rx) = mpsc::channel(6);
        // tokio::spawn(async move {
        //     for contract in contracts {
        //         let _ = tx.send(Ok(contract.into())).await;
        //     }
        // });
        // let output_stream = ReceiverStream::new(rx);
        // Ok(Response::new(Box::pin(output_stream) as Self::ListAllVmContractsStream))
    }

    async fn list_all_app_contracts(
        &self,
        _req: tonic::Request<Empty>,
    ) -> Result<tonic::Response<Self::ListAllAppContractsStream>, Status> {
        todo!();
        // check_admin_key(&req)?;
        // let _ = check_sig_from_req(req)?;
        // let contracts = self.data.list_all_app_contracts();
        // let (tx, rx) = mpsc::channel(6);
        // tokio::spawn(async move {
        //     for contract in contracts {
        //         let _ = tx.send(Ok(contract.into())).await;
        //     }
        // });
        // let output_stream = ReceiverStream::new(rx);
        // Ok(Response::new(Box::pin(output_stream)))
    }
}

pub struct BrainVmCliForReal {}

#[tonic::async_trait]
impl BrainVmCli for BrainVmCliForReal {
    type ListVmContractsStream = Pin<Box<dyn Stream<Item = Result<VmContract, Status>> + Send>>;
    type ListVmNodesStream = Pin<Box<dyn Stream<Item = Result<VmNodeListResp, Status>> + Send>>;

    async fn new_vm(&self, req: Request<NewVmReq>) -> Result<Response<NewVmResp>, Status> {
        let req = check_sig_from_req(req)?;
        info!("New VM requested via CLI: {req:?}");
        todo!();
        // if self
        //     .data
        //     .is_user_banned_by_node(&req.admin_pubkey, &req.node_pubkey)
        // {
        //     return Err(Status::permission_denied(
        //         "This operator banned you. What did you do?",
        //     ));
        // }
        // let admin_pubkey = req.admin_pubkey.clone();
        // let (oneshot_tx, oneshot_rx) = tokio::sync::oneshot::channel();
        // self.data.submit_newvm_req(req, oneshot_tx).await;
        // match oneshot_rx.await {
        //     Ok(response) => {
        //         info!("Sending VM confirmation to {admin_pubkey}: {response:?}");
        //         Ok(Response::new(response))
        //     }
        //     Err(e) => {
        //         log::error!("Something weird happened. Reached error {e:?}");
        //         Err(Status::unknown(
        //             "Request failed due to unknown error. Please try again or contact the DeTEE devs team.",
        //         ))
        //     }
        // }
    }

    async fn update_vm(&self, req: Request<UpdateVmReq>) -> Result<Response<UpdateVmResp>, Status> {
        let req = check_sig_from_req(req)?;
        info!("Update VM requested via CLI: {req:?}");
        todo!();
        // let (oneshot_tx, oneshot_rx) = tokio::sync::oneshot::channel();
        // self.data.submit_updatevm_req(req, oneshot_tx).await;
        // match oneshot_rx.await {
        //     Ok(response) => {
        //         info!("Sending UpdateVMResp: {response:?}");
        //         Ok(Response::new(response))
        //     }
        //     Err(e) => Err(Status::unknown(format!(
        //         "Update VM request failed due to error: {e}"
        //     ))),
        // }
    }

    async fn extend_vm(&self, req: Request<ExtendVmReq>) -> Result<Response<Empty>, Status> {
        let _req = check_sig_from_req(req)?;
        todo!();
        // match self
        //     .data
        //     .extend_vm_contract_time(&req.uuid, &req.admin_pubkey, req.locked_nano)
        // {
        //     Ok(()) => Ok(Response::new(Empty {})),
        //     Err(e) => Err(Status::unknown(format!("Could not extend contract: {e}"))),
        // }
    }

    async fn delete_vm(&self, req: Request<DeleteVmReq>) -> Result<Response<Empty>, Status> {
        let _req = check_sig_from_req(req)?;
        todo!();
        // match self.data.delete_vm(req).await {
        //     Ok(()) => Ok(Response::new(Empty {})),
        //     Err(e) => Err(Status::not_found(e.to_string())),
        // }
    }

    async fn list_vm_contracts(
        &self,
        req: Request<ListVmContractsReq>,
    ) -> Result<Response<Self::ListVmContractsStream>, Status> {
        let req = check_sig_from_req(req)?;
        info!(
            "CLI {} requested ListVmContractsStream. As operator: {}",
            req.wallet, req.as_operator
        );
        let mut contracts = Vec::new();
        if !req.uuid.is_empty() {
            if let Some(specific_contract) = db::ActiveVmWithNode::get_by_uuid(&req.uuid).await? {
                if specific_contract.admin.key().to_string() == req.wallet {
                    contracts.push(specific_contract.into());
                }
                // TODO: allow operator to inspect contracts
            }
        } else if req.as_operator {
            contracts
                .append(&mut db::ActiveVmWithNode::list_by_operator(&req.wallet).await?.into());
        } else {
            contracts.append(&mut db::ActiveVmWithNode::list_by_admin(&req.wallet).await?.into());
        }
        let (tx, rx) = mpsc::channel(6);
        tokio::spawn(async move {
            for contract in contracts {
                let _ = tx.send(Ok(contract.into())).await;
            }
        });
        let output_stream = ReceiverStream::new(rx);
        Ok(Response::new(Box::pin(output_stream) as Self::ListVmContractsStream))
    }

    async fn list_vm_nodes(
        &self,
        req: Request<VmNodeFilters>,
    ) -> Result<Response<Self::ListVmNodesStream>, tonic::Status> {
        let req = check_sig_from_req(req)?;
        info!("CLI requested ListVmNodesStream: {req:?}");
        todo!();
        // let nodes = self.data.find_vm_nodes_by_filters(&req);
        // let (tx, rx) = mpsc::channel(6);
        // tokio::spawn(async move {
        //     for node in nodes {
        //         let _ = tx.send(Ok(node.into())).await;
        //     }
        // });
        // let output_stream = ReceiverStream::new(rx);
        // Ok(Response::new(
        //     Box::pin(output_stream) as Self::ListVmNodesStream
        // ))
    }

    async fn get_one_vm_node(
        &self,
        req: Request<VmNodeFilters>,
    ) -> Result<Response<VmNodeListResp>, Status> {
        let req = check_sig_from_req(req)?;
        info!("CLI requested GetOneVmNode: {req:?}");
        todo!();
        // match self.data.get_one_node_by_filters(&req) {
        //     Some(node) => Ok(Response::new(node.into())),
        //     None => Err(Status::not_found(
        //         "Could not find any node based on your search criteria",
        //     )),
        // }
    }
}

trait PubkeyGetter {
    fn get_pubkey(&self) -> Option<String>;
}

macro_rules! impl_pubkey_getter {
    ($t:ty, $field:ident) => {
        impl PubkeyGetter for $t {
            fn get_pubkey(&self) -> Option<String> {
                Some(self.$field.clone())
            }
        }
    };
    ($t:ty) => {
        impl PubkeyGetter for $t {
            fn get_pubkey(&self) -> Option<String> {
                None
            }
        }
    };
}

impl_pubkey_getter!(Pubkey, pubkey);
impl_pubkey_getter!(NewVmReq, admin_pubkey);
impl_pubkey_getter!(DeleteVmReq, admin_pubkey);
impl_pubkey_getter!(UpdateVmReq, admin_pubkey);
impl_pubkey_getter!(ExtendVmReq, admin_pubkey);
impl_pubkey_getter!(ReportNodeReq, admin_pubkey);
impl_pubkey_getter!(ListVmContractsReq, wallet);
impl_pubkey_getter!(RegisterVmNodeReq, node_pubkey);
impl_pubkey_getter!(RegOperatorReq, pubkey);
impl_pubkey_getter!(KickReq, operator_wallet);
impl_pubkey_getter!(BanUserReq, operator_wallet);

impl_pubkey_getter!(VmNodeFilters);
impl_pubkey_getter!(Empty);
impl_pubkey_getter!(AirdropReq);
impl_pubkey_getter!(SlashReq);

// impl_pubkey_getter!(NewAppReq, admin_pubkey);
// impl_pubkey_getter!(DelAppReq, admin_pubkey);
// impl_pubkey_getter!(ListAppContractsReq, admin_pubkey);
//
// impl_pubkey_getter!(RegisterAppNodeReq);
// impl_pubkey_getter!(AppNodeFilters);
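
// The impls above let `check_sig_from_req` (below) pull the caller's pubkey out of any request
// type through a single trait. A minimal sketch of what the two macro arms provide; the test
// module is illustrative only and assumes the prost-generated proto types derive `Default`:
#[cfg(test)]
mod pubkey_getter_tests {
    use super::*;

    #[test]
    fn field_arm_returns_the_pubkey_field() {
        let req = Pubkey { pubkey: "some-wallet".to_string(), ..Default::default() };
        assert_eq!(req.get_pubkey(), Some("some-wallet".to_string()));
    }

    #[test]
    fn unit_arm_returns_none() {
        assert_eq!(Empty::default().get_pubkey(), None);
    }
}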

fn check_sig_from_req<T: std::fmt::Debug + PubkeyGetter>(req: Request<T>) -> Result<T, Status> {
    let time = match req.metadata().get("timestamp") {
        Some(t) => t.clone(),
        None => return Err(Status::unauthenticated("Timestamp not found in metadata.")),
    };
    let time = time
        .to_str()
        .map_err(|_| Status::unauthenticated("Timestamp in metadata is not a string"))?;

    let now = chrono::Utc::now();
    let parsed_time = chrono::DateTime::parse_from_rfc3339(time)
        .map_err(|_| Status::unauthenticated("Could not parse timestamp"))?;
    let seconds_elapsed = now.signed_duration_since(parsed_time).num_seconds();
    if seconds_elapsed > 4 || seconds_elapsed < -4 {
        return Err(Status::unauthenticated(format!(
            "Timestamp is not within 4 seconds of the server time: CLI {} vs Server {}",
            parsed_time, now
        )));
    }

    let signature = match req.metadata().get("request-signature") {
        Some(t) => t,
        None => return Err(Status::unauthenticated("signature not found in metadata.")),
    };
    let signature = bs58::decode(signature)
        .into_vec()
        .map_err(|_| Status::unauthenticated("signature is not a bs58 string"))?;
    let signature = ed25519_dalek::Signature::from_bytes(
        signature
            .as_slice()
            .try_into()
            .map_err(|_| Status::unauthenticated("could not parse ed25519 signature"))?,
    );

    let pubkey_value = match req.metadata().get("pubkey") {
        Some(p) => p.clone(),
        None => return Err(Status::unauthenticated("pubkey not found in metadata.")),
    };
    let pubkey = ed25519_dalek::VerifyingKey::from_bytes(
        &bs58::decode(&pubkey_value)
            .into_vec()
            .map_err(|_| Status::unauthenticated("pubkey is not a bs58 string"))?
            .try_into()
            .map_err(|_| Status::unauthenticated("pubkey does not have the correct size."))?,
    )
    .map_err(|_| Status::unauthenticated("could not parse ed25519 pubkey"))?;

    let req = req.into_inner();
    let message = format!("{time}{req:?}");
    use ed25519_dalek::Verifier;
    pubkey
        .verify(message.as_bytes(), &signature)
        .map_err(|_| Status::unauthenticated("the signature is not valid"))?;
    if let Some(req_pubkey) = req.get_pubkey() {
        if pubkey_value.to_str().unwrap().to_string() != req_pubkey {
            return Err(Status::unauthenticated(
                "pubkey of signature does not match pubkey of request",
            ));
        }
    }
    Ok(req)
}
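
// A round-trip sketch of the metadata contract enforced above: the client puts an RFC 3339
// `timestamp`, a bs58 `pubkey` and a bs58 `request-signature` into the request metadata, and the
// signature covers `timestamp` followed by the `Debug` form of the request body. The test module
// is illustrative only and assumes ed25519-dalek 2.x (`SigningKey`, `Signer`) is available.
#[cfg(test)]
mod check_sig_from_req_tests {
    use super::*;
    use ed25519_dalek::Signer;

    #[test]
    fn signed_request_passes_verification() {
        // Hypothetical key material; any 32 bytes work as an ed25519 seed.
        let signing_key = ed25519_dalek::SigningKey::from_bytes(&[42u8; 32]);
        let pubkey_b58 = bs58::encode(signing_key.verifying_key().as_bytes()).into_string();

        let body = Pubkey { pubkey: pubkey_b58.clone(), ..Default::default() };
        let timestamp = chrono::Utc::now().to_rfc3339();
        let signature = signing_key.sign(format!("{timestamp}{body:?}").as_bytes());

        let mut req = Request::new(body);
        let md = req.metadata_mut();
        md.insert("timestamp", timestamp.parse().unwrap());
        md.insert("pubkey", pubkey_b58.parse().unwrap());
        md.insert(
            "request-signature",
            bs58::encode(signature.to_bytes()).into_string().parse().unwrap(),
        );

        assert!(check_sig_from_req(req).is_ok());
    }
}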

fn check_sig_from_parts(pubkey: &str, time: &str, msg: &str, sig: &str) -> Result<(), Status> {
    let now = chrono::Utc::now();
    let parsed_time = chrono::DateTime::parse_from_rfc3339(time)
        .map_err(|_| Status::unauthenticated("Could not parse timestamp"))?;
    let seconds_elapsed = now.signed_duration_since(parsed_time).num_seconds();
    if seconds_elapsed > 4 || seconds_elapsed < -4 {
        return Err(Status::unauthenticated(format!(
            "Timestamp is not within 4 seconds of the server time: CLI {} vs Server {}",
            parsed_time, now
        )));
    }

    let signature = bs58::decode(sig)
        .into_vec()
        .map_err(|_| Status::unauthenticated("signature is not a bs58 string"))?;
    let signature = ed25519_dalek::Signature::from_bytes(
        signature
            .as_slice()
            .try_into()
            .map_err(|_| Status::unauthenticated("could not parse ed25519 signature"))?,
    );

    let pubkey = ed25519_dalek::VerifyingKey::from_bytes(
        &bs58::decode(&pubkey)
            .into_vec()
            .map_err(|_| Status::unauthenticated("pubkey is not a bs58 string"))?
            .try_into()
            .map_err(|_| Status::unauthenticated("pubkey does not have the correct size."))?,
    )
    .map_err(|_| Status::unauthenticated("could not parse ed25519 pubkey"))?;

    let msg = time.to_string() + msg;
    use ed25519_dalek::Verifier;
    pubkey
        .verify(msg.as_bytes(), &signature)
        .map_err(|_| Status::unauthenticated("the signature is not valid"))?;

    Ok(())
}
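
// This is the path used by `brain_messages`: the daemon signs `timestamp` followed by the
// `Debug` form of its contract list. A minimal round-trip sketch, illustrative only and assuming
// ed25519-dalek 2.x key construction from a raw 32-byte seed:
#[cfg(test)]
mod check_sig_from_parts_tests {
    use super::*;
    use ed25519_dalek::Signer;

    #[test]
    fn daemon_auth_roundtrip_verifies() {
        let signing_key = ed25519_dalek::SigningKey::from_bytes(&[7u8; 32]);
        let pubkey = bs58::encode(signing_key.verifying_key().as_bytes()).into_string();

        // Hypothetical payload: the Debug form of an empty contract list.
        let contracts: Vec<String> = Vec::new();
        let msg = format!("{:?}", contracts);
        let timestamp = chrono::Utc::now().to_rfc3339();
        let signature = signing_key.sign(format!("{timestamp}{msg}").as_bytes());
        let sig = bs58::encode(signature.to_bytes()).into_string();

        assert!(check_sig_from_parts(&pubkey, &timestamp, &msg, &sig).is_ok());
    }
}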

const ADMIN_ACCOUNTS: &[&str] = &[
    "x52w7jARC5erhWWK65VZmjdGXzBK6ZDgfv1A283d8XK",
    "FHuecMbeC1PfjkW2JKyoicJAuiU7khgQT16QUB3Q1XdL",
    "H21Shi4iE7vgfjWEQNvzmpmBMJSaiZ17PYUcdNoAoKNc",
];

fn check_admin_key<T>(req: &Request<T>) -> Result<(), Status> {
    let pubkey = match req.metadata().get("pubkey") {
        Some(p) => p.clone(),
        None => return Err(Status::unauthenticated("pubkey not found in metadata.")),
    };
    let pubkey = pubkey
        .to_str()
        .map_err(|_| Status::unauthenticated("could not parse pubkey metadata to str"))?;

    if !ADMIN_ACCOUNTS.contains(&pubkey) {
        return Err(Status::unauthenticated("This operation is reserved for admin accounts"));
    }

    Ok(())
}
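
// A small sketch of the admin gate: only a `pubkey` metadata value that appears verbatim in
// ADMIN_ACCOUNTS is accepted; anything else (or a missing header) is rejected. Illustrative
// test module, not part of the original service wiring.
#[cfg(test)]
mod check_admin_key_tests {
    use super::*;

    #[test]
    fn only_listed_pubkeys_pass() {
        // Missing pubkey metadata is rejected.
        assert!(check_admin_key(&Request::new(Empty::default())).is_err());

        // An arbitrary pubkey is rejected.
        let mut req = Request::new(Empty::default());
        req.metadata_mut().insert("pubkey", "not-an-admin".parse().unwrap());
        assert!(check_admin_key(&req).is_err());

        // A pubkey from the allow-list passes.
        let mut req = Request::new(Empty::default());
        req.metadata_mut().insert("pubkey", ADMIN_ACCOUNTS[0].parse().unwrap());
        assert!(check_admin_key(&req).is_ok());
    }
}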