rewrite (#2)
- running clippy fix
- separating the homepage into a file
- adding a summary of network security
- removing the rewrite structure
- removing catch_unwind
- adding sealing to persistence
- redirecting to the upstream
- fixing some startup edge cases

Co-authored-by: Jakub Doka <jakub.doka2@gmail.com>
Reviewed-on: SGX/hacker-challenge-sgx#2
parent 2517285dd3
commit 09a84a15f3
rewrite/Cargo.lock → Cargo.lock  (generated, 89 changed lines)
@@ -1,6 +1,6 @@
 # This file is automatically @generated by Cargo.
 # It is not intended for manual editing.
-version = 3
+version = 4

 [[package]]
 name = "Inflector"
@@ -231,6 +231,20 @@ dependencies = [
  "cpufeatures",
 ]

+[[package]]
+name = "aes-gcm"
+version = "0.10.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "831010a0f742e1209b3bcea8fab6a8e149051ba6099432c8cb2cc117dec3ead1"
+dependencies = [
+ "aead",
+ "aes",
+ "cipher",
+ "ctr",
+ "ghash",
+ "subtle",
+]
+
 [[package]]
 name = "aes-gcm-siv"
 version = "0.11.1"
@@ -1508,6 +1522,33 @@ dependencies = [
  "syn 2.0.79",
 ]

+[[package]]
+name = "detee-sgx"
+version = "0.1.0"
+source = "git+ssh://git@gitea.detee.cloud/SGX/detee-sgx#a47753a8e07ef533cca5df41bea4893c9eeb133e"
+dependencies = [
+ "aes-gcm",
+ "base64 0.22.1",
+ "hex",
+ "hyper 1.4.1",
+ "hyper-rustls 0.27.3",
+ "hyper-util",
+ "lazy_static",
+ "log",
+ "pbkdf2 0.12.2",
+ "prost",
+ "rcgen",
+ "ring 0.17.8",
+ "rustls 0.23.14",
+ "sha2 0.10.8",
+ "tokio-rustls 0.26.0",
+ "tonic",
+ "tonic-build",
+ "tower 0.5.1",
+ "tower-http",
+ "x509-parser 0.16.0",
+]
+
 [[package]]
 name = "dialoguer"
 version = "0.10.4"
@@ -1905,6 +1946,16 @@ dependencies = [
  "wasm-bindgen",
 ]

+[[package]]
+name = "ghash"
+version = "0.5.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f0d8a4362ccb29cb0b265253fb0a2728f592895ee6854fd9bc13f2ffda266ff1"
+dependencies = [
+ "opaque-debug",
+ "polyval",
+]
+
 [[package]]
 name = "gimli"
 version = "0.31.1"
@@ -1975,6 +2026,7 @@ dependencies = [
  "base64 0.22.1",
  "chrono",
  "dashmap 6.1.0",
+ "detee-sgx",
  "env_logger 0.11.5",
  "hex",
  "hyper 1.4.1",
@@ -1982,7 +2034,6 @@ dependencies = [
  "hyper-util",
  "lazy_static",
  "log",
- "occlum-ratls",
  "prost",
  "prost-types",
  "rand 0.8.5",
@@ -2898,30 +2949,6 @@ dependencies = [
  "memchr",
 ]

-[[package]]
-name = "occlum-ratls"
-version = "0.1.0"
-source = "git+ssh://git@gitea.detee.cloud/vfaychuk/occlum-ratls#198cc0c70d8edd6000274137cfe5849bf4b9c62e"
-dependencies = [
- "base64 0.22.1",
- "hex",
- "hyper 1.4.1",
- "hyper-rustls 0.27.3",
- "hyper-util",
- "lazy_static",
- "log",
- "prost",
- "rcgen",
- "ring 0.17.8",
- "rustls 0.23.14",
- "tokio-rustls 0.26.0",
- "tonic",
- "tonic-build",
- "tower 0.5.1",
- "tower-http",
- "x509-parser 0.16.0",
-]
-
 [[package]]
 name = "oid-registry"
 version = "0.6.1"
@@ -3011,6 +3038,16 @@ dependencies = [
  "digest 0.10.7",
 ]

+[[package]]
+name = "pbkdf2"
+version = "0.12.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f8ed6a7761f76e3b9f92dfb0a60a6a6477c61024b775147ff0973a02653abaf2"
+dependencies = [
+ "digest 0.10.7",
+ "hmac 0.12.1",
+]
+
 [[package]]
 name = "pem"
 version = "1.1.1"
@@ -37,7 +37,9 @@ hyper-rustls = { version = "0.27", features = ["http2"] }
 base64 = "0.22"
 lazy_static = "1.5"
 # TODO: create a feature for testing, make occlum feature optional and added only if not compiling for testing
-detee-sgx = { git = "ssh://git@gitea.detee.cloud/sgx/detee-sgx", features = ["tonic", "occlum"] }
+detee-sgx = { git = "ssh://git@gitea.detee.cloud/SGX/detee-sgx", features = ["tonic", "occlum", "sealing"] }
+
+
 env_logger = "0.11"

 [build-dependencies]
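The new `sealing` feature is what pulls `aes-gcm` and `pbkdf2` into the lockfile hunks above. The actual sealing interface of `detee-sgx` is not part of this diff, so the following is only a minimal sketch, assuming a 256-bit sealing key has already been derived inside the enclave (e.g. via PBKDF2 from the SGX sealing identity); the function name is illustrative:

```rust
// Hedged sketch only: seal a record with AES-256-GCM before persisting it.
// `sealing_key` is assumed to come from the enclave's sealing-key derivation,
// which this diff does not show.
use aes_gcm::{
    aead::{Aead, AeadCore, KeyInit, OsRng},
    Aes256Gcm, Key,
};

fn seal_record(plaintext: &[u8], sealing_key: &[u8; 32]) -> Result<Vec<u8>, aes_gcm::Error> {
    let cipher = Aes256Gcm::new(Key::<Aes256Gcm>::from_slice(sealing_key));
    // Fresh 96-bit random nonce per record, stored in front of the ciphertext.
    let nonce = Aes256Gcm::generate_nonce(&mut OsRng);
    let mut sealed = nonce.to_vec();
    sealed.extend(cipher.encrypt(&nonce, plaintext)?);
    Ok(sealed)
}
```

Unsealing would split the first 12 bytes back off as the nonce and call `decrypt`; AES-GCM then also authenticates the record, so tampered files fail to open rather than yielding garbage keys.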
@@ -18,6 +18,18 @@ exit # Feel free to exit the container

 To test the challenge you will need SGX support.

+```bash
+# this will boot a few containers running servers that replicate the private key generated by one of the nodes
+./scripts/testnet.sh
+# to interact with the servers, list the ports
+docker container list
+# pick a node and
+curl localhost:your-port
+# the node will respond with a homepage containing more information
+```
+
+The node homepage source is also [here](./src/HOMEPAGE.md).
+
 ## Contributing to the challenge

 Don't forget to run `cargo clippy` and `cargo fmt` before submitting a PR.
legacy_challenge/Cargo.lock  (generated, 3119 changed lines; file diff suppressed because it is too large)
@@ -1,22 +0,0 @@
-[package]
-name = "hacker-challenge"
-version = "0.1.0"
-edition = "2021"
-
-[dependencies]
-actix-web = "4.9.0"
-async-stream = "0.3.5"
-dashmap = "6.0.1"
-hex = "0.4.3"
-prost = "0.13.1"
-prost-types = "0.13.1"
-rand = "0.8.5"
-serde = { version = "1.0.210", features = ["derive"] }
-solana-sdk = "2.0.9"
-tabled = "0.16.0"
-tokio = { version = "1.39.2", features = ["fs", "macros", "rt-multi-thread"] }
-tokio-stream = { version = "0.1.15", features = ["sync"] }
-tonic = "0.12.1"
-
-[build-dependencies]
-tonic-build = "0.12.1"
@@ -1,9 +0,0 @@
-fn main() {
-    tonic_build::configure()
-        .build_server(true)
-        .compile(
-            &["proto/challenge.proto"],
-            &["proto"],
-        )
-        .unwrap_or_else(|e| panic!("Failed to compile protos {:?}", e));
-}
@@ -1,15 +0,0 @@
-syntax = "proto3";
-package challenge;
-
-import "google/protobuf/timestamp.proto";
-
-message NodeUpdate {
-    string ip = 1;
-    string keypair = 2;
-    google.protobuf.Timestamp updated_at = 3;
-    bool public = 4;
-}
-
-service Update {
-    rpc GetUpdates (stream NodeUpdate) returns (stream NodeUpdate);
-}
@@ -1,3 +0,0 @@
-reorder_impl_items = true
-use_small_heuristics = "Max"
-merge_imports = true
@@ -1,4 +0,0 @@
-FROM alpine:edge
-COPY start.sh /start.sh
-COPY hacker-challenge /hacker-challenge
-ENTRYPOINT ["/start.sh"]
@@ -1,50 +0,0 @@
-#!/bin/bash
-cd "$(dirname "$0")"/..
-
-containers=$(docker ps -a | grep -c 'hacker-challenge')
-
-if (( "$containers" < 10 )); then
-    echo you are supposed to run this after you run ./scripts/testnet.sh
-    exit 1
-fi
-
-set -e
-
-echo -n "Checking if containers connected to each other... "
-for i in {2..12}
-do
-    ip="172.17.0.${i}"
-    curl -s "${ip}:31372/memory" | grep -e true -e false -c | grep 52 > /dev/null ||
-        echo Container at ip ${ip} did not connect to all other containers.
-done
-echo OK!
-
-echo -n "Checking if containers can sign data... "
-for i in {2..52}
-do
-    ip="172.17.0.${i}"
-    random_key=$(curl -s "${ip}:31372/memory" | grep true | tail -1 | awk '{ print $4 }')
-    message="ValyDoesNotLikeMyCodeSoHeIsSilentAboutIt"
-    mkdir -p .tmp
-    status=$(curl -sG \
-        -o .tmp/output -w "%{http_code}\n" \
-        --data-urlencode "pubkey=${random_key}" \
-        --data-urlencode "something=${message}" \
-        "172.17.0.${i}:31372/memory/sign")
-
-    if (( "$status" != "200" )); then
-        echo Container at ip ${ip} could not sign string with key ${random_key}
-        echo The status was $status
-        echo The error was $(cat .tmp/output)
-        echo Output of keys on 172.17.0.${i}:
-        curl "172.17.0.${i}:31372/memory"
-        father_of_key=$(curl "172.17.0.${i}:31372/memory" | grep ${random_key} | awk '{ print $2 }')
-        echo Output of keys on ${father_of_key}:
-        curl "${father_of_key}:31372/memory"
-        rm -rf .tmp
-        exit 1
-    fi
-
-done
-echo OK!
-rm -rf .tmp
@@ -1,9 +0,0 @@
-#!/bin/sh
-
-# This script starts the hacker challenge from within the docker container.
-# Its only purpose is to help bootstrap a test network.
-
-echo $INIT_NODES | tr ' ' '\n' > /detee_challenge_nodes
-touch /detee_challenge_node_history
-
-/hacker-challenge
@@ -1,26 +0,0 @@
-#!/bin/bash
-cd "$(dirname "$0")"/..
-set -e
-cargo build --release --target x86_64-unknown-linux-musl
-rm -rf build
-mkdir -p build
-cp ./target/x86_64-unknown-linux-musl/release/hacker-challenge build/
-cp scripts/start.sh build/
-cp scripts/Dockerfile build/
-cd build
-
-docker build -t hacker-challenge:latest .
-
-docker ps -a | grep 'hacker-challenge' | awk '{ print $NF }' | xargs docker rm -f || true
-
-for i in {0..50}
-do
-    docker run -d --name "hacker-challenge_$i" \
-        --env INIT_NODES="172.17.0.2 172.17.0.3 172.17.0.4" \
-        hacker-challenge:latest
-done
-
-cd ..
-echo sleeping 10 seconds before starting tests...
-sleep 10
-source ./scripts/run_tests.sh
@@ -1,439 +0,0 @@
-use crate::{grpc::challenge::NodeUpdate, persistence::FileManager};
-use dashmap::{DashMap, DashSet};
-use solana_sdk::{
-    pubkey::{ParsePubkeyError, Pubkey},
-    signature::{keypair::Keypair, Signer},
-};
-use std::{
-    str::FromStr,
-    time::{Duration, SystemTime, UNIX_EPOCH},
-};
-use tabled::{Table, Tabled};
-
-#[derive(Clone, PartialEq, Debug)]
-pub struct NodeInfo {
-    pub pubkey: Pubkey,
-    pub updated_at: SystemTime,
-    pub public: bool,
-}
-
-/// Needs to be surrounded in an Arc.
-pub struct Store {
-    nodes: DashMap<IP, NodeInfo>,
-    conns: DashSet<IP>,
-    keys: DashMap<Pubkey, Keypair>,
-    persistence: FileManager,
-}
-
-#[derive(Debug)]
-pub enum SigningError {
-    CorruptedKey,
-    KeyNotFound,
-}
-
-impl From<hex::FromHexError> for SigningError {
-    fn from(_: hex::FromHexError) -> Self {
-        Self::CorruptedKey
-    }
-}
-
-impl From<ParsePubkeyError> for SigningError {
-    fn from(_: ParsePubkeyError) -> Self {
-        Self::CorruptedKey
-    }
-}
-
-impl From<std::array::TryFromSliceError> for SigningError {
-    fn from(_: std::array::TryFromSliceError) -> Self {
-        Self::CorruptedKey
-    }
-}
-
-impl From<std::io::Error> for SigningError {
-    fn from(_: std::io::Error) -> Self {
-        Self::CorruptedKey
-    }
-}
-
-type IP = String;
-
-impl std::fmt::Display for SigningError {
-    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        let error_message = match self {
-            SigningError::CorruptedKey => "The public key is corrupted",
-            SigningError::KeyNotFound => "Did not find the private key",
-        };
-        write!(f, "{}", error_message)
-    }
-}
-
-impl Store {
-    // app should exit if any error happens here so unwrap() is good
-    pub async fn init(path: &str) -> Self {
-        Self {
-            nodes: DashMap::new(),
-            keys: DashMap::new(),
-            conns: DashSet::new(),
-            persistence: FileManager::init(path).await.unwrap(),
-        }
-    }
-
-    pub async fn add_conn(&self, ip: &str) {
-        self.conns.insert(ip.to_string());
-    }
-
-    pub async fn delete_conn(&self, ip: &str) {
-        self.conns.remove(ip);
-    }
-
-    pub async fn tabled_memory_list(&self) -> String {
-        #[derive(Tabled)]
-        struct OutputRow {
-            ip: String,
-            pubkey: String,
-            age: u64,
-            public: bool,
-        }
-        let mut output = vec![];
-        for (ip, node_info) in self.nodes.iter().map(|n| (n.key().clone(), n.value().clone())) {
-            let pubkey = node_info.pubkey.to_string();
-            let age = SystemTime::now()
-                .duration_since(node_info.updated_at)
-                .unwrap_or(Duration::ZERO)
-                .as_secs();
-            let public = node_info.public;
-            output.push(OutputRow { ip, pubkey, age, public });
-        }
-        Table::new(output).to_string()
-    }
-
-    pub async fn tabled_disk_list(&self, page: u64) -> String {
-        let mut offset = page.wrapping_mul(20);
-        #[derive(Tabled)]
-        struct OutputRow {
-            id: u64,
-            ip: String,
-            pubkey: String,
-            timestamp: String,
-        }
-        let mut output = vec![];
-        for (ip, keypair, timestamp) in
-            self.persistence.get_page_of_20(offset).await.unwrap().iter().map(|n| {
-                (
-                    n.ip.to_string(),
-                    Keypair::from_bytes(&n.keypair.to_bytes()).unwrap(),
-                    n.joined_at.duration_since(UNIX_EPOCH).unwrap().as_secs().to_string(),
-                )
-            })
-        {
-            let id = offset;
-            let pubkey = keypair.pubkey().to_string();
-            output.push(OutputRow { id, ip, pubkey, timestamp });
-            offset += 1;
-        }
-        Table::new(output).to_string()
-    }
-
-    pub async fn disk_sign_message_with_key(
-        &self,
-        message: &str,
-        key_id: u64,
-    ) -> Result<String, SigningError> {
-        let crate::persistence::Node { keypair, .. } =
-            self.persistence.get_node_by_id(key_id).await?;
-        let signature = keypair.sign_message(message.as_bytes());
-        Ok(signature.to_string())
-    }
-
-    pub async fn sign_message_with_key(
-        &self,
-        message: &str,
-        pubkey: &str,
-    ) -> Result<String, SigningError> {
-        let pubkey = Pubkey::from_str(&pubkey)?;
-
-        let keypair = match self.get_keypair(&pubkey).await {
-            Some(k) => k,
-            None => return Err(SigningError::KeyNotFound),
-        };
-        let signature = keypair.sign_message(message.as_bytes());
-        Ok(signature.to_string())
-    }
-
-    pub async fn add_key(&self, pubkey: Pubkey, keypair: Keypair) {
-        self.keys.insert(pubkey, keypair);
-    }
-
-    pub async fn remove_key(&self, pubkey: &Pubkey) {
-        self.keys.remove(pubkey);
-    }
-
-    async fn get_keypair(&self, pubkey: &Pubkey) -> Option<Keypair> {
-        self.keys.get(pubkey).map(|k| Keypair::from_bytes(&k.to_bytes()).unwrap())
-    }
-
-    /// This returns true if NodeInfo got modified.
-    ///
-    /// On a side note, there are two types of people in this world:
-    /// 1. Those that can extrapolate... WAT?
-    pub async fn process_node_update(&self, node: NodeUpdate) -> bool {
-        // solana-sdk is great; it panics if the base58 string is corrupted
-        // we wrap this in catch_unwind() to make sure it doesn't crash the app
-        let keypair =
-            match std::panic::catch_unwind(|| Keypair::from_base58_string(&node.keypair.clone())) {
-                Ok(k) => k,
-                Err(_) => return false,
-            };
-        let pubkey = keypair.pubkey();
-
-        // TODO: check this suggestion
-        // let updated_at_std = node
-        //     .updated_at
-        //     .map(SystemTime::try_from)
-        //     .unwrap_or(Ok(SystemTime::now()))
-        //     .unwrap_or(SystemTime::now());
-        let updated_at_std: SystemTime = match node.updated_at {
-            Some(ts) => {
-                let duration = Duration::new(ts.seconds as u64, ts.nanos as u32);
-                UNIX_EPOCH.checked_add(duration).unwrap_or(SystemTime::now())
-            }
-            None => SystemTime::now(),
-        };
-
-        self.add_key(pubkey, Keypair::from_bytes(&keypair.to_bytes()).unwrap()).await;
-        let node_info = NodeInfo { pubkey, updated_at: updated_at_std, public: node.public };
-        if let Some(mut old_node_info) = self.update_node(node.ip.clone(), node_info.clone()).await
-        {
-            if !node_info.public {
-                old_node_info.public = node_info.public;
-            }
-            match old_node_info.ne(&node_info) {
-                true => {
-                    self.remove_key(&old_node_info.pubkey).await;
-                    true
-                }
-                false => false,
-            }
-        } else {
-            if let Ok(persistence_node) = (node.ip.as_str(), keypair, updated_at_std).try_into() {
-                if let Err(e) = self.persistence.append_node(persistence_node).await {
-                    println!("Could not save data to disk: {e}.");
-                }
-            }
-            true
-        }
-    }
-
-    /// returns old pubkey if node got updated
-    async fn update_node(&self, ip: String, info: NodeInfo) -> Option<NodeInfo> {
-        if let Some(old_node) = self.nodes.get(&ip) {
-            if old_node.updated_at >= info.updated_at {
-                return Some(info);
-            }
-        }
-        self.nodes.insert(ip, info.clone())
-    }
-
-    pub async fn remove_inactive_nodes(&self) {
-        let mut dangling_pubkeys = Vec::new();
-        self.nodes.retain(|_, v| {
-            let age =
-                SystemTime::now().duration_since(v.updated_at).unwrap_or(Duration::ZERO).as_secs();
-            if age > 120 {
-                dangling_pubkeys.push(v.pubkey.clone());
-                false
-            } else {
-                true
-            }
-        });
-        for pubkey in dangling_pubkeys.iter() {
-            self.keys.remove(pubkey);
-        }
-    }
-
-    pub async fn get_localhost(&self) -> NodeUpdate {
-        // TODO trigger reset_localhost_keys on error instead of expects
-        let node = self.nodes.get("localhost").expect("no localhost node");
-        let keypair = self.keys.get(&node.pubkey).expect("no localhost key");
-        NodeUpdate {
-            ip: "localhost".to_string(),
-            keypair: keypair.to_base58_string(),
-            updated_at: Some(prost_types::Timestamp::from(node.value().updated_at)),
-            public: false,
-        }
-    }
-
-    /// refreshes the keys of the node and returns a protobuf for the network
-    pub async fn reset_localhost_keys(&self) -> NodeUpdate {
-        let keypair_raw = Keypair::new();
-        let keypair = keypair_raw.to_base58_string();
-        let pubkey = keypair_raw.pubkey();
-        let ip = "localhost".to_string();
-        let updated_at = SystemTime::now();
-        let public = false;
-        self.add_key(pubkey, keypair_raw).await;
-        if let Some(old_data) =
-            self.update_node(ip.clone(), NodeInfo { pubkey, updated_at, public }).await
-        {
-            self.remove_key(&old_data.pubkey).await;
-        };
-        let updated_at = Some(prost_types::Timestamp::from(updated_at));
-        NodeUpdate { ip, keypair, updated_at, public }
-    }
-
-    pub async fn get_full_node_list(&self) -> Vec<NodeUpdate> {
-        self.nodes
-            .iter()
-            .filter_map(|node| {
-                self.keys.get(&node.value().pubkey).map(|keypair| NodeUpdate {
-                    ip: node.key().to_string(),
-                    keypair: keypair.to_base58_string(),
-                    updated_at: Some(prost_types::Timestamp::from(node.value().updated_at)),
-                    public: node.value().public,
-                })
-            })
-            .collect()
-    }
-
-    // returns a random node that does not have an active connection
-    pub async fn get_random_node(&self) -> Option<String> {
-        use rand::{rngs::OsRng, RngCore};
-        let len = self.nodes.len();
-        if len == 0 {
-            return None;
-        }
-        let skip = OsRng.next_u64().try_into().unwrap_or(0) % len;
-        self.nodes
-            .iter()
-            .map(|n| n.key().clone())
-            .cycle()
-            .skip(skip)
-            .find(|k| !self.conns.contains(k))
-    }
-}
-
-#[cfg(test)]
-mod tests {
-    use std::time::{SystemTime, UNIX_EPOCH};
-
-    use dashmap::DashMap;
-    use tokio::{fs::File, io::AsyncWriteExt};
-
-    use super::*;
-
-    async fn setup_file_manager(function: &str) -> std::io::Result<FileManager> {
-        let _ = tokio::fs::create_dir_all(".tmp").await;
-        let path = ".tmp/ds_".to_string() + function;
-        let mut file = File::create(path.clone()).await?;
-        file.flush().await?;
-        drop(file);
-        FileManager::init(&path).await
-    }
-
-    #[test]
-    fn node_info_creation() {
-        let keypair = Keypair::new();
-        let node_info =
-            NodeInfo { pubkey: keypair.pubkey(), updated_at: SystemTime::now(), public: true };
-
-        assert_eq!(node_info.pubkey, keypair.pubkey());
-        assert!(node_info.updated_at >= UNIX_EPOCH);
-        assert!(node_info.public);
-    }
-
-    #[tokio::test]
-    async fn store_creation() {
-        let store = Store {
-            nodes: DashMap::new(),
-            conns: DashSet::new(),
-            keys: DashMap::new(),
-            persistence: setup_file_manager("store_creation").await.unwrap(),
-        };
-
-        assert!(store.nodes.is_empty());
-        assert!(store.conns.is_empty());
-        assert!(store.keys.is_empty());
-    }
-
-    #[test]
-    fn signing_error_from_hex_error() {
-        let hex_error: Result<(), hex::FromHexError> =
-            Err(hex::FromHexError::InvalidHexCharacter { c: 'a', index: 0 });
-        let signing_error: SigningError = hex_error.unwrap_err().into();
-
-        match signing_error {
-            SigningError::CorruptedKey => assert!(true),
-            _ => assert!(false, "Expected SigningError::CorruptedKey"),
-        }
-    }
-
-    #[tokio::test]
-    async fn sign_message_with_key() {
-        let keypair = Keypair::new();
-        let pubkey_string = keypair.pubkey().to_string();
-        let store = Store {
-            nodes: DashMap::new(),
-            conns: DashSet::new(),
-            keys: DashMap::new(),
-            persistence: setup_file_manager("sign_message_with_key").await.unwrap(),
-        };
-        store.keys.insert(keypair.pubkey(), keypair);
-
-        let message = "Test message";
-        let result = store.sign_message_with_key(message, &pubkey_string).await;
-
-        assert!(result.is_ok());
-        if let Ok(signature) = result {
-            assert!(!signature.is_empty());
-        }
-    }
-
-    #[tokio::test]
-    async fn process_node_update() {
-        let keypair = Keypair::new();
-        let node_update = NodeUpdate {
-            ip: "127.0.0.1".to_string(),
-            keypair: keypair.to_base58_string(),
-            updated_at: Some(prost_types::Timestamp {
-                seconds: SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs() as i64,
-                nanos: 0,
-            }),
-            public: true,
-        };
-
-        let store = Store {
-            nodes: DashMap::new(),
-            conns: DashSet::new(),
-            keys: DashMap::new(),
-            persistence: setup_file_manager("process_node_update").await.unwrap(),
-        };
-
-        let result = store.process_node_update(node_update).await;
-
-        assert!(result);
-        assert!(store.nodes.contains_key("127.0.0.1"));
-    }
-
-    #[tokio::test]
-    async fn get_full_node_list() {
-        let keypair = Keypair::new();
-        let node_info =
-            NodeInfo { pubkey: keypair.pubkey(), updated_at: SystemTime::now(), public: true };
-
-        let store = Store {
-            nodes: DashMap::new(),
-            conns: DashSet::new(),
-            keys: DashMap::new(),
-            persistence: setup_file_manager("get_full_node_list").await.unwrap(),
-        };
-
-        store.nodes.insert("127.0.0.1".to_string(), node_info);
-        store.keys.insert(keypair.pubkey(), Keypair::from_bytes(&keypair.to_bytes()).unwrap());
-
-        let node_list = store.get_full_node_list().await;
-
-        assert_eq!(node_list.len(), 1);
-        assert_eq!(node_list[0].ip, "127.0.0.1");
-        assert_eq!(node_list[0].keypair, keypair.to_base58_string());
-        assert!(node_list[0].public);
-    }
-}
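The commit message's "removing catch_unwind" refers to the block in `process_node_update` above: `Keypair::from_base58_string` panics on corrupted input, so the old code wrapped it in `std::panic::catch_unwind`. The actual replacement lives in the rewrite and is not shown in this diff; a minimal panic-free sketch, assuming the `bs58` crate is available, could look like this:

```rust
// Hedged sketch: decode the base58 keypair without any panic machinery.
// Both failure paths return None instead of unwinding.
use solana_sdk::signature::Keypair;

fn keypair_from_base58(s: &str) -> Option<Keypair> {
    let bytes = bs58::decode(s).into_vec().ok()?; // invalid base58 -> None
    Keypair::from_bytes(&bytes).ok() // wrong length / bad key bytes -> None
}
```

Returning `Option` keeps the caller's `return false` shape intact while removing the unwind dependency, which also matters inside an enclave runtime where panics are expensive to recover from.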
@@ -1,75 +0,0 @@
-use super::challenge::NodeUpdate;
-use crate::datastore::Store;
-use crate::grpc::challenge::update_client::UpdateClient;
-use std::sync::Arc;
-use tokio::sync::broadcast::Sender;
-use tokio::time::{sleep, Duration};
-use tokio_stream::wrappers::BroadcastStream;
-use tokio_stream::StreamExt;
-
-#[derive(Clone)]
-pub struct ConnManager {
-    ds: Arc<Store>,
-    tx: Sender<NodeUpdate>,
-}
-
-impl ConnManager {
-    pub fn init(ds: Arc<Store>, tx: Sender<NodeUpdate>) -> Self {
-        Self { ds, tx }
-    }
-
-    pub async fn start_with_node(self, node_ip: String) {
-        self.connect_wrapper(node_ip).await;
-    }
-
-    pub async fn start(self) {
-        loop {
-            if let Some(node) = self.ds.get_random_node().await {
-                if node != "localhost" {
-                    self.connect_wrapper(node.clone()).await;
-                }
-            }
-            sleep(Duration::from_secs(3)).await;
-        }
-    }
-
-    async fn connect_wrapper(&self, node_ip: String) {
-        let ds = self.ds.clone();
-        ds.add_conn(&node_ip).await;
-        if let Err(e) = self.connect(node_ip.clone()).await {
-            println!("Client connection for {node_ip} failed: {e:?}");
-        }
-        ds.delete_conn(&node_ip).await;
-    }
-
-    async fn connect(&self, node_ip: String) -> Result<(), Box<dyn std::error::Error>> {
-        println!("Connecting to {node_ip}...");
-        let mut client = UpdateClient::connect(format!("http://{node_ip}:31373")).await?;
-
-        let rx = self.tx.subscribe();
-        let rx_stream = BroadcastStream::new(rx).filter_map(|n| n.ok());
-        let response = client.get_updates(rx_stream).await?;
-        let mut resp_stream = response.into_inner();
-
-        let _ = self.tx.send(self.ds.get_localhost().await);
-
-        while let Some(mut update) = resp_stream.message().await? {
-            // "localhost" IPs need to be changed to the real IP of the counterpart
-            if update.ip == "localhost" {
-                update.ip = node_ip.clone();
-                // since we are connecting TO this server, we have a guarantee that this
-                // server is not behind NAT, so we can set it public
-                update.public = true;
-            }
-
-            // update the entire network in case the information is new
-            if self.ds.process_node_update(update.clone()).await {
-                if let Err(_) = self.tx.send(update.clone()) {
-                    println!("tokio broadcast receivers had an issue consuming the channel");
-                }
-            };
-        }
-
-        Ok(())
-    }
-}
@@ -1,6 +0,0 @@
-pub mod server;
-pub mod client;
-
-pub mod challenge {
-    tonic::include_proto!("challenge");
-}
@@ -1,92 +0,0 @@
-#![allow(dead_code)]
-
-use super::challenge::update_server::Update;
-use super::challenge::update_server::UpdateServer;
-use super::challenge::NodeUpdate;
-use crate::datastore::Store;
-use std::pin::Pin;
-use std::sync::Arc;
-use tokio::sync::broadcast::Sender;
-use tokio_stream::{Stream, StreamExt};
-use tonic::{transport::Server, Request, Response, Status, Streaming};
-
-pub struct MyServer {
-    ds: Arc<Store>,
-    tx: Sender<NodeUpdate>,
-}
-
-impl MyServer {
-    pub fn init(ds: Arc<Store>, tx: Sender<NodeUpdate>) -> Self {
-        Self { ds, tx }
-    }
-
-    pub async fn start(self) {
-        let addr = "0.0.0.0:31373".parse().unwrap();
-        if let Err(e) = Server::builder()
-            .add_service(UpdateServer::new(self))
-            .serve(addr)
-            .await
-        {
-            println!("gRPC server failed: {e:?}");
-        };
-    }
-}
-
-#[tonic::async_trait]
-impl Update for MyServer {
-    type GetUpdatesStream = Pin<Box<dyn Stream<Item = Result<NodeUpdate, Status>> + Send>>;
-
-    async fn get_updates(
-        &self,
-        req: Request<Streaming<NodeUpdate>>,
-    ) -> Result<Response<Self::GetUpdatesStream>, Status> {
-        let remote_ip = req.remote_addr().unwrap().ip().to_string();
-        let tx = self.tx.clone();
-        let mut rx = self.tx.subscribe();
-        let mut inbound = req.into_inner();
-        let ds = self.ds.clone();
-
-        let stream = async_stream::stream! {
-            let full_update_list = ds.get_full_node_list().await;
-            for update in full_update_list {
-                yield Ok(update);
-            }
-
-            loop {
-                tokio::select! {
-                    Some(msg) = inbound.next() => {
-                        match msg {
-                            Ok(mut update) => {
-                                if update.ip == "localhost" {
-                                    update.ip = remote_ip.clone();
-                                    // note that we don't set this node online,
-                                    // as it can be behind NAT
-                                }
-                                if update.ip != "127.0.0.1" && ds.process_node_update(update.clone()).await {
-                                    if let Err(_) = tx.send(update.clone()) {
-                                        println!("tokio broadcast receivers had an issue consuming the channel");
-                                    }
-                                };
-                            }
-                            Err(e) => {
-                                yield Err(Status::internal(format!("Error receiving client stream: {}", e)));
-                                break;
-                            }
-                        }
-                    }
-                    Ok(update) = rx.recv() => {
-                        yield Ok(update);
-                        // disconnect client if too many connections are active
-                        if tx.receiver_count() > 9 {
-                            yield Err(Status::internal("Already have too many clients. Connect to another server."));
-                            return;
-                        }
-                    }
-                }
-            }
-        };
-
-        Ok(Response::new(Box::pin(stream) as Self::GetUpdatesStream))
-    }
-}
@@ -1,143 +0,0 @@
-use crate::datastore::{SigningError, Store};
-use actix_web::error::ResponseError;
-use actix_web::http::StatusCode;
-use actix_web::{web, App, HttpResponse, HttpServer, Responder, Result};
-use serde::Deserialize;
-use std::fmt;
-use std::sync::Arc;
-
-const HOMEPAGE: &str = r#"Welcome, beloved hacker!
-
-I am a node that is part of the DeTEE Hacker Challenge network.
-I will allow you to sign messages using private ed25519 keys that I have in memory and on disk.
-If you want to run your own instance of this enclave, go to https://detee.cloud/hacker-challenge
-
-To access keys that are saved in memory, navigate to /memory. To sign something using a key that
-is in memory, curl /memory/sign with the params "pubkey" and "something". Example:
-curl -G \
-    --data-urlencode "pubkey=THE_PUBKEY_IN_HEX_FORMAT_HERE" \
-    --data-urlencode "something=YOUR_MESSAGE_HERE" \
-    'IP_OF_THE_NODE:31372/memory/sign'
-
-Each node publishes a new key to the cluster every 60 seconds. Old keys are deleted.
-The first key that each node publishes when joining the network is permanently saved to disk.
-To access keys that are saved on disk, navigate to /disk. Disk entries are paginated.
-You can navigate to a specific page by using get params. Example: https://{ip}/disk?page={number}.
-To sign a random message using a key from disk, use /disk/sign and send the key id as a get param:
-curl -G \
-    --data-urlencode "key=1337" \
-    --data-urlencode "something=YOUR_MESSAGE_HERE" \
-    'IP_OF_THE_NODE:31372/disk/sign'
-
-Your goal is to obtain a public key by any means necessary.
-If you manage to steal a key, contact us at https://detee.cloud
-
-Good luck!
-"#;
-
-#[derive(Debug)]
-enum HTTPError {
-    NoKeyID,
-    NoPubkey,
-    NoMessage,
-    Store(SigningError),
-}
-
-impl fmt::Display for HTTPError {
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        match *self {
-            HTTPError::NoKeyID => write!(f, "Key ID must be specified as a query param"),
-            HTTPError::NoPubkey => write!(f, "Pubkey must be specified as a query param"),
-            HTTPError::NoMessage => write!(f, "Something must be specified as a query param"),
-            HTTPError::Store(ref err) => write!(f, "{}", err),
-        }
-    }
-}
-
-impl ResponseError for HTTPError {
-    fn status_code(&self) -> StatusCode {
-        StatusCode::BAD_REQUEST
-    }
-
-    fn error_response(&self) -> HttpResponse {
-        HttpResponse::BadRequest().body(self.to_string())
-    }
-}
-
-#[derive(Deserialize)]
-struct SignQuery {
-    pubkey: Option<String>,
-    something: Option<String>,
-    key: Option<u64>,
-    page: Option<u64>,
-}
-
-async fn homepage() -> impl Responder {
-    HttpResponse::Ok().body(HOMEPAGE)
-}
-
-async fn memory_list(store: web::Data<Arc<Store>>) -> impl Responder {
-    let ds = store.get_ref();
-    let list = ds.tabled_memory_list().await; // TODO: make paginated
-    HttpResponse::Ok().body(list)
-}
-
-async fn memory_sign(
-    store: web::Data<Arc<Store>>,
-    req: web::Query<SignQuery>,
-) -> Result<String, HTTPError> {
-    let ds = store.get_ref();
-
-    let pubkey = req.pubkey.clone().ok_or(HTTPError::NoPubkey)?;
-    let something = req.something.clone().ok_or(HTTPError::NoMessage)?;
-
-    match ds.sign_message_with_key(&something, &pubkey).await {
-        Ok(s) => Ok(s),
-        Err(e) => Err(HTTPError::Store(e)),
-    }
-}
-
-async fn disk_list(store: web::Data<Arc<Store>>, req: web::Query<SignQuery>) -> impl Responder {
-    let ds = store.get_ref();
-    let page = req.page.unwrap_or(0);
-    let list = ds.tabled_disk_list(page).await;
-    HttpResponse::Ok().body(list)
-}
-
-async fn disk_sign(
-    store: web::Data<Arc<Store>>,
-    req: web::Query<SignQuery>,
-) -> Result<String, HTTPError> {
-    let ds = store.get_ref();
-
-    let key = req.key.ok_or(HTTPError::NoKeyID)?;
-    let something = req.something.clone().ok_or(HTTPError::NoMessage)?;
-
-    match ds.disk_sign_message_with_key(&something, key).await {
-        Ok(s) => Ok(s),
-        Err(e) => Err(HTTPError::Store(e)),
-    }
-}
-
-pub async fn init(ds: Arc<Store>) {
-    HttpServer::new(move || {
-        App::new()
-            .app_data(web::Data::new(ds.clone()))
-            .route("/", web::get().to(homepage))
-            .service(
-                web::scope("/memory")
-                    .route("", web::get().to(memory_list))
-                    .route("/sign", web::get().to(memory_sign)),
-            )
-            .service(
-                web::scope("/disk")
-                    .route("", web::get().to(disk_list))
-                    .route("/sign", web::get().to(disk_sign)),
-            )
-    })
-    .bind("0.0.0.0:31372")
-    .unwrap()
-    .run()
-    .await
-    .unwrap();
-}
@@ -1,63 +0,0 @@
-mod persistence;
-mod datastore;
-use crate::grpc::challenge::NodeUpdate;
-use tokio::sync::broadcast::Sender;
-use tokio::task::JoinSet;
-mod grpc;
-mod http_server;
-use crate::datastore::Store;
-use std::fs::File;
-use std::io::{BufRead, BufReader};
-use std::sync::Arc;
-use tokio::sync::broadcast;
-use tokio::time::{sleep, Duration};
-
-const INIT_NODES: &str = "detee_challenge_nodes";
-const DISK_PERSISTENCE: &str = "detee_challenge_node_history";
-
-async fn cycle_keys(ds: Arc<Store>, tx: Sender<NodeUpdate>) {
-    loop {
-        sleep(Duration::from_secs(60)).await;
-        let _ = tx.send(ds.reset_localhost_keys().await);
-        ds.remove_inactive_nodes().await;
-    }
-}
-
-#[tokio::main]
-async fn main() {
-    let ds: Arc<Store> = Arc::new(Store::init(DISK_PERSISTENCE).await);
-    ds.reset_localhost_keys().await;
-    let (tx, mut _rx) = broadcast::channel(500);
-
-    let mut long_term_tasks = JoinSet::new();
-    let mut init_tasks = JoinSet::new();
-
-    long_term_tasks.spawn(cycle_keys(ds.clone(), tx.clone()));
-    long_term_tasks.spawn(http_server::init(ds.clone()));
-    long_term_tasks.spawn(grpc::server::MyServer::init(ds.clone(), tx.clone()).start());
-
-    let input = File::open(INIT_NODES).unwrap();
-    let buffered = BufReader::new(input);
-    for line in buffered.lines() {
-        init_tasks.spawn(
-            grpc::client::ConnManager::init(ds.clone(), tx.clone()).start_with_node(line.unwrap()),
-        );
-    }
-
-    let mut connection_count = 0;
-    while init_tasks.join_next().await.is_some() {
-        if connection_count < 3 {
-            long_term_tasks.spawn(grpc::client::ConnManager::init(ds.clone(), tx.clone()).start());
-            connection_count += 1;
-        }
-    }
-
-    while connection_count < 3 {
-        long_term_tasks.spawn(grpc::client::ConnManager::init(ds.clone(), tx.clone()).start());
-        connection_count += 1;
-    }
-
-    // exit no matter which task finished
-    long_term_tasks.join_next().await;
-    println!("Shutting down...");
-}
@@ -1,344 +0,0 @@
-#![allow(dead_code)]
-use solana_sdk::signer::keypair::Keypair;
-use std::{
-    net::{AddrParseError, Ipv4Addr},
-    time::{Duration, SystemTime, UNIX_EPOCH},
-};
-use tokio::{
-    fs::File,
-    io::{AsyncReadExt, AsyncSeekExt, AsyncWriteExt, SeekFrom},
-    sync::Mutex,
-};
-
-const DATA_SIZE: usize = 76;
-const KEYPAIR_LENGTH: usize = 64;
-
-pub enum Error {
-    CorruptedIP,
-}
-
-impl From<AddrParseError> for Error {
-    fn from(_: AddrParseError) -> Self {
-        Error::CorruptedIP
-    }
-}
-
-pub struct Node {
-    pub ip: Ipv4Addr,
-    pub keypair: Keypair,
-    pub joined_at: SystemTime,
-}
-
-impl Clone for Node {
-    fn clone(&self) -> Self {
-        let cloned_keypair = Keypair::from_bytes(&self.keypair.to_bytes()).unwrap();
-
-        Node { ip: self.ip, keypair: cloned_keypair, joined_at: self.joined_at }
-    }
-}
-
-impl TryFrom<(&str, Keypair, SystemTime)> for Node {
-    type Error = Error;
-
-    fn try_from(value: (&str, Keypair, SystemTime)) -> Result<Self, Self::Error> {
-        Ok(Self { ip: value.0.parse()?, keypair: value.1, joined_at: value.2 })
-    }
-}
-
-impl Node {
-    fn ip_as_string(&self) -> String {
-        self.ip.to_string()
-    }
-
-    fn signing_key(&self) -> Keypair {
-        Keypair::from_bytes(&self.keypair.to_bytes()).unwrap()
-    }
-
-    fn to_bytes(self) -> [u8; DATA_SIZE] {
-        let mut result = [0; DATA_SIZE];
-        result[0..4].copy_from_slice(&self.ip.octets());
-        result[4..68].copy_from_slice(&self.keypair.to_bytes());
-        result[68..DATA_SIZE].copy_from_slice(
-            &self.joined_at.duration_since(UNIX_EPOCH).unwrap().as_secs().to_le_bytes(),
-        );
-        result
-    }
-
-    fn from_bytes(bytes: [u8; DATA_SIZE]) -> Self {
-        let ip: [u8; 4] = bytes[0..4].try_into().unwrap();
-        let ip: Ipv4Addr = ip.into();
-        let keypair: [u8; KEYPAIR_LENGTH] = bytes[4..68].try_into().unwrap();
-        let keypair: Keypair = Keypair::from_bytes(&keypair).unwrap();
-        let joined_at: [u8; 8] = bytes[68..DATA_SIZE].try_into().unwrap();
-        let joined_at: u64 = u64::from_le_bytes(joined_at);
-        let joined_at = SystemTime::UNIX_EPOCH + Duration::from_secs(joined_at);
-        Self { ip, keypair, joined_at }
-    }
-}
-
-pub struct FileManager {
-    file: Mutex<File>,
-}
-
-impl FileManager {
-    pub async fn init(path: &str) -> std::io::Result<Self> {
-        let file = File::options().read(true).append(true).open(path).await?;
-        Ok(Self { file: Mutex::new(file) })
-    }
-
-    pub async fn append_node(&self, node: Node) -> std::io::Result<()> {
-        let mut file = self.file.lock().await;
-        file.seek(SeekFrom::End(0)).await?;
-        file.write_all(&node.to_bytes()).await?;
-        file.flush().await?;
-        Ok(())
-    }
-
-    pub async fn get_node_by_id(&self, id: u64) -> std::io::Result<Node> {
-        let mut file = self.file.lock().await;
-        file.seek(SeekFrom::Start(id.wrapping_mul(DATA_SIZE.try_into().unwrap_or(0)))).await?;
-        let mut node_bytes = [0; DATA_SIZE];
-        file.read_exact(&mut node_bytes).await?;
-        Ok(Node::from_bytes(node_bytes))
-    }
-
-    /// Returns 20 nodes from the disk.
-    /// Specify offset (the number of nodes to skip).
-    pub async fn get_page_of_20(&self, offset: u64) -> std::io::Result<Vec<Node>> {
-        let mut file = self.file.lock().await;
-        file.seek(SeekFrom::Start(offset.wrapping_mul(DATA_SIZE.try_into().unwrap_or(0)))).await?;
-        let mut nodes = Vec::new();
-        let mut count = 0;
-        loop {
-            let mut node_bytes = [0; DATA_SIZE];
-            if let Err(_) = file.read_exact(&mut node_bytes).await {
-                break;
-            };
-            nodes.push(Node::from_bytes(node_bytes));
-            count += 1;
-            if count == 20 {
-                break;
-            }
-        }
-        Ok(nodes)
-    }
-}
-
-#[cfg(test)]
-mod tests {
-
-    use super::*;
-    use rand::Rng;
-    use std::io::Result;
-    use tokio::{fs::remove_file, io::AsyncWriteExt};
-
-    const TEST_FILE_PREFIX: &str = ".tmp/test_";
-    fn get_test_file_name(function: &str) -> String {
-        TEST_FILE_PREFIX.to_string() + function
-    }
-
-    async fn setup_test_file(function: &str) -> Result<FileManager> {
-        let _ = tokio::fs::create_dir_all(".tmp").await;
-        let path = get_test_file_name(function);
-        let mut file = File::create(path.clone()).await?;
-        file.flush().await?;
-        drop(file);
-        FileManager::init(&path).await
-    }
-
-    #[test]
-    fn node_round_trip() {
-        let keypair = Keypair::new();
-
-        let original_node = Node {
-            ip: "192.168.1.1".parse().unwrap(),
-            keypair: Keypair::from_bytes(&keypair.to_bytes()).unwrap(),
-            joined_at: SystemTime::now(),
-        };
-
-        let node_bytes = original_node.clone().to_bytes();
-        let restored_node = Node::from_bytes(node_bytes);
-
-        assert_eq!(original_node.ip_as_string(), restored_node.ip_as_string());
-        assert_eq!(original_node.keypair.to_bytes(), restored_node.keypair.to_bytes());
-        assert_eq!(
-            original_node.joined_at.duration_since(UNIX_EPOCH).unwrap().as_secs(),
-            restored_node.joined_at.duration_since(UNIX_EPOCH).unwrap().as_secs()
-        );
-    }
-
-    #[tokio::test]
-    async fn setup_file_manager() {
-        let function_name = "setup_file_manager";
-        let _ = match setup_test_file(function_name).await {
-            Err(e) => {
-                panic!("Could not init File Manager: {}", e);
-            }
-            _ => remove_file(get_test_file_name(function_name)).await,
-        };
-    }
-
-    fn get_random_node() -> Node {
-        let keypair = Keypair::new();
-        let mut rng = rand::thread_rng();
-        let ipv4 = Ipv4Addr::new(rng.gen(), rng.gen(), rng.gen(), rng.gen());
-        Node {
-            ip: ipv4,
-            keypair: Keypair::from_bytes(&keypair.to_bytes()).unwrap(),
-            joined_at: SystemTime::now(),
-        }
-    }
-
-    #[tokio::test]
-    async fn append_and_retrieve() -> Result<()> {
-        let function_name = "append_and_retrieve";
-        let manager = setup_test_file(function_name).await?;
-        let node = get_random_node();
-        manager.append_node(node.clone()).await?;
-        let retrieved_node = manager.get_node_by_id(0).await?;
-        assert_eq!(node.ip_as_string(), retrieved_node.ip_as_string());
-        assert_eq!(node.keypair, retrieved_node.keypair);
-        assert_eq!(
-            node.joined_at.duration_since(UNIX_EPOCH).unwrap().as_secs(),
-            retrieved_node.joined_at.duration_since(UNIX_EPOCH).unwrap().as_secs()
-        );
-        remove_file(get_test_file_name(function_name)).await?;
-        Ok(())
-    }
-
-    #[tokio::test]
-    async fn append_and_retrieve_multiple() -> Result<()> {
-        let function_name = "append_and_retrieve_multiple";
-        let manager = setup_test_file(function_name).await?;
-        let node1 = get_random_node();
-        let node2 = get_random_node();
-        manager.append_node(node1.clone()).await?;
-        manager.append_node(node2.clone()).await?;
-        let retrieved_node1 = manager.get_node_by_id(0).await?;
-        let node3 = get_random_node();
-        manager.append_node(node3.clone()).await.unwrap();
-        let retrieved_node2 = manager.get_node_by_id(1).await?;
-        assert_eq!(node1.ip_as_string(), retrieved_node1.ip_as_string());
-        assert_eq!(node1.keypair.to_bytes(), retrieved_node1.keypair.to_bytes());
-        assert_eq!(node2.ip_as_string(), retrieved_node2.ip_as_string());
-        assert_eq!(node2.keypair, retrieved_node2.keypair);
-        let retrieved_node3 = manager.get_node_by_id(2).await?;
-        assert_eq!(node3.ip_as_string(), retrieved_node3.ip_as_string());
-        assert_eq!(node3.keypair, retrieved_node3.keypair);
-        remove_file(get_test_file_name(function_name)).await?;
-        Ok(())
-    }
-
-    #[tokio::test]
-    async fn append_20_and_retrieve_1_loop() -> Result<()> {
-        let function_name = "append_20_and_retrieve_1_loop";
-        let manager = setup_test_file(function_name).await?;
-        let mut count = 0;
-        let mut nodes_vec: Vec<Node> = Vec::new();
-        while count < 100 {
-            let node = get_random_node();
-            if count % 10 == 0 {
-                nodes_vec.push(node.clone());
-            }
-            manager.append_node(node).await?;
-
-            count += 1;
-        }
-
-        count = 0;
-        for node in nodes_vec.iter() {
-            let r_node = manager.get_node_by_id(count * 10).await?;
-            assert_eq!(node.ip_as_string(), r_node.ip_as_string());
-            assert_eq!(node.keypair, r_node.keypair);
-            count += 1;
-            if count > 3 {
-                break;
-            }
-        }
-
-        count = 100;
-        while count < 500 {
-            let node = get_random_node();
-            if count % 10 == 0 {
-                nodes_vec.push(node.clone());
-            }
-            manager.append_node(node).await?;
-
-            count += 1;
-        }
-
-        count = 0;
-        for node in nodes_vec.iter() {
-            let r_node = manager.get_node_by_id(count * 10).await?;
-            assert_eq!(node.ip_as_string(), r_node.ip_as_string());
-            assert_eq!(node.keypair, r_node.keypair);
-            count += 1;
-            if count > 49 {
-                break;
-            }
-        }
-
-        remove_file(get_test_file_name(function_name)).await?;
-        Ok(())
-    }
-
-    #[tokio::test]
-    async fn get_page_of_20_nodes() -> Result<()> {
-        let function_name = "get_page_of_20_nodes";
-        let manager = setup_test_file(function_name).await?;
-        let mut count = 0;
-        let mut nodes: Vec<Node> = Vec::new();
-        while count < 100 {
-            let node = get_random_node();
-            if count >= 23 && count < 43 {
-                nodes.push(node.clone());
-            }
-            manager.append_node(node).await?;
-            count += 1;
-        }
-        count = 23;
-        let mut r_nodes = manager.get_page_of_20(23).await?.into_iter();
-        for node in nodes.iter() {
-            let r_node = r_nodes.next().unwrap();
-            println!("{} {} {}", count, node.ip_as_string(), r_node.ip_as_string());
-            assert_eq!(node.ip_as_string(), r_node.ip_as_string());
-            assert_eq!(node.keypair, r_node.keypair);
-            count += 1;
-            if count == 44 {
-                break;
-            }
-        }
-        remove_file(get_test_file_name(function_name)).await?;
-        Ok(())
-    }
-
-    #[tokio::test]
-    async fn get_last_page() -> Result<()> {
-        let function_name = "get_last_page";
-        let manager = setup_test_file(function_name).await?;
-        let mut count = 0;
-        let mut nodes: Vec<Node> = Vec::new();
-        while count < 97 {
-            let node = get_random_node();
-            if count >= 90 {
-                nodes.push(node.clone());
-            }
-            manager.append_node(node).await?;
-            count += 1;
-        }
-        count = 23;
-        let mut r_nodes = manager.get_page_of_20(90).await?.into_iter();
-        for node in nodes.iter() {
-            let r_node = r_nodes.next().unwrap();
-            println!("{} {} {}", count, node.ip_as_string(), r_node.ip_as_string());
-            assert_eq!(node.ip_as_string(), r_node.ip_as_string());
-            assert_eq!(node.keypair, r_node.keypair);
-            count += 1;
-            if count == 44 {
-                break;
-            }
-        }
-        remove_file(get_test_file_name(function_name)).await?;
-        Ok(())
-    }
-}
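For reference, the fixed record layout serialized by the deleted persistence module above is 4 bytes of IPv4 octets + 64 bytes of keypair + 8 bytes of little-endian UNIX seconds = 76 bytes, which is where `DATA_SIZE: usize = 76` and the slice boundaries `0..4`, `4..68`, and `68..76` come from.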
@@ -15,7 +15,7 @@ message NodeUpdate {
 }

 message Keys {
-    string keypair = 1;
+    bytes keypair = 1;
     string token_address = 2;
 }

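The `string` → `bytes` change for `Keys.keypair` also changes the prost-generated Rust type: under prost's default configuration, proto3 `string` maps to `String` while `bytes` maps to `Vec<u8>`. Roughly (a sketch of the generated struct, not part of this diff):

```rust
// prost-generated struct, sketched for illustration only.
pub struct Keys {
    pub keypair: Vec<u8>, // was: pub keypair: String
    pub token_address: String,
}
```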
@@ -1,2 +0,0 @@
-[net]
-git-fetch-with-cli = true
rewrite/.gitignore  (vendored, 4 changed lines)
@@ -1,4 +0,0 @@
-# Occlum building env
-challenge_instance
-# The challenge bundle
-docker/challenge.tar.gz
@ -1,147 +0,0 @@
-// Protocol Buffers - Google's data interchange format
-// Copyright 2008 Google Inc. All rights reserved.
-// https://developers.google.com/protocol-buffers/
-// (BSD 3-clause redistribution license text elided)
-
-syntax = "proto3";
-
-package google.protobuf;
-
-option csharp_namespace = "Google.Protobuf.WellKnownTypes";
-option cc_enable_arenas = true;
-option go_package = "google.golang.org/protobuf/types/known/timestamppb";
-option java_package = "com.google.protobuf";
-option java_outer_classname = "TimestampProto";
-option java_multiple_files = true;
-option objc_class_prefix = "GPB";
-
-// A Timestamp represents a point in time independent of any time zone or local
-// calendar, encoded as a count of seconds and fractions of seconds at
-// nanosecond resolution, relative to the Unix epoch (proleptic Gregorian
-// calendar, leap seconds smeared). Range: 0001-01-01T00:00:00Z to
-// 9999-12-31T23:59:59.999999999Z, which maps cleanly to RFC 3339 strings.
-// (Upstream usage examples and JSON-mapping notes elided.)
-message Timestamp {
-  // Represents seconds of UTC time since Unix epoch
-  // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
-  // 9999-12-31T23:59:59Z inclusive.
-  int64 seconds = 1;
-
-  // Non-negative fractions of a second at nanosecond resolution. Negative
-  // second values with fractions must still have non-negative nanos values
-  // that count forward in time. Must be from 0 to 999,999,999
-  // inclusive.
-  int32 nanos = 2;
-}
34
src/HOMEPAGE.md
Normal file
@ -0,0 +1,34 @@
+# Welcome, b3L0v3D h4ck3r!
+
+This node is part of the DeTEE hacker-challenge, a decentralized wallet that mints the HCT Token.
+The private key of the mint authority was generated within the network. The challenge is easy:
+Hack the network to get the private key, and all the SOL is yours. We also offer other rewards, including:
+- a unique NFT
+- token rewards after the release of the DeTEE token
+- a seat on the Advisory Board of DeTEE
+- possible employment at DeTEE
+
+The mint address of the token is: TOKEN_ADDRESS
+The mint authority is: MINT_AUTHORITY
+
+In order to mint, the mint authority will need some SOL. Before sending SOL, take into consideration that
+DeTEE REPRESENTATIVES DON'T KNOW HOW TO GET THE SOL OUT OF THE NETWORK!
+
+Allowed operations:
+/nodes <- information about nodes and counters of network activity
+/mint (address) <- mint HCT tokens to the address; the wallet needs SOL for this operation
+
+If you are able to get the SOL out of the wallet, please contact us at https://detee.ltd
+The code of the challenge can be found at https://gitea.detee.cloud/SGX/hacker-challenge-sgx
+
+## More about the network
+
+Each node in the network runs inside an enclave. The enclave is a program that operates in a trusted execution environment (TEE). Memory of programs within the enclave cannot be inspected from outside the enclave. Programs within the enclave have access to sources of entropy that cannot be predicted from outside. Programs can also access reproducible secrets that they can use to seal[^1] persistent data. Each enclave has a certain set of measurements, consisting of all the data required for the program to run (instructions, configuration, etc.). A running program can generate a quote that can be used to verify the measurements and legitimacy of the hardware it's running on.
+
+Assuming there are no vulnerabilities in any of the mentioned hardware features, and our node implementation has none either, it should be practically impossible to steal the SOL from the network wallet because:
+- the wallet key is generated with the enclave's source of entropy
+- nobody can inspect the memory that contains the key
+- nodes verify the quote of each peer and refuse connections if quote measurements don't match their own
+- each node seals[^1] all persistent data saved to disk with the enclave's key
+
+[^1]: sealing: encrypting data with a symmetric key before exposing it to an untrusted environment, so that it can later be recovered by decrypting it
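The sealing footnote maps onto a short sketch. This is illustrative only, not the project's implementation: it uses the `aes-gcm` crate, and `enclave_sealing_key` is a hypothetical stand-in for the key SGX derives from the enclave identity via EGETKEY.

```rust
use aes_gcm::{
    aead::{Aead, AeadCore, KeyInit, OsRng},
    Aes256Gcm, Key, Nonce,
};

// Hypothetical stand-in: real sealing derives this key from the enclave
// measurement, so only the same enclave can re-derive it. A fixed key is
// used here purely so the sketch compiles.
fn enclave_sealing_key() -> Key<Aes256Gcm> {
    *Key::<Aes256Gcm>::from_slice(&[0u8; 32])
}

fn seal(plaintext: &[u8]) -> Result<Vec<u8>, aes_gcm::Error> {
    let cipher = Aes256Gcm::new(&enclave_sealing_key());
    let nonce = Aes256Gcm::generate_nonce(&mut OsRng); // 96-bit random nonce
    let mut out = nonce.to_vec(); // prepend the nonce so unseal can find it
    out.extend(cipher.encrypt(&nonce, plaintext)?);
    Ok(out)
}

fn unseal(sealed: &[u8]) -> Result<Vec<u8>, aes_gcm::Error> {
    let (nonce, ciphertext) = sealed.split_at(12);
    Aes256Gcm::new(&enclave_sealing_key()).decrypt(Nonce::from_slice(nonce), ciphertext)
}
```

Because decryption happens only inside the enclave, data sealed this way is opaque to the host even though it sits on the untrusted disk.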
@ -71,8 +71,8 @@ impl Store {
         self.sol_client.wallet_address()
     }

-    pub fn get_keypair_base58(&self) -> String {
-        self.sol_client.get_keypair_base58()
+    pub fn get_keypair_bytes(&self) -> Vec<u8> {
+        self.sol_client.get_keypair_bytes()
     }

     pub fn add_conn(&self, ip: &str) {
@ -155,11 +155,7 @@ impl Store {
         self.nodes.retain(|_, v| {
             let age =
                 SystemTime::now().duration_since(v.keepalive).unwrap_or(Duration::ZERO).as_secs();
-            if age > 600 {
-                false
-            } else {
-                true
-            }
+            age <= 600
         });
     }
 }
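This is clippy's `needless_bool` fix: an `if c { false } else { true }` is just `!c`, and here the negation of `age > 600` is written directly as the value `retain` needs, keeping entries whose keepalive is at most ten minutes old. A tiny check of the equivalence:

```rust
fn main() {
    for age in [0u64, 600, 601] {
        // Before: the if/else that the diff removes.
        let before = if age > 600 { false } else { true };
        // After, as in the diff: the boolean expression itself.
        let after = age <= 600;
        assert_eq!(before, after);
    }
}
```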
@ -48,9 +48,9 @@ impl ConnManager {
     }

     async fn connect(&self, node_ip: String) -> Result<(), Box<dyn std::error::Error>> {
+        use detee_sgx::{prelude::*, RaTlsConfigBuilder};
         use hyper::Uri;
         use hyper_util::{client::legacy::connect::HttpConnector, rt::TokioExecutor};
-        use detee_sgx::{prelude::*, RaTlsConfigBuilder};
         use tokio_rustls::rustls::ClientConfig;

         println!("Connecting to {node_ip}...");
@ -108,10 +108,10 @@ impl ConnManager {
             }

             // update the entire network in case the information is new
-            if self.ds.process_node_update(update.clone().into()).await {
-                if let Err(_) = self.tx.send(update.clone()) {
-                    println!("tokio broadcast receivers had an issue consuming the channel");
-                }
+            if self.ds.process_node_update(update.clone().into()).await
+                && self.tx.send(update.clone()).is_err()
+            {
+                println!("tokio broadcast receivers had an issue consuming the channel");
             };
         }
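Same clippy pass, `collapsible_if`: the nested `if let Err(_) = ...` becomes one condition joined with `&&`. Because `&&` short-circuits, `tx.send` still runs only when `process_node_update` returned true, so behavior is unchanged. A minimal model with illustrative names:

```rust
fn updated() -> bool { true }
fn send_failed() -> bool { true }

fn main() {
    // Before: nested ifs.
    if updated() {
        if send_failed() {
            println!("tokio broadcast receivers had an issue consuming the channel");
        }
    }
    // After: one if; send_failed() is evaluated only when updated() is true.
    if updated() && send_failed() {
        println!("tokio broadcast receivers had an issue consuming the channel");
    }
}
```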
@ -120,9 +120,9 @@ impl ConnManager {
     }

 pub async fn key_grabber(node_ip: String) -> Result<(Keypair, Pubkey), Box<dyn std::error::Error>> {
+    use detee_sgx::{prelude::*, RaTlsConfigBuilder};
     use hyper::Uri;
     use hyper_util::{client::legacy::connect::HttpConnector, rt::TokioExecutor};
-    use detee_sgx::{prelude::*, RaTlsConfigBuilder};
     use tokio_rustls::rustls::ClientConfig;

     println!("Getting key from {node_ip}...");
@ -163,7 +163,7 @@ pub async fn key_grabber(node_ip: String) -> Result<(Keypair, Pubkey), Box<dyn std::error::Error>> {
     let response = client.get_keys(tonic::Request::new(Empty {})).await?;
     let response = &response.into_inner();
     let keypair = response.keypair.clone();
-    let keypair = match std::panic::catch_unwind(|| Keypair::from_base58_string(&keypair)) {
+    let keypair = match Keypair::from_bytes(&keypair) {
         Ok(k) => k,
         Err(_) => return Err("Could not parse keypair.".into()),
     };
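This is the "removing catch unwind" item from the commit message. `Keypair::from_base58_string` panics on malformed input, which forced the `catch_unwind` wrapper; `catch_unwind` is also a trap here because it silently stops catching once a build uses `panic = "abort"`. With the proto field switched to `bytes`, `Keypair::from_bytes` returns a `Result` and the error path becomes ordinary control flow:

```rust
use solana_sdk::signature::Keypair;

// Malformed peer input now surfaces as an Err instead of unwinding.
fn parse(bytes: &[u8]) -> Result<Keypair, Box<dyn std::error::Error>> {
    Keypair::from_bytes(bytes).map_err(|_| "Could not parse keypair.".into())
}

fn main() {
    let kp = Keypair::new();
    assert!(parse(&kp.to_bytes()).is_ok()); // 64 valid bytes round-trip
    assert!(parse(&[0u8; 3]).is_err()); // short input: an error, not a panic
}
```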
@ -22,17 +22,17 @@ impl From<(String, NodeInfo)> for NodeUpdate {
     }
 }

-impl Into<(String, NodeInfo)> for NodeUpdate {
-    fn into(self) -> (String, NodeInfo) {
-        let ip = self.ip;
-        let started_at: SystemTime = match self.started_at {
+impl From<NodeUpdate> for (String, NodeInfo) {
+    fn from(val: NodeUpdate) -> Self {
+        let ip = val.ip;
+        let started_at: SystemTime = match val.started_at {
             Some(ts) => {
                 let duration = Duration::new(ts.seconds as u64, ts.nanos as u32);
                 UNIX_EPOCH.checked_add(duration).unwrap_or(SystemTime::now())
             }
             None => SystemTime::now(),
         };
-        let keepalive: SystemTime = match self.keepalive {
+        let keepalive: SystemTime = match val.keepalive {
             Some(ts) => {
                 let duration = Duration::new(ts.seconds as u64, ts.nanos as u32);
                 UNIX_EPOCH.checked_add(duration).unwrap_or(SystemTime::now())
@ -42,11 +42,11 @@ impl Into<(String, NodeInfo)> for NodeUpdate {
         let self_info = NodeInfo {
             started_at,
             keepalive,
-            mint_requests: self.mint_requests,
-            mints: self.mints,
-            ratls_conns: self.ratls_conns,
-            ratls_attacks: self.ratls_attacks,
-            public: self.public,
+            mint_requests: val.mint_requests,
+            mints: val.mints,
+            ratls_conns: val.ratls_conns,
+            ratls_attacks: val.ratls_attacks,
+            public: val.public,
         };
         (ip, self_info)
     }
 }
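Another clippy fix, `from_over_into`: implementing `From<NodeUpdate>` is preferred because the standard library's blanket `impl<T, U: From<T>> Into<U> for T` then provides `Into` for free, while the reverse does not hold. The shape of the change, on a trivial type:

```rust
struct Wrapper(String);

// Implement From...
impl From<Wrapper> for String {
    fn from(val: Wrapper) -> Self {
        val.0
    }
}

fn main() {
    // ...and call sites that used Into keep working unchanged.
    let s: String = Wrapper("hi".to_string()).into();
    assert_eq!(s, "hi");
}
```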
@ -136,10 +136,8 @@ impl Update for MyServer {
                     // note that we don't set this node online,
                     // as it can be behind NAT
                 }
-                if update.ip != "127.0.0.1" && ds.process_node_update(update.clone().into()).await {
-                    if let Err(_) = tx.send(update.clone()) {
-                        println!("tokio broadcast receivers had an issue consuming the channel");
-                    }
+                if update.ip != "127.0.0.1" && ds.process_node_update(update.clone().into()).await && tx.send(update.clone()).is_err() {
+                    println!("tokio broadcast receivers had an issue consuming the channel");
                 };
             }
             Err(e) => {
@ -166,7 +164,7 @@ impl Update for MyServer {

     async fn get_keys(&self, _request: Request<Empty>) -> Result<Response<Keys>, Status> {
         let reply = Keys {
-            keypair: self.ds.get_keypair_base58(),
+            keypair: self.ds.get_keypair_bytes(),
             token_address: self.ds.get_token_address(),
         };
         Ok(Response::new(reply))
@ -1,42 +1,11 @@
 #![allow(dead_code)]
 use crate::{datastore, datastore::Store};
-use actix_web::{
-    // http::StatusCode, error::ResponseError, Result,
-    get,
-    post,
-    web,
-    App,
-    HttpResponse,
-    HttpServer,
-    Responder,
-};
+use actix_web::{get, post, web, App, HttpResponse, HttpServer, Responder};
 use chrono::{DateTime, Utc};
 use serde::{Deserialize, Serialize};
 use std::sync::Arc;

-const HOMEPAGE: &str = r#"Welcome, b3L0v3D h4ck3r!
-
-This node is part of the DeTEE hacker-challenge, a decentralized wallet that mints the HCKT Token.
-The private key of the mint authority was generated within the network. The challenge is easy:
-Hack the network to get the private key, and all the SOL is yours. We also offer other rewards, including:
-- a unique NFT
-- token rewards at after release of the DeTEE token
-- a seat on the Advisory Board of DeTEE
-- possible employment at DeTEE
-
-The mint address of the token is: TOKEN_ADDRESS
-The mint authority is: MINT_AUTHORITY
-
-In order to mint, the mint authority will need some SOL. Before sending SOL, the take into consideration that
-DeTEE REPRESENTATIVES DON'T KNOW HOW TO GET THE SOL OUT OF THE NETWORK!
-
-Allowed operations:
-/nodes <- information about nodes and counters of network activity
-/mint (address) <- mint DHCT tokens to the address; the wallet needs sol for this operation
-
-If you are able to get the SOL out of the wallet, please contact us at https://detee.ltd
-The code of the challenge can be found at https://gitea.detee.cloud/ghe0/hacker-challenge
-"#;
+const HOMEPAGE: &str = include_str!("HOMEPAGE.md");

 #[get("/")]
 async fn homepage(ds: web::Data<Arc<Store>>) -> impl Responder {
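This is the "separating homepage to a file" item: `include_str!` embeds src/HOMEPAGE.md into the binary as a `&'static str` at compile time, and the path is resolved relative to the invoking source file, which is why the markdown lives in src/ next to the module. A sketch of the mechanism; the placeholder substitution shown is an assumption inferred from the TOKEN_ADDRESS and MINT_AUTHORITY markers, not code from this diff:

```rust
// Compile-time embedding: a missing HOMEPAGE.md is a build error rather
// than a runtime one, and serving the page does no file I/O.
const HOMEPAGE: &str = include_str!("HOMEPAGE.md");

// Assumed handler behavior: swap the literal markers for live values.
fn render(token: &str, authority: &str) -> String {
    HOMEPAGE
        .replace("TOKEN_ADDRESS", token)
        .replace("MINT_AUTHORITY", authority)
}
```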
@ -21,6 +21,7 @@ use tokio::{

 const INIT_NODES: &str = "/host/detee_challenge_nodes";
 const DISK_PERSISTENCE: &str = "TRY_TO_HACK_THIS";
+const MAINTAINED_CONNECTIONS: usize = 3;

 pub async fn localhost_cron(ds: Arc<Store>, tx: Sender<NodeUpdate>) {
     loop {
@ -57,7 +58,7 @@ async fn get_sol_client() -> SolClient {
         "Got keypair from the network. Joining the network using wallet {}",
         bundle.0.pubkey()
     );
-    println!("The address of the Token is {}", bundle.1.to_string());
+    println!("The address of the Token is {}", bundle.1);
     println!("Saving this data to disk in the file {DISK_PERSISTENCE}");
     let disk_data = crate::persistence::Data::init_from(&bundle.0, &bundle.1).await;
     if let Err(e) = disk_data.write(DISK_PERSISTENCE).await {
@ -82,38 +83,27 @@ async fn main() {

     let (tx, mut _rx) = broadcast::channel(500);

-    let mut long_term_tasks = JoinSet::new();
-    let mut init_tasks = JoinSet::new();
+    let mut tasks = JoinSet::new();

-    long_term_tasks.spawn(localhost_cron(ds.clone(), tx.clone()));
-    long_term_tasks.spawn(http_server::init(ds.clone()));
-    long_term_tasks.spawn(grpc::server::MyServer::init(ds.clone(), tx.clone()).start());
+    tasks.spawn(localhost_cron(ds.clone(), tx.clone()));
+    tasks.spawn(http_server::init(ds.clone()));
+    tasks.spawn(grpc::server::MyServer::init(ds.clone(), tx.clone()).start());

-    if let Ok(input) = File::open(INIT_NODES) {
-        let buffered = BufReader::new(input);
-        for line in buffered.lines() {
-            init_tasks.spawn(
+    if let Ok(input) = std::fs::read_to_string(INIT_NODES) {
+        for line in input.lines() {
+            tasks.spawn(
                 grpc::client::ConnManager::init(ds.clone(), tx.clone())
-                    .start_with_node(line.unwrap()),
+                    .start_with_node(line.to_string()),
             );
         }
     }

-    let mut connection_count = 0;
-    while init_tasks.join_next().await.is_some() {
-        if connection_count < 3 {
-            long_term_tasks.spawn(grpc::client::ConnManager::init(ds.clone(), tx.clone()).start());
-            connection_count += 1;
-        }
-    }
-
-    while connection_count < 3 {
-        long_term_tasks.spawn(grpc::client::ConnManager::init(ds.clone(), tx.clone()).start());
-        connection_count += 1;
-    }
-
-    // exit no matter which task finished
-    long_term_tasks.join_next().await;
+    for _ in 0..MAINTAINED_CONNECTIONS {
+        tasks.spawn(grpc::client::ConnManager::init(ds.clone(), tx.clone()).start());
+    }
+
+    while let Some(Ok(_)) = tasks.join_next().await {}

+    // task panicked
     println!("Shutting down...");
 }
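After this startup rewrite a single `JoinSet` owns every task. `join_next()` yields `Some(Err(..))` when a task panics and `None` once the set is empty, so `while let Some(Ok(_)) = tasks.join_next().await {}` drains clean completions and stops at the first panic, which is what the `// task panicked` comment refers to. A minimal model of that shutdown loop, with stand-in tasks:

```rust
use tokio::task::JoinSet;

#[tokio::main]
async fn main() {
    let mut tasks = JoinSet::new();
    tasks.spawn(async { /* a task that finishes cleanly */ });
    tasks.spawn(async { panic!("boom") });

    // Drains clean completions; the first Err (a panicked task) breaks
    // the loop, as does the set running empty.
    while let Some(Ok(_)) = tasks.join_next().await {}

    println!("Shutting down...");
}
```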
@ -1,10 +1,6 @@
 use serde::{Deserialize, Serialize};
 use solana_sdk::{pubkey::Pubkey, signature::keypair::Keypair};
 use std::str::FromStr;
-use tokio::{
-    fs::File,
-    io::{AsyncReadExt, AsyncWriteExt},
-};

 #[derive(Serialize, Deserialize)]
 pub struct Data {
@ -28,17 +24,14 @@ impl Data {

     pub async fn write(self, path: &str) -> Result<(), Box<dyn std::error::Error>> {
         let serialized = serde_json::to_string(&self)?;
-        let mut file = File::create(path).await?;
-        file.write_all(serialized.as_bytes()).await?;
-        file.flush().await?;
-        Ok(())
+        let sealed = detee_sgx::SealingConfig::new()?.seal_data(serialized.into_bytes())?;
+        tokio::fs::write(path, sealed).await.map_err(Into::into)
     }

     pub async fn read(path: &str) -> Result<Self, Box<dyn std::error::Error>> {
-        let mut file = File::open(path).await?;
-        let mut contents = String::new();
-        file.read_to_string(&mut contents).await?;
-        Ok(serde_json::from_str(&contents)?)
+        let sealed = tokio::fs::read(path).await?;
+        let serialized = detee_sgx::SealingConfig::new()?.un_seal_data(sealed)?;
+        Ok(serde_json::from_str(&String::from_utf8(serialized)?)?)
     }

     pub fn parse(self) -> (Keypair, Pubkey) {
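This is the "adding sealing to persistence" item: the JSON blob is sealed before it touches the `TRY_TO_HACK_THIS` file on the untrusted disk, and unsealed on the way back in. A roundtrip sketch using the `detee_sgx` calls exactly as the diff shows them; since that crate is private, the error types it returns and the `Data` fields below are assumptions:

```rust
use serde::{Deserialize, Serialize};

// Illustrative fields only; the real Data holds the keypair and token.
#[derive(Serialize, Deserialize, PartialEq, Debug)]
struct Data {
    keypair: Vec<u8>,
    token_address: String,
}

async fn roundtrip(data: &Data, path: &str) -> Result<Data, Box<dyn std::error::Error>> {
    // write(): serialize, seal with the enclave key, persist ciphertext.
    let serialized = serde_json::to_string(data)?;
    let sealed = detee_sgx::SealingConfig::new()?.seal_data(serialized.into_bytes())?;
    tokio::fs::write(path, sealed).await?;

    // read(): load ciphertext, unseal, deserialize.
    let sealed = tokio::fs::read(path).await?;
    let serialized = detee_sgx::SealingConfig::new()?.un_seal_data(sealed)?;
    Ok(serde_json::from_str(&String::from_utf8(serialized)?)?)
}
```

Only an enclave with the same measurements can re-derive the sealing key, so copying the persisted file out of the host gains an attacker nothing.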
@ -57,7 +57,7 @@ impl Client {
     }

     fn create_token_account(&self, recipient: &Pubkey) -> Result<Pubkey, Box<dyn Error>> {
-        let address = get_associated_token_address(&recipient, &self.token);
+        let address = get_associated_token_address(recipient, &self.token);
         if self.client.get_account(&address).is_err() {
             let create_token_account_instruction = create_associated_token_account(
                 &self.keypair.pubkey(),
@ -85,8 +85,8 @@ impl Client {
         self.token.to_string()
     }

-    pub fn get_keypair_base58(&self) -> String {
-        self.keypair.to_base58_string()
+    pub fn get_keypair_bytes(&self) -> Vec<u8> {
+        self.keypair.to_bytes().to_vec()
     }
 }
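The `get_keypair_bytes` rename completes the wire-format change running through this commit: the proto field becomes `bytes keypair = 1`, the server's `Keys` reply carries `to_bytes()`, and the client rebuilds the key with `Keypair::from_bytes`. The roundtrip both ends now agree on:

```rust
use solana_sdk::{signature::Keypair, signer::Signer};

fn main() {
    let server_side = Keypair::new();
    // What the server puts in Keys.keypair: 64 raw bytes
    // (32-byte secret seed followed by the 32-byte public key).
    let wire: Vec<u8> = server_side.to_bytes().to_vec();
    let client_side = Keypair::from_bytes(&wire).expect("64 valid bytes");
    assert_eq!(server_side.pubkey(), client_side.pubkey());
}
```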