From 29b3ad45ca333580fa3581d00d9285fcea2d2838 Mon Sep 17 00:00:00 2001
From: ghe0
Date: Mon, 26 May 2025 04:11:24 +0300
Subject: [PATCH] network overlay and production surrealdb setup

---
 README.md                                  |  3 +-
 create_ssh_config.sh                       | 30 +++++++
 overlay-network/README.md                  | 15 ++++
 overlay-network/create_vms.sh              | 14 ++++
 overlay-network/setup_wg_mesh.sh           | 98 ++++++++++++++++++++++
 overlay-network/vm_configs/template-1.yaml | 10 +++
 overlay-network/vm_configs/template-2.yaml | 10 +++
 overlay-network/vm_configs/template-3.yaml | 10 +++
 overlay-network/vm_configs/template-n.yaml | 10 +++
 overlay-network/vm_configs/template-x.yaml | 10 +++
 surrealdb_tikv_prod/README.md              | 32 +++++++
 surrealdb_tikv_prod/deploy_db.sh           | 43 ++++++++++
 surrealdb_tikv_prod/deploy_nodes.sh        | 52 ++++++++++++
 surrealdb_tikv_prod/prepare_bastion.sh     | 59 +++++++++++++
 surrealdb_tikv_prod/prod_cluster.yaml      | 24 ++++++
 surrealdb_tikv_prod/staging_cluster.yaml   | 24 ++++++
 surrealdb_tikv_prod/surrealdb.service      | 19 +++++
 17 files changed, 462 insertions(+), 1 deletion(-)
 create mode 100755 create_ssh_config.sh
 create mode 100644 overlay-network/README.md
 create mode 100755 overlay-network/create_vms.sh
 create mode 100755 overlay-network/setup_wg_mesh.sh
 create mode 100644 overlay-network/vm_configs/template-1.yaml
 create mode 100644 overlay-network/vm_configs/template-2.yaml
 create mode 100644 overlay-network/vm_configs/template-3.yaml
 create mode 100644 overlay-network/vm_configs/template-n.yaml
 create mode 100644 overlay-network/vm_configs/template-x.yaml
 create mode 100644 surrealdb_tikv_prod/README.md
 create mode 100755 surrealdb_tikv_prod/deploy_db.sh
 create mode 100755 surrealdb_tikv_prod/deploy_nodes.sh
 create mode 100644 surrealdb_tikv_prod/prepare_bastion.sh
 create mode 100644 surrealdb_tikv_prod/prod_cluster.yaml
 create mode 100644 surrealdb_tikv_prod/staging_cluster.yaml
 create mode 100644 surrealdb_tikv_prod/surrealdb.service

diff --git a/README.md b/README.md
index 05dfdea..b392199 100644
--- a/README.md
+++ b/README.md
@@ -3,5 +3,6 @@ This repository has various deployment examples of real world software to the
 DeTEE network. The examples currently include:
 - [Gitea on DeTEE](https://gitea.detee.cloud/general/examples/src/branch/master/gitea/deploy_gitea.sh) - A small bash script that deploys a Gitea server (just like this one) to a VM on DeTEE
 - [Ansible Postgres](https://gitea.detee.cloud/general/examples/src/branch/master/ansible-postgres) - Deploy a Postgres DB and a read replica via Ansible to two DeTEE VMs.
-- [Wireguard Overlay](https://gitea.detee.cloud/general/examples/src/branch/master/wireguard-bastion) - Hide resources behind VPN, by leveraging VM deployments on DeTEE.
+- [Wireguard DMZ](https://gitea.detee.cloud/general/examples/src/branch/master/wireguard-bastion) - Hide resources behind WireGuard VPN, by leveraging VM deployments on DeTEE.
+- [Overlay Network](https://gitea.detee.cloud/general/examples/src/branch/master/overlay-network) - Automated deployment of an encrypted network overlay (full-mesh between VMs).
 - [Kubernetes (k3s)](https://gitea.detee.cloud/general/examples/src/branch/master/kubernetes) - Use k3s to deploy 5 Kubernetes nodes to DeTEE VMs, forming a small cluster.
diff --git a/create_ssh_config.sh b/create_ssh_config.sh
new file mode 100755
index 0000000..838792c
--- /dev/null
+++ b/create_ssh_config.sh
@@ -0,0 +1,30 @@
+#!/bin/bash
+
+# This script will populate ~/.ssh/conf.d/detee.conf with SSH data for all DeTEE VMs.
+# After running the script, you will be able to SSH into all VMs by using the hostname
+
+mkdir -p ~/.ssh/conf.d/
+config="${HOME}/.ssh/conf.d/detee.conf"
+echo > "$config"
+export FORMAT=YAML
+
+process_vm() {
+    vm_id="$1"
+    local tmp="/tmp/detee_vm_ssh_details"
+    detee-cli vm ssh $vm_id --just-print > $tmp || return
+    {
+        echo Host $(grep 'hostname: ' $tmp | awk '{ print $2 }' )
+        echo "  User root"
+        echo "  Hostname $(grep 'ip: ' $tmp | awk '{ print $2 }' )"
+        echo "  Port $(grep 'port: ' $tmp | cut -d "'" -f2 )"
+        echo
+    } >> $config
+}
+
+detee-cli vm list | grep uuid | awk '{ print $NF}' |
+    while IFS= read -r vm_id; do
+        process_vm "$vm_id"
+    done
+
+grep 'Include ~/.ssh/conf.d/*.conf' "${HOME}/.ssh/config" > /dev/null 2>&1 ||
+    echo 'Include ~/.ssh/conf.d/*.conf' >> "${HOME}/.ssh/config"
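As a rough illustration of the result of `create_ssh_config.sh` (the VM name, address, and port below are made-up placeholders, not real `detee-cli` output), a run might leave something like this behind:

```bash
# Hypothetical example: one VM named "template-1"; address and port are
# placeholders. The script writes one Host stanza per VM.
./create_ssh_config.sh

cat ~/.ssh/conf.d/detee.conf
# Host template-1
#   User root
#   Hostname 203.0.113.10
#   Port 31415
#
# ...one stanza per VM...

# ~/.ssh/config now carries "Include ~/.ssh/conf.d/*.conf", so every VM is
# reachable by its hostname:
ssh template-1 uname -a
```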
diff --git a/overlay-network/README.md b/overlay-network/README.md
new file mode 100644
index 0000000..5436abf
--- /dev/null
+++ b/overlay-network/README.md
@@ -0,0 +1,15 @@
+# Overlay Network
+
+These scripts allow you to create an overlay network on top of DeTEE VMs. The
+VMs do not need a public IP, as the VPN mesh uses the forwarded port.
+Every VM in the network gets an IP in the subnet `10.254.254.0/24`.
+
+The VMs are connected in a full-mesh topology, meaning each VM can communicate
+with every other VM directly. Here is a graphical representation of a full
+mesh on Wikipedia: https://en.wikipedia.org/wiki/File:FullMeshNetwork.svg
+
+To create the VMs, run `./create_vms.sh`.
+
+To deploy the network overlay, run `./setup_wg_mesh.sh`. This creates an
+overlay on top of all the VMs previously created by `./create_vms.sh`,
+assigning the private IPs in alphabetical order of the VM hostnames.
diff --git a/overlay-network/create_vms.sh b/overlay-network/create_vms.sh
new file mode 100755
index 0000000..1037887
--- /dev/null
+++ b/overlay-network/create_vms.sh
@@ -0,0 +1,14 @@
+#!/bin/bash
+script_dir=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
+cd $script_dir
+set -e
+export FORMAT=YAML
+mkdir -p tmp/vms
+
+for vm_config in vm_configs/*; do
+    vm_name=$(echo $vm_config | cut -d '/' -f2 | cut -d '.' -f1)
+    detee-cli vm deploy --from-yaml $vm_config > tmp/vms/${vm_name}_install.yaml &&
+        echo "The VM $vm_name was created." &
+done
+
+wait
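As a hedged sketch (not shipped by this patch), here is a rough end-to-end check once `./create_vms.sh` and `./setup_wg_mesh.sh` (next section) have both run. Execute it from the `overlay-network` directory; it only relies on the `ip:` and `port:` fields that the repository's own scripts grep out of `tmp/vms/*_install.yaml`, and it assumes `ping` is available inside the VMs:

```bash
# Sketch: list each VM's SSH endpoint and ping every other mesh IP from it.
# Private IPs are numbered in the same alphabetical order setup_wg_mesh.sh uses.
files=(tmp/vms/*_install.yaml)
total=${#files[@]}
i=0
for f in "${files[@]}"; do
    i=$((i + 1))
    name=${f##*/}; name=${name%_install.yaml}
    ip=$(grep 'ip: ' "$f" | awk '{ print $NF }')
    port=$(grep 'port: ' "$f" | cut -d "'" -f2)
    echo "$name -> ssh -p $port root@$ip (mesh IP 10.254.254.$i)"
    for peer in $(seq 1 "$total"); do
        [ "$peer" -eq "$i" ] && continue
        if ssh -p "$port" root@"$ip" ping -c 1 -W 2 "10.254.254.$peer" > /dev/null; then
            echo "  10.254.254.$i -> 10.254.254.$peer ok"
        else
            echo "  10.254.254.$i -> 10.254.254.$peer FAILED"
        fi
    done
done
```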
diff --git a/overlay-network/setup_wg_mesh.sh b/overlay-network/setup_wg_mesh.sh
new file mode 100755
index 0000000..58697e3
--- /dev/null
+++ b/overlay-network/setup_wg_mesh.sh
@@ -0,0 +1,98 @@
+#!/bin/bash
+script_dir=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
+cd $script_dir
+set -e
+export FORMAT=YAML
+mkdir -p tmp/wg
+mkdir -p tmp/logs
+rm tmp/vms/*inspect.yaml || true
+vms=()
+
+# inspect VMs
+for vm_config in $(grep -r uuid: tmp/vms/ | awk '{ print $2}'); do
+    vm_id=$(echo $vm_config | cut -d '/' -f2 | cut -d '.' -f1)
+    detee-cli vm inspect $vm_id > tmp/vms/${vm_id}_inspect.yaml
+    vm_name=$(grep 'hostname: ' tmp/vms/${vm_id}_inspect.yaml |
+        awk '{ print $2 }')
+    mv tmp/vms/${vm_id}_inspect.yaml tmp/vms/${vm_name}_inspect.yaml
+done
+
+# define VM object
+vm_count=0
+new_vm() {
+    (( vm_count++ )) || true
+    local vm_name="$1"
+    local vm_id="vm$vm_count"
+
+    local vm_install_data="tmp/vms/${vm_name}_install.yaml"
+    local vm_inspect_data="tmp/vms/${vm_name}_inspect.yaml"
+
+    vm_node_ip=$(grep 'ip: ' $vm_install_data | awk '{ print $2 }')
+    vm_port=$(grep exposed_ports -A 1 $vm_inspect_data | tail -1 | grep -oE "[0-9]*")
+    wg_privkey=$(wg genkey)
+    wg_pubkey=$(echo $wg_privkey | wg pubkey)
+
+    declare -gA "$vm_id"
+    eval "$vm_id[id]=$vm_count"
+    eval "$vm_id[name]=$vm_name"
+    eval "$vm_id[port]=$vm_port"
+    eval "$vm_id[node_ip]=$vm_node_ip"
+    eval "$vm_id[private_ip]=10.254.254.$vm_count"
+    eval "$vm_id[wg_priv]=$wg_privkey"
+    eval "$vm_id[wg_pub]=$wg_pubkey"
+
+    vms+=("$vm_id")
+}
+
+# build a VM object for each VM created by create_vms.sh
+for vm_install_file in tmp/vms/*_install.yaml; do
+    vm_name=$(echo $vm_install_file | cut -d '/' -f3 | cut -d '_' -f1)
+    new_vm $vm_name
+done
+
+# configure /etc/hosts and WireGuard on every VM in the array
+for main_vm_loop in "${vms[@]}"; do
+    declare -n main_vm_ref="$main_vm_loop"
+    wg_file="tmp/wg/${main_vm_ref[name]}.ini"
+    {
+        echo "[Interface]"
+        echo "Address = ${main_vm_ref[private_ip]}"
+        echo "PrivateKey = ${main_vm_ref[wg_priv]}"
+        echo "ListenPort = 22"
+    } > ${wg_file}
+
+    ssh="ssh -p ${main_vm_ref[port]} root@${main_vm_ref[node_ip]}"
+    $ssh sed -i '/10.254.254./d' /etc/hosts
+    echo ${main_vm_ref[private_ip]} ${main_vm_ref[name]} | $ssh tee -a /etc/hosts > /dev/null
+
+    for inner_vm_loop in "${vms[@]}"; do
+        declare -n inner_vm_ref="$inner_vm_loop"
+        [[ "${inner_vm_ref[id]}" == "${main_vm_ref[id]}" ]] && continue
+        echo ${inner_vm_ref[private_ip]} ${inner_vm_ref[name]} | $ssh tee -a /etc/hosts > /dev/null
+        {
+            echo
+            echo "[Peer]"
+            echo "PublicKey = ${inner_vm_ref[wg_pub]}"
+            echo "Endpoint = ${inner_vm_ref[node_ip]}:${inner_vm_ref[port]}"
+            echo "AllowedIPs = ${inner_vm_ref[private_ip]}"
+            echo "PersistentKeepalive = 25"
+        } >> ${wg_file}
+    done
+    echo WireGuard config written to ${wg_file}
+
+    $ssh pacman -Syu --noconfirm > tmp/logs/${main_vm_ref[name]}.log 2>&1
+    $ssh pacman -S wireguard-tools --needed --noconfirm >> tmp/logs/${main_vm_ref[name]}.log 2>&1
+    echo Packages installed for ${main_vm_ref[name]}
+
+    # TODO: enable this if needed, or delete from code
+    # $ssh sysctl -w net.ipv4.conf.all.forwarding=1 > /dev/null
+    cat ${wg_file} | $ssh tee /etc/wireguard/brain.conf > /dev/null
+    {
+        $ssh wg-quick down brain || true
+        $ssh wg-quick up brain || true
+        $ssh wg-quick up brain || true
+        $ssh systemctl enable wg-quick@brain || true
+    } >> tmp/logs/${main_vm_ref[name]}.log 2>&1
+
+    echo WireGuard started on ${main_vm_ref[name]}
+done
diff --git a/overlay-network/vm_configs/template-1.yaml b/overlay-network/vm_configs/template-1.yaml
new file mode 100644
index 0000000..8afd05f
--- /dev/null
+++ b/overlay-network/vm_configs/template-1.yaml
@@ -0,0 +1,10 @@
+hostname: template-1
+hours: 2
+price: 20000
+location:
+  country: "FR"
+ipv4: !PublishPorts [ ]
+public_ipv6: false
+vcpus: 4
+memory_mb: 8000
+disk_size_gb: 60
diff --git a/overlay-network/vm_configs/template-2.yaml b/overlay-network/vm_configs/template-2.yaml
new file mode 100644
index 0000000..cfdfa7d
--- /dev/null
+++ b/overlay-network/vm_configs/template-2.yaml
@@ -0,0 +1,10 @@
+hostname: template-2
+hours: 2
+price: 20000
+location:
+  country: "GB"
+ipv4: !PublishPorts [ ]
+public_ipv6: false
+vcpus: 4
+memory_mb: 8000
+disk_size_gb: 60
diff --git a/overlay-network/vm_configs/template-3.yaml b/overlay-network/vm_configs/template-3.yaml
new file mode 100644
index 0000000..d79625c
--- /dev/null
+++ b/overlay-network/vm_configs/template-3.yaml
@@ -0,0 +1,10 @@
+hostname: template-3
+hours: 2
+price: 20000
+location:
+  country: "US"
+ipv4: !PublishPorts [ ]
+public_ipv6: false
+vcpus: 4
+memory_mb: 8000
+disk_size_gb: 60
diff --git a/overlay-network/vm_configs/template-n.yaml b/overlay-network/vm_configs/template-n.yaml
new file mode 100644
index 0000000..869f0dd
--- /dev/null
+++ b/overlay-network/vm_configs/template-n.yaml
@@ -0,0 +1,10 @@
+hostname: template-n
+hours: 2
+price: 20000
+location:
+  country: "US"
+ipv4: !PublishPorts [ ]
+public_ipv6: false
+vcpus: 2
+memory_mb: 4400
+disk_size_gb: 20
diff --git a/overlay-network/vm_configs/template-x.yaml b/overlay-network/vm_configs/template-x.yaml
new file mode 100644
index 0000000..14b5f83
--- /dev/null
+++ b/overlay-network/vm_configs/template-x.yaml
@@ -0,0 +1,10 @@
+hostname: template-x
+hours: 2
+price: 20000
+location:
+  country: "FR"
+ipv4: !PublishPorts [ ]
+public_ipv6: false
+vcpus: 4
+memory_mb: 8000
+disk_size_gb: 60
diff --git a/surrealdb_tikv_prod/README.md b/surrealdb_tikv_prod/README.md
new file mode 100644
index 0000000..bea41e1
--- /dev/null
+++ b/surrealdb_tikv_prod/README.md
@@ -0,0 +1,32 @@
+# Production SurrealDB setup with TiKV
+
+This repo uses the [Overlay
+Network](https://gitea.detee.cloud/general/examples/src/branch/master/overlay-network)
+under the hood to protect the database nodes, as TLS is not fully supported for
+SurrealDB + TiKV. We are using this setup internally for the contract platform
+called "The Brain".
+
+TiKV is a distributed key-value database. SurrealDB is a database that offers an
+SQL-like syntax to manage data saved in a production-grade TiKV cluster. This
+repository will deploy two clusters, one for staging and one for production.
+Each cluster has its own monitoring node. The clusters get deployed from the
+bastion node. As a result, the following nodes get created:
+- `bastion-brain`
+- `prod-brain-1`
+- `prod-brain-2`
+- `prod-brain-3`
+- `prod-brain-mon`
+- `staging-brain-1`
+- `staging-brain-2`
+- `staging-brain-3`
+- `staging-brain-mon`
+
+Feel free to change the naming if you are deploying a similar setup for your own
+purposes.
+
+Since the database cluster lives on top of an encrypted network overlay,
+internal communication is protected by WireGuard against outside attacks. To
+access the cluster, feel free to create a secondary WireGuard VPN connection
+from your own laptop to the cluster.
+
+For any questions, don't hesitate to join us on Discord.
diff --git a/surrealdb_tikv_prod/deploy_db.sh b/surrealdb_tikv_prod/deploy_db.sh
new file mode 100755
index 0000000..aab6961
--- /dev/null
+++ b/surrealdb_tikv_prod/deploy_db.sh
@@ -0,0 +1,43 @@
+#!/bin/bash
+script_dir=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
+cd $script_dir
+set -e
+
+ssh_command() {
+    vm_name="$1"
+    vm_file="tmp/overlay-network/tmp/vms/${vm_name}_install.yaml"
+    ip="$(grep 'ip: ' $vm_file | awk '{ print $NF }')"
+    port="$(grep 'port: ' $vm_file | cut -d "'" -f2)"
+    echo ssh -p $port root@$ip
+}
+export -f ssh_command
+
+ssh_prod_brain1="$(ssh_command prod-brain-1)"
+ssh_prod_brain2="$(ssh_command prod-brain-2)"
+ssh_prod_brain3="$(ssh_command prod-brain-3)"
+ssh_prod_mon="$(ssh_command prod-brain-mon)"
+ssh_staging_brain1="$(ssh_command staging-brain-1)"
+ssh_staging_brain2="$(ssh_command staging-brain-2)"
+ssh_staging_brain3="$(ssh_command staging-brain-3)"
+ssh_staging_mon="$(ssh_command staging-brain-mon)"
+export ssh_bastion="$(ssh_command bastion-brain)"
+
+$ssh_bastion 'ssh-keygen -t ed25519 -f /root/.ssh/id_ed25519 -N ""'
+bastion_pubkey=$($ssh_bastion cat /root/.ssh/id_ed25519.pub)
+echo $bastion_pubkey | $ssh_prod_brain1 tee -a /root/.ssh/authorized_keys
+echo $bastion_pubkey | $ssh_prod_brain2 tee -a /root/.ssh/authorized_keys
+echo $bastion_pubkey | $ssh_prod_brain3 tee -a /root/.ssh/authorized_keys
+echo $bastion_pubkey | $ssh_prod_mon tee -a /root/.ssh/authorized_keys
+echo $bastion_pubkey | $ssh_staging_brain1 tee -a /root/.ssh/authorized_keys
+echo $bastion_pubkey | $ssh_staging_brain2 tee -a /root/.ssh/authorized_keys
+echo $bastion_pubkey | $ssh_staging_brain3 tee -a /root/.ssh/authorized_keys
+echo $bastion_pubkey | $ssh_staging_mon tee -a /root/.ssh/authorized_keys
+
+$ssh_bastion curl -o /root/prepare.sh \
+    https://gitea.detee.cloud/general/examples/raw/branch/master/surrealdb_tikv_prod/prepare_bastion.sh
+$ssh_bastion curl -o /root/prod_cluster.yaml \
+    https://gitea.detee.cloud/general/examples/raw/branch/master/surrealdb_tikv_prod/prod_cluster.yaml
+$ssh_bastion curl -o /root/staging_cluster.yaml \
+    https://gitea.detee.cloud/general/examples/raw/branch/master/surrealdb_tikv_prod/staging_cluster.yaml
+$ssh_bastion chmod +x /root/prepare.sh
+$ssh_bastion /root/prepare.sh
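Once `deploy_db.sh` has finished, the database nodes can be smoke-tested from `bastion-brain`. This is a hedged sketch rather than something the patch ships: it assumes SurrealDB's `/health` endpoint (which should only return 200 while the server can reach its datastore) and relies on the `/etc/hosts` names distributed by the mesh setup:

```bash
# Sketch: run on bastion-brain. Port 8080 is open to 10.254.254.0/24 by the
# iptables rules in prepare_bastion.sh, and the node names resolve through
# the /etc/hosts entries written by setup_wg_mesh.sh.
for node in prod-brain-1 prod-brain-2 prod-brain-3 \
            staging-brain-1 staging-brain-2 staging-brain-3; do
    code=$(curl -s -o /dev/null -w '%{http_code}' "http://${node}:8080/health")
    echo "$node: HTTP $code"
done
```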
diff --git a/surrealdb_tikv_prod/deploy_nodes.sh b/surrealdb_tikv_prod/deploy_nodes.sh
new file mode 100755
index 0000000..1b614b0
--- /dev/null
+++ b/surrealdb_tikv_prod/deploy_nodes.sh
@@ -0,0 +1,52 @@
+#!/bin/bash
+script_dir=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
+cd $script_dir
+set -e
+export FORMAT=YAML
+mkdir -p tmp/
+
+rm -rf tmp/overlay-network
+cp -r ../overlay-network tmp/
+rm -rf tmp/overlay-network/tmp
+cd tmp/overlay-network/vm_configs
+find . -maxdepth 1 -type f ! -name 'template-n.yaml' -exec rm -- '{}' +
+
+setup_vm () {
+    vm_name="$1"
+    cp template-n.yaml $vm_name.yaml
+    sed -i "s/template-n/$vm_name/" $vm_name.yaml
+    sed -i '/PublishPorts/d' $vm_name.yaml
+    echo "ipv4: !PublishPorts [ 31337 ]" >> $vm_name.yaml
+    sed -i '/hours:/d' $vm_name.yaml
+    echo "hours: 800" >> $vm_name.yaml
+    country_options=(GB FR CA US)
+    country=${country_options[RANDOM % ${#country_options[@]}]}
+    [[ "$vm_name" == "bastion-brain" ]] && country="FR"
+    sed -i '/location:/d' $vm_name.yaml
+    sed -i '/country:/d' $vm_name.yaml
+    echo "location:" >> $vm_name.yaml
+    echo "  country: \"$country\"" >> $vm_name.yaml
+    if [[ "$vm_name" == *mon ]]; then
+        sed -i '/vcpus:/d' $vm_name.yaml
+        sed -i '/memory_mb:/d' $vm_name.yaml
+        echo "vcpus: 1" >> $vm_name.yaml
+        echo "memory_mb: 2200" >> $vm_name.yaml
+    fi
+}
+
+setup_vm staging-brain-1
+setup_vm staging-brain-2
+setup_vm staging-brain-3
+setup_vm staging-brain-mon
+setup_vm prod-brain-1
+setup_vm prod-brain-2
+setup_vm prod-brain-3
+setup_vm prod-brain-mon
+setup_vm bastion-brain
+rm template-n.yaml
+
+cd ../
+echo CREATING VMS!
+./create_vms.sh
+sleep 10
+./setup_wg_mesh.sh
diff --git a/surrealdb_tikv_prod/prepare_bastion.sh b/surrealdb_tikv_prod/prepare_bastion.sh
new file mode 100644
index 0000000..4ca645e
--- /dev/null
+++ b/surrealdb_tikv_prod/prepare_bastion.sh
@@ -0,0 +1,59 @@
+#!/bin/bash
+curl -sSf https://tiup-mirrors.pingcap.com/install.sh | sh
+
+LOG_DIR=/root/brain_logs
+mkdir -p "$LOG_DIR"
+
+nodes=(
+    prod-brain-1
+    prod-brain-2
+    prod-brain-3
+    prod-brain-mon
+    staging-brain-1
+    staging-brain-2
+    staging-brain-3
+    staging-brain-mon
+)
+
+surreal_pass=$(openssl rand -base64 20 | tr -d '=/+')
+
+for host in "${nodes[@]}"; do
+    ssh -o StrictHostKeyChecking=no "$host" -- \
+        pacman -S --noconfirm sudo \
+        >> "${LOG_DIR}/${host}" 2>&1
+
+    ssh "$host" iptables -I INPUT 1 \
+        -p tcp -s 10.254.254.0/24 \
+        -m multiport --dports 20180,9115,9100,20160,2380,2379,8080 \
+        -j ACCEPT
+
+    ssh "$host" iptables -I INPUT 1 \
+        -p tcp -s 127.0.0.0/8 \
+        -m multiport --dports 20180,9115,9100,20160,2380,2379,8080 \
+        -j ACCEPT
+
+    ssh "$host" iptables -A INPUT \
+        -p tcp \
+        -m multiport --dports 20180,9115,9100,20160,2380,2379,8080 \
+        -j DROP
+
+    echo $host | grep mon > /dev/null && continue
+
+    curl -sSf https://install.surrealdb.com | ssh $host sh
+    echo SURREAL_PASS=$surreal_pass | ssh $host tee /opt/surreal_env > /dev/null
+    ssh $host curl -o /etc/systemd/system/surrealdb.service \
+        https://gitea.detee.cloud/general/examples/raw/branch/master/surrealdb_tikv_prod/surrealdb.service
+    ssh $host systemctl daemon-reload
+    ssh $host systemctl enable --now surrealdb.service
+done
+
+/root/.tiup/bin/tiup cluster deploy \
+    staging-brain v8.5.1 /root/staging_cluster.yaml \
+    --user root -i ~/.ssh/id_ed25519
+
+/root/.tiup/bin/tiup cluster deploy \
+    prod-brain v8.5.1 /root/prod_cluster.yaml \
+    --user root -i ~/.ssh/id_ed25519
+
+/root/.tiup/bin/tiup cluster start staging-brain --init
+/root/.tiup/bin/tiup cluster start prod-brain --init
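After `prepare_bastion.sh` completes, the two TiKV clusters can be checked with standard `tiup` commands on the bastion; a minimal sketch (not part of the patch):

```bash
# Sketch: run on bastion-brain. "display" should show every PD, TiKV,
# Prometheus, Grafana and Alertmanager component in the Up state.
/root/.tiup/bin/tiup cluster list
/root/.tiup/bin/tiup cluster display staging-brain
/root/.tiup/bin/tiup cluster display prod-brain
```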
diff --git a/surrealdb_tikv_prod/prod_cluster.yaml b/surrealdb_tikv_prod/prod_cluster.yaml
new file mode 100644
index 0000000..77ade6e
--- /dev/null
+++ b/surrealdb_tikv_prod/prod_cluster.yaml
@@ -0,0 +1,24 @@
+global:
+  user: "brain"
+  ssh_port: 22
+  deploy_dir: "/opt/brain_deployment"
+  data_dir: "/opt/brain_data"
+
+pd_servers:
+  - host: prod-brain-1
+  - host: prod-brain-2
+  - host: prod-brain-3
+
+tikv_servers:
+  - host: prod-brain-1
+  - host: prod-brain-2
+  - host: prod-brain-3
+
+monitoring_servers:
+  - host: prod-brain-mon
+
+grafana_servers:
+  - host: prod-brain-mon
+
+alertmanager_servers:
+  - host: prod-brain-mon
diff --git a/surrealdb_tikv_prod/staging_cluster.yaml b/surrealdb_tikv_prod/staging_cluster.yaml
new file mode 100644
index 0000000..9fe5563
--- /dev/null
+++ b/surrealdb_tikv_prod/staging_cluster.yaml
@@ -0,0 +1,24 @@
+global:
+  user: "brain"
+  ssh_port: 22
+  deploy_dir: "/opt/brain_deployment"
+  data_dir: "/opt/brain_data"
+
+pd_servers:
+  - host: staging-brain-1
+  - host: staging-brain-2
+  - host: staging-brain-3
+
+tikv_servers:
+  - host: staging-brain-1
+  - host: staging-brain-2
+  - host: staging-brain-3
+
+monitoring_servers:
+  - host: staging-brain-mon
+
+grafana_servers:
+  - host: staging-brain-mon
+
+alertmanager_servers:
+  - host: staging-brain-mon
diff --git a/surrealdb_tikv_prod/surrealdb.service b/surrealdb_tikv_prod/surrealdb.service
new file mode 100644
index 0000000..5efd9b3
--- /dev/null
+++ b/surrealdb_tikv_prod/surrealdb.service
@@ -0,0 +1,19 @@
+[Unit]
+Description=SurrealDB server
+After=network-online.target
+Wants=network-online.target
+Requires=tikv-20160.service
+After=tikv-20160.service
+
+[Service]
+Type=simple
+EnvironmentFile=/opt/surreal_env
+ExecStart=/usr/local/bin/surreal start \
+    --user root --pass ${SURREAL_PASS} \
+    --bind 0.0.0.0:8080 \
+    tikv://127.0.0.1:2379
+Restart=on-failure
+RestartSec=15s
+
+[Install]
+WantedBy=multi-user.target
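Finally, a minimal smoke test for the unit above, run on one of the database nodes. This is a sketch rather than part of the patch, and the `surreal sql` flags follow recent SurrealDB CLI releases, so verify them against whatever version `install.surrealdb.com` actually installed:

```bash
# Sketch: check the service and run a trivial query as the root user
# configured by surrealdb.service (password comes from /opt/surreal_env).
systemctl --no-pager status surrealdb.service

source /opt/surreal_env
echo "INFO FOR ROOT;" | surreal sql \
    --endpoint http://127.0.0.1:8080 \
    --username root --password "$SURREAL_PASS"
```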