network overlay and production surrealdb setup
parent ae3999d4f7
commit b6e0a3bf9f
@@ -3,5 +3,6 @@
 This repository has various deployment examples of real world software to the DeTEE network. The examples currently include:
 - [Gitea on DeTEE](https://gitea.detee.cloud/general/examples/src/branch/master/gitea/deploy_gitea.sh) - A small bash script that deploys a Gitea server (just like this one) to a VM on DeTEE
 - [Ansible Postgres](https://gitea.detee.cloud/general/examples/src/branch/master/ansible-postgres) - Deploy a Postgres DB and a read replica via Ansible to two DeTEE VMs.
-- [Wireguard Overlay](https://gitea.detee.cloud/general/examples/src/branch/master/wireguard-bastion) - Hide resources behind VPN, by leveraging VM deployments on DeTEE.
+- [Wireguard DMZ](https://gitea.detee.cloud/general/examples/src/branch/master/wireguard-bastion) - Hide resources behind WireGuard VPN, by leveraging VM deployments on DeTEE.
+- [Overlay Network](https://gitea.detee.cloud/general/examples/src/branch/master/overlay-network) - Automated deployment of an encrypted network overlay (full-mesh between VMs).
 - [Kubernetes (k3s)](https://gitea.detee.cloud/general/examples/src/branch/master/kubernetes) - Use k3s to deploy 5 Kubernetes nodes to DeTEE VMs, forming a small cluster.
create_ssh_config.sh (Executable file, 30 lines)
@@ -0,0 +1,30 @@
#!/bin/bash

# This script will populate ~/.ssh/conf.d/detee.conf with SSH data for all DeTEE VMs.
# After running the script, you will be able to SSH into all VMs by using the hostname

mkdir -p ~/.ssh/conf.d/
config="${HOME}/.ssh/conf.d/detee.conf"
echo > "$config"
export FORMAT=YAML

process_vm() {
    vm_id="$1"
    local tmp="/tmp/detee_vm_ssh_details"
    detee-cli vm ssh $vm_id --just-print > $tmp || return
    {
        echo Host $(grep 'hostname: ' $tmp | awk '{ print $2 }')
        echo " User root"
        echo " Hostname $(grep 'ip: ' $tmp | awk '{ print $2 }')"
        echo " Port $(grep 'port: ' $tmp | cut -d "'" -f2)"
        echo
    } >> $config
}

detee-cli vm list | grep uuid | awk '{ print $NF }' |
    while IFS= read -r vm_id; do
        process_vm "$vm_id"
    done

grep 'Include ~/.ssh/conf.d/*.conf' "${HOME}/.ssh/config" > /dev/null 2>&1 ||
    echo 'Include ~/.ssh/conf.d/*.conf' >> "${HOME}/.ssh/config"
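For reference, each entry the script appends to ~/.ssh/conf.d/detee.conf has this shape; the hostname, IP, and port below are made-up placeholders:

    Host template-1
     User root
     Hostname 203.0.113.10
     Port 31337

Once the Include line is present in ~/.ssh/config, `ssh template-1` connects straight to that VM as root.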
overlay-network/README.md (Normal file, 17 lines)
@@ -0,0 +1,17 @@
# Overlay Network

These scripts allow you to create an overlay network on top of DeTEE VMs. These
VMs do not need a public IP, as the VPN mesh will use the forwarded port.
Every VM in the network will get an IP in the subnet `10.254.254.0/24`.

The VMs will be connected in a full-mesh topology, meaning each VM can
communicate with every other VM directly.



To create the VMs, run `./create_vms.sh`.

To deploy the network overlay, run `./setup_wg_mesh.sh`. This will create an
overlay on top of all the VMs created previously using the `./create_vms.sh`
script, assigning IPs in alphabetical order.
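For orientation, the per-VM config that `./setup_wg_mesh.sh` writes to /etc/wireguard/brain.conf looks roughly like this; the keys, the endpoint address, and the port are placeholders:

    [Interface]
    Address = 10.254.254.1
    PrivateKey = <output of wg genkey for this VM>
    ListenPort = 22

    [Peer]
    PublicKey = <public key of the other VM>
    Endpoint = 203.0.113.20:31337
    AllowedIPs = 10.254.254.2
    PersistentKeepalive = 25

One [Peer] block is appended for every other VM in the mesh, and matching hostname entries are written to /etc/hosts on each VM.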
overlay-network/create_vms.sh (Executable file, 14 lines)
@@ -0,0 +1,14 @@
#!/bin/bash
script_dir=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
cd $script_dir
set -e
export FORMAT=YAML
mkdir -p tmp/vms

for vm_config in vm_configs/*; do
    vm_name=$(echo $vm_config | cut -d '/' -f2 | cut -d '.' -f1)
    detee-cli vm deploy --from-yaml $vm_config > tmp/vms/${vm_name}_install.yaml &&
        echo "The VM $vm_name got created." &
done

wait
overlay-network/setup_wg_mesh.sh (Executable file, 98 lines)
@@ -0,0 +1,98 @@
#!/bin/bash
script_dir=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
cd $script_dir
set -e
export FORMAT=YAML
mkdir -p tmp/wg
mkdir -p tmp/logs
rm tmp/vms/*inspect.yaml || true
vms=()

# inspect VMs
for vm_config in $(grep -r uuid: tmp/vms/ | awk '{ print $2}'); do
    vm_id=$(echo $vm_config | cut -d '/' -f2 | cut -d '.' -f1)
    detee-cli vm inspect $vm_id > tmp/vms/${vm_id}_inspect.yaml
    vm_name=$(grep 'hostname: ' tmp/vms/${vm_id}_inspect.yaml |
        awk '{ print $2 }')
    mv tmp/vms/${vm_id}_inspect.yaml tmp/vms/${vm_name}_inspect.yaml
done

# define VM object
vm_count=0
new_vm() {
    (( vm_count++ )) || true
    local vm_name="$1"
    local vm_id="vm$vm_count"

    local vm_install_data="tmp/vms/${vm_name}_install.yaml"
    local vm_inspect_data="tmp/vms/${vm_name}_inspect.yaml"

    vm_node_ip=$(grep 'ip: ' $vm_install_data | awk '{ print $2 }')
    vm_port=$(grep exposed_ports -A 1 $vm_inspect_data | tail -1 | grep -oE "[0-9]*")
    wg_privkey=$(wg genkey)
    wg_pubkey=$(echo $wg_privkey | wg pubkey)

    declare -gA "$vm_id"
    eval "$vm_id[id]=$vm_count"
    eval "$vm_id[name]=$vm_name"
    eval "$vm_id[port]=$vm_port"
    eval "$vm_id[node_ip]=$vm_node_ip"
    eval "$vm_id[private_ip]=10.254.254.$vm_count"
    eval "$vm_id[wg_priv]=$wg_privkey"
    eval "$vm_id[wg_pub]=$wg_pubkey"

    vms+=("$vm_id")
}

# loops over all VMs
for vm_install_file in tmp/vms/*_install.yaml; do
    vm_name=$(echo $vm_install_file | cut -d '/' -f3 | cut -d '_' -f1)
    new_vm $vm_name
done

# loops over all VMs in array
for main_vm_loop in "${vms[@]}"; do
    declare -n main_vm_ref="$main_vm_loop"
    wg_file="tmp/wg/${main_vm_ref[name]}.ini"
    {
        echo "[Interface]"
        echo "Address = ${main_vm_ref[private_ip]}"
        echo "PrivateKey = ${main_vm_ref[wg_priv]}"
        echo "ListenPort = 22"
    } > ${wg_file}

    ssh="ssh -p ${main_vm_ref[port]} root@${main_vm_ref[node_ip]}"
    $ssh sed -i '/10.254.254./d' /etc/hosts
    echo ${main_vm_ref[private_ip]} ${main_vm_ref[name]} | $ssh tee -a /etc/hosts > /dev/null

    for inner_vm_loop in "${vms[@]}"; do
        declare -n inner_vm_ref="$inner_vm_loop"
        [[ "${inner_vm_ref[id]}" == "${main_vm_ref[id]}" ]] && continue
        echo ${inner_vm_ref[private_ip]} ${inner_vm_ref[name]} | $ssh tee -a /etc/hosts > /dev/null
        {
            echo
            echo "[Peer]"
            echo "PublicKey = ${inner_vm_ref[wg_pub]}"
            echo "Endpoint = ${inner_vm_ref[node_ip]}:${inner_vm_ref[port]}"
            echo "AllowedIPs = ${inner_vm_ref[private_ip]}"
            echo "PersistentKeepalive = 25"
        } >> ${wg_file}
    done
    echo WireGuard config written to ${wg_file}

    $ssh pacman -Syu --noconfirm > tmp/logs/${main_vm_ref[name]}.log 2>&1
    $ssh pacman -S wireguard-tools --needed --noconfirm >> tmp/logs/${main_vm_ref[name]}.log 2>&1
    echo Packages installed for ${main_vm_ref[name]}

    # TODO: enable this if needed, or delete from code
    # $ssh sysctl -w net.ipv4.conf.all.forwarding=1 > /dev/null
    cat ${wg_file} | $ssh tee /etc/wireguard/brain.conf > /dev/null
    {
        $ssh wg-quick down brain || true
        $ssh wg-quick up brain || true
        $ssh wg-quick up brain || true
        $ssh systemctl enable wg-quick@brain || true
    } >> tmp/logs/${main_vm_ref[name]}.log 2>&1

    echo WireGuard started on ${main_vm_ref[name]}
done
overlay-network/vm_configs/template-1.yaml (Normal file, 10 lines)
@@ -0,0 +1,10 @@
hostname: template-1
hours: 2
price: 20000
location:
  country: "FR"
ipv4: !PublishPorts [ ]
public_ipv6: false
vcpus: 4
memory_mb: 8000
disk_size_gb: 60
overlay-network/vm_configs/template-2.yaml (Normal file, 10 lines)
@@ -0,0 +1,10 @@
hostname: template-2
hours: 2
price: 20000
location:
  country: "GB"
ipv4: !PublishPorts [ ]
public_ipv6: false
vcpus: 4
memory_mb: 8000
disk_size_gb: 60
overlay-network/vm_configs/template-3.yaml (Normal file, 10 lines)
@@ -0,0 +1,10 @@
hostname: template-3
hours: 2
price: 20000
location:
  country: "US"
ipv4: !PublishPorts [ ]
public_ipv6: false
vcpus: 4
memory_mb: 8000
disk_size_gb: 60
overlay-network/vm_configs/template-n.yaml (Normal file, 10 lines)
@@ -0,0 +1,10 @@
hostname: template-n
hours: 2
price: 20000
location:
  country: "US"
ipv4: !PublishPorts [ ]
public_ipv6: false
vcpus: 2
memory_mb: 4400
disk_size_gb: 20
overlay-network/vm_configs/template-x.yaml (Normal file, 10 lines)
@@ -0,0 +1,10 @@
hostname: template-x
hours: 2
price: 20000
location:
  country: "FR"
ipv4: !PublishPorts [ ]
public_ipv6: false
vcpus: 4
memory_mb: 8000
disk_size_gb: 60
overlay-network/wireguard_full_mesh.png (new binary file, 39 KiB; not shown)
surrealdb_tikv_prod/README.md (Normal file, 32 lines)
@@ -0,0 +1,32 @@
# Production SurrealDB setup with TiKV

This repo uses the [WireGuard
Overlay](https://gitea.detee.cloud/general/examples/src/branch/master/overlay-network)
under the hood to protect the database nodes, as TLS is not fully supported for
SurrealDB + TiKV. We are using this setup internally for the contract platform
called "The Brain".

TiKV is a distributed key-value database. SurrealDB is a database that offers an
SQL-like syntax to manage data saved in a production-grade TiKV cluster. This
repository will deploy two clusters, one for testing and one for production.
Each cluster has its own monitoring node. The cluster gets deployed by the
bastion node. As a result, the following nodes get created:
- `bastion-brain`
- `prod-brain-1`
- `prod-brain-2`
- `prod-brain-3`
- `prod-brain-mon`
- `staging-brain-1`
- `staging-brain-2`
- `staging-brain-3`
- `staging-brain-mon`

Feel free to change the naming if you are deploying a similar setup for your own
purposes.

Since the database cluster lives on top of an encrypted network overlay,
internal communication is protected by WireGuard from outside attacks. In order
to access the cluster, feel free to create a secondary WireGuard VPN connection
from your own laptop to the cluster; a rough client-side sketch follows below.
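A minimal sketch of that laptop-side config, assuming you pick an unused address such as 10.254.254.100 for the laptop and add a matching [Peer] block for it on one of the VMs; the keys, the endpoint IP, and the port below are placeholders:

    [Interface]
    Address = 10.254.254.100/32
    PrivateKey = <laptop private key>

    [Peer]
    PublicKey = <public key of the chosen VM>
    Endpoint = 203.0.113.30:31337
    AllowedIPs = 10.254.254.0/24
    PersistentKeepalive = 25

After the laptop is added as a peer on the VM and wg-quick@brain is restarted there, the cluster's 10.254.254.0/24 addresses become reachable from the laptop.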
For any questions, don't hesitate to join us on Discord.
surrealdb_tikv_prod/deploy_db.sh (Executable file, 43 lines)
@@ -0,0 +1,43 @@
#!/bin/bash
script_dir=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
cd $script_dir
set -e

ssh_command() {
    vm_name="$1"
    vm_file="tmp/overlay-network/tmp/vms/${vm_name}_install.yaml"
    ip="$(grep 'ip: ' $vm_file | awk '{ print $NF }')"
    port="$(grep 'port: ' $vm_file | cut -d "'" -f2)"
    echo ssh -p $port root@$ip
}
# export the function itself (plain `export ssh_command` would only export an empty variable)
export -f ssh_command

ssh_prod_brain1="$(ssh_command prod-brain-1)"
ssh_prod_brain2="$(ssh_command prod-brain-2)"
ssh_prod_brain3="$(ssh_command prod-brain-3)"
ssh_prod_mon="$(ssh_command prod-brain-mon)"
ssh_staging_brain1="$(ssh_command staging-brain-1)"
ssh_staging_brain2="$(ssh_command staging-brain-2)"
ssh_staging_brain3="$(ssh_command staging-brain-3)"
ssh_staging_mon="$(ssh_command staging-brain-mon)"
export ssh_bastion="$(ssh_command bastion-brain)"

$ssh_bastion 'ssh-keygen -t ed25519 -f /root/.ssh/id_ed25519 -N ""'
bastion_pubkey=$($ssh_bastion cat /root/.ssh/id_ed25519.pub)
echo $bastion_pubkey | $ssh_prod_brain1 tee -a /root/.ssh/authorized_keys
echo $bastion_pubkey | $ssh_prod_brain2 tee -a /root/.ssh/authorized_keys
echo $bastion_pubkey | $ssh_prod_brain3 tee -a /root/.ssh/authorized_keys
echo $bastion_pubkey | $ssh_prod_mon tee -a /root/.ssh/authorized_keys
echo $bastion_pubkey | $ssh_staging_brain1 tee -a /root/.ssh/authorized_keys
echo $bastion_pubkey | $ssh_staging_brain2 tee -a /root/.ssh/authorized_keys
echo $bastion_pubkey | $ssh_staging_brain3 tee -a /root/.ssh/authorized_keys
echo $bastion_pubkey | $ssh_staging_mon tee -a /root/.ssh/authorized_keys

$ssh_bastion curl -o /root/prepare.sh \
    https://gitea.detee.cloud/general/examples/raw/branch/master/surrealdb_tikv_prod/prepare_bastion.sh
$ssh_bastion curl -o /root/prod_cluster.yaml \
    https://gitea.detee.cloud/general/examples/raw/branch/master/surrealdb_tikv_prod/prod_cluster.yaml
$ssh_bastion curl -o /root/staging_cluster.yaml \
    https://gitea.detee.cloud/general/examples/raw/branch/master/surrealdb_tikv_prod/staging_cluster.yaml
$ssh_bastion chmod +x /root/prepare.sh
$ssh_bastion /root/prepare.sh
surrealdb_tikv_prod/deploy_nodes.sh (Executable file, 48 lines)
@@ -0,0 +1,48 @@
#!/bin/bash
script_dir=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
cd $script_dir
set -e
export FORMAT=YAML
mkdir -p tmp/

rm -rf tmp/overlay-network
cp -r ../overlay-network tmp/
rm -rf tmp/overlay-network/tmp
cd tmp/overlay-network/vm_configs
find . -maxdepth 1 -type f ! -name 'template-n.yaml' -exec rm -- '{}' +

setup_vm () {
    vm_name="$1"
    cp template-n.yaml $vm_name.yaml
    sed -i "s/template-n/$vm_name/" $vm_name.yaml
    sed -i '/PublishPorts/d' $vm_name.yaml
    echo "ipv4: !PublishPorts [ 31337 ]" >> $vm_name.yaml
    sed -i '/hours:/d' $vm_name.yaml
    echo "hours: 800" >> $vm_name.yaml
    sed -i '/location:/d' $vm_name.yaml
    sed -i '/country:/d' $vm_name.yaml
    echo "location:" >> $vm_name.yaml
    # echo " country: \"$country\"" >> $vm_name.yaml
    [[ "$vm_name" == "bastion-brain" ]] && echo ' country: "FR"' >> $vm_name.yaml || :
    [[ "$vm_name" == *-mon ]] && echo ' country: "FR"' >> $vm_name.yaml || :
    [[ "$vm_name" == *-1 ]] && echo ' city: "Montréal"' >> $vm_name.yaml || :
    [[ "$vm_name" == *-2 ]] && echo ' city: "Vancouver"' >> $vm_name.yaml || :
    [[ "$vm_name" == *-3 ]] && echo ' country: "US"' >> $vm_name.yaml || :
}

setup_vm staging-brain-1
setup_vm staging-brain-2
setup_vm staging-brain-3
setup_vm staging-brain-mon
setup_vm prod-brain-1
setup_vm prod-brain-2
setup_vm prod-brain-3
setup_vm prod-brain-mon
setup_vm bastion-brain
rm template-n.yaml

cd ../
echo CREATING VMS!
./create_vms.sh
sleep 10
./setup_wg_mesh.sh
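To make the delete-then-append edits in setup_vm easier to follow, the generated prod-brain-1.yaml ends up roughly like this (key order reflects the order of the edits above; this is a traced sketch, not a file in the repo):

    hostname: prod-brain-1
    price: 20000
    public_ipv6: false
    vcpus: 2
    memory_mb: 4400
    disk_size_gb: 20
    ipv4: !PublishPorts [ 31337 ]
    hours: 800
    location:
     city: "Montréal"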
surrealdb_tikv_prod/prepare_bastion.sh (Normal file, 65 lines)
@@ -0,0 +1,65 @@
#!/bin/bash
curl -sSf https://tiup-mirrors.pingcap.com/install.sh | sh

LOG_DIR=/root/brain_logs
mkdir -p "$LOG_DIR"

nodes=(
    prod-brain-1
    prod-brain-2
    prod-brain-3
    prod-brain-mon
    staging-brain-1
    staging-brain-2
    staging-brain-3
    staging-brain-mon
)

for host in "${nodes[@]}"; do
    ssh -o StrictHostKeyChecking=no "$host" -- \
        pacman -S --noconfirm sudo \
        >> "${LOG_DIR}/${host}" 2>&1

    ssh "$host" iptables -I INPUT 1 \
        -p tcp -s 10.254.254.0/24 \
        -m multiport --dports 20180,9115,9100,20160,2380,2379,8080 \
        -j ACCEPT

    ssh "$host" iptables -I INPUT 1 \
        -p tcp -s 127.0.0.0/8 \
        -m multiport --dports 20180,9115,9100,20160,2380,2379,8080 \
        -j ACCEPT

    ssh "$host" iptables -A INPUT \
        -p tcp \
        -m multiport --dports 20180,9115,9100,20160,2380,2379,8080 \
        -j DROP

    ssh $host iptables-save | ssh $host tee /etc/iptables/iptables.rules
    ssh $host systemctl enable --now iptables.service
done

/root/.tiup/bin/tiup cluster deploy \
    staging-brain v8.5.1 /root/staging_cluster.yaml \
    --user root -i ~/.ssh/id_ed25519

/root/.tiup/bin/tiup cluster deploy \
    prod-brain v8.5.1 /root/prod_cluster.yaml \
    --user root -i ~/.ssh/id_ed25519

/root/.tiup/bin/tiup cluster start staging-brain --init
/root/.tiup/bin/tiup cluster start prod-brain --init

surreal_pass=$(openssl rand -base64 20 | tr -d '=/+')

for host in "${nodes[@]}"; do
    echo $host | grep mon > /dev/null && continue

    curl -sSf https://install.surrealdb.com | ssh $host sh
    echo SURREAL_PASS=$surreal_pass | ssh $host tee /opt/surreal_env > /dev/null
    ssh $host curl -o /etc/systemd/system/surrealdb.service \
        https://gitea.detee.cloud/general/examples/raw/branch/master/surrealdb_tikv_prod/surrealdb.service
    ssh $host systemctl daemon-reload
    ssh $host systemctl enable surrealdb.service
    ssh $host systemctl start surrealdb.service
done
surrealdb_tikv_prod/prod_cluster.yaml (Normal file, 24 lines)
@@ -0,0 +1,24 @@
global:
  user: "brain"
  ssh_port: 22
  deploy_dir: "/opt/brain_deployment"
  data_dir: "/opt/brain_data"

pd_servers:
  - host: prod-brain-1
  - host: prod-brain-2
  - host: prod-brain-3

tikv_servers:
  - host: prod-brain-1
  - host: prod-brain-2
  - host: prod-brain-3

monitoring_servers:
  - host: prod-brain-mon

grafana_servers:
  - host: prod-brain-mon

alertmanager_servers:
  - host: prod-brain-mon
surrealdb_tikv_prod/staging_cluster.yaml (Normal file, 24 lines)
@@ -0,0 +1,24 @@
global:
  user: "brain"
  ssh_port: 22
  deploy_dir: "/opt/brain_deployment"
  data_dir: "/opt/brain_data"

pd_servers:
  - host: staging-brain-1
  - host: staging-brain-2
  - host: staging-brain-3

tikv_servers:
  - host: staging-brain-1
  - host: staging-brain-2
  - host: staging-brain-3

monitoring_servers:
  - host: staging-brain-mon

grafana_servers:
  - host: staging-brain-mon

alertmanager_servers:
  - host: staging-brain-mon
surrealdb_tikv_prod/surrealdb.service (Normal file, 19 lines)
@@ -0,0 +1,19 @@
[Unit]
Description=SurrealDB server
After=network-online.target
Wants=network-online.target
Requires=tikv-20160.service
After=tikv-20160.service

[Service]
Type=simple
EnvironmentFile=/opt/surreal_env
ExecStart=/usr/local/bin/surreal start \
    --user root --pass ${SURREAL_PASS} \
    --bind 0.0.0.0:8080 \
    tikv://127.0.0.1:2379
Restart=on-failure
RestartSec=15s

[Install]
WantedBy=multi-user.target
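As a quick smoke test once a DB node is reachable over the overlay, you can open a SQL shell against it. This is only a sketch: the 10.254.254.x address, namespace, and database names are assumptions, and SURREAL_PASS is the value written to /opt/surreal_env on that node:

    surreal sql --endpoint http://10.254.254.1:8080 \
        --user root --pass "$SURREAL_PASS" \
        --ns test --db test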