Compare commits


10 Commits

531f9a1241  Merge pull request 'Update README' (#1) from noormohammedb/detee-occlum:fix_readme into main
            fixed occlum_utils library installation scripts
            correct repository clone URL
            Reviewed-on: SGX/occlum#1
            Reviewed-by: Valentyn Faychuk <valy@detee.ltd>
            (2024-12-03 10:09:37 +00:00)

cd594d293d  Update README  (2024-12-02 14:36:13 +05:30)
            fixed occlum_utils library installation scripts
            correct repository clone URL

fb93be46a2  added utils_lib  (2024-10-27 15:51:53 +02:00)
98550a1ebc  sealing key generation  (2024-10-27 15:51:53 +02:00)
be4de47940  [Demos] Fix broken mnist source in paddlepaddle demo  (ClawSeven, 2024-06-13 12:00:13 +08:00)
814b573304  [demos] Specify protoc-gen-go-grpc version for go pingpong demo  (Qi Zheng, 2024-06-13 12:00:13 +08:00)
ea6e33e6f1  [demos] Fix flask demo run failure  (Qi Zheng, 2024-06-13 12:00:13 +08:00)
8f9e8d52cb  [demos] Fix golang webserver build error  (Qi Zheng, 2024-06-13 12:00:13 +08:00)
0c9a44fc60  Add kernel_heap_monitor as the default feature  (Hui, Chunyang, 2024-05-29 14:03:40 +08:00)
473eec584e  Update cargo lock  (Qi Zheng, 2024-03-19 10:19:50 +08:00)
255 changed files with 2319 additions and 21968 deletions

@@ -5,7 +5,7 @@ on:
workflow_dispatch:
inputs:
OS:
description: 'OS name (must choose from <anolis, ubuntu20, ubuntu22>)'
description: 'OS name (must choose from <anolis, ubuntu20>)'
required: true
default: 'ubuntu20'
release:
@@ -73,9 +73,9 @@ jobs:
tags: occlum/occlum:${{ env.IMAGE_TAG }}-anolis8.8
generate-ubuntu-image:
generate-ubuntu20-image:
runs-on: ubuntu-20.04
if: github.event.inputs.OS == 'ubuntu20' || github.event.inputs.OS == 'ubuntu22'
if: github.event.inputs.OS == 'ubuntu20'
steps:
- name: Checkout code
@@ -105,13 +105,6 @@ jobs:
- name: Get image tag name for test image
if: github.event.inputs.release == 'T'
run: echo "IMAGE_TAG=${{ env.RELEASE_VERSION }}-test" >> $GITHUB_ENV
- name: Choose image base OS
run: |
if [[ "${{ github.event.inputs.OS }}" == "ubuntu22" ]]; then
echo "IMAGE_BASE=ubuntu22.04" >> $GITHUB_ENV
else
echo "IMAGE_BASE=ubuntu20.04" >> $GITHUB_ENV
fi;
- name: Set up QEMU
uses: docker/setup-qemu-action@v1
@@ -131,8 +124,8 @@ jobs:
uses: docker/build-push-action@v2
with:
context: .
file: ./tools/docker/Dockerfile.${{ env.IMAGE_BASE }}
file: ./tools/docker/Dockerfile.ubuntu20.04
platforms: linux/amd64
build-args: OCCLUM_BRANCH=${{ env.OCCLUM_BRANCH }}
push: true
tags: occlum/occlum:${{ env.IMAGE_TAG }}-${{ env.IMAGE_BASE }}
tags: occlum/occlum:${{ env.IMAGE_TAG }}-ubuntu20.04
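After this change the image build is pinned to the Ubuntu 20.04 Dockerfile. For reference, the build-and-push step above reduces to roughly the following host-side invocation (a sketch; the branch and tag values are illustrative stand-ins for the workflow's env values):

```bash
# Rough equivalent of the workflow's build-and-push step (values illustrative).
docker buildx build \
    --platform linux/amd64 \
    --build-arg OCCLUM_BRANCH=master \
    -f ./tools/docker/Dockerfile.ubuntu20.04 \
    -t occlum/occlum:latest-ubuntu20.04 \
    --push .
```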

@@ -5,26 +5,26 @@ on:
workflow_dispatch:
inputs:
OS:
description: 'OS name (must choose from <ubuntu20, ubuntu22>)'
description: 'OS name (must choose from <ubuntu20>)'
required: true
default: 'ubuntu20'
OCCLUM_VERSION:
description: 'The Occlum version is built on, e.g "0.30.1"'
description: 'The Occlum version is built on, e.g "0.29.7"'
required: true
default: '0.30.1'
default: '0.29.7'
SGX_PSW_VERSION:
description: 'The SGX PSW version libraries expected to be installed, e.g "2.20.100.4"'
description: 'The SGX PSW version libraries expected to be installed, e.g "2.17.100.3"'
required: true
default: '2.20.100.4'
default: '2.17.100.3'
SGX_DCAP_VERSION:
description: 'The SGX DCAP version libraries expected to be installed, e.g "1.17.100.4"'
description: 'The SGX DCAP version libraries expected to be installed, e.g "1.14.100.3"'
required: true
default: '1.17.100.4'
default: '1.14.100.3'
jobs:
generate-ubuntu-rt-image:
generate-ubuntu20-rt-image:
runs-on: ubuntu-20.04
if: github.event.inputs.OS == 'ubuntu20' || github.event.inputs.OS == 'ubuntu22'
if: github.event.inputs.OS == 'ubuntu20'
steps:
- name: Checkout code
@@ -50,23 +50,15 @@ jobs:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_PASSWORD }}
- name: Choose image base OS
run: |
if [[ "${{ github.event.inputs.OS }}" == "ubuntu22" ]]; then
echo "IMAGE_BASE=ubuntu22.04" >> $GITHUB_ENV
else
echo "IMAGE_BASE=ubuntu20.04" >> $GITHUB_ENV
fi;
- name: Build and push
uses: docker/build-push-action@v2
with:
context: ./tools/docker
file: ./tools/docker/Dockerfile.${{ env.IMAGE_BASE }}-rt
file: ./tools/docker/Dockerfile.ubuntu20.04-rt
platforms: linux/amd64
build-args: |
"OCCLUM_VERSION=${{ github.event.inputs.OCCLUM_VERSION }}"
"SGX_PSW_VERSION=${{ github.event.inputs.SGX_PSW_VERSION }}"
"SGX_DCAP_VERSION=${{ github.event.inputs.SGX_DCAP_VERSION }}"
push: true
tags: occlum/occlum:${{ github.event.inputs.OCCLUM_VERSION }}-rt-${{ env.IMAGE_BASE }}
tags: occlum/occlum:${{ github.event.inputs.OCCLUM_VERSION }}-rt-ubuntu20.04
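Likewise, the runtime-image build now targets only the Ubuntu 20.04 Dockerfile. With the new default inputs shown in this hunk, it is roughly equivalent to (a sketch; values taken from the defaults above):

```bash
# Rough equivalent of the runtime-image build step (defaults from this hunk).
docker buildx build \
    --platform linux/amd64 \
    --build-arg OCCLUM_VERSION=0.29.7 \
    --build-arg SGX_PSW_VERSION=2.17.100.3 \
    --build-arg SGX_DCAP_VERSION=1.14.100.3 \
    -f ./tools/docker/Dockerfile.ubuntu20.04-rt \
    -t occlum/occlum:0.29.7-rt-ubuntu20.04 \
    --push ./tools/docker
```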

@@ -87,10 +87,6 @@ runs:
- name: Configure Occlum features
run: |
if [[ "${{ matrix.self_runner[2] }}" == "EDMM" ]]; then
if [[ "${{ matrix.self_runner[3] }}" == "IO_Uring" ]]; then
docker exec ${{ env.CONTAINER_NAME }} bash -c "jq '.feature.enable_posix_shm = true | .feature.enable_edmm = true | .feature.io_uring = 1' /opt/occlum/etc/template/Occlum.json > /tmp.json && mv /tmp.json /opt/occlum/etc/template/Occlum.json";
else
docker exec ${{ env.CONTAINER_NAME }} bash -c "jq '.feature.enable_posix_shm = true | .feature.enable_edmm = true' /opt/occlum/etc/template/Occlum.json > /tmp.json && mv /tmp.json /opt/occlum/etc/template/Occlum.json";
fi;
docker exec ${{ env.CONTAINER_NAME }} bash -c "jq '.feature.enable_posix_shm = true | .feature.enable_edmm = true' /opt/occlum/etc/template/Occlum.json > /tmp.json && mv /tmp.json /opt/occlum/etc/template/Occlum.json"
fi;
shell: bash
shell: bash
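The jq edit above toggles feature flags in the default Occlum template. As a standalone sketch of the same rewrite (run inside the container, where the template lives at the path referenced above; the temp-file name is illustrative):

```bash
# Enable EDMM-related features in the default Occlum template (sketch).
TEMPLATE=/opt/occlum/etc/template/Occlum.json
jq '.feature.enable_posix_shm = true | .feature.enable_edmm = true' \
    "$TEMPLATE" > /tmp/Occlum.json && mv /tmp/Occlum.json "$TEMPLATE"
```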

@@ -12,14 +12,11 @@ runs:
run: |
docker exec ${{ inputs.container-name }} bash -c "git config --global --add safe.directory /root/occlum";
docker exec ${{ inputs.container-name }} bash -c "git config --global --add safe.directory /root/occlum/deps/grpc-rust";
docker exec ${{ inputs.container-name }} bash -c "git config --global --add safe.directory /root/occlum/deps/ext2-rs";
docker exec ${{ inputs.container-name }} bash -c "git config --global --add safe.directory /root/occlum/deps/itoa-sgx";
docker exec ${{ inputs.container-name }} bash -c "git config --global --add safe.directory /root/occlum/deps/mlsdisk";
docker exec ${{ inputs.container-name }} bash -c "git config --global --add safe.directory /root/occlum/deps/resolv-conf";
docker exec ${{ inputs.container-name }} bash -c "git config --global --add safe.directory /root/occlum/deps/ringbuf";
docker exec ${{ inputs.container-name }} bash -c "git config --global --add safe.directory /root/occlum/deps/rust-sgx-sdk";
docker exec ${{ inputs.container-name }} bash -c "git config --global --add safe.directory /root/occlum/deps/sefs";
docker exec ${{ inputs.container-name }} bash -c "git config --global --add safe.directory /root/occlum/deps/serde-json-sgx";
docker exec ${{ inputs.container-name }} bash -c "git config --global --add safe.directory /root/occlum/deps/serde-sgx";
docker exec ${{ inputs.container-name }} bash -c "git config --global --add safe.directory /root/occlum/deps/io-uring"
docker exec ${{ inputs.container-name }} bash -c "git config --global --add safe.directory /root/occlum/deps/serde-sgx"
shell: bash
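The per-path commands above mark each remaining submodule as a safe Git directory inside the container. The same effect can be written as a loop (a sketch; $CONTAINER stands in for the action's container-name input, and the list mirrors the paths kept by this change):

```bash
# Mark the repo and each kept submodule as a git safe.directory (sketch).
docker exec "$CONTAINER" bash -c \
    "git config --global --add safe.directory /root/occlum"
for dep in grpc-rust itoa-sgx resolv-conf ringbuf rust-sgx-sdk sefs \
           serde-json-sgx serde-sgx; do
    docker exec "$CONTAINER" bash -c \
        "git config --global --add safe.directory /root/occlum/deps/$dep"
done
```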

@@ -25,7 +25,7 @@ runs:
shell: bash
- name: Create container
run: docker run -itd --name=${{ inputs.container-name }} --privileged -v $GITHUB_WORKSPACE:/root/occlum occlum/occlum:${{ env.OCCLUM_VERSION }}-${{ inputs.os }}
run: docker run -itd --name=${{ inputs.container-name }} -v $GITHUB_WORKSPACE:/root/occlum occlum/occlum:${{ env.OCCLUM_VERSION }}-${{ inputs.os }}
shell: bash
- uses: ./.github/workflows/composite_action/prebuild
@@ -40,14 +40,6 @@ runs:
run: docker exec ${{ inputs.container-name }} bash -c "source /opt/intel/sgxsdk/environment; cd /root/occlum; ${{ inputs.build-envs}} make install"
shell: bash
# When new features are added, the configuration should be enabled accordingly
- name: Configure Occlum features
run: |
if [[ "${{ matrix.features }}" == "IO_Uring" ]]; then
docker exec ${{ inputs.container-name }} bash -c "jq '.feature.io_uring = 1' /opt/occlum/etc/template/Occlum.json > /tmp.json && mv /tmp.json /opt/occlum/etc/template/Occlum.json";
fi;
shell: bash
- name: Remove occlum installation package
run: docker exec ${{ inputs.container-name }} bash -c "cd /root/occlum; rm -rf ./build; rm -rf ./src; rm -rf ./deps"
shell: bash

@@ -14,9 +14,6 @@ concurrency:
jobs:
C_cpp_rust_golang_embedded_mode_support_test:
runs-on: ubuntu-20.04
strategy:
matrix:
features: [Legacy, IO_Uring]
steps:
- name: Remove unnecessary files
run: |
@@ -115,9 +112,6 @@ jobs:
Java_support_test:
runs-on: ubuntu-20.04
strategy:
matrix:
features: [Legacy, IO_Uring]
steps:
- name: Remove unnecessary files
run: |
@@ -152,9 +146,6 @@ jobs:
Fish_test:
runs-on: ubuntu-20.04
strategy:
matrix:
features: [Legacy, IO_Uring]
steps:
- name: Remove unnecessary files
run: |
@@ -186,9 +177,6 @@ jobs:
Bazel_test:
runs-on: ubuntu-20.04
strategy:
matrix:
features: [Legacy, IO_Uring]
steps:
- name: Remove unnecessary files
run: |
@@ -225,9 +213,6 @@ jobs:
Https_server_test:
runs-on: ubuntu-20.04
strategy:
matrix:
features: [Legacy, IO_Uring]
steps:
- name: Remove unnecessary files
run: |
@@ -261,9 +246,6 @@ jobs:
Local_attestation_test:
runs-on: ubuntu-20.04
strategy:
matrix:
features: [Legacy, IO_Uring]
steps:
- name: Remove unnecessary files
run: |
@@ -293,9 +275,6 @@ jobs:
Sqlite_test:
runs-on: ubuntu-20.04
strategy:
matrix:
features: [Legacy, IO_Uring]
steps:
- name: Remove unnecessary files
run: |
@@ -324,9 +303,6 @@ jobs:
Xgboost_test:
runs-on: ubuntu-20.04
strategy:
matrix:
features: [Legacy, IO_Uring]
steps:
- name: Remove unnecessary files
run: |
@@ -358,9 +334,6 @@ jobs:
Tensorflow_lite_test:
runs-on: ubuntu-20.04
strategy:
matrix:
features: [Legacy, IO_Uring]
steps:
- name: Remove unnecessary files
run: |
@@ -392,9 +365,6 @@ jobs:
Pytorch_test:
runs-on: ubuntu-20.04
strategy:
matrix:
features: [Legacy, IO_Uring]
steps:
- name: Remove unnecessary files
run: |
@@ -423,9 +393,6 @@ jobs:
Distributed_Pytorch_test:
runs-on: ubuntu-20.04
strategy:
matrix:
features: [Legacy, IO_Uring]
steps:
- name: Remove unnecessary files
run: |
@@ -464,9 +431,6 @@ jobs:
Tensorflow_test:
runs-on: ubuntu-20.04
strategy:
matrix:
features: [Legacy, IO_Uring]
steps:
- name: Remove unnecessary files
run: |
@@ -496,9 +460,6 @@ jobs:
# Below tests need a test image to run faster
Grpc_musl_test:
runs-on: ubuntu-20.04
strategy:
matrix:
features: [Legacy, IO_Uring]
steps:
- name: Remove unnecessary files
run: |
@@ -543,9 +504,6 @@ jobs:
Grpc_glibc_test:
runs-on: ubuntu-20.04
strategy:
matrix:
features: [Legacy, IO_Uring]
steps:
- name: Remove unnecessary files
run: |
@@ -594,9 +552,6 @@ jobs:
Grpc_tls_test:
runs-on: ubuntu-20.04
strategy:
matrix:
features: [Legacy, IO_Uring]
steps:
- name: Remove unnecessary files
run: |
@@ -638,9 +593,6 @@ jobs:
Openvino_test:
runs-on: ubuntu-20.04
strategy:
matrix:
features: [Legacy, IO_Uring]
steps:
- name: Remove unnecessary files
run: |
@@ -678,9 +630,6 @@ jobs:
# Python test also needs its own image because in the Alpine environment, modules are built locally and consume a lot of time.
Python_musl_support_test:
runs-on: ubuntu-20.04
strategy:
matrix:
features: [Legacy, IO_Uring]
steps:
- name: Remove unnecessary files
run: |
@@ -721,9 +670,6 @@ jobs:
# Python glibc support test
Python_glibc_support_test:
runs-on: ubuntu-20.04
strategy:
matrix:
features: [Legacy, IO_Uring]
steps:
- name: Remove unnecessary files
run: |
@@ -760,9 +706,6 @@ jobs:
# Redis test
Redis_support_test:
runs-on: ubuntu-20.04
strategy:
matrix:
features: [Legacy, IO_Uring]
steps:
- name: Remove unnecessary files
run: |
@@ -802,9 +745,6 @@ jobs:
Flink_test:
runs-on: ubuntu-20.04
strategy:
matrix:
features: [Legacy, IO_Uring]
steps:
- name: Remove unnecessary files
run: |
@@ -825,24 +765,16 @@ jobs:
- name: Preinstall dependencies and download flink
run: docker exec ${{ github.job }} bash -c "cd /root/occlum/demos/flink && ./preinstall_deps.sh && ./download_flink.sh"
- name: Build Occlum instance
run: docker exec ${{ github.job }} bash -c "cd /root/occlum/demos/flink && SGX_MODE=SIM ./build_occlum_instance.sh"
- name: Run jobmanager on host
run: docker exec ${{ github.job }} bash -c "cd /root/occlum/demos/flink && SGX_MODE=SIM ./run_flink_jobmanager_on_host.sh"
- name: Run flink job manager on Occlum
run: docker exec ${{ github.job }} bash -c "cd /root/occlum/demos/flink && ./run_flink_on_occlum.sh jm"
- name: Run flink taskmanager
run: docker exec ${{ github.job }} bash -c "cd /root/occlum/demos/flink && SGX_MODE=SIM ./run_flink_on_occlum_glibc.sh tm > flink.log 2>&1 &"
- name: Run flink task manager on Occlum
run: |
sleep ${{ env.nap_time }};
docker exec ${{ github.job }} bash -c "cd /root/occlum/demos/flink && ./run_flink_on_occlum.sh tm > flink.log 2>&1 &"
- name: Run flink task
run: |
sleep ${{ env.nap_time }};
docker exec ${{ github.job }} bash -c "cd /root/occlum/demos/flink && ./run_flink_on_occlum.sh task"
- name: Check flink job manager's log
if: ${{ always() }}
run: docker exec ${{ github.job }} bash -c "cd /root/occlum/demos/flink; cat occlum_instance_jobmanager/flink--standalonesession-0.log"
docker exec ${{ github.job }} bash -c "cd /root/occlum/demos/flink && SGX_MODE=SIM ./run_flink_on_occlum_glibc.sh task"
- name: Check flink task manager's log
if: ${{ always() }}
@@ -857,9 +789,6 @@ jobs:
Cluster_serving_test:
runs-on: ubuntu-20.04
strategy:
matrix:
features: [Legacy, IO_Uring]
steps:
- name: Remove unnecessary files
run: |
@@ -888,9 +817,6 @@ jobs:
Enclave_RA_TLS_test:
runs-on: ubuntu-20.04
strategy:
matrix:
features: [Legacy, IO_Uring]
steps:
- name: Remove unnecessary files
run: |
@@ -925,9 +851,6 @@ jobs:
Vault_test:
runs-on: ubuntu-20.04
strategy:
matrix:
features: [Legacy, IO_Uring]
steps:
- name: Remove unnecessary files
run: |
@@ -965,9 +888,6 @@ jobs:
Sofaboot_test:
runs-on: ubuntu-20.04
strategy:
matrix:
features: [Legacy, IO_Uring]
steps:
- name: Remove unnecessary files
run: |
@@ -1008,9 +928,6 @@ jobs:
Netty_UT_test:
runs-on: ubuntu-20.04
strategy:
matrix:
features: [Legacy, IO_Uring]
steps:
- name: Remove unnecessary files
run: |
@@ -1036,24 +953,12 @@ jobs:
- name: Run netty unit test demo
run: docker exec ${{ github.job }} bash -c "cd /root/occlum/demos/netty_ut && SGX_MODE=SIM ./run_netty_ut_jdk8.sh"
- name: Check netty unit demo results
run: |
if [[ "${{ matrix.features }}" == "IO_Uring" ]]; then
docker exec ${{ github.job }} bash -c "cd /root/occlum/demos/netty_ut && cat netty-test-heap512m.log | grep '189 tests successful'";
else
docker exec ${{ github.job }} bash -c "cd /root/occlum/demos/netty_ut && cat netty-test-heap512m.log | grep '190 tests successful'";
fi
shell: bash
- name: Clean Netty test
run: docker exec ${{ github.job }} bash -c "cd /root/occlum/demos && rm -rf ./netty_ut"
Bash_test:
runs-on: ubuntu-20.04
strategy:
matrix:
features: [Legacy, IO_Uring]
steps:
- name: Remove unnecessary files
run: |
@@ -1082,9 +987,6 @@ jobs:
Sysbench_test:
runs-on: ubuntu-20.04
strategy:
matrix:
features: [Legacy, IO_Uring]
steps:
- name: Remove unnecessary files
run: |
@@ -1117,9 +1019,6 @@ jobs:
Gvisor_syscalls_test:
runs-on: ubuntu-20.04
strategy:
matrix:
features: [Legacy, IO_Uring]
steps:
- name: Remove unnecessary files
run: |
@@ -1155,19 +1054,10 @@ jobs:
run: docker exec $gvisor_test bash -c "git clone https://github.com/occlum/gvisor.git"
- name: Run gvisor syscall test
run: |
if [[ "${{ matrix.features }}" == "IO_Uring" ]]; then
docker exec $gvisor_test bash -c "cd /root/gvisor/occlum && SGX_MODE=SIM ./run_occlum_passed_tests.sh uring";
else
docker exec $gvisor_test bash -c "cd /root/gvisor/occlum && SGX_MODE=SIM ./run_occlum_passed_tests.sh";
fi;
shell: bash
run: docker exec $gvisor_test bash -c "cd /root/gvisor/occlum && SGX_MODE=SIM ./run_occlum_passed_tests.sh"
Flask_tls_test:
runs-on: ubuntu-20.04
strategy:
matrix:
features: [Legacy, IO_Uring]
steps:
- name: Remove unnecessary files
run: |
@@ -1210,9 +1100,6 @@ jobs:
Iperf2_test:
runs-on: ubuntu-20.04
strategy:
matrix:
features: [Legacy, IO_Uring]
steps:
- name: Remove unnecessary files
run: |
@@ -1248,9 +1135,6 @@ jobs:
Linux_LTP_test:
runs-on: ubuntu-20.04
strategy:
matrix:
features: [Legacy, IO_Uring]
steps:
- name: Remove unnecessary files
run: |
@@ -1283,9 +1167,6 @@ jobs:
FIO_test:
runs-on: ubuntu-20.04
strategy:
matrix:
features: [Legacy, IO_Uring]
steps:
- name: Remove unnecessary files
run: |
@@ -1314,9 +1195,6 @@ jobs:
PaddlePaddle_test:
runs-on: ubuntu-20.04
strategy:
matrix:
features: [Legacy, IO_Uring]
steps:
- name: Remove unnecessary files
run: |
@@ -1345,9 +1223,6 @@ jobs:
RuntimeBoot_test:
runs-on: ubuntu-20.04
strategy:
matrix:
features: [Legacy, IO_Uring]
steps:
- name: Remove unnecessary files
run: |
@@ -1376,9 +1251,6 @@ jobs:
Swtpm_test:
runs-on: ubuntu-20.04
strategy:
matrix:
features: [Legacy, IO_Uring]
steps:
- name: Remove unnecessary files
run: |
@@ -1409,25 +1281,3 @@ jobs:
- name: Clean Swtpm test
run: docker exec ${{ github.job }} bash -c "cd /root/occlum/demos && rm -rf ./swtpm"
Filebench_test:
runs-on: ubuntu-20.04
strategy:
matrix:
features: [Legacy, IO_Uring]
steps:
- uses: actions/checkout@v1
with:
submodules: true
- uses: ./.github/workflows/composite_action/sim
with:
container-name: ${{ github.job }}
build-envs: 'OCCLUM_RELEASE_BUILD=1'
- name: Preinstall dependencies and build Filebench
run: docker exec ${{ github.job }} bash -c "cd /root/occlum/demos/benchmarks/filebench && ./preinstall_deps.sh && ./dl_and_build_filebench.sh"
- name: Prepare occlum instance and Run Filebench on Ext2
run: docker exec ${{ github.job }} bash -c "cd /root/occlum/demos/benchmarks/filebench && SGX_MODE=SIM ./run_workload.sh readfiles"

@@ -25,7 +25,7 @@ jobs:
runs-on: ${{ matrix.self_runner }}
strategy:
matrix:
self_runner: [[self-hosted, SGX2-HW], [self-hosted, SGX2-HW, EDMM, IO_Uring]]
self_runner: [[self-hosted, SGX2-HW], [self-hosted, SGX2-HW, EDMM]]
steps:
- name: Clean before running
@@ -80,7 +80,7 @@ jobs:
runs-on: ${{ matrix.self_runner }}
strategy:
matrix:
self_runner: [[self-hosted, SGX2-HW], [self-hosted, SGX2-HW, EDMM, IO_Uring]]
self_runner: [[self-hosted, SGX2-HW], [self-hosted, SGX2-HW, EDMM]]
steps:
- name: Clean before running
@@ -179,7 +179,7 @@ jobs:
runs-on: ${{ matrix.self_runner }}
strategy:
matrix:
self_runner: [[self-hosted, SGX2-HW], [self-hosted, SGX2-HW, EDMM, IO_Uring, PKU]]
self_runner: [[self-hosted, SGX2-HW], [self-hosted, SGX2-HW, EDMM, PKU]]
steps:
- name: Clean before running
@@ -220,7 +220,7 @@ jobs:
- name: Run hello PKU
run: |
if [[ "${{ matrix.self_runner[4] }}" == "PKU" ]]; then
if [[ "${{ matrix.self_runner[3] }}" == "PKU" ]]; then
docker exec ${{ env.CONTAINER_NAME }} bash -c "cd /root/occlum/demos/java && ./run_java_on_occlum.sh hello_pku"
else
echo "Skip PKU test"
@@ -237,7 +237,7 @@ jobs:
runs-on: ${{ matrix.self_runner }}
strategy:
matrix:
self_runner: [[self-hosted, SGX2-HW], [self-hosted, SGX2-HW, EDMM, IO_Uring]]
self_runner: [[self-hosted, SGX2-HW], [self-hosted, SGX2-HW, EDMM]]
steps:
- name: Clean before running
@@ -289,7 +289,7 @@ jobs:
runs-on: ${{ matrix.self_runner }}
strategy:
matrix:
self_runner: [[self-hosted, SGX2-HW], [self-hosted, SGX2-HW, EDMM, IO_Uring]]
self_runner: [[self-hosted, SGX2-HW], [self-hosted, SGX2-HW, EDMM]]
steps:
- name: Clean before running
@@ -336,7 +336,7 @@ jobs:
runs-on: ${{ matrix.self_runner }}
strategy:
matrix:
self_runner: [[self-hosted, SGX2-HW], [self-hosted, SGX2-HW, EDMM, IO_Uring]]
self_runner: [[self-hosted, SGX2-HW], [self-hosted, SGX2-HW, EDMM]]
steps:
- name: Clean before running
@@ -383,7 +383,7 @@ jobs:
runs-on: ${{ matrix.self_runner }}
strategy:
matrix:
self_runner: [[self-hosted, SGX2-HW], [self-hosted, SGX2-HW, EDMM, IO_Uring]]
self_runner: [[self-hosted, SGX2-HW], [self-hosted, SGX2-HW, EDMM]]
steps:
- name: Clean before running
@@ -427,7 +427,7 @@ jobs:
runs-on: ${{ matrix.self_runner }}
strategy:
matrix:
self_runner: [[self-hosted, SGX2-HW], [self-hosted, SGX2-HW, EDMM, IO_Uring]]
self_runner: [[self-hosted, SGX2-HW], [self-hosted, SGX2-HW, EDMM]]
steps:
- name: Clean before running
@@ -505,7 +505,7 @@ jobs:
runs-on: ${{ matrix.self_runner }}
strategy:
matrix:
self_runner: [[self-hosted, SGX2-HW], [self-hosted, SGX2-HW, EDMM, IO_Uring]]
self_runner: [[self-hosted, SGX2-HW], [self-hosted, SGX2-HW, EDMM]]
steps:
- name: Clean before running
@@ -580,7 +580,7 @@ jobs:
runs-on: ${{ matrix.self_runner }}
strategy:
matrix:
self_runner: [[self-hosted, SGX2-HW], [self-hosted, SGX2-HW, EDMM, IO_Uring]]
self_runner: [[self-hosted, SGX2-HW], [self-hosted, SGX2-HW, EDMM]]
steps:
- name: Clean before running
@@ -677,7 +677,7 @@ jobs:
runs-on: ${{ matrix.self_runner }}
strategy:
matrix:
self_runner: [[self-hosted, SGX2-HW], [self-hosted, SGX2-HW, EDMM, IO_Uring]]
self_runner: [[self-hosted, SGX2-HW], [self-hosted, SGX2-HW, EDMM]]
steps:
- name: Clean before running
@@ -741,15 +741,8 @@ jobs:
- name: Clone gvisor code
run: docker exec $gvisor_test bash -c "git clone https://github.com/occlum/gvisor.git"
- name: Run gvisor syscall test
run: |
if [[ "${{ matrix.self_runner[3] }}" == "IO_Uring" ]]; then
docker exec $gvisor_test bash -c "cd /root/gvisor/occlum; ./run_occlum_passed_tests.sh uring";
else
docker exec $gvisor_test bash -c "cd /root/gvisor/occlum; ./run_occlum_passed_tests.sh";
fi;
shell: bash
run: docker exec $gvisor_test bash -c "cd /root/gvisor/occlum; ./run_occlum_passed_tests.sh"
- name: Clean the environment
if: ${{ always() }}
@@ -841,7 +834,7 @@ jobs:
strategy:
matrix:
# Tensorflow serving test requires AVX512 instruction support. Only the SGX2-HW machine has support for that.
self_runner: [[self-hosted, SGX2-HW], [self-hosted, SGX2-HW, EDMM, IO_Uring]]
self_runner: [[self-hosted, SGX2-HW], [self-hosted, SGX2-HW, EDMM]]
steps:
- name: Clean before running
@@ -927,7 +920,7 @@ jobs:
runs-on: ${{ matrix.self_runner }}
strategy:
matrix:
self_runner: [[self-hosted, SGX2-HW], [self-hosted, SGX2-HW, EDMM, IO_Uring]]
self_runner: [[self-hosted, SGX2-HW], [self-hosted, SGX2-HW, EDMM]]
steps:
- name: Clean before running
@@ -967,7 +960,7 @@ jobs:
runs-on: ${{ matrix.self_runner }}
strategy:
matrix:
self_runner: [[self-hosted, SGX2-HW], [self-hosted, SGX2-HW, EDMM, IO_Uring]]
self_runner: [[self-hosted, SGX2-HW], [self-hosted, SGX2-HW, EDMM]]
steps:
- name: Clean before running
@@ -1021,7 +1014,7 @@ jobs:
runs-on: ${{ matrix.self_runner }}
strategy:
matrix:
self_runner: [[self-hosted, SGX2-HW], [self-hosted, SGX2-HW, EDMM, IO_Uring]]
self_runner: [[self-hosted, SGX2-HW], [self-hosted, SGX2-HW, EDMM]]
steps:
- name: Clean before running
@@ -1076,7 +1069,7 @@ jobs:
runs-on: ${{ matrix.self_runner }}
strategy:
matrix:
self_runner: [[self-hosted, SGX2-HW], [self-hosted, SGX2-HW, EDMM, IO_Uring]]
self_runner: [[self-hosted, SGX2-HW], [self-hosted, SGX2-HW, EDMM]]
steps:
- name: Clean before running
@@ -1124,7 +1117,7 @@ jobs:
runs-on: ${{ matrix.self_runner }}
strategy:
matrix:
self_runner: [[self-hosted, SGX2-HW], [self-hosted, SGX2-HW, EDMM, IO_Uring]]
self_runner: [[self-hosted, SGX2-HW], [self-hosted, SGX2-HW, EDMM]]
steps:
- name: Clean before running
@@ -1159,15 +1152,6 @@ jobs:
- name: Run netty unit test demo
run: docker exec ${{ env.CONTAINER_NAME }} bash -c "cd /root/occlum/demos/netty_ut && ./run_netty_ut_jdk8.sh"
- name: Check netty unit demo results
run: |
if [[ "${{ matrix.self_runner[3] }}" == "IO_Uring" ]]; then
docker exec ${{ github.job }} bash -c "cd /root/occlum/demos/netty_ut && cat netty-test-heap512m.log | grep '189 tests successful'";
else
docker exec ${{ github.job }} bash -c "cd /root/occlum/demos/netty_ut && cat netty-test-heap512m.log | grep '190 tests successful'";
fi
shell: bash
- name: Clean the environment
if: ${{ always() }}
@@ -1179,7 +1163,7 @@ jobs:
runs-on: ${{ matrix.self_runner }}
strategy:
matrix:
self_runner: [[self-hosted, SGX2-HW], [self-hosted, SGX2-HW, EDMM, IO_Uring]]
self_runner: [[self-hosted, SGX2-HW], [self-hosted, SGX2-HW, EDMM]]
steps:
- name: Clean before running
@@ -1214,7 +1198,7 @@ jobs:
runs-on: ${{ matrix.self_runner }}
strategy:
matrix:
self_runner: [[self-hosted, SGX2-HW], [self-hosted, SGX2-HW, EDMM, IO_Uring]]
self_runner: [[self-hosted, SGX2-HW], [self-hosted, SGX2-HW, EDMM]]
steps:
- name: Clean before running
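One subtlety in the runner-label hunks above: removing IO_Uring from the label array shifts every later label left by one, which is why the PKU guard moved from matrix.self_runner[4] to matrix.self_runner[3]. A bash sketch of the same zero-based indexing:

```bash
# After the change, PKU sits at index 3 of the label array (zero-based).
self_runner=(self-hosted SGX2-HW EDMM PKU)
echo "${self_runner[3]}"   # prints: PKU
```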

@@ -4,14 +4,6 @@ name: Set up Package Repository and Test (Manual Trigger)
on:
workflow_dispatch:
inputs:
ubuntu_focal:
description: 'Need build ubuntu 20.04 package? <Y/N>'
required: true
default: 'Y'
ubuntu_jammy:
description: 'Need build ubuntu 22.04 package? <Y/N>'
required: true
default: 'Y'
update_musl:
description: 'Need build new musl package? <Y/N>'
required: true
@@ -34,10 +26,10 @@ on:
jobs:
Package_repository_setup_and_test:
runs-on: ubuntu-22.04
runs-on: ubuntu-20.04
if: github.event.inputs.only_test == 'N'
env:
TOKEN: ${{ secrets.PAT_TOKEN }}
TOKEN: ${{ secrets.PAT_TOKEN }}
# Map a step output to a job output
outputs:
occlum_version: ${{ steps.occlum_version.outputs.version }}
@@ -73,113 +65,56 @@ jobs:
id: occlum_version
run: echo "::set-output name=version::${{ env.OCCLUM_VERSION }}"
- name: Start ubuntu 20.04 occlum container
if: github.event.inputs.ubuntu_focal == 'Y'
run: docker run -itd --name=occlum-focal -v $GITHUB_WORKSPACE:/root/workspace occlum/occlum:${{ env.OCCLUM_VERSION }}-ubuntu20.04
- name: Create ubuntu container
run: docker run -itd --name=ubuntu -v $GITHUB_WORKSPACE:/root/workspace occlum/occlum:${{ env.OCCLUM_VERSION }}-ubuntu20.04
- name: Build focal deb packages and copy out
if: github.event.inputs.ubuntu_focal == 'Y'
run: |
docker exec occlum-focal bash -c "cd /root/workspace/occlum/tools/installer/deb; make clean && make"
if [ "${{ github.event.inputs.update_musl }}" == "Y" ]; then
docker exec occlum-focal bash -c "cd /root/workspace/occlum/tools/installer/deb; make musl-gcc"
fi
if [ "${{ github.event.inputs.update_glibc }}" == "Y" ]; then
docker exec occlum-focal bash -c "cd /root/workspace/occlum/tools/installer/deb; make glibc"
fi
- name: Build deb packages
run: docker exec ubuntu bash -c "cd /root/workspace/occlum/tools/installer/deb; make"
if [ "${{ github.event.inputs.update_golang }}" == "Y" ]; then
docker exec occlum-focal bash -c "cd /root/workspace/occlum/tools/installer/deb; make golang"
fi
- name: Build musl toolchain package
if: github.event.inputs.update_musl == 'Y'
run: docker exec ubuntu bash -c "cd /root/workspace/occlum/tools/installer/deb; make musl-gcc"
docker exec occlum-focal bash -c "mkdir -p /root/workspace/focal-debs; cp -rf /root/workspace/occlum/build/debs/* /root/workspace/focal-debs/"
- name: Build glibc toolchain package
if: github.event.inputs.update_glibc == 'Y'
run: docker exec ubuntu bash -c "cd /root/workspace/occlum/tools/installer/deb; make glibc"
- name: Start ubuntu 22.04 occlum container
if: github.event.inputs.ubuntu_jammy == 'Y'
run: docker run -itd --name=occlum-jammy -v $GITHUB_WORKSPACE:/root/workspace occlum/occlum:${{ env.OCCLUM_VERSION }}-test-ubuntu22.04
- name: Build golang toolchain package
if: github.event.inputs.update_golang == 'Y'
run: docker exec ubuntu bash -c "cd /root/workspace/occlum/tools/installer/deb; make golang"
- name: Build jammy deb packages and copy out
if: github.event.inputs.ubuntu_jammy == 'Y'
run: |
docker exec occlum-jammy bash -c "cd /root/workspace/occlum/tools/installer/deb; make clean && make"
if [ "${{ github.event.inputs.update_musl }}" == "Y" ]; then
docker exec occlum-jammy bash -c "cd /root/workspace/occlum/tools/installer/deb; make musl-gcc"
fi
if [ "${{ github.event.inputs.update_glibc }}" == "Y" ]; then
docker exec occlum-jammy bash -c "cd /root/workspace/occlum/tools/installer/deb; make glibc"
fi
if [ "${{ github.event.inputs.update_golang }}" == "Y" ]; then
docker exec occlum-jammy bash -c "cd /root/workspace/occlum/tools/installer/deb; make golang"
fi
docker exec occlum-jammy bash -c "mkdir -p /root/workspace/jammy-debs; cp -rf /root/workspace/occlum/build/debs/* /root/workspace/jammy-debs/"
- name: Prepare tools and keys
run: |
sudo apt update && sudo apt install -y tree wget apt-utils rng-tools gnupg xz-utils bzip2
wget https://github.com/aptly-dev/aptly/releases/download/v1.5.0/aptly_1.5.0_linux_amd64.tar.gz
tar zxf aptly_1.5.0_linux_amd64.tar.gz -C /usr/local/bin
echo "${{ secrets.DEB_PRIVATE_KEY }}" > deb_private_key
gpg --allow-secret-key-import --import deb_private_key
gpg --export -a "deb_gpg_key" > ~/public.key
gpg --import ~/public.key
gpg --list-keys
sudo apt-key add ~/public.key && apt-key list
- name: Prepare tools and keys # Since aptly still uses gpg1 by default, we use gpg1 as the gpg tool.
run: docker exec ubuntu bash -c 'apt-get update; apt-get install -y tree apt-utils gnupg reprepro rng-tools aptly; rm -rf /root/.gnupg;
echo "${{ secrets.DEB_PRIVATE_KEY }}" > /root/deb_private_key; gpg1 --allow-secret-key-import --import /root/deb_private_key;
gpg1 --export -a "deb_gpg_key" > /root/public.key;
gpg1 --import /root/public.key;
gpg1 --list-keys;
apt-key add /root/public.key && apt-key list'
- name: Inherit apt repo for Ubuntu 18.04
run: |
export PATH=/usr/local/bin/aptly_1.5.0_linux_amd64:$PATH
aptly -architectures="amd64" -keyring=/etc/apt/trusted.gpg mirror create bionic-mirror https://occlum.io/occlum-package-repos/debian bionic main
aptly -keyring=/etc/apt/trusted.gpg mirror update bionic-mirror
aptly snapshot create bionic-main from mirror bionic-mirror
aptly publish snapshot -distribution=bionic bionic-main
run: docker exec ubuntu bash -c 'aptly -architectures="amd64" -keyring=/etc/apt/trusted.gpg mirror create bionic-mirror https://occlum.io/occlum-package-repos/debian/ bionic main;
aptly -keyring=/etc/apt/trusted.gpg mirror update bionic-mirror;
aptly snapshot create bionic-main from mirror bionic-mirror;
aptly publish snapshot -distribution=bionic bionic-main'
- name: Update apt repo for Ubuntu 20.04
run: |
export PATH=/usr/local/bin/aptly_1.5.0_linux_amd64:$PATH
aptly -architectures="amd64" -keyring=/etc/apt/trusted.gpg mirror create focal-mirror https://occlum.io/occlum-package-repos/debian focal main
aptly -keyring=/etc/apt/trusted.gpg mirror update focal-mirror
aptly snapshot create focal-old from mirror focal-mirror
aptly -distribution='focal' -architectures=amd64 repo create deb-focal-new
aptly repo add deb-focal-new $GITHUB_WORKSPACE/focal-debs/*
aptly snapshot create focal-new from repo deb-focal-new
aptly -no-remove snapshot merge focal-main focal-old focal-new
aptly publish snapshot -distribution=focal focal-main
- name: Update apt repo for Ubuntu 22.04
run: |
export PATH=/usr/local/bin/aptly_1.5.0_linux_amd64:$PATH
aptly -architectures="amd64" -keyring=/etc/apt/trusted.gpg mirror create jammy-mirror https://occlum.io/occlum-package-repos/debian jammy main
aptly -keyring=/etc/apt/trusted.gpg mirror update jammy-mirror
aptly snapshot create jammy-old from mirror jammy-mirror
aptly -distribution='jammy' -architectures=amd64 repo create deb-jammy-new
aptly repo add deb-jammy-new $GITHUB_WORKSPACE/jammy-debs/*
aptly snapshot create jammy-new from repo deb-jammy-new
aptly -no-remove snapshot merge jammy-main jammy-old jammy-new
aptly publish snapshot -distribution=jammy jammy-main
# - name: Update apt repo for Ubuntu 22.04 for first time
# run: |
# export PATH=/usr/local/bin/aptly_1.5.0_linux_amd64:$PATH
# aptly -distribution='jammy' -architectures=amd64 repo create deb-jammy-new
# aptly repo add deb-jammy-new $GITHUB_WORKSPACE/jammy-debs/*
# aptly snapshot create jammy-new from repo deb-jammy-new
# aptly publish snapshot -distribution=jammy jammy-new
run: docker exec ubuntu bash -c 'aptly -architectures="amd64" -keyring=/etc/apt/trusted.gpg mirror create focal-mirror https://occlum.io/occlum-package-repos/debian/ focal main;
aptly -keyring=/etc/apt/trusted.gpg mirror update focal-mirror;
aptly snapshot create focal-old from mirror focal-mirror;
aptly -distribution='focal' -architectures=amd64 repo create deb-focal-new;
aptly repo add deb-focal-new /root/workspace/occlum/build/debs/*;
aptly snapshot create focal-new from repo deb-focal-new;
aptly -no-remove snapshot merge focal-main focal-old focal-new;
aptly publish snapshot -distribution=focal focal-main && tree /root/.aptly/public'
- name: Update Git repo
run: |
cd $GITHUB_WORKSPACE/occlum-package-repos
rm -rf debian
cp -r ~/.aptly/public/ $GITHUB_WORKSPACE/occlum-package-repos/debian
cp ~/public.key $GITHUB_WORKSPACE/occlum-package-repos/debian
tree $GITHUB_WORKSPACE/occlum-package-repos/debian/
run: docker exec ubuntu bash -c 'cd /root/workspace/occlum-package-repos; rm -rf debian; cp -r /root/.aptly/public/ /root/workspace/occlum-package-repos/debian;
cp /root/public.key /root/workspace/occlum-package-repos/debian'
- name: Clean ubuntu container and image
run: |
docker rm -f occlum-focal occlum-jammy
docker rm -f ubuntu
docker rmi -f occlum/occlum:${{ env.OCCLUM_VERSION }}-ubuntu20.04
# - name: Create centos container
# run: docker run -itd --name=centos -v $GITHUB_WORKSPACE:/root/workspace occlum/occlum:${{ env.OCCLUM_VERSION }}-centos8.2
@@ -201,7 +136,7 @@ jobs:
- name: Commit files
run: |
cd $GITHUB_WORKSPACE/occlum-package-repos
cd occlum-package-repos
git config --local user.email "action@github.com"
git config --local user.name "GitHub Action"
git add -A
@@ -220,14 +155,14 @@ jobs:
run: sleep 600
Test_deb_focal_package:
Test_deb_package:
if: ${{ always() }}
needs: Package_repository_setup_and_test
runs-on: ubuntu-20.04
steps:
- name: Create a clean ubuntu container
run: docker run --rm -itd --name=ubuntu ubuntu:20.04
run: docker run -itd --name=ubuntu ubuntu:20.04
- name: Get occlum version from user inputs
run: echo "OCCLUM_VERSION=${{github.event.inputs.test_version}}" >> $GITHUB_ENV
@@ -265,58 +200,6 @@ jobs:
- name: Run occlum python glibc test
run: docker exec ubuntu bash -c "source /etc/profile; cd /root && git clone https://github.com/occlum/occlum.git && cd /root/occlum/demos/python/python_glibc && ./install_python_with_conda.sh && SGX_MODE=SIM ./run_python_on_occlum.sh && cat occlum_instance/smvlight.dat"
- name: Clean ubuntu container
run: |
docker rm -f ubuntu
Test_deb_jammy_package:
if: ${{ always() }}
needs: Package_repository_setup_and_test
runs-on: ubuntu-22.04
steps:
- name: Create a clean ubuntu container
run: docker run --rm -itd --name=ubuntu ubuntu:22.04
- name: Get occlum version from user inputs
run: echo "OCCLUM_VERSION=${{github.event.inputs.test_version}}" >> $GITHUB_ENV
- name: Update occlum version from previous job
if: github.event.inputs.only_test == 'N'
run: echo "OCCLUM_VERSION=${{needs.Package_repository_setup_and_test.outputs.occlum_version}}" >> $GITHUB_ENV
- name: Configure sgx and occlum deb repo
run: |
# Set the default timezone to make tzdata work
docker exec ubuntu bash -c "ln -snf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime && echo Asia/Shanghai > /etc/timezone"
docker exec ubuntu bash -c "apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends ca-certificates gnupg jq make gdb wget libfuse-dev libtool tzdata;
echo 'deb [arch=amd64] https://download.01.org/intel-sgx/sgx_repo/ubuntu jammy main' | tee /etc/apt/sources.list.d/intel-sgx.list;
wget -qO - https://download.01.org/intel-sgx/sgx_repo/ubuntu/intel-sgx-deb.key | apt-key add -"
docker exec ubuntu bash -c "echo 'deb [arch=amd64] https://occlum.io/occlum-package-repos/debian jammy main' | tee /etc/apt/sources.list.d/occlum.list;
wget -qO - https://occlum.io/occlum-package-repos/debian/public.key | apt-key add -;"
- name: Install sgx dependencies and occlum
run: docker exec ubuntu bash -c "apt-get update; apt-cache policy occlum | grep -n5 ${{ env.OCCLUM_VERSION }} && apt-get install -y occlum libsgx-uae-service libsgx-dcap-ql"
- name: Hello world test
run: docker exec ubuntu bash -c "source /etc/profile; cd /root; wget https://raw.githubusercontent.com/occlum/occlum/master/demos/hello_c/hello_world.c; occlum-gcc -o hello_world hello_world.c;
occlum new occlum-instance; cp hello_world /root/occlum-instance/image/bin; cd /root/occlum-instance && SGX_MODE=SIM occlum build; occlum run /bin/hello_world"
# If there is no match, it will return 1.
- name: Check installed version
run: docker exec ubuntu bash -c "cat /opt/occlum/include/occlum_version.h | grep -n5 ${{ env.OCCLUM_VERSION }}"
- name: Install occlum-glibc toolchain
run: |
docker exec ubuntu bash -c "apt-get install -y occlum-toolchains-glibc"
docker exec ubuntu bash -c "apt-get install -y git python3 python3-pip python-is-python3 rsync"
- name: Run occlum python glibc test
run: docker exec ubuntu bash -c "source /etc/profile; cd /root && git clone https://github.com/occlum/occlum.git && cd /root/occlum/demos/python/python_glibc && ./install_python_with_conda.sh && SGX_MODE=SIM ./run_python_on_occlum.sh && cat occlum_instance/smvlight.dat"
- name: Clean ubuntu container
run: |
docker rm -f ubuntu
# Test_rpm_package:
# if: ${{ always() }}
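The aptly commands above follow a mirror/snapshot/merge/publish pattern: mirror the currently published repo, snapshot it, add the freshly built debs to a local repo, then merge old and new into the snapshot that gets published. Condensed for one distribution (focal), with every command taken from the steps above, the flow is:

```bash
# Condensed aptly publish flow used above (focal distribution).
aptly -architectures="amd64" -keyring=/etc/apt/trusted.gpg \
    mirror create focal-mirror https://occlum.io/occlum-package-repos/debian/ focal main
aptly -keyring=/etc/apt/trusted.gpg mirror update focal-mirror
aptly snapshot create focal-old from mirror focal-mirror
aptly -distribution='focal' -architectures=amd64 repo create deb-focal-new
aptly repo add deb-focal-new /root/workspace/occlum/build/debs/*
aptly snapshot create focal-new from repo deb-focal-new
aptly -no-remove snapshot merge focal-main focal-old focal-new
aptly publish snapshot -distribution=focal focal-main
```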

.gitmodules

@@ -24,12 +24,3 @@
[submodule "deps/resolv-conf"]
path = deps/resolv-conf
url = https://github.com/tailhook/resolv-conf.git
[submodule "deps/io-uring"]
path = deps/io-uring
url = https://github.com/occlum/io-uring.git
[submodule "deps/mlsdisk"]
path = deps/mlsdisk
url = https://github.com/asterinas/mlsdisk
[submodule "deps/ext2-rs"]
path = deps/ext2-rs
url = https://github.com/liqinggd/ext2-rs

@@ -42,7 +42,6 @@ submodule: githooks init-submodule
@cp deps/sefs/sefs-cli/lib/libsefs-cli_sim.so build/lib
@cp deps/sefs/sefs-cli/lib/libsefs-cli.signed.so build/lib
@cp deps/sefs/sefs-cli/enclave/Enclave.config.xml build/sefs-cli.Enclave.xml
@cd deps/io-uring/ocalls && cargo clean && cargo build --release
else
submodule: githooks init-submodule
@rm -rf build
@@ -61,7 +60,6 @@ submodule: githooks init-submodule
@cp deps/sefs/sefs-cli/lib/libsefs-cli_sim.so build/lib
@cp deps/sefs/sefs-cli/lib/libsefs-cli.signed.so build/lib
@cp deps/sefs/sefs-cli/enclave/Enclave.config.xml build/sefs-cli.Enclave.xml
@cd deps/io-uring/ocalls && cargo clean && cargo build --release
endif
init-submodule:
@@ -71,7 +69,6 @@ init-submodule:
cd deps/serde-json-sgx && git apply ../serde-json-sgx.patch >/dev/null 2>&1 || git apply ../serde-json-sgx.patch -R --check
cd deps/ringbuf && git apply ../ringbuf.patch >/dev/null 2>&1 || git apply ../ringbuf.patch -R --check
cd deps/resolv-conf && git apply ../resolv-conf.patch >/dev/null 2>&1 || git apply ../resolv-conf.patch -R --check
cd deps/mlsdisk && git apply ../mlsdisk.patch >/dev/null 2>&1 || git apply ../mlsdisk.patch -R --check
src:
@$(MAKE) --no-print-directory -C src
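The patch lines in init-submodule use a small idempotency idiom: try to apply the patch, and if that fails, succeed only when the patch reverses cleanly (meaning it was already applied). As a standalone sketch (the helper name is illustrative):

```bash
# Apply a patch at most once: on failure, accept the state only if the
# patch is already present (the reverse-apply check passes).
apply_once() {
    local patch="$1"
    git apply "$patch" >/dev/null 2>&1 || git apply "$patch" -R --check
}
apply_once ../ringbuf.patch
```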

@@ -1,3 +0,0 @@
filebench
filebench*.tar.gz
occlum_instance

@@ -1,25 +0,0 @@
## Run Filebench on Occlum
[Filebench](https://github.com/Filebench/Filebench) is a benchmark tool aiming to test the file system and the storage system under certain workloads. This demo demonstrates how Filebench can run on Occlum.
### Step 1: Preinstall dependencies
Related dependencies: bison flex
```
cd demos/benchmarks/filebench && ./preinstall_deps.sh
```
### Step 2: Build Filebench from source
```
cd demos/benchmarks/filebench && ./dl_and_build_filebench.sh
```
The script will download the source code, make some adaptations, then compile Filebench into a binary.
### Step 3: Run Filebench workloads
```
cd demos/benchmarks/filebench && ./run_workload.sh <workload_name>
```
The script will run the user-specified workload under `filebench/workloads`. The corresponding results will be output.
Refer to [Filebench/wiki/Workload-model-language](https://github.com/Filebench/Filebench/wiki/Workload-model-language) for more information about workloads.

@@ -1,63 +0,0 @@
diff --color -ruN filebench/config.h modify/config.h
--- filebench/config.h 2022-05-10 11:24:44.393676003 +0800
+++ modify/config.h 2022-05-10 11:15:23.769794727 +0800
@@ -295,6 +295,7 @@
/* Define if you have SYSV sems */
#define HAVE_SYSV_SEM 1
+#undef HAVE_SYSV_SEM
/* Define to 1 if you have the <sys/dir.h> header file, and it defines `DIR'.
*/
diff --color -ruN filebench/ipc.c modify/ipc.c
--- filebench/ipc.c 2022-05-10 11:24:44.393676003 +0800
+++ modify/ipc.c 2022-05-10 11:16:06.768145518 +0800
@@ -400,21 +400,22 @@
int
ipc_attach(void *shmaddr, char *shmpath)
{
- int shmfd;
+ // int shmfd;
- if ((shmfd = open(shmpath, O_RDWR)) < 0) {
- filebench_log(LOG_FATAL, "Could not open shared memory "
- "file %s: %s", shmpath, strerror(errno));
- return (-1);
- }
+ // if ((shmfd = open(shmpath, O_RDWR)) < 0) {
+ // filebench_log(LOG_FATAL, "Could not open shared memory "
+ // "file %s: %s", shmpath, strerror(errno));
+ // return (-1);
+ // }
- if ((filebench_shm = (filebench_shm_t *)mmap(shmaddr,
- sizeof (filebench_shm_t), PROT_READ | PROT_WRITE,
- MAP_SHARED | MAP_FIXED, shmfd, 0)) == MAP_FAILED) {
- filebench_log(LOG_FATAL, "Could not mmap the shared "
- "memory file: %s", strerror(errno));
- return (-1);
- }
+ // if ((filebench_shm = (filebench_shm_t *)mmap(shmaddr,
+ // sizeof (filebench_shm_t), PROT_READ | PROT_WRITE,
+ // MAP_SHARED | MAP_FIXED, shmfd, 0)) == MAP_FAILED) {
+ // filebench_log(LOG_FATAL, "Could not mmap the shared "
+ // "memory file: %s", strerror(errno));
+ // return (-1);
+ // }
+ filebench_shm = (filebench_shm_t *)shmaddr;
if (filebench_shm != shmaddr) {
filebench_log(LOG_FATAL, "Could not mmap the shared "
diff --color -ruN filebench/procflow.c modify/procflow.c
--- filebench/procflow.c 2022-05-10 11:24:44.393676003 +0800
+++ modify/procflow.c 2022-05-10 11:16:35.772217279 +0800
@@ -90,7 +90,8 @@
return (-1);
}
#else
- if ((pid = fork()) < 0) {
+ // if ((pid = fork()) < 0) {
+ if ((pid = vfork()) < 0) {
filebench_log(LOG_ERROR,
"procflow_createproc fork failed: %s",
strerror(errno));

@@ -1,32 +0,0 @@
#!/bin/bash
set -e
BLUE='\033[1;34m'
NC='\033[0m'
echo -e "${BLUE}Start building filebench from tarball.${NC}"
# Download release tarball
VERSION="1.5-alpha3"
TARBALL="filebench-${VERSION}.tar.gz"
rm -f ${TARBALL}
wget https://github.com/filebench/filebench/releases/download/${VERSION}/${TARBALL}
rm -rf filebench && mkdir filebench
tar -zxvf filebench-${VERSION}.tar.gz -C filebench --strip-components 1
pushd filebench
./configure
popd
# Make modifications to
# 1. Replace fork with vfork
# 2. Prepare shared memory region for child processes
# 3. Disable SYSV semaphores
patch -s -p0 < apply-filebench-to-occlum.patch
pushd filebench
# Build and install filebench tool
make -j$(nproc)
make install
echo -e "${BLUE}Finish building filebench from tarball.${NC}"
popd

@@ -1,18 +0,0 @@
includes:
- base.yaml
# filebench
targets:
# copy filebench, bash and busybox
- target: /bin
copy:
- files:
- /usr/local/bin/filebench
# copy workload files
- target: /
copy:
- dirs:
- ../workloads
- target: /opt/occlum/glibc/lib
copy:
- files:
- /lib/x86_64-linux-gnu/libgcc_s.so.1

@@ -1,18 +0,0 @@
#!/bin/bash
set -e
BLUE='\033[1;34m'
NC='\033[0m'
echo -e "${BLUE}Start installing dependencies.${NC}"
DEPS="bison flex"
OS=`awk -F= '/^NAME/{print $2}' /etc/os-release`
if [ "$OS" == "\"Ubuntu\"" ]; then
apt-get update -y && apt-get install -y ${DEPS}
else
echo "Unsupported OS: $OS"
exit 1
fi
echo -e "${BLUE}Finish installing dependencies.${NC}"

@@ -1,38 +0,0 @@
#!/bin/bash
set -e
WORKLOAD_LIST=("readfiles" "videoserver" "fileserver" "varmail" "oltp")
# More about workload model language at
# https://github.com/filebench/filebench/wiki/Workload-model-language
WORKLOAD_FILE=$1
if [[ ! " ${WORKLOAD_LIST[@]} " =~ " ${WORKLOAD_FILE} " ]]; then
echo "Please select a workload from: readfiles | videoserver | fileserver | varmail | oltp"
exit
fi
# 1. Init Occlum Workspace
rm -rf occlum_instance && occlum new occlum_instance
cd occlum_instance
# Enlarge "kernel_space_heap_size" when "pre-allocating files failed" occurs
# Enlarge "user_space_size" when "procflow exec proc failed" occurs
TCS_NUM=$(($(nproc) * 2))
new_json="$(jq --argjson THREAD_NUM ${TCS_NUM} '.resource_limits.user_space_size="2000MB" |
.resource_limits.kernel_space_heap_max_size="1000MB" |
.resource_limits.max_num_of_threads = $THREAD_NUM |
.mount += [{"target": "/ext2", "type": "ext2", "options": {"disk_size": "10GB"}}]' Occlum.json)" && \
echo "${new_json}" > Occlum.json
# 2. Copy files into Occlum Workspace and build
rm -rf image
copy_bom -f ../filebench.yaml --root image --include-dir /opt/occlum/etc/template
occlum build
# 3. Run benchmark under different workloads
BLUE='\033[1;34m'
NC='\033[0m'
echo -e "${BLUE}Run filebench on Occlum.${NC}"
occlum run /bin/filebench -f /workloads/${WORKLOAD_FILE}.f

@@ -1,56 +0,0 @@
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or http://www.opensolaris.org/os/licensing.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
#
# Copyright 2008 Sun Microsystems, Inc. All rights reserved.
# Use is subject to license terms.
#
set $dir=/ext2/fbtest_ext2
set $nfiles=10000
set $meandirwidth=20
set $filesize=128k
set $nthreads=16
set $iosize=1m
set $meanappendsize=16k
define fileset name=bigfileset,path=$dir,size=$filesize,entries=$nfiles,dirwidth=$meandirwidth,prealloc=80
define process name=filereader,instances=1
{
thread name=filereaderthread,memsize=10m,instances=$nthreads
{
flowop createfile name=createfile1,filesetname=bigfileset,fd=1
flowop writewholefile name=wrtfile1,srcfd=1,fd=1,iosize=$iosize
flowop closefile name=closefile1,fd=1
flowop openfile name=openfile1,filesetname=bigfileset,fd=1
flowop appendfilerand name=appendfilerand1,iosize=$meanappendsize,fd=1
flowop closefile name=closefile2,fd=1
flowop openfile name=openfile2,filesetname=bigfileset,fd=1
flowop readwholefile name=readfile1,fd=1,iosize=$iosize
flowop closefile name=closefile3,fd=1
flowop deletefile name=deletefile1,filesetname=bigfileset
flowop statfile name=statfile1,filesetname=bigfileset
}
}
echo "File-server Version 3.0 personality successfully loaded"
run 60

@@ -1,82 +0,0 @@
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or http://www.opensolaris.org/os/licensing.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
#
# Copyright 2009 Sun Microsystems, Inc. All rights reserved.
# Use is subject to license terms.
#
# Workload "oltp" from filebench cannot run directly on Occlum
# since aio write/wait and semaphore-related flowops are not supported.
# We replace some flowops to achieve similar performance quota.
set $dir=/root/fbtest
set $eventrate=0
set $iosize=2k
set $nshadows=200
set $ndbwriters=10
set $usermode=200000
set $filesize=10m
set $memperthread=1m
set $workingset=0
set $logfilesize=10m
set $nfiles=10
set $nlogfiles=1
set $directio=0
eventgen rate = $eventrate
# Define a datafile and logfile
define fileset name=datafiles,path=$dir,size=$filesize,entries=$nfiles,dirwidth=1024,prealloc=100,reuse
define fileset name=logfile,path=$dir,size=$logfilesize,entries=$nlogfiles,dirwidth=1024,prealloc=100,reuse
define process name=lgwr,instances=1
{
thread name=lgwr,memsize=$memperthread
{
flowop write name=lg-write,filesetname=logfile,
iosize=256k,random,directio=$directio,dsync
}
}
# Define database writer processes
define process name=dbwr,instances=$ndbwriters
{
thread name=dbwr,memsize=$memperthread
{
flowop write name=dbwrite-a,filesetname=datafiles,
iosize=$iosize,workingset=$workingset,random,iters=100,opennext,directio=$directio,dsync
flowop hog name=dbwr-hog,value=10000
}
}
define process name=shadow,instances=$nshadows
{
thread name=shadow,memsize=$memperthread
{
flowop read name=shadowread,filesetname=datafiles,
iosize=$iosize,workingset=$workingset,random,opennext,directio=$directio
flowop hog name=shadowhog,value=$usermode
flowop eventlimit name=random-rate
}
}
echo "OLTP Version 3.0 personality successfully loaded"
run 60

@@ -1,11 +0,0 @@
# A simple readfiles workload
define fileset name="testF",entries=16,filesize=4k,path="/ext2/fbtest_ext2",prealloc
define process name="readerP",instances=1 {
thread name="readerT",instances=1 {
flowop openfile name="openOP",filesetname="testF"
flowop readwholefile name="readOP",filesetname="testF"
flowop closefile name="closeOP"
}
}
run 30

@@ -1,60 +0,0 @@
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or http://www.opensolaris.org/os/licensing.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
#
# Copyright 2007 Sun Microsystems, Inc. All rights reserved.
# Use is subject to license terms.
#
# flowop openfile name=openfile2,filesetname=bigfileset,fd=1
#
set $dir=/root/fbtest
set $nfiles=8000
set $meandirwidth=1000
set $filesize=16k
set $nthreads=16
set $iosize=1m
set $meanappendsize=16k
define fileset name=bigfileset,path=$dir,size=$filesize,entries=$nfiles,dirwidth=$meandirwidth,prealloc=80
define process name=filereader,instances=1
{
thread name=filereaderthread,memsize=10m,instances=$nthreads
{
flowop deletefile name=deletefile1,filesetname=bigfileset
flowop createfile name=createfile2,filesetname=bigfileset,fd=1
flowop appendfilerand name=appendfilerand2,iosize=$meanappendsize,fd=1
flowop fsync name=fsyncfile2,fd=1
flowop closefile name=closefile2,fd=1
flowop openfile name=openfile3,filesetname=bigfileset,fd=1
flowop readwholefile name=readfile3,fd=1,iosize=$iosize
flowop appendfilerand name=appendfilerand3,iosize=$meanappendsize,fd=1
flowop fsync name=fsyncfile3,fd=1
flowop closefile name=closefile3,fd=1
flowop openfile name=openfile4,filesetname=bigfileset,fd=1
flowop readwholefile name=readfile4,fd=1,iosize=$iosize
flowop closefile name=closefile4,fd=1
}
}
echo "Varmail Version 3.0 personality successfully loaded"
run 60

@@ -1,76 +0,0 @@
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or http://www.opensolaris.org/os/licensing.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
#
# Copyright 2009 Sun Microsystems, Inc. All rights reserved.
# Use is subject to license terms.
#
# This workload emulates a video server. It has two filesets, one of videos
# being actively served, and one of videos available but currently inactive
# (passive). However, one thread, vidwriter, is writing new videos to replace
# no longer viewed videos in the passive set. Meanwhile $nthreads threads are
# serving up videos from the activevids fileset. If the desired rate is R mb/s,
# and $nthreads is set to T, then set the $srvbwrate to R * T to get the
# desired rate per video stream. The video replacement rate of one video
# file per replacement interval, is set by $repintval which defaults to
# 1 second. Thus the write bandwidth will be set as $filesize/$repintval.
#
set $dir=/root/fbtest
set $eventrate=96
set $filesize=1g
set $nthreads=48
set $numactivevids=5
set $numpassivevids=30
set $reuseit=false
set $readiosize=256k
set $writeiosize=1m
set $passvidsname=passivevids
set $actvidsname=activevids
set $repintval=10
eventgen rate=$eventrate
define fileset name=$actvidsname,path=$dir,size=$filesize,entries=$numactivevids,dirwidth=4,prealloc,paralloc,reuse=$reuseit
define fileset name=$passvidsname,path=$dir,size=$filesize,entries=$numpassivevids,dirwidth=20,prealloc=50,paralloc,reuse=$reuseit
define process name=vidwriter,instances=1
{
thread name=vidwriter,memsize=10m,instances=1
{
flowop deletefile name=vidremover,filesetname=$passvidsname
flowop createfile name=wrtopen,filesetname=$passvidsname,fd=1
flowop writewholefile name=newvid,iosize=$writeiosize,fd=1,srcfd=1
flowop closefile name=wrtclose, fd=1
flowop delay name=replaceinterval, value=$repintval
}
}
define process name=vidreaders,instances=1
{
thread name=vidreaders,memsize=10m,instances=$nthreads
{
flowop read name=vidreader,filesetname=$actvidsname,iosize=$readiosize
flowop bwlimit name=serverlimit, target=vidreader
}
}
echo "Video Server Version 3.0 personality successfully loaded"
run 60

@@ -61,33 +61,6 @@ HF_DATASETS_CACHE=/root/cache \
For both examples, more information about the arguments can be found in BigDL-LLM [chatglm2](https://github.com/intel-analytics/BigDL/tree/main/python/llm/example/CPU/HF-Transformers-AutoModels/Model/chatglm2).
## LLM Inference Benchmark
Based on the [benchmark](https://github.com/intel-analytics/BigDL/tree/main/python/llm/dev/benchmark) demo from BigDL, a simple [benchmark](./benchmarks/) is provided to measure the performance of LLM inference both on the host and in a TEE.
The output will look like:
```
=========First token cost xx.xxxxs=========
=========Last token cost average xx.xxxxs (xx tokens in all)=========
```
In the commands below, **model_path** can be the path of chatglm2-6b or Qwen-7B-Chat.
**OMP_NUM_THREADS** sets the number of threads for OpenMP.
### Benchmark in Host
```bash
OMP_NUM_THREADS=16 ./python-occlum/bin/python \
./benchmarks/bench.py --repo-id-or-model-path <model_path>
```
### Benchmark in TEE
```bash
cd occlum_instance
OMP_NUM_THREADS=16 occlum run /bin/python3 \
/benchmarks/bench.py --repo-id-or-model-path <model_path>
```
In our benchmark on an Intel Ice Lake server, LLM inference performance within a TEE is approximately 30% lower than in the host environment.
## Do inference with webui
@ -114,7 +87,7 @@ This controller manages the distributed workers.
```bash
cd occlum_instance
occlum start
HF_DATASETS_CACHE=/root/cache occlum exec /bin/python3 -m ipex_llm.serving.fastchat.model_worker --model-path /models/chatglm2-6b --device cpu --host 0.0.0.0
HF_DATASETS_CACHE=/root/cache occlum exec /bin/python3 -m bigdl.llm.serving.model_worker --model-path /models/chatglm2-6b --device cpu --host 0.0.0.0
```
Wait until the process finishes loading the model and you see "Uvicorn running on ...". The model worker will register itself with the controller.

@ -1,22 +0,0 @@
import argparse
import torch
from ipex_llm.transformers import AutoModel, AutoModelForCausalLM
from transformers import AutoTokenizer
from benchmark_util import BenchmarkWrapper
parser = argparse.ArgumentParser(description='Predict Tokens using `generate()` API for ChatGLM2 model')
parser.add_argument('--repo-id-or-model-path', type=str, default="THUDM/chatglm2-6b",
                    help='The huggingface repo id for the ChatGLM2 model to be downloaded'
                         ', or the path to the huggingface checkpoint folder')
args = parser.parse_args()
model_path = args.repo_id_or_model_path
model = AutoModelForCausalLM.from_pretrained(model_path, trust_remote_code=True, load_in_4bit=True)
model = BenchmarkWrapper(model, do_print=True)
tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
prompt = "今天睡不着怎么办"
with torch.inference_mode():
    input_ids = tokenizer.encode(prompt, return_tensors="pt")
    output = model.generate(input_ids, do_sample=False, max_new_tokens=512)
    output_str = tokenizer.decode(output[0], skip_special_tokens=True)
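    # Illustrative additions (not in the original script): BenchmarkWrapper prints the
    # token timings during generate(); uncomment the next line to also see the text.
    # print(output_str)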

File diff suppressed because it is too large

@ -19,7 +19,7 @@ import time
import argparse
import numpy as np
from ipex_llm.transformers import AutoModel, AutoModelForCausalLM
from bigdl.llm.transformers import AutoModel, AutoModelForCausalLM
from transformers import AutoTokenizer
# you could tune the prompt based on your own model,

@ -19,7 +19,7 @@ import time
import argparse
import numpy as np
from ipex_llm.transformers import AutoModel
from bigdl.llm.transformers import AutoModel
from transformers import AutoTokenizer

@ -7,10 +7,8 @@ script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
[ -d miniconda ] || bash ./Miniconda3-latest-Linux-x86_64.sh -b -p $script_dir/miniconda
$script_dir/miniconda/bin/conda create \
--prefix $script_dir/python-occlum -y \
python=3.10.0
python=3.9.11
# Install BigDL LLM
$script_dir/python-occlum/bin/pip install torch==2.2.1 --index-url https://download.pytorch.org/whl/cpu
$script_dir/python-occlum/bin/pip install --pre --upgrade ipex-llm[all] ipex-llm[serving]
# $script_dir/python-occlum/bin/pip install intel-extension-for-pytorch
$script_dir/python-occlum/bin/pip install transformers_stream_generator einops tiktoken
$script_dir/python-occlum/bin/pip install torch==2.1.0 --index-url https://download.pytorch.org/whl/cpu
$script_dir/python-occlum/bin/pip install --pre --upgrade bigdl-llm[all] bigdl-llm[serving]
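# Optional sanity check (illustrative, not part of the original script):
# $script_dir/python-occlum/bin/python -c "import torch; print(torch.__version__)"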

@ -18,7 +18,6 @@ targets:
copy:
- dirs:
- ../chatglm2
- ../benchmarks
- target: /opt/occlum/glibc/lib
copy:
- files:

@ -20,7 +20,7 @@ import argparse
# load Hugging Face Transformers model with INT4 optimizations
from ipex_llm.transformers import AutoModelForCausalLM
from bigdl.llm.transformers import AutoModelForCausalLM
from transformers import AutoTokenizer

@ -1,47 +0,0 @@
FROM ubuntu:20.04
LABEL maintainer="Qi Zheng <huaiqing.zq@antgroup.com>"
# Install SGX DCAP and Occlum runtime
ENV APT_KEY_DONT_WARN_ON_DANGEROUS_USAGE=1
ARG PSW_VERSION=2.20.100.4
ARG DCAP_VERSION=1.17.100.4
ARG OCCLUM_VERSION=0.30.1
RUN apt update && DEBIAN_FRONTEND="noninteractive" apt install -y --no-install-recommends gnupg wget ca-certificates jq && \
echo 'deb [arch=amd64] https://download.01.org/intel-sgx/sgx_repo/ubuntu focal main' | tee /etc/apt/sources.list.d/intel-sgx.list && \
wget -qO - https://download.01.org/intel-sgx/sgx_repo/ubuntu/intel-sgx-deb.key | apt-key add - && \
echo 'deb [arch=amd64] https://occlum.io/occlum-package-repos/debian focal main' | tee /etc/apt/sources.list.d/occlum.list && \
wget -qO - https://occlum.io/occlum-package-repos/debian/public.key | apt-key add - && \
apt update && apt install -y --no-install-recommends \
libsgx-launch=$PSW_VERSION-focal1 \
libsgx-epid=$PSW_VERSION-focal1 \
libsgx-quote-ex=$PSW_VERSION-focal1 \
libsgx-urts=$PSW_VERSION-focal1 \
libsgx-enclave-common=$PSW_VERSION-focal1 \
libsgx-uae-service=$PSW_VERSION-focal1 \
libsgx-ae-pce=$PSW_VERSION-focal1 \
libsgx-ae-qe3=$DCAP_VERSION-focal1 \
libsgx-ae-id-enclave=$DCAP_VERSION-focal1 \
libsgx-ae-qve=$DCAP_VERSION-focal1 \
libsgx-dcap-ql=$DCAP_VERSION-focal1 \
libsgx-pce-logic=$DCAP_VERSION-focal1 \
libsgx-qe3-logic=$DCAP_VERSION-focal1 \
libsgx-dcap-default-qpl=$DCAP_VERSION-focal1 \
libsgx-dcap-quote-verify=$DCAP_VERSION-focal1 \
occlum-runtime=$OCCLUM_VERSION-1 \
gettext openjdk-11-jdk \
&& \
apt clean && \
rm -rf /var/lib/apt/lists/*
COPY docker-entrypoint.sh /
RUN mkdir -p /opt/flink
COPY flink-1.15.2 /opt/flink
ADD occlum_instance_k8s/occlum_instance_k8s.tar.gz /opt/flink
ENV FLINK_HOME=/opt/flink
ENV JAVA_HOME=/usr/lib/jvm/java-11-openjdk-amd64
ENV PATH="/opt/occlum/build/bin:/usr/local/occlum/bin:/opt/flink/bin:$PATH"
WORKDIR /opt/flink
ENTRYPOINT ["/docker-entrypoint.sh"]
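# Illustrative build/run commands (not part of the Dockerfile); the SGX device
# names assume an in-kernel DCAP driver:
#   docker build -t occlum-flink-k8s .
#   docker run --rm --device /dev/sgx_enclave --device /dev/sgx_provision occlum-flink-k8s help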

@ -1,46 +1,26 @@
# Run Flink on Occlum
This demo shows how to run the Flink job manager and task manager in Occlum.
For how to start a Flink K8S cluster in Occlum, please refer to [kubernetes](./kubernetes/).
### Preinstall dependencies
Related dependencies: openjdk-11
```
./preinstall_deps.sh
```
### Download flink
### Run the flink jobmanager
```
./download_flink.sh
./run_flink_jobmanager_on_host.sh
```
### Build Occlum instance
### Run the taskManager
```
./build_occlum_instance.sh
./run_flink_on_occlum_glibc.sh tm
```
### Run flink job manager on Occlum
### Run flink jobs example
```
./run_flink_on_occlum.sh jm
```
Wait a while for the job manager to start successfully. You can check the log `occlum_instance_jobmanager/flink--standalonesession-0.log` for detailed status.
### Run flink task manager on Occlum
Once the job manager is up, you can run the task manager.
```
./run_flink_on_occlum.sh tm
```
Wait a while for the task manager to start successfully. You can check the log `occlum_instance_taskmanager/flink--taskmanager-0.log` for detailed status.
### Submit a flink job to occlum
You can submit an example flink job by using the following command:
```
./run_flink_on_occlum.sh task
./run_flink_on_occlum_glibc.sh task
```
**Note:**
If running the jobmanager in docker, please export ports 8081 and 6123.
1. If running the jobmanager in docker, please export ports 8081 and 6123
2. Step 2 may report warnings about missing shared objects. These are harmless. To avoid these warnings, you can **REPLACE the FIRST LINE** of the config file `/opt/occlum/etc/template/occlum_elf_loader.config` with `/opt/occlum/glibc/lib/ld-linux-x86-64.so.2 /usr/lib/x86_64-linux-gnu:/lib/x86_64-linux-gnu:/usr/lib/jvm/java-11-openjdk-amd64/lib/server`.
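A one-line sketch of that replacement (the config path and search path are taken from the note above):

```bash
# Overwrite the first line of the ELF loader config with the Java-aware search path
sed -i '1s|.*|/opt/occlum/glibc/lib/ld-linux-x86-64.so.2 /usr/lib/x86_64-linux-gnu:/lib/x86_64-linux-gnu:/usr/lib/jvm/java-11-openjdk-amd64/lib/server|' /opt/occlum/etc/template/occlum_elf_loader.config
```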

@ -1,73 +0,0 @@
#!/bin/bash
set -e
BLUE='\033[1;34m'
NC='\033[0m'
RPC_BIND_PORT=8089
OCCLUM_USER_SPACE_SIZE=8GB
build_instance() {
postfix=$1
rm -rf occlum_instance*
occlum new occlum_instance_$postfix
cd occlum_instance_$postfix
new_json="$(jq '.resource_limits.user_space_size = "1MB" |
.resource_limits.user_space_max_size = "OCCLUM_USER_SPACE_SIZE" |
.resource_limits.kernel_space_heap_size="1MB" |
.resource_limits.kernel_space_heap_max_size="256MB" |
.resource_limits.max_num_of_threads = 256 |
.entry_points = [ "/usr/lib/jvm/java-11-openjdk-amd64/bin" ] |
.env.default = [ "LD_LIBRARY_PATH=/usr/lib/jvm/java-11-openjdk-amd64/lib/server:/usr/lib/jvm/java-11-openjdk-amd64/lib" ] |
.env.default = [ "FLINK_HOME=/opt/flink" ] |
.env.default = [ "JAVA_HOME=/usr/lib/jvm/java-11-openjdk-amd64" ] |
.env.default = [ "HOME=/root" ] |
.env.untrusted += [ "TZ", "FLINK_CONF_DIR" ]' Occlum.json)" && \
echo "${new_json}" > Occlum.json
# Copy JVM and class file into Occlum instance and build
rm -rf image
copy_bom -f ../flink.yaml --root image --include-dir /opt/occlum/etc/template
# Use hostfs for flink conf in k8s mode
if [ "$postfix" == "k8s" ]; then
# Increase user space size for k8s mode
OCCLUM_USER_SPACE_SIZE=16GB
rm -rf image/opt/flink*/conf/*
new_json="$(cat Occlum.json | jq '.mount+=[{"target": "/opt/flink/conf", "type": "hostfs","source": "/opt/flink/conf-copy"}]')" && \
echo "${new_json}" > Occlum.json
# use host secrets
mkdir -p image/var/run/secrets
new_json="$(cat Occlum.json | jq '.mount+=[{"target": "/var/run/secrets", "type": "hostfs","source": "/var/run/secrets-copy"}]')" && \
echo "${new_json}" > Occlum.json
# k8s pod template
mkdir -p image/opt/flink/pod-template
new_json="$(cat Occlum.json | jq '.mount+=[{"target": "/opt/flink/pod-template", "type": "hostfs","source": "/opt/flink/pod-template-copy"}]')" && \
echo "${new_json}" > Occlum.json
fi
# Update user size
sed -i "s/OCCLUM_USER_SPACE_SIZE/$OCCLUM_USER_SPACE_SIZE/g" Occlum.json
occlum build
occlum package --debug
cd ..
}
update_flink_conf() {
echo "rest.port: $RPC_BIND_PORT" >> flink-1.15.2/conf/flink-conf.yaml
}
if [ "$1" == "k8s" ]; then
echo "do occlum instance build for k8s mode"
build_instance k8s
else
update_flink_conf
build_instance jobmanager
# flink job manager and task manager use the same occlum instance
cp -rf occlum_instance_jobmanager occlum_instance_taskmanager
fi
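# Illustrative usage (not part of the original script):
#   ./build_occlum_instance.sh        # builds occlum_instance_jobmanager and _taskmanager
#   ./build_occlum_instance.sh k8s    # builds occlum_instance_k8s with hostfs mounts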

@ -1,154 +0,0 @@
#!/usr/bin/env bash
###############################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
COMMAND_STANDALONE="standalone-job"
COMMAND_HISTORY_SERVER="history-server"
# If unspecified, the hostname of the container is taken as the JobManager address
JOB_MANAGER_RPC_ADDRESS=${JOB_MANAGER_RPC_ADDRESS:-$(hostname -f)}
CONF_FILE="${FLINK_HOME}/conf/flink-conf.yaml"
drop_privs_cmd() {
if [ $(id -u) != 0 ]; then
# Don't need to drop privs if EUID != 0
return
elif [ -x /sbin/su-exec ]; then
# Alpine
echo su-exec flink
else
# Others
echo gosu flink
fi
}
copy_plugins_if_required() {
if [ -z "$ENABLE_BUILT_IN_PLUGINS" ]; then
return 0
fi
echo "Enabling required built-in plugins"
for target_plugin in $(echo "$ENABLE_BUILT_IN_PLUGINS" | tr ';' ' '); do
echo "Linking ${target_plugin} to plugin directory"
plugin_name=${target_plugin%.jar}
mkdir -p "${FLINK_HOME}/plugins/${plugin_name}"
if [ ! -e "${FLINK_HOME}/opt/${target_plugin}" ]; then
echo "Plugin ${target_plugin} does not exist. Exiting."
exit 1
else
ln -fs "${FLINK_HOME}/opt/${target_plugin}" "${FLINK_HOME}/plugins/${plugin_name}"
echo "Successfully enabled ${target_plugin}"
fi
done
}
set_config_option() {
local option=$1
local value=$2
# escape periods for usage in regular expressions
local escaped_option=$(echo ${option} | sed -e "s/\./\\\./g")
# either override an existing entry, or append a new one
if grep -E "^${escaped_option}:.*" "${CONF_FILE}" > /dev/null; then
sed -i -e "s/${escaped_option}:.*/$option: $value/g" "${CONF_FILE}"
else
echo "${option}: ${value}" >> "${CONF_FILE}"
fi
}
prepare_configuration() {
set_config_option jobmanager.rpc.address ${JOB_MANAGER_RPC_ADDRESS}
set_config_option blob.server.port 6124
set_config_option query.server.port 6125
if [ -n "${TASK_MANAGER_NUMBER_OF_TASK_SLOTS}" ]; then
set_config_option taskmanager.numberOfTaskSlots ${TASK_MANAGER_NUMBER_OF_TASK_SLOTS}
fi
if [ -n "${FLINK_PROPERTIES}" ]; then
echo "${FLINK_PROPERTIES}" >> "${CONF_FILE}"
fi
envsubst < "${CONF_FILE}" > "${CONF_FILE}.tmp" && mv "${CONF_FILE}.tmp" "${CONF_FILE}"
}
maybe_enable_jemalloc() {
if [ "${DISABLE_JEMALLOC:-false}" == "false" ]; then
JEMALLOC_PATH="/usr/lib/$(uname -m)-linux-gnu/libjemalloc.so"
JEMALLOC_FALLBACK="/usr/lib/x86_64-linux-gnu/libjemalloc.so"
if [ -f "$JEMALLOC_PATH" ]; then
export LD_PRELOAD=$LD_PRELOAD:$JEMALLOC_PATH
elif [ -f "$JEMALLOC_FALLBACK" ]; then
export LD_PRELOAD=$LD_PRELOAD:$JEMALLOC_FALLBACK
else
if [ "$JEMALLOC_PATH" = "$JEMALLOC_FALLBACK" ]; then
MSG_PATH=$JEMALLOC_PATH
else
MSG_PATH="$JEMALLOC_PATH and $JEMALLOC_FALLBACK"
fi
echo "WARNING: attempted to load jemalloc from $MSG_PATH but the library couldn't be found. glibc will be used instead."
fi
fi
}
maybe_enable_jemalloc
copy_plugins_if_required
prepare_configuration
args=("$@")
if [ "$1" = "help" ]; then
printf "Usage: $(basename "$0") (jobmanager|${COMMAND_STANDALONE}|taskmanager|${COMMAND_HISTORY_SERVER})\n"
printf " Or $(basename "$0") help\n\n"
printf "By default, Flink image adopts jemalloc as default memory allocator. This behavior can be disabled by setting the 'DISABLE_JEMALLOC' environment variable to 'true'.\n"
exit 0
elif [ "$1" = "jobmanager" ]; then
args=("${args[@]:1}")
echo "Starting Job Manager"
exec $(drop_privs_cmd) "$FLINK_HOME/bin/jobmanager.sh" start-foreground "${args[@]}"
elif [ "$1" = ${COMMAND_STANDALONE} ]; then
args=("${args[@]:1}")
echo "Starting Job Manager"
exec $(drop_privs_cmd) "$FLINK_HOME/bin/standalone-job.sh" start-foreground "${args[@]}"
elif [ "$1" = ${COMMAND_HISTORY_SERVER} ]; then
args=("${args[@]:1}")
echo "Starting History Server"
exec $(drop_privs_cmd) "$FLINK_HOME/bin/historyserver.sh" start-foreground "${args[@]}"
elif [ "$1" = "taskmanager" ]; then
args=("${args[@]:1}")
echo "Starting Task Manager"
exec $(drop_privs_cmd) "$FLINK_HOME/bin/taskmanager.sh" start-foreground "${args[@]}"
fi
args=("${args[@]}")
# Running command in pass-through mode
# exec $(drop_privs_cmd) "${args[@]}"
# Do not run with flink user to avoid permission issue in Occlum hostfs mount
exec "${args[@]}"

@ -1,8 +1,8 @@
#!/bin/bash
set -e
rm -rf flink-1.15.2*
wget https://archive.apache.org/dist/flink/flink-1.15.2/flink-1.15.2-bin-scala_2.12.tgz
tar -xvzf flink-1.15.2-bin-scala_2.12.tgz
rm -rf flink-1.10.1*
wget https://archive.apache.org/dist/flink/flink-1.10.1/flink-1.10.1-bin-scala_2.11.tgz
tar -xvzf flink-1.10.1-bin-scala_2.11.tgz
echo "Download Flink Success"

@ -1,28 +1,18 @@
includes:
- base.yaml
- java-11-openjdk-amd64.yaml
targets:
targets:
# copy flink directory
- target: /opt/flink
- target: /bin
copy:
- dirs:
- ../flink-1.15.2/
# add timezone
- target: /opt/occlum/glibc/share/
copy:
- dirs:
- /usr/share/zoneinfo
# etc files
- from: ../flink-1.10.1
# copy localtime
- target: /etc
copy:
- dirs:
- /etc/ssl
- files:
- /etc/nsswitch.conf
copy:
- files:
- /etc/localtime
# copy libnss_files
- target: /opt/occlum/glibc/lib
copy:
- files:
copy:
- files:
- /opt/occlum/glibc/lib/libnss_files.so.2
- /opt/occlum/glibc/lib/libnss_dns.so.2
- /opt/occlum/glibc/lib/libresolv.so.2

@ -1,103 +0,0 @@
# Deploy Flink in K8S
There are several ways to deploy Flink on Kubernetes, such as [native kubernetes deployment](https://nightlies.apache.org/flink/flink-docs-release-1.19/zh/docs/deployment/resource-providers/native_kubernetes/) and [Flink Kubernetes Operator](https://nightlies.apache.org/flink/flink-kubernetes-operator-docs-release-1.8/). This tutorial shows how to use the kubernetes operator deployment.
## Prerequisites
* A Kubernetes cluster with at least one node.
* The `kubectl` command line tool is installed and configured to connect to your Kubernetes cluster.
* The `helm` command line tool is also installed and configured to connect to your Kubernetes cluster.
### Install the Flink Kubernetes Operator
Just follow the [quick start](https://nightlies.apache.org/flink/flink-kubernetes-operator-docs-release-1.8/docs/try-flink-kubernetes-operator/quick-start/) to install the Flink Kubernetes Operator.
## Build Flink K8S docker image
First, please make sure `docker` is installed on your host. Then start the Occlum container (use version `latest-ubuntu20.04` for example) as below.
```
$ sudo docker run --rm -itd --network host \
-v $(which docker):/usr/bin/docker -v /var/run/docker.sock:/var/run/docker.sock \
occlum/occlum:latest-ubuntu20.04
```
All the following steps are run in the above container.
### Build
Just run the script [build.sh](./build.sh). It builds a docker image for Flink K8S.
```bash
Build Occlum Flink container images for k8s deployment.
usage: build.sh [OPTION]...
-r <container image registry> the container image registry
-g <tag> container image tag
-h <usage> usage help
```
For example, if you want to build the image named `demo/occlum-flink:0.1`, just run
```bash
$ ./build.sh -r demo -g 0.1
```
Note that during the build process, a customized [flink-console.sh](./flink-console.sh) is used to replace the original one; refer to the script for details.
Once the build is done, you can push the image for the next step -- [Deploy](#deploy).
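For example, assuming the image built above was tagged `demo/occlum-flink:0.1`:

```bash
docker push demo/occlum-flink:0.1
```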
## Deploy
Based on the original yaml files in the [github](https://github.com/apache/flink-kubernetes-operator/tree/release-1.8/examples) repository, the customized example yaml files below are provided.
* [basic.yaml](./basic.yaml)
* [basic-session-deployment-and-job.yaml](./basic-session-deployment-and-job.yaml)
* [basic-session-deployment-only.yaml](./basic-session-deployment-only.yaml)
* [basic-session-job-only.yaml](./basic-session-job-only.yaml)
They have the same meaning as their original counterparts, aside from some SGX/Occlum-related customization settings.
You can deploy each of them.
Just make sure the **image** in the yaml file is the one you built before; a sketch for updating it follows below.
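For instance, a sketch that rewrites the default image reference in one of the example yaml files (registry and tag are illustrative):

```bash
sed -i 's|image: occlum_flink:0.1|image: demo/occlum-flink:0.1|' basic.yaml
```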
### Examples
#### Basic Application Deployment example
This is a simple deployment defined by a minimal deployment file.
The configuration contains the following:
- Defines the job to run
- Assigns the resources available for the job
- Defines the parallelism used
To run the job, submit the yaml file using kubectl:
```bash
kubectl apply -f basic.yaml
```
#### Basic Session Deployment example
This example shows how to create a basic Session Cluster and then how to submit specific jobs to this cluster if needed.
##### Without jobs
The Flink Deployment can be created without any jobs.
In this case, Flink jobs can be created later by submitting them
separately.
To create a Flink Deployment with the specified resources but without any jobs, run the following command:
```bash
kubectl apply -f basic-session-deployment-only.yaml
```
##### Adding jobs
If the Flink Deployment was created by `basic-session-deployment-only.yaml`, a new job can be added
by the following command:
```bash
kubectl apply -f basic-session-job-only.yaml
```
##### Creating Deployment and Jobs together
Alternatively, the Flink Deployment and the Flink Session Job configurations can be submitted together.
To try this out, run the following command:
```bash
kubectl apply -f basic-session-deployment-and-job.yaml
```
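After applying any of the above, the status can be checked with standard kubectl commands, for example:

```bash
kubectl get flinkdeployment
kubectl get pods
```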

@ -1,83 +0,0 @@
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
apiVersion: flink.apache.org/v1beta1
kind: FlinkDeployment
metadata:
name: basic-session-deployment-example
spec:
image: occlum_flink:0.1
flinkVersion: v1_15
jobManager:
resource:
memory: "2048m"
cpu: 1
taskManager:
resource:
memory: "2048m"
cpu: 1
serviceAccount: flink
podTemplate:
spec:
containers:
- name: flink-main-container
volumeMounts:
- name: device-plugin
mountPath: /var/lib/kubelet/device-plugins
resources:
requests:
sgx.intel.com/epc: 21474836480
sgx.intel.com/enclave: 1
sgx.intel.com/provision: 1
limits:
sgx.intel.com/epc: 21474836480
sgx.intel.com/enclave: 1
sgx.intel.com/provision: 1
# env:
# - name: OCCLUM_LOG_LEVEL
# value: "off"
volumes:
- name: device-plugin
hostPath:
path: /var/lib/kubelet/device-plugins
---
apiVersion: flink.apache.org/v1beta1
kind: FlinkSessionJob
metadata:
name: basic-session-job-example
spec:
deploymentName: basic-session-deployment-example
job:
jarURI: https://repo1.maven.org/maven2/org/apache/flink/flink-examples-streaming_2.12/1.16.1/flink-examples-streaming_2.12-1.16.1-TopSpeedWindowing.jar
parallelism: 2
upgradeMode: stateless
---
apiVersion: flink.apache.org/v1beta1
kind: FlinkSessionJob
metadata:
name: basic-session-job-example2
spec:
deploymentName: basic-session-deployment-example
job:
jarURI: https://repo1.maven.org/maven2/org/apache/flink/flink-examples-streaming_2.12/1.16.1/flink-examples-streaming_2.12-1.16.1.jar
parallelism: 2
upgradeMode: stateless
entryClass: org.apache.flink.streaming.examples.statemachine.StateMachineExample

@ -1,59 +0,0 @@
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
apiVersion: flink.apache.org/v1beta1
kind: FlinkDeployment
metadata:
name: basic-session-deployment-only-example
spec:
image: occlum_flink:0.1
flinkVersion: v1_15
flinkConfiguration:
taskmanager.numberOfTaskSlots: "2"
serviceAccount: flink
jobManager:
resource:
memory: "2048m"
cpu: 1
taskManager:
resource:
memory: "2048m"
cpu: 1
podTemplate:
spec:
containers:
- name: flink-main-container
volumeMounts:
- name: device-plugin
mountPath: /var/lib/kubelet/device-plugins
resources:
requests:
sgx.intel.com/epc: 21474836480
sgx.intel.com/enclave: 1
sgx.intel.com/provision: 1
limits:
sgx.intel.com/epc: 21474836480
sgx.intel.com/enclave: 1
sgx.intel.com/provision: 1
# env:
# - name: OCCLUM_LOG_LEVEL
# value: "off"
volumes:
- name: device-plugin
hostPath:
path: /var/lib/kubelet/device-plugins

@ -1,28 +0,0 @@
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
apiVersion: flink.apache.org/v1beta1
kind: FlinkSessionJob
metadata:
name: basic-session-job-only-example
spec:
deploymentName: basic-session-deployment-only-example
job:
jarURI: https://repo1.maven.org/maven2/org/apache/flink/flink-examples-streaming_2.12/1.16.1/flink-examples-streaming_2.12-1.16.1-TopSpeedWindowing.jar
parallelism: 4
upgradeMode: stateless

@ -1,79 +0,0 @@
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
apiVersion: flink.apache.org/v1beta1
kind: FlinkDeployment
metadata:
name: basic-example
spec:
image: occlum_flink:0.1
flinkVersion: v1_15
flinkConfiguration:
taskmanager.numberOfTaskSlots: "2"
serviceAccount: flink
jobManager:
resource:
memory: "2048m"
cpu: 1
# podTemplate:
# spec:
# containers:
# # Do not change the main container name
# - name: flink-main-container
# args:
# - bash
# - -c
# - 'kubernetes-jobmanager.sh kubernetes-application '
taskManager:
resource:
memory: "2048m"
cpu: 2
podTemplate:
spec:
containers:
- name: flink-main-container
env:
- name: OCCLUM_LOG_LEVEL
value: "off"
job:
jarURI: local:///opt/flink/examples/streaming/StateMachineExample.jar
parallelism: 2
upgradeMode: stateless
podTemplate:
spec:
containers:
- name: flink-main-container
volumeMounts:
- name: device-plugin
mountPath: /var/lib/kubelet/device-plugins
resources:
requests:
sgx.intel.com/epc: 21474836480
sgx.intel.com/enclave: 1
sgx.intel.com/provision: 1
limits:
sgx.intel.com/epc: 21474836480
sgx.intel.com/enclave: 1
sgx.intel.com/provision: 1
# env:
# - name: OCCLUM_LOG_LEVEL
# value: "off"
volumes:
- name: device-plugin
hostPath:
path: /var/lib/kubelet/device-plugins

@ -1,60 +0,0 @@
#!/bin/bash
set -e
script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
top_dir=$(dirname "${script_dir}")
registry="demo"
tag="latest"
function usage {
cat << EOM
Build Occlum Flink container images for k8s deployment.
usage: $(basename "$0") [OPTION]...
-r <container image registry> the container image registry
-g <tag> container image tag
-h <usage> usage help
EOM
exit 0
}
function process_args {
while getopts ":r:g:h" option; do
case "${option}" in
r) registry=${OPTARG};;
g) tag=${OPTARG};;
h) usage;;
esac
done
}
process_args "$@"
echo ""
echo "############################"
echo "Build Occlum Flink container image for k8s deployment"
echo " Container images registry: ${registry}"
echo " Container images tag: ${tag}"
echo ""
pushd ${top_dir}
echo "Install openjdk 11 first ..."
./preinstall_deps.sh
echo "Download Flink ..."
./download_flink.sh
cp ./kubernetes/flink-console.sh ./flink-1.15.2/bin/
echo "Build Occlum instance ..."
./build_occlum_instance.sh k8s
echo ""
echo "Build Occlum container image ..."
docker build \
-f Dockerfile \
-t ${registry}/occlum_flink:${tag} .
echo "Build is done"
popd

@ -1,126 +0,0 @@
#!/usr/bin/env bash
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
# Start a Flink service as a console application. Must be stopped with Ctrl-C
# or with SIGTERM by kill or the controlling process.
USAGE="Usage: flink-console.sh (taskexecutor|zookeeper|historyserver|standalonesession|standalonejob|kubernetes-session|kubernetes-application|kubernetes-taskmanager) [args]"
SERVICE=$1
ARGS=("${@:2}") # get remaining arguments as array
bin=`dirname "$0"`
bin=`cd "$bin"; pwd`
. "$bin"/config.sh
case $SERVICE in
(taskexecutor)
CLASS_TO_RUN=org.apache.flink.runtime.taskexecutor.TaskManagerRunner
;;
(historyserver)
CLASS_TO_RUN=org.apache.flink.runtime.webmonitor.history.HistoryServer
;;
(zookeeper)
CLASS_TO_RUN=org.apache.flink.runtime.zookeeper.FlinkZooKeeperQuorumPeer
;;
(standalonesession)
CLASS_TO_RUN=org.apache.flink.runtime.entrypoint.StandaloneSessionClusterEntrypoint
;;
(standalonejob)
CLASS_TO_RUN=org.apache.flink.container.entrypoint.StandaloneApplicationClusterEntryPoint
;;
(kubernetes-session)
CLASS_TO_RUN=org.apache.flink.kubernetes.entrypoint.KubernetesSessionClusterEntrypoint
;;
(kubernetes-application)
CLASS_TO_RUN=org.apache.flink.kubernetes.entrypoint.KubernetesApplicationClusterEntrypoint
;;
(kubernetes-taskmanager)
CLASS_TO_RUN=org.apache.flink.kubernetes.taskmanager.KubernetesTaskExecutorRunner
;;
(*)
echo "Unknown service '${SERVICE}'. $USAGE."
exit 1
;;
esac
FLINK_TM_CLASSPATH=`constructFlinkClassPath`
if [ "$FLINK_IDENT_STRING" = "" ]; then
FLINK_IDENT_STRING="$USER"
fi
pid=$FLINK_PID_DIR/flink-$FLINK_IDENT_STRING-$SERVICE.pid
mkdir -p "$FLINK_PID_DIR"
# The lock needs to be released after use because this script is started foreground
command -v flock >/dev/null 2>&1
flock_exist=$?
if [[ ${flock_exist} -eq 0 ]]; then
exec 200<"$FLINK_PID_DIR"
flock 200
fi
# Remove the pid file when all the processes are dead
if [ -f "$pid" ]; then
all_dead=0
while read each_pid; do
# Check whether the process is still running
kill -0 $each_pid > /dev/null 2>&1
[[ $? -eq 0 ]] && all_dead=1
done < "$pid"
[ ${all_dead} -eq 0 ] && rm $pid
fi
id=$([ -f "$pid" ] && echo $(wc -l < "$pid") || echo "0")
FLINK_LOG_PREFIX="${FLINK_LOG_DIR}/flink-${FLINK_IDENT_STRING}-${SERVICE}-${id}-${HOSTNAME}"
log="${FLINK_LOG_PREFIX}.log"
log_setting=("-Dlog.file=${log}" "-Dlog4j.configuration=file:${FLINK_CONF_DIR}/log4j-console.properties" "-Dlog4j.configurationFile=file:${FLINK_CONF_DIR}/log4j-console.properties" "-Dlogback.configurationFile=file:${FLINK_CONF_DIR}/logback-console.xml")
echo "Starting $SERVICE as a console application on host $HOSTNAME."
# Add the current process id to pid file
echo $$ >> "$pid" 2>/dev/null
# Release the lock because the java process runs in the foreground and would block other processes from modifying the pid file
[[ ${flock_exist} -eq 0 ]] && flock -u 200
# Evaluate user options for local variable expansion
FLINK_ENV_JAVA_OPTS=$(eval echo ${FLINK_ENV_JAVA_OPTS})
echo "################"
set -x
cp -rf /var/run/secrets /var/run/secrets-copy
cp -rf conf conf-copy
if [ -d pod-template ]; then
cp -rf pod-template pod-template-copy
else
# create dir anyway to avoid hostfs mount error
mkdir -p pod-template-copy
fi
cd occlum_instance_k8s
# exec "$JAVA_RUN" $JVM_ARGS ${FLINK_ENV_JAVA_OPTS} "${log_setting[@]}" -classpath "`manglePathList "$FLINK_TM_CLASSPATH:$INTERNAL_HADOOP_CLASSPATHS"`" ${CLASS_TO_RUN} "${ARGS[@]}"
exec occlum run /usr/lib/jvm/java-11-openjdk-amd64/bin/java -Dos.name=Linux $JVM_ARGS ${FLINK_ENV_JAVA_OPTS} "${log_setting[@]}" -classpath "`manglePathList "$FLINK_TM_CLASSPATH:$INTERNAL_HADOOP_CLASSPATHS"`" ${CLASS_TO_RUN} "${ARGS[@]}"

@ -0,0 +1,3 @@
export JAVA_HOME=/usr/lib/jvm/java-11-openjdk-amd64/
./flink-1.10.1/bin/jobmanager.sh start
echo -e "${BLUE}Flink jobmanager${NC}"

@ -1,86 +0,0 @@
#!/bin/bash
set -e
BLUE='\033[1;34m'
NC='\033[0m'
script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
FLINK_BIND_PORT=8089
run_jobmanager() {
logfile="flink--standalonesession-0.log"
echo -e "${BLUE}occlum run JVM jobmanager${NC}"
echo -e "${BLUE}logfile=$logfile${NC}"
cd occlum_instance_jobmanager
occlum run /usr/lib/jvm/java-11-openjdk-amd64/bin/java \
-Dos.name=Linux -XX:ActiveProcessorCount=4 -Xmx800m -Xms800m \
-XX:MaxMetaspaceSize=256m -Dlog.file=/host/$logfile \
-Dlog4j.configuration=file:/opt/flink/conf/log4j.properties \
-Dlog4j.configurationFile=file:/opt/flink/conf/log4j.properties \
-Dlogback.configurationFile=file:/opt/flink/conf/logback.xml \
-classpath /opt/flink/lib/* org.apache.flink.runtime.entrypoint.StandaloneSessionClusterEntrypoint \
-D jobmanager.memory.off-heap.size=128mb \
-D jobmanager.memory.jvm-overhead.min=192mb \
-D jobmanager.memory.jvm-metaspace.size=256mb \
-D jobmanager.memory.jvm-overhead.max=192mb \
-D rest.bind-port=$FLINK_BIND_PORT \
-D rest.bind-address=0.0.0.0 \
--configDir /opt/flink/conf \
--executionMode cluster \
&
cd ..
}
run_taskmanager() {
logfile="flink--taskmanager-0.log"
echo -e "${BLUE}occlum run JVM taskmanager${NC}"
echo -e "${BLUE}logfile=$logfile${NC}"
cd occlum_instance_taskmanager
occlum run /usr/lib/jvm/java-11-openjdk-amd64/bin/java \
-Dos.name=Linux -XX:ActiveProcessorCount=2 -XX:+UseG1GC \
-Xmx600m -Xms600m -XX:MaxMetaspaceSize=256m \
-Dlog.file=/host/$logfile \
-Dlog4j.configuration=file:/opt/flink/conf/log4j.properties \
-Dlog4j.configurationFile=file:/opt/flink/conf/log4j.properties \
-Dlogback.configurationFile=file:/opt/flink/conf/logback.xml \
-classpath /opt/flink/lib/* org.apache.flink.runtime.taskexecutor.TaskManagerRunner \
--configDir /opt/flink/conf -D taskmanager.memory.network.min=128mb \
-D taskmanager.cpu.cores=1.0 -D taskmanager.memory.task.off-heap.size=0b \
-D taskmanager.memory.jvm-metaspace.size=256mb -D external-resources=none \
-D taskmanager.memory.jvm-overhead.min=192mb \
-D taskmanager.memory.framework.off-heap.size=128mb \
-D taskmanager.memory.network.max=128mb \
-D taskmanager.memory.framework.heap.size=128mb \
-D taskmanager.memory.managed.size=256mb \
-D taskmanager.memory.task.heap.size=383mb \
-D taskmanager.numberOfTaskSlots=1 \
-D taskmanager.memory.jvm-overhead.max=192mb \
-D rest.bind-port=$FLINK_BIND_PORT \
-D rest.bind-address=0.0.0.0 \
&
cd ..
}
run_task() {
cd flink-1.15.2
./bin/flink run ./examples/streaming/WordCount.jar
cd ..
}
arg=$1
case "$arg" in
jm)
run_jobmanager
cd ../
;;
tm)
run_taskmanager
cd ../
;;
task)
run_task
cd ../
;;
esac

@ -0,0 +1,77 @@
#!/bin/bash
set -e
BLUE='\033[1;34m'
NC='\033[0m'
occlum_glibc=/opt/occlum/glibc/lib/
init_instance() {
# Init Occlum instance
postfix=$1
FLINK_LOG_PREFIX="/host/flink--$postfix-${id}"
log="${FLINK_LOG_PREFIX}.log"
out="./flink--$postfix-${id}.out"
rm -rf occlum_instance_$postfix && mkdir occlum_instance_$postfix
cd occlum_instance_$postfix
occlum init
new_json="$(jq '.resource_limits.user_space_size = "1MB" |
.resource_limits.user_space_max_size = "5500MB" |
.resource_limits.kernel_space_heap_size="1MB" |
.resource_limits.kernel_space_heap_max_size="64MB" |
.resource_limits.max_num_of_threads = 64 |
.process.default_heap_size = "128MB" |
.entry_points = [ "/usr/lib/jvm/java-11-openjdk-amd64/bin" ] |
.env.default = [ "LD_LIBRARY_PATH=/usr/lib/jvm/java-11-openjdk-amd64/lib/server:/usr/lib/jvm/java-11-openjdk-amd64/lib:/usr/lib/jvm/java-11-openjdk-amd64/../lib:/lib" ]' Occlum.json)" && \
echo "${new_json}" > Occlum.json
}
build_flink() {
# Copy JVM and class file into Occlum instance and build
rm -rf image
copy_bom -f ../flink.yaml --root image --include-dir /opt/occlum/etc/template
occlum build
}
run_taskmanager() {
init_instance taskmanager
build_flink
echo -e "${BLUE}occlum run JVM taskmanager${NC}"
echo -e "${BLUE}logfile=$log${NC}"
occlum run /usr/lib/jvm/java-11-openjdk-amd64/bin/java \
-Xmx800m -XX:-UseCompressedOops -XX:MaxMetaspaceSize=256m \
-XX:ActiveProcessorCount=2 \
-Dlog.file=$log \
-Dos.name=Linux \
-Dlog4j.configuration=file:/bin/conf/log4j.properties \
-Dlogback.configurationFile=file:/bin/conf/logback.xml \
-classpath /bin/lib/flink-table-blink_2.11-1.10.1.jar:/bin/lib/flink-table_2.11-1.10.1.jar:/bin/lib/log4j-1.2.17.jar:/bin/lib/slf4j-log4j12-1.7.15.jar:/bin/lib/flink-dist_2.11-1.10.1.jar org.apache.flink.runtime.taskexecutor.TaskManagerRunner \
--configDir /bin/conf \
-D taskmanager.memory.network.max=64mb \
-D taskmanager.memory.network.min=64mb \
-D taskmanager.memory.managed.size=128mb \
-D taskmanager.cpu.cores=1.0 \
-D taskmanager.memory.task.heap.size=256mb \
&
}
run_task() {
export FLINK_CONF_DIR=$PWD/flink-1.10.1/conf && \
./flink-1.10.1/bin/flink run ./flink-1.10.1/examples/streaming/WordCount.jar
}
id=$([ -f "$pid" ] && echo $(wc -l < "$pid") || echo "0")
arg=$1
case "$arg" in
tm)
run_taskmanager
cd ../
;;
task)
run_task
cd ../
;;
esac

@ -50,6 +50,7 @@ run_netty_ut() {
-cp /usr/lib/netty/netty-testsuite-4.1.51.Final.jar:/usr/lib/netty/netty-all-4.1.51.Final.jar:/usr/lib/netty/xz-1.5.jar:/usr/lib/netty/hamcrest-library-1.3.jar:/usr/lib/netty/logback-classic-1.1.7.jar \
--scan-class-path > netty-test-heap512m.log || true
cat netty-test-heap512m.log
cat netty-test-heap512m.log | grep "190 tests successful"
}
run_netty_ut

1
deps/ext2-rs vendored

@ -1 +0,0 @@
Subproject commit e78615a899adeb7b6bef5811eb9244cc73680407

1
deps/io-uring vendored

@ -1 +0,0 @@
Subproject commit c654c4925bb0b013d3eec736015f8ac4888722be

1
deps/mlsdisk vendored

@ -1 +0,0 @@
Subproject commit 864a00840110237d60d51e04d0e63394c812549a

28
deps/mlsdisk.patch vendored

@ -1,28 +0,0 @@
diff --git a/core/Cargo.toml b/core/Cargo.toml
index c1e1746..20b896f 100644
--- a/core/Cargo.toml
+++ b/core/Cargo.toml
@@ -22,16 +22,18 @@ serde = { version = "=1.0.188", default-features = false, features = ["alloc", "
spin = { version = "0.9.8", optional = true }
static_assertions = "1.1.0"
-sgx_tstd = { git = "https://github.com/apache/teaclave-sgx-sdk.git", features = ["backtrace", "thread"], optional = true }
-sgx_rand = { git = "https://github.com/apache/teaclave-sgx-sdk.git", optional = true }
-sgx_tcrypto = { git = "https://github.com/apache/teaclave-sgx-sdk.git", optional = true }
-sgx_types = { git = "https://github.com/apache/teaclave-sgx-sdk.git", optional = true }
+sgx_tstd = { path = "../../../deps/rust-sgx-sdk/sgx_tstd", features = ["backtrace", "thread"], optional = true }
+sgx_rand = { path = "../../../deps/rust-sgx-sdk/sgx_rand", optional = true }
+sgx_tcrypto = { path = "../../../deps/rust-sgx-sdk/sgx_tcrypto", optional = true }
+sgx_types = { path = "../../../deps/rust-sgx-sdk/sgx_types", optional = true }
+ext2-rs = { path = "../../../deps/ext2-rs", default-features = false, optional = true }
+ahash = { version="=0.8.6", default-features = false }
[features]
default = ["std"]
std = ["spin", "openssl", "log"]
linux = ["bindings"]
-occlum = ["sgx_tstd", "sgx_rand", "sgx_tcrypto", "sgx_types", "spin", "log"]
+occlum = ["sgx_tstd", "sgx_rand", "sgx_tcrypto", "sgx_types", "spin", "log", "ext2-rs/sgx"]
jinux = []

2
deps/rust-sgx-sdk vendored

@ -1 +1 @@
Subproject commit 67af3f726726c27d6207e8da8b92922f4b513137
Subproject commit 81384ce4d10c67eea5e1ba4ea332087940c1836b

2
deps/sefs vendored

@ -1 +1 @@
Subproject commit 30bc4e02c153c092eca37affdb9c7610411a377f
Subproject commit c2a1fe48b06cf9cedbad7a51d9cb846706630403

@ -1,17 +1,5 @@
From 21b67e210670f6c7cb7e88d9900699ac52b0c3b1 Mon Sep 17 00:00:00 2001
From: ClawSeven <zehuan97@gmail.com>
Date: Tue, 23 Jul 2024 10:47:12 +0800
Subject: [PATCH] Adapt hyper patch with updated sdk
---
sefs-cli/Makefile | 42 ++++++++++++++++++++---------
sefs-cli/app/build.rs | 4 +++
sefs-cli/enclave/Enclave.config.xml | 1 +
sefs-cli/enclave/Enclave.lds | 3 +++
4 files changed, 38 insertions(+), 12 deletions(-)
diff --git a/sefs-cli/Makefile b/sefs-cli/Makefile
index f0eeb8c..50b9917 100644
index f0eeb8c..8c6fae0 100644
--- a/sefs-cli/Makefile
+++ b/sefs-cli/Makefile
@@ -14,13 +14,19 @@ endif
@ -43,14 +31,12 @@ index f0eeb8c..50b9917 100644
######## Enclave Settings ########
-ifneq ($(SGX_MODE), HW)
- Trts_Library_Name := sgx_trts_sim
- Service_Library_Name := sgx_tservice_sim
+ifeq ($(SGX_MODE), HW)
+ Trts_Library_Name := sgx_trts
+ Service_Library_Name := sgx_tservice
+else ifeq ($(SGX_MODE), SIM)
+ Trts_Library_Name := sgx_trts_sim
+ Service_Library_Name := sgx_tservice_sim
Trts_Library_Name := sgx_trts_sim
Service_Library_Name := sgx_tservice_sim
+else ifeq ($(SGX_MODE), HYPER)
+ Trts_Library_Name := sgx_trts_hyper
+ Service_Library_Name := sgx_tservice_hyper
@ -62,14 +48,12 @@ index f0eeb8c..50b9917 100644
-Wl,--version-script=enclave/Enclave.lds
-ifneq ($(SGX_MODE), HW)
- RustEnclave_Name := lib/libsefs-cli_sim.so
- App_Name := bin/sefs-cli_sim
+ifeq ($(SGX_MODE), HW)
+ RustEnclave_Name := lib/libsefs-cli.so
+ App_Name := bin/sefs-cli
+else ifeq ($(SGX_MODE), SIM)
+ RustEnclave_Name := lib/libsefs-cli_sim.so
+ App_Name := bin/sefs-cli_sim
RustEnclave_Name := lib/libsefs-cli_sim.so
App_Name := bin/sefs-cli_sim
+else ifeq ($(SGX_MODE), HYPER)
+ RustEnclave_Name := lib/libsefs-cli_hyper.so
+ App_Name := bin/sefs-cli_hyper
@ -88,20 +72,25 @@ index f0eeb8c..50b9917 100644
######## App Objects ########
diff --git a/sefs-cli/app/build.rs b/sefs-cli/app/build.rs
index 6d54f91..a523baf 100644
index 4bf4c0a..96391ac 100644
--- a/sefs-cli/app/build.rs
+++ b/sefs-cli/app/build.rs
@@ -29,6 +29,10 @@ fn main() {
println!("cargo:rustc-link-lib=dylib=sgx_urts");
println!("cargo:rustc-link-lib=dylib=sgx_uae_service");
}
+ "HYPER" => {
+ println!("cargo:rustc-link-lib=dylib=sgx_urts_hyper");
+ println!("cargo:rustc-link-lib=dylib=sgx_uae_service_hyper");
+ }
_ => {
println!("cargo:rustc-link-lib=dylib=sgx_urts");
println!("cargo:rustc-link-lib=dylib=sgx_uae_service");
@@ -14,6 +14,7 @@ fn main() {
match is_sim.as_ref() {
"SW" | "SIM" => println!("cargo:rustc-link-lib=static=sgx_urts_sim_with_se_event"),
"HW" => println!("cargo:rustc-link-lib=dylib=sgx_urts"),
+ "HYPER" => println!("cargo:rustc-link-lib=static=sgx_urts_hyper_with_se_event"),
_ => println!("cargo:rustc-link-lib=dylib=sgx_urts"), // Treat undefined as HW
}
@@ -21,6 +22,7 @@ fn main() {
match is_sim.as_ref() {
"SW" | "SIM" => println!("cargo:rustc-link-lib=dylib=sgx_uae_service_sim"),
"HW" => println!("cargo:rustc-link-lib=dylib=sgx_uae_service"),
+ "HYPER" => println!("cargo:rustc-link-lib=dylib=sgx_uae_service_hyper"),
_ => println!("cargo:rustc-link-lib=dylib=sgx_uae_service"), // Treat undefined as HW
}
diff --git a/sefs-cli/enclave/Enclave.config.xml b/sefs-cli/enclave/Enclave.config.xml
index 109fcd2..e69f2b6 100644
--- a/sefs-cli/enclave/Enclave.config.xml
@ -130,6 +119,3 @@ index 92bebf2..d93532c 100644
local:
*;
};
--
2.25.1

@ -1,6 +1,6 @@
# Occlum File System Overview
Occlum supports various file systems: e.g., read-only integrity-protected SEFS, writable encrypted SEFS, UnionFS, Ext2, untrusted HostFS, RamFS, and other pseudo filesystems.
Occlum supports various file systems: e.g., read-only integrity-protected SEFS, writable encrypted SEFS, UnionFS, Async-SFS, untrusted HostFS, RamFS, and other pseudo filesystems.
Here is the default FS layout:
@ -16,12 +16,12 @@ Here is the default FS layout:
│ │
└──────┬──────┘
──────────┬───┴─────┬───────┐
│ │ │
│"/dev/shm"│"/proc" │"/dev" │"/ext2"(optional)
┌──┴──┐ ┌───┴──┐ ┌──┴──┐ ┌─┴──┐
│RamFS│ │ProcFS│ │DevFS│ │Ext2│
└─────┘ └─────┘ └─────┘ └────┘
┌────────┬───┴─────┬───────┐
│ │ │
│"/sfs" │"/dev/shm"│"/proc" │"/dev"
┌───┴─┐ ┌──┴──┐ ┌───┴──┐ ┌──┴──┐
│A-SFS│ │RamFS│ │ProcFS│ │DevFS│
└─────┘ └─────┘ └─────┘ └────┘
```
## SEFS
@ -103,19 +103,33 @@ Here is the configuration of rootfs, the first item is the lower layer RO-SEFS a
source: ./run/mount/__ROOT
```
## Ext2
The [Ext2](https://github.com/liqinggd/ext2-rs) is an independent filesystem Rust crate that resembles Linux's Ext2. For the sake of performance and security, it utilizes [SwornDisk](https://github.com/asterinas/mlsdisk) as its underlying block device. Compared with SEFS, the file I/O performance of "Ext2+SwornDisk" is superior. If your App's performance is highly dependent on file I/O, it is recommended to enable Ext2 in Occlum.json.
## Async-SFS
The Async-SFS is an asynchronous filesystem built with Rust's asynchronous programming facilities, making it fast and concurrent. It is mounted at `/sfs` by default. To achieve high performance with security, it uses JinDisk as the underlying data storage and sends async I/O requests to it.
To accelerate block I/O, a page cache is introduced between Async-SFS and JinDisk, caching all block I/O. Thanks to the page cache and JinDisk, benchmark results (e.g., FIO and Filebench) are significantly better than SEFS's. If your App's performance is highly dependent on disk I/O, it is recommended to use Async-SFS.
```
"mount": [{
"target": "/ext2",
"type": "ext2",
"options": {
"disk_size": "10GB"
}
}]
┌───────────┐
│ │
│ Async-SFS │
│ │
└─────┬─────┘
┌─────┴─────┐
│ │
│ Page Cache│
│ │
└─────┬─────┘
┌─────┴─────┐
│ │
│ JinDisk │
│ │
└───────────┘
```
The configuration for enabling Ext2 is shown above; you can specify your mount point via `target`, and the disk size that Ext2 manages via `options.disk_size`.
Currently, there are some limitations of Async-SFS:
1. The maximum size of the file is 4GB.
2. The maximum size of FS is 16TB.
## HostFS
The HostFS is used for convenient data exchange between the LibOS and the host OS. It simply wraps untrusted host OS files to implement the FS functionality, so data is transferred between the LibOS and the host OS directly, without any protection or validation. A sketch of mounting a HostFS follows below.
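A HostFS mount can be added to `Occlum.json` in the same jq style used by the demo scripts elsewhere in this repository; the target and source paths here are illustrative:

```bash
new_json="$(jq '.mount += [{"target": "/host", "type": "hostfs", "source": "./shared"}]' Occlum.json)" && \
    echo "${new_json}" > Occlum.json
```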

@ -72,13 +72,6 @@ The template of `Occlum.json` is shown below.
},
// Features
"feature": {
// Determines the use of the IO_Uring feature in Occlum for network I/O operations.
// Enabling IO_Uring feature can improve network I/O performance.
//
// "io_uring": 0 - Disables IO_Uring; network I/O uses Ocall instead.
// "io_uring": 1 - Enables IO_Uring with a single IO_Uring instance.
// "io_uring": n (1 < n <= 16) - Enables IO_Uring with 'n' IO_Uring instances.
"io_uring": 0,
// Whether to turn on AMX feature in Occlum
// Occlum supports AMX instruction running inside the enclave when user enables it
//
@ -147,13 +140,6 @@ The template of `Occlum.json` is shown below.
{
"target": "/dev",
"type": "devfs"
},
{
"target": "/ext2",
"type": "ext2",
"options": {
"disk_size": "10GB"
}
}
]
}

@ -15,7 +15,7 @@ For every application to be running in Occlum (TEE env), all the running require
| |-- opt
| |-- proc
| |-- root
| |-- sbin
| |-- sfs
| |-- sys
| `-- tmp
|-- initfs // Occlum init file system

@ -38,7 +38,6 @@
"feature": {
"amx": 0,
"pkru": 0,
"io_uring": 0,
"enable_edmm": false,
"enable_posix_shm": false
},

@ -13,5 +13,3 @@ targets:
- root
- sys
- tmp
- sbin
- ext2

@ -4,12 +4,9 @@ enclave {
from "sgx_tstdc.edl" import *;
from "sgx_tstd.edl" import *;
from "sgx_tprotected_fs.edl" import *;
from "sgx_thread.edl" import *;
from "sgx_net.edl" import *;
from "sgx_occlum_utils.edl" import *;
from "sgx_vdso_time_ocalls.edl" import *;
from "sgx_thread.edl" import *;
from "sgx_io_uring_ocalls.edl" import *;
include "sgx_quote.h"
include "occlum_edl_types.h"

674
src/libos/Cargo.lock generated

@ -10,22 +10,16 @@ dependencies = [
"atomic",
"bitflags",
"bitvec 1.0.1",
"byteorder",
"ctor",
"derive_builder",
"downcast-rs",
"errno",
"ext2-rs",
"goblin",
"intrusive-collections",
"io-uring-callback",
"itertools",
"keyable-arc",
"lazy_static",
"log",
"memoffset 0.6.5",
"modular-bitfield",
"num_enum",
"rcore-fs",
"rcore-fs-devfs",
"rcore-fs-mountfs",
@ -36,9 +30,8 @@ dependencies = [
"resolv-conf",
"ringbuf",
"scroll",
"serde 1.0.104",
"serde",
"serde_json",
"sgx-untrusted-alloc",
"sgx_cov",
"sgx_tcrypto",
"sgx_trts",
@ -46,22 +39,9 @@ dependencies = [
"sgx_tstd",
"sgx_types",
"spin 0.7.1",
"sworndisk-v2",
"vdso-time",
]
[[package]]
name = "ahash"
version = "0.8.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "91429305e9f0a25f6205c5b8e0d2db09e0708a7a6df0f42212bb56c32c8ac97a"
dependencies = [
"cfg-if",
"once_cell",
"version_check",
"zerocopy",
]
[[package]]
name = "aligned"
version = "0.4.1"
@ -71,26 +51,6 @@ dependencies = [
"as-slice",
]
[[package]]
name = "allocator-api2"
version = "0.2.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0942ffc6dcaadf03badf6e6a2d0228460359d5e34b57ccdc720b7382dfbd5ec5"
[[package]]
name = "anymap"
version = "1.0.0-beta.2"
source = "git+https://github.com/lucassong-mh/anymap?branch=1.0.0-beta.2-patched#18f6555cf93ee5609b883feb6d1ec46ca14f2a78"
dependencies = [
"hashbrown",
]
[[package]]
name = "array-init"
version = "2.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3d62b7694a562cdf5a74227903507c56ab2cc8bdd1f781ed5cb4cf9c9f810bfc"
[[package]]
name = "as-slice"
version = "0.2.1"
@ -109,15 +69,6 @@ dependencies = [
"autocfg 1.1.0",
]
[[package]]
name = "atomic-polyfill"
version = "1.0.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8cf2bce30dfe09ef0bfaef228b9d414faaf7e563035494d7fe092dba54b300f4"
dependencies = [
"critical-section",
]
[[package]]
name = "autocfg"
version = "0.1.8"
@ -139,12 +90,6 @@ version = "1.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"
[[package]]
name = "bittle"
version = "0.5.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "14173f083171cee3f00fbbfa3d3d2492401c25c015874aad543bbf829d6389f8"
[[package]]
name = "bitvec"
version = "0.17.4"
@ -167,12 +112,6 @@ dependencies = [
"wyz",
]
[[package]]
name = "byteorder"
version = "1.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b"
[[package]]
name = "cc"
version = "1.0.73"
@ -194,33 +133,6 @@ dependencies = [
"bitflags",
]
[[package]]
name = "cobs"
version = "0.2.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "67ba02a97a2bd10f4b59b25c7973101c79642302776489e030cd13cdab09ed15"
[[package]]
name = "critical-section"
version = "1.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7059fff8937831a9ae6f0fe4d658ffabf58f2ca96aa9dec1c889f936f705f216"
[[package]]
name = "crossbeam-queue"
version = "0.3.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "df0346b5d5e76ac2fe4e327c5fd1118d6be7c51dfb18f9b7922923f287471e35"
dependencies = [
"crossbeam-utils",
]
[[package]]
name = "crossbeam-utils"
version = "0.8.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "248e3bacc7dc6baa3b21e405ee045c3047101a49145e7e9eca583ab4c2ca5345"
[[package]]
name = "ctor"
version = "0.1.23"
@ -228,7 +140,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cdffe87e1d521a10f9696f833fe502293ea446d7f256c06128293a4119bdf4cb"
dependencies = [
"quote",
"syn 1.0.99",
"syn",
]
[[package]]
@ -237,18 +149,8 @@ version = "0.10.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0d706e75d87e35569db781a9b5e2416cff1236a47ed380831f959382ccd5f858"
dependencies = [
"darling_core 0.10.2",
"darling_macro 0.10.2",
]
[[package]]
name = "darling"
version = "0.13.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a01d95850c592940db9b8194bc39f4bc0e89dee5c4265e4b1807c34a9aba453c"
dependencies = [
"darling_core 0.13.4",
"darling_macro 0.13.4",
"darling_core",
"darling_macro",
]
[[package]]
@ -261,22 +163,8 @@ dependencies = [
"ident_case",
"proc-macro2",
"quote",
"strsim 0.9.3",
"syn 1.0.99",
]
[[package]]
name = "darling_core"
version = "0.13.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "859d65a907b6852c9361e3185c862aae7fafd2887876799fa55f5f99dc40d610"
dependencies = [
"fnv",
"ident_case",
"proc-macro2",
"quote",
"strsim 0.10.0",
"syn 1.0.99",
"strsim",
"syn",
]
[[package]]
@ -285,20 +173,9 @@ version = "0.10.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d9b5a2f4ac4969822c62224815d069952656cadc7084fdca9751e6d959189b72"
dependencies = [
"darling_core 0.10.2",
"darling_core",
"quote",
"syn 1.0.99",
]
[[package]]
name = "darling_macro"
version = "0.13.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9c972679f83bdf9c42bd905396b6c3588a843a17f0f16dfcfa3e2c5d57441835"
dependencies = [
"darling_core 0.13.4",
"quote",
"syn 1.0.99",
"syn",
]
[[package]]
@ -307,11 +184,11 @@ version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a2658621297f2cf68762a6f7dc0bb7e1ff2cfd6583daef8ee0fed6f7ec468ec0"
dependencies = [
"darling 0.10.2",
"darling",
"derive_builder_core",
"proc-macro2",
"quote",
"syn 1.0.99",
"syn",
]
[[package]]
@ -320,18 +197,12 @@ version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2791ea3e372c8495c0bc2033991d76b512cd799d07491fbd6890124db9458bef"
dependencies = [
"darling 0.10.2",
"darling",
"proc-macro2",
"quote",
"syn 1.0.99",
"syn",
]
[[package]]
name = "downcast-rs"
version = "1.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9ea835d29036a4087793836fa931b08837ad5e957da9e23886b29586fb9b6650"
[[package]]
name = "either"
version = "1.8.0"
@ -348,55 +219,6 @@ dependencies = [
"sgx_tstd",
]
[[package]]
name = "ext-trait"
version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d772df1c1a777963712fb68e014235e80863d6a91a85c4e06ba2d16243a310e5"
dependencies = [
"ext-trait-proc_macros",
]
[[package]]
name = "ext-trait-proc_macros"
version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1ab7934152eaf26aa5aa9f7371408ad5af4c31357073c9e84c3b9d7f11ad639a"
dependencies = [
"proc-macro2",
"quote",
"syn 1.0.99",
]
[[package]]
name = "ext2-rs"
version = "0.1.0"
dependencies = [
"bitflags",
"bitvec 1.0.1",
"cfg-if",
"inherit-methods-macro",
"log",
"lru",
"pod",
"rcore-fs",
"sgx_libc",
"sgx_trts",
"sgx_tstd",
"sgx_types",
"spin 0.9.8",
"static_assertions 1.1.0",
]
[[package]]
name = "extension-traits"
version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a296e5a895621edf9fa8329c83aa1cb69a964643e36cf54d8d7a69b789089537"
dependencies = [
"ext-trait",
]
[[package]]
name = "fnv"
version = "1.0.7"
@ -415,67 +237,6 @@ version = "2.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c"
[[package]]
name = "futures"
version = "0.3.28"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "23342abe12aba583913b2e62f22225ff9c950774065e4bfb61a19cd9770fec40"
dependencies = [
"futures-channel",
"futures-core",
"futures-io",
"futures-sink",
"futures-task",
"futures-util",
]
[[package]]
name = "futures-channel"
version = "0.3.28"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "955518d47e09b25bbebc7a18df10b81f0c766eaf4c4f1cccef2fca5f2a4fb5f2"
dependencies = [
"futures-core",
"futures-sink",
]
[[package]]
name = "futures-core"
version = "0.3.28"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4bca583b7e26f571124fe5b7561d49cb2868d79116cfa0eefce955557c6fee8c"
[[package]]
name = "futures-io"
version = "0.3.28"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4fff74096e71ed47f8e023204cfd0aa1289cd54ae5430a9523be060cdb849964"
[[package]]
name = "futures-sink"
version = "0.3.28"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f43be4fe21a13b9781a69afa4985b0f6ee0e1afab2c6f454a8cf30e2b2237b6e"
[[package]]
name = "futures-task"
version = "0.3.28"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "76d3d132be6c0e6aa1534069c705a74a5997a356c0dc2f86a47765e5617c5b65"
[[package]]
name = "futures-util"
version = "0.3.28"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "26b01e40b772d54cf6c6d721c1d1abd0647a0106a12ecaa1c186273392a69533"
dependencies = [
"futures-core",
"futures-sink",
"futures-task",
"pin-project-lite",
"pin-utils",
]
[[package]]
name = "goblin"
version = "0.5.4"
@ -487,61 +248,16 @@ dependencies = [
"scroll",
]
[[package]]
name = "hash32"
version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b0c35f58762feb77d74ebe43bdbc3210f09be9fe6742234d573bacc26ed92b67"
dependencies = [
"byteorder",
]
[[package]]
name = "hashbrown"
version = "0.14.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "290f1a1d9242c78d09ce40a5e87e7554ee637af1351968159f4952f028f75604"
dependencies = [
"ahash",
"allocator-api2",
"serde 1.0.188",
]
[[package]]
name = "hashbrown_tstd"
version = "0.12.0"
[[package]]
name = "heapless"
version = "0.7.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cdc6457c0eb62c71aac4bc17216026d8410337c4126773b9c5daba343f17964f"
dependencies = [
"atomic-polyfill",
"hash32",
"rustc_version",
"serde 1.0.188",
"spin 0.9.8",
"stable_deref_trait",
]
[[package]]
name = "ident_case"
version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39"
[[package]]
name = "inherit-methods-macro"
version = "0.1.0"
source = "git+https://github.com/asterinas/inherit-methods-macro?rev=98f7e3e#98f7e3eb9efdac98faf5a7076f154f30894b9b02"
dependencies = [
"darling 0.13.4",
"proc-macro2",
"quote",
"syn 1.0.99",
]
[[package]]
name = "intrusive-collections"
version = "0.9.4"
@ -551,36 +267,6 @@ dependencies = [
"memoffset 0.5.6",
]
[[package]]
name = "io-uring"
version = "0.5.9"
dependencies = [
"bitflags",
"libc",
"sgx_libc",
"sgx_trts",
"sgx_tstd",
"sgx_types",
]
[[package]]
name = "io-uring-callback"
version = "0.1.0"
dependencies = [
"atomic",
"cfg-if",
"futures",
"io-uring",
"lazy_static",
"libc",
"lock_api",
"log",
"sgx_libc",
"sgx_tstd",
"slab",
"spin 0.7.1",
]
[[package]]
name = "itertools"
version = "0.10.3"
@ -597,10 +283,6 @@ dependencies = [
"sgx_tstd",
]
[[package]]
name = "keyable-arc"
version = "0.1.0"
[[package]]
name = "lazy_static"
version = "1.4.0"
@ -610,45 +292,11 @@ dependencies = [
"spin 0.5.2",
]
[[package]]
name = "lending-iterator"
version = "0.1.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bc07588c853b50689205fb5c00498aa681d89828e0ce8cbd965ebc7a5d8ae260"
dependencies = [
"extension-traits",
"lending-iterator-proc_macros",
"macro_rules_attribute",
"never-say-never",
"nougat",
"polonius-the-crab",
]
[[package]]
name = "lending-iterator-proc_macros"
version = "0.1.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b5445dd1c0deb1e97b8a16561d17fc686ca83e8411128fb036e9668a72d51b1d"
dependencies = [
"proc-macro2",
"quote",
"syn 1.0.99",
]
[[package]]
name = "libc"
version = "0.2.153"
version = "0.2.132"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9c198f91728a82281a64e1f4f9eeb25d82cb32a5de251c6bd1b5154d63a8e7bd"
[[package]]
name = "lock_api"
version = "0.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dd96ffd135b2fd7b973ac026d28085defbe8983df057ced3eb4f2130b0831312"
dependencies = [
"scopeguard",
]
checksum = "8371e4e5341c3a96db127eb2465ac681ced4c433e01dd0e938adbef26ba93ba5"
[[package]]
name = "log"
@ -659,31 +307,6 @@ dependencies = [
"cfg-if",
]
[[package]]
name = "lru"
version = "0.12.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d3262e75e648fce39813cb56ac41f3c3e3f65217ebf3844d818d1f9398cfb0dc"
dependencies = [
"hashbrown",
]
[[package]]
name = "macro_rules_attribute"
version = "0.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cf0c9b980bf4f3a37fd7b1c066941dd1b1d0152ce6ee6e8fe8c49b9f6810d862"
dependencies = [
"macro_rules_attribute-proc_macro",
"paste",
]
[[package]]
name = "macro_rules_attribute-proc_macro"
version = "0.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "58093314a45e00c77d5c508f76e77c3396afbbc0d01506e7fae47b018bac2b1d"
[[package]]
name = "memoffset"
version = "0.5.6"
@ -720,126 +343,20 @@ checksum = "5a7d5f7076603ebc68de2dc6a650ec331a062a13abaa346975be747bbfa4b789"
dependencies = [
"proc-macro2",
"quote",
"syn 1.0.99",
"syn",
]
[[package]]
name = "never-say-never"
version = "6.6.666"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cf5a574dadd7941adeaa71823ecba5e28331b8313fb2e1c6a5c7e5981ea53ad6"
[[package]]
name = "nougat"
version = "0.2.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "97b57b9ced431322f054fc673f1d3c7fa52d80efd9df74ad2fc759f044742510"
dependencies = [
"macro_rules_attribute",
"nougat-proc_macros",
]
[[package]]
name = "nougat-proc_macros"
version = "0.2.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c84f77a45e99a2f9b492695d99e1c23844619caa5f3e57647cffacad773ca257"
dependencies = [
"proc-macro2",
"quote",
"syn 1.0.99",
]
[[package]]
name = "num_enum"
version = "0.5.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1f646caf906c20226733ed5b1374287eb97e3c2a5c227ce668c1f2ce20ae57c9"
dependencies = [
"num_enum_derive",
]
[[package]]
name = "num_enum_derive"
version = "0.5.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dcbff9bc912032c62bf65ef1d5aea88983b420f4f839db1e9b0c281a25c9c799"
dependencies = [
"proc-macro2",
"quote",
"syn 1.0.99",
]
[[package]]
name = "once_cell"
version = "1.19.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92"
[[package]]
name = "paste"
version = "1.0.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a"
[[package]]
name = "pin-project-lite"
version = "0.2.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8afb450f006bf6385ca15ef45d71d2288452bc3683ce2e2cacc0d18e4be60b58"
[[package]]
name = "pin-utils"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184"
[[package]]
name = "plain"
version = "0.2.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b4596b6d070b27117e987119b4dac604f3c58cfb0b191112e24771b2faeac1a6"
[[package]]
name = "pod"
version = "0.1.0"
source = "git+https://github.com/asterinas/pod?rev=d7dba56#d7dba56cc202a10d483b60aba4f734b1f49cb37b"
dependencies = [
"pod-derive",
]
[[package]]
name = "pod-derive"
version = "0.1.0"
source = "git+https://github.com/asterinas/pod?rev=d7dba56#d7dba56cc202a10d483b60aba4f734b1f49cb37b"
dependencies = [
"proc-macro2",
"quote",
"syn 1.0.99",
]
[[package]]
name = "polonius-the-crab"
version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c2a69ee997a6282f8462abf1e0d8c38c965e968799e912b3bed8c9e8a28c2f9f"
[[package]]
name = "postcard"
version = "1.0.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c9ee729232311d3cd113749948b689627618133b1c5012b77342c1950b25eaeb"
dependencies = [
"cobs",
"heapless",
"serde 1.0.188",
]
[[package]]
name = "proc-macro2"
version = "1.0.78"
version = "1.0.43"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e2422ad645d89c99f8f3e6b88a9fdeca7fabeac836b1002371c4367c8f984aae"
checksum = "0a2ca2c61bc9f3d74d2886294ab7b9853abd9c1ad903a3ac7815c58989bb7bab"
dependencies = [
"unicode-ident",
]
@ -856,9 +373,9 @@ source = "git+https://github.com/mesalock-linux/quick-error-sgx.git#468bf2cce746
[[package]]
name = "quote"
version = "1.0.35"
version = "1.0.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "291ec9ab5efd934aaf503a6466c5d5251535d108ee747472c3977cc5acc868ef"
checksum = "bbe448f377a7d6961e30f5955f9b8d106c3f5e449d493ee1b125c1d43c2b5179"
dependencies = [
"proc-macro2",
]
@ -1078,27 +595,12 @@ dependencies = [
"sgx_tstd",
]
[[package]]
name = "rustc_version"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366"
dependencies = [
"semver",
]
[[package]]
name = "ryu"
version = "1.0.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4501abdff3ae82a1c1b477a17252eb69cee9e66eb915c1abaa4f44d873df9f09"
[[package]]
name = "scopeguard"
version = "1.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49"
[[package]]
name = "scroll"
version = "0.11.0"
@ -1116,50 +618,24 @@ checksum = "bdbda6ac5cd1321e724fa9cee216f3a61885889b896f073b8f82322789c5250e"
dependencies = [
"proc-macro2",
"quote",
"syn 1.0.99",
"syn",
]
[[package]]
name = "semver"
version = "1.0.23"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "61697e0a1c7e512e84a621326239844a24d8207b4669b41bc18b32ea5cbf988b"
[[package]]
name = "serde"
version = "1.0.104"
dependencies = [
"serde_derive 1.0.104",
"serde_derive",
"sgx_tstd",
]
[[package]]
name = "serde"
version = "1.0.188"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cf9e0fcba69a370eed61bcf2b728575f726b50b55cba78064753d708ddc7549e"
dependencies = [
"serde_derive 1.0.188",
]
[[package]]
name = "serde_derive"
version = "1.0.104"
dependencies = [
"proc-macro2",
"quote",
"syn 1.0.99",
]
[[package]]
name = "serde_derive"
version = "1.0.188"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4eca7ac642d82aa35b60049a6eccb4be6be75e599bd2e9adb5f875a737654af2"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.50",
"syn",
]
[[package]]
@ -1168,27 +644,10 @@ version = "1.0.40"
dependencies = [
"itoa",
"ryu",
"serde 1.0.104",
"serde",
"sgx_tstd",
]
[[package]]
name = "sgx-untrusted-alloc"
version = "0.1.0"
dependencies = [
"cfg-if",
"errno",
"intrusive-collections",
"lazy_static",
"libc",
"log",
"sgx_libc",
"sgx_trts",
"sgx_tstd",
"sgx_types",
"spin 0.7.1",
]
[[package]]
name = "sgx_alloc"
version = "1.1.6"
@ -1294,15 +753,6 @@ dependencies = [
"sgx_build_helper",
]
[[package]]
name = "slab"
version = "0.4.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67"
dependencies = [
"autocfg 1.1.0",
]
[[package]]
name = "spin"
version = "0.5.2"
@ -1315,15 +765,6 @@ version = "0.7.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "13287b4da9d1207a4f4929ac390916d64eacfe236a487e9a9f5b3be392be5162"
[[package]]
name = "spin"
version = "0.9.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67"
dependencies = [
"lock_api",
]
[[package]]
name = "stable_deref_trait"
version = "1.2.0"
@ -1348,38 +789,6 @@ version = "0.9.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6446ced80d6c486436db5c078dde11a9f73d42b57fb273121e160b84f63d894c"
[[package]]
name = "strsim"
version = "0.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623"
[[package]]
name = "sworndisk-v2"
version = "0.1.0"
dependencies = [
"ahash",
"anymap",
"array-init",
"bittle",
"crossbeam-queue",
"ext2-rs",
"hashbrown",
"inherit-methods-macro",
"lending-iterator",
"log",
"lru",
"pod",
"postcard",
"serde 1.0.188",
"sgx_rand",
"sgx_tcrypto",
"sgx_tstd",
"sgx_types",
"spin 0.9.8",
"static_assertions 1.1.0",
]
[[package]]
name = "syn"
version = "1.0.99"
@ -1391,17 +800,6 @@ dependencies = [
"unicode-ident",
]
[[package]]
name = "syn"
version = "2.0.50"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "74f1bdc9872430ce9b75da68329d1c1746faf50ffac5f19e02b71e37ff881ffb"
dependencies = [
"proc-macro2",
"quote",
"unicode-ident",
]
[[package]]
name = "tap"
version = "1.0.1"
@ -1437,12 +835,6 @@ dependencies = [
"sgx_types",
]
[[package]]
name = "version_check"
version = "0.9.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f"
[[package]]
name = "winapi"
version = "0.3.9"
@ -1473,23 +865,3 @@ checksum = "05f360fc0b24296329c78fda852a1e9ae82de9cf7b27dae4b7f62f118f77b9ed"
dependencies = [
"tap",
]
[[package]]
name = "zerocopy"
version = "0.7.32"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "74d4d3961e53fa4c9a25a8637fc2bfaf2595b3d3ae34875568a5cf64787716be"
dependencies = [
"zerocopy-derive",
]
[[package]]
name = "zerocopy-derive"
version = "0.7.32"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.50",
]

@ -34,17 +34,8 @@ ctor = "0.1"
regex = { git = "https://github.com/mesalock-linux/regex-sgx", default-features = false, features = ["std", "unicode", "mesalock_sgx"] }
goblin = { version = "0.5.4", default-features = false, features = ["elf64", "elf32", "endian_fd"] }
intrusive-collections = "0.9"
modular-bitfield = "0.11.2"
sworndisk-v2 = { path = "../../deps/mlsdisk/core", default-features = false, features = ["occlum"] }
ext2-rs = { path = "../../deps/ext2-rs", default-features = false, features = ["sgx"] }
sgx-untrusted-alloc = { path = "./crates/sgx-untrusted-alloc", features = ["sgx"]}
io-uring-callback = { path = "./crates/io-uring-callback", features = ["sgx"]}
num_enum = { version = "0.5", default-features = false }
keyable-arc = { path = "./crates/keyable-arc" }
downcast-rs = { version = "1.2.0", default-features = false }
spin = "0.7"
byteorder = { version = "1.3.2", default-features = false }
modular-bitfield = "0.11.2"
[patch.'https://github.com/apache/teaclave-sgx-sdk.git']
sgx_tstd = { path = "../../deps/rust-sgx-sdk/sgx_tstd" }
@ -63,7 +54,7 @@ kernel_heap_monitor = []# Kernel heap usage tracking. With overhead.
[target.'cfg(not(target_env = "sgx"))'.dependencies]
sgx_types = { path = "../../deps/rust-sgx-sdk/sgx_types" }
sgx_tstd = { path = "../../deps/rust-sgx-sdk/sgx_tstd", features = ["backtrace", "thread"] }
sgx_tstd = { path = "../../deps/rust-sgx-sdk/sgx_tstd", features = ["backtrace"] }
sgx_trts = { path = "../../deps/rust-sgx-sdk/sgx_trts" }
sgx_tse = { path = "../../deps/rust-sgx-sdk/sgx_tse" }
sgx_tcrypto = { path = "../../deps/rust-sgx-sdk/sgx_tcrypto" }

@ -64,8 +64,7 @@ LIBOS_CORE_A := $(OBJ_DIR)/libos/lib/lib$(LIBOS_CORE_LIB_NAME).a
LIBOS_CORE_RS_A := $(OBJ_DIR)/libos/lib/libocclum_libos_core_rs.a
# All source code
RUST_SRCS := $(wildcard src/*.rs src/*/*.rs src/*/*/*.rs src/*/*/*/*.rs src/*/*/*/*/*.rs \
crates/*/src/*.rs crates/*/src/*/*.rs crates/*/src/*/*/*.rs crates/*/src/*/*/*/*.rs)
RUST_SRCS := $(wildcard src/*.rs src/*/*.rs src/*/*/*.rs src/*/*/*/*.rs src/*/*/*/*/*.rs)
RUST_TARGET_DIR := $(OBJ_DIR)/libos/cargo-target
RUST_OUT_DIR := $(OBJ_DIR)/libos/lib
EDL_C_SRCS := $(addprefix $(OBJ_DIR)/libos/,$(SRC_OBJ)/Enclave_t.c $(SRC_OBJ)/Enclave_t.h)
@ -167,8 +166,7 @@ $(OBJ_DIR)/libos/$(SRC_OBJ)/Enclave_t.c: $(SGX_EDGER8R) ../Enclave.edl
$(SGX_EDGER8R) $(SGX_EDGER8R_MODE) --trusted $(CUR_DIR)/../Enclave.edl \
--search-path $(SGX_SDK)/include \
--search-path $(RUST_SGX_SDK_DIR)/edl \
--search-path $(CRATES_DIR)/vdso-time/ocalls \
--search-path $(PROJECT_DIR)/deps/io-uring/ocalls
--search-path $(CRATES_DIR)/vdso-time/ocalls
@echo "GEN <= $@"
$(C_OBJS):$(OBJ_DIR)/libos/$(SRC_OBJ)/%.o: src/%.c
@ -190,7 +188,6 @@ format-c: $(C_SRCS) $(CXX_SRCS)
format-rust: $(RUST_SRCS)
@$(call format-rust)
@cd crates && $(call format-rust)
format-check: format-check-c format-check-rust
@ -199,7 +196,6 @@ format-check-c: $(C_SRCS) $(CXX_SRCS)
format-check-rust: $(RUST_SRCS)
@$(call format-check-rust)
@cd crates && $(call format-check-rust)
COV_TARGET_DIR := $(RUST_TARGET_DIR)/debug/deps
DEPS_DIR := $(shell pwd)/../../deps

@ -1,24 +0,0 @@
[workspace]
members = [
"errno",
"io-uring-callback",
"keyable-arc",
"object-id",
"sgx-untrusted-alloc",
"vdso-time"
]
# Default members can run on Linux; non-default members can only run inside SGX.
default-members = [
"errno",
"io-uring-callback",
"keyable-arc",
"object-id",
"sgx-untrusted-alloc",
"vdso-time"
]
exclude = [
"test",
]

@ -1,7 +0,0 @@
MAIN_MAKEFILE := $(firstword $(MAKEFILE_LIST))
INCLUDE_MAKEFILE := $(lastword $(MAKEFILE_LIST))
CURRENT_DIR := $(shell dirname $(realpath $(MAIN_MAKEFILE)))
ROOT_DIR := $(realpath $(shell dirname $(realpath $(INCLUDE_MAKEFILE)))/../../../)
RUST_SGX_SDK_DIR := $(ROOT_DIR)/deps/rust-sgx-sdk
LIBOS_DIR := $(ROOT_DIR)/src/libos
LIBOS_CRATES_DIR := $(LIBOS_DIR)/crates

@ -116,8 +116,8 @@ mod result;
mod to_errno;
pub use self::backtrace::ErrorBacktrace;
pub use self::errno::Errno::*;
pub use self::errno::*;
pub use self::errno::Errno::*;
pub use self::error::{Error, ErrorLocation};
pub use self::result::{Result, ResultExt};
pub use self::to_errno::ToErrno;
@ -130,18 +130,13 @@ macro_rules! errno {
let msg: &'static str = $error_msg;
(errno, msg)
};
let error = $crate::Error::embedded(
inner_error,
Some($crate::ErrorLocation::new(file!(), line!())),
);
let error =
$crate::Error::embedded(inner_error, Some($crate::ErrorLocation::new(file!(), line!())));
error
}};
($error_expr: expr) => {{
let inner_error = $error_expr;
let error = $crate::Error::boxed(
inner_error,
Some($crate::ErrorLocation::new(file!(), line!())),
);
let error = $crate::Error::boxed(inner_error, Some($crate::ErrorLocation::new(file!(), line!())));
error
}};
}

@ -1,400 +0,0 @@
# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
version = 3
[[package]]
name = "atomic"
version = "0.5.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c59bdb34bc650a32731b31bd8f0829cc15d24a708ee31559e0bb34f2bc320cba"
[[package]]
name = "autocfg"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa"
[[package]]
name = "bitflags"
version = "1.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"
[[package]]
name = "bitflags"
version = "2.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "327762f6e5a765692301e5bb513e0d9fef63be86bbc14528052b1cd3e6f03e07"
[[package]]
name = "cc"
version = "1.0.83"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f1174fb0b6ec23863f8b971027804a42614e347eafb0a95bf0b12cdae21fc4d0"
dependencies = [
"libc",
]
[[package]]
name = "cfg-if"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
[[package]]
name = "errno"
version = "0.3.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f258a7194e7f7c2a7837a8913aeab7fd8c383457034fa20ce4dd3dcb813e8eb8"
dependencies = [
"libc",
"windows-sys",
]
[[package]]
name = "fastrand"
version = "2.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "25cbce373ec4653f1a01a31e8a5e5ec0c622dc27ff9c4e6606eefef5cbbed4a5"
[[package]]
name = "futures"
version = "0.3.29"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "da0290714b38af9b4a7b094b8a37086d1b4e61f2df9122c3cad2577669145335"
dependencies = [
"futures-channel",
"futures-core",
"futures-io",
"futures-sink",
"futures-task",
"futures-util",
]
[[package]]
name = "futures-channel"
version = "0.3.29"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ff4dd66668b557604244583e3e1e1eada8c5c2e96a6d0d6653ede395b78bbacb"
dependencies = [
"futures-core",
"futures-sink",
]
[[package]]
name = "futures-core"
version = "0.3.29"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "eb1d22c66e66d9d72e1758f0bd7d4fd0bee04cad842ee34587d68c07e45d088c"
[[package]]
name = "futures-io"
version = "0.3.29"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8bf34a163b5c4c52d0478a4d757da8fb65cabef42ba90515efee0f6f9fa45aaa"
[[package]]
name = "futures-sink"
version = "0.3.29"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e36d3378ee38c2a36ad710c5d30c2911d752cb941c00c72dbabfb786a7970817"
[[package]]
name = "futures-task"
version = "0.3.29"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "efd193069b0ddadc69c46389b740bbccdd97203899b48d09c5f7969591d6bae2"
[[package]]
name = "futures-util"
version = "0.3.29"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a19526d624e703a3179b3d322efec918b6246ea0fa51d41124525f00f1cc8104"
dependencies = [
"futures-core",
"futures-sink",
"futures-task",
"pin-project-lite",
"pin-utils",
]
[[package]]
name = "hashbrown_tstd"
version = "0.12.0"
[[package]]
name = "io-uring"
version = "0.5.9"
dependencies = [
"bitflags 1.3.2",
"libc",
"sgx_libc",
"sgx_trts",
"sgx_tstd",
"sgx_types",
]
[[package]]
name = "io-uring-callback"
version = "0.1.0"
dependencies = [
"atomic",
"cfg-if",
"futures",
"io-uring",
"lazy_static",
"libc",
"lock_api",
"log",
"sgx_libc",
"sgx_tstd",
"slab",
"spin 0.7.1",
"tempfile",
]
[[package]]
name = "lazy_static"
version = "1.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"
dependencies = [
"spin 0.5.2",
]
[[package]]
name = "libc"
version = "0.2.150"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "89d92a4743f9a61002fae18374ed11e7973f530cb3a3255fb354818118b2203c"
[[package]]
name = "linux-raw-sys"
version = "0.4.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "969488b55f8ac402214f3f5fd243ebb7206cf82de60d3172994707a4bcc2b829"
[[package]]
name = "lock_api"
version = "0.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dd96ffd135b2fd7b973ac026d28085defbe8983df057ced3eb4f2130b0831312"
dependencies = [
"scopeguard",
]
[[package]]
name = "log"
version = "0.4.20"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f"
[[package]]
name = "pin-project-lite"
version = "0.2.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8afb450f006bf6385ca15ef45d71d2288452bc3683ce2e2cacc0d18e4be60b58"
[[package]]
name = "pin-utils"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184"
[[package]]
name = "redox_syscall"
version = "0.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4722d768eff46b75989dd134e5c353f0d6296e5aaa3132e776cbdb56be7731aa"
dependencies = [
"bitflags 1.3.2",
]
[[package]]
name = "rustix"
version = "0.38.25"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dc99bc2d4f1fed22595588a013687477aedf3cdcfb26558c559edb67b4d9b22e"
dependencies = [
"bitflags 2.4.1",
"errno",
"libc",
"linux-raw-sys",
"windows-sys",
]
[[package]]
name = "scopeguard"
version = "1.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49"
[[package]]
name = "sgx_alloc"
version = "1.1.5"
[[package]]
name = "sgx_backtrace_sys"
version = "1.1.5"
dependencies = [
"cc",
"sgx_build_helper",
"sgx_libc",
]
[[package]]
name = "sgx_build_helper"
version = "1.1.5"
[[package]]
name = "sgx_demangle"
version = "1.1.5"
[[package]]
name = "sgx_libc"
version = "1.1.5"
dependencies = [
"sgx_types",
]
[[package]]
name = "sgx_tprotected_fs"
version = "1.1.5"
dependencies = [
"sgx_trts",
"sgx_types",
]
[[package]]
name = "sgx_trts"
version = "1.1.5"
dependencies = [
"sgx_libc",
"sgx_types",
]
[[package]]
name = "sgx_tstd"
version = "1.1.5"
dependencies = [
"hashbrown_tstd",
"sgx_alloc",
"sgx_backtrace_sys",
"sgx_demangle",
"sgx_libc",
"sgx_tprotected_fs",
"sgx_trts",
"sgx_types",
"sgx_unwind",
]
[[package]]
name = "sgx_types"
version = "1.1.5"
[[package]]
name = "sgx_unwind"
version = "1.1.5"
dependencies = [
"sgx_build_helper",
]
[[package]]
name = "slab"
version = "0.4.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67"
dependencies = [
"autocfg",
]
[[package]]
name = "spin"
version = "0.5.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d"
[[package]]
name = "spin"
version = "0.7.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "13287b4da9d1207a4f4929ac390916d64eacfe236a487e9a9f5b3be392be5162"
[[package]]
name = "tempfile"
version = "3.8.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7ef1adac450ad7f4b3c28589471ade84f25f731a7a0fe30d71dfa9f60fd808e5"
dependencies = [
"cfg-if",
"fastrand",
"redox_syscall",
"rustix",
"windows-sys",
]
[[package]]
name = "windows-sys"
version = "0.48.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9"
dependencies = [
"windows-targets",
]
[[package]]
name = "windows-targets"
version = "0.48.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c"
dependencies = [
"windows_aarch64_gnullvm",
"windows_aarch64_msvc",
"windows_i686_gnu",
"windows_i686_msvc",
"windows_x86_64_gnu",
"windows_x86_64_gnullvm",
"windows_x86_64_msvc",
]
[[package]]
name = "windows_aarch64_gnullvm"
version = "0.48.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8"
[[package]]
name = "windows_aarch64_msvc"
version = "0.48.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc"
[[package]]
name = "windows_i686_gnu"
version = "0.48.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e"
[[package]]
name = "windows_i686_msvc"
version = "0.48.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406"
[[package]]
name = "windows_x86_64_gnu"
version = "0.48.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e"
[[package]]
name = "windows_x86_64_gnullvm"
version = "0.48.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc"
[[package]]
name = "windows_x86_64_msvc"
version = "0.48.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538"

@ -1,29 +0,0 @@
[package]
name = "io-uring-callback"
version = "0.1.0"
authors = ["Tate, Hongliang Tian <tate.thl@antfin.com>"]
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[features]
default = ["libc"]
sgx = ["sgx_tstd", "sgx_libc", "io-uring/sgx"]
[dependencies]
atomic = "0.5.0"
cfg-if = "1.0.0"
lock_api = "=0.4.2"
log = "0.4"
futures = { version = "0.3", default-features = false, features = ["alloc"] }
lazy_static = { version = "1.4.0", features = ["spin_no_std"] }
slab = { version = "0.4.5", default-features = false }
libc = { version = "0.2", optional = true }
io-uring = { path = "../../../../deps/io-uring", features = ["unstable"] }
sgx_tstd = { path = "../../../../deps/rust-sgx-sdk/sgx_tstd", optional = true, features = ["backtrace"] }
sgx_libc = { path = "../../../../deps/rust-sgx-sdk/sgx_libc", optional = true }
spin = "0.7"
[dev-dependencies]
tempfile = "3"

@ -1,15 +0,0 @@
# io-uring-callback
io-uring with a callback interface.
## Usage
To use io-uring-callback, place the following line under the `[dependencies]` section in your `Cargo.toml`:
```
io-uring-callback = { path = "your_path/io-uring-callback" }
```
To use io-uring-callback in SGX (based on rust-sgx-sdk), place the following line under the `[dependencies]` section in your `Cargo.toml` and prepare the incubator-teaclave-sgx-sdk environment according to io-uring-callback's `Cargo.toml`:
```
io-uring-callback = { path = "your_path/io-uring-callback", features = ["sgx"] }
```
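For a quick sense of the API, below is a minimal sketch distilled from the `tcp_echo` example shipped with this crate (non-SGX configuration). The `Builder`, `poll`, and `poll_completions` calls mirror that example; the port number and the endless reaping loop are illustrative only:
```
use std::net::TcpListener;
use std::os::unix::io::AsRawFd;

use io_uring::opcode::types;
use io_uring_callback::Builder;

fn main() {
    // Build a ring with a 256-entry submission queue (mirrors the tcp_echo example).
    let ring = Builder::new()
        .setup_sqpoll(Some(500 /* ms */))
        .build(256)
        .unwrap();
    let listener = TcpListener::bind(("127.0.0.1", 3456)).unwrap();

    // Submit a poll request; the closure is invoked when the request completes.
    let _handle = unsafe {
        ring.poll(types::Fd(listener.as_raw_fd()), libc::POLLIN as _, |retval| {
            println!("listener became readable, retval = {}", retval);
        })
    };

    // Reap completed requests and run their registered callbacks.
    loop {
        ring.poll_completions(0, 100);
    }
}
```
In the SGX configuration, the enclave example drives callbacks with `ring.trigger_callbacks()` instead of `poll_completions`.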

@ -1,11 +0,0 @@
Cargo.lock
Enclave_u.c
Enclave_u.h
Enclave_t.c
Enclave_t.h
app/target
enclave/target
bin/app
*.o
*.a
*.so

@ -1,161 +0,0 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
######## SGX SDK Settings ########
SGX_SDK ?= /opt/sgxsdk
SGX_MODE ?= HW
SGX_ARCH ?= x64
include ../../../common.mk
include $(RUST_SGX_SDK_DIR)/buildenv.mk
OCALLS_DIR := $(ROOT_DIR)/deps/io-uring/ocalls
ifeq ($(shell getconf LONG_BIT), 32)
SGX_ARCH := x86
else ifeq ($(findstring -m32, $(CXXFLAGS)), -m32)
SGX_ARCH := x86
endif
ifeq ($(SGX_ARCH), x86)
SGX_COMMON_CFLAGS := -m32
SGX_LIBRARY_PATH := $(SGX_SDK)/lib
SGX_ENCLAVE_SIGNER := $(SGX_SDK)/bin/x86/sgx_sign
SGX_EDGER8R := $(SGX_SDK)/bin/x86/sgx_edger8r
else
SGX_COMMON_CFLAGS := -m64
SGX_LIBRARY_PATH := $(SGX_SDK)/lib64
SGX_ENCLAVE_SIGNER := $(SGX_SDK)/bin/x64/sgx_sign
SGX_EDGER8R := $(SGX_SDK)/bin/x64/sgx_edger8r
endif
ifeq ($(SGX_DEBUG), 1)
ifeq ($(SGX_PRERELEASE), 1)
$(error Cannot set SGX_DEBUG and SGX_PRERELEASE at the same time!!)
endif
endif
ifeq ($(SGX_DEBUG), 1)
SGX_COMMON_CFLAGS += -O0 -g
else
SGX_COMMON_CFLAGS += -O2
endif
SGX_COMMON_CFLAGS += -fstack-protector
######## CUSTOM Settings ########
CUSTOM_LIBRARY_PATH := ./lib
CUSTOM_BIN_PATH := ./bin
CUSTOM_EDL_PATH := $(RUST_SGX_SDK_DIR)/edl
CUSTOM_COMMON_PATH := $(RUST_SGX_SDK_DIR)/common
######## EDL Settings ########
Enclave_EDL_Files := enclave/Enclave_t.c enclave/Enclave_t.h app/Enclave_u.c app/Enclave_u.h
######## APP Settings ########
App_Rust_Flags := --release
App_SRC_Files := $(shell find app/ -type f -name '*.rs') $(shell find app/ -type f -name 'Cargo.toml')
App_Include_Paths := -I ./app -I./include -I$(SGX_SDK)/include -I$(CUSTOM_EDL_PATH)
App_C_Flags := $(SGX_COMMON_CFLAGS) -fPIC -Wno-attributes $(App_Include_Paths)
App_Rust_Path := ./app/target/release
App_Enclave_u_Object := lib/libEnclave_u.a
App_Name := bin/app
######## Enclave Settings ########
ifneq ($(SGX_MODE), HW)
Trts_Library_Name := sgx_trts_sim
Service_Library_Name := sgx_tservice_sim
else
Trts_Library_Name := sgx_trts
Service_Library_Name := sgx_tservice
endif
Crypto_Library_Name := sgx_tcrypto
KeyExchange_Library_Name := sgx_tkey_exchange
ProtectedFs_Library_Name := sgx_tprotected_fs
RustEnclave_C_Files := $(wildcard ./enclave/*.c)
RustEnclave_C_Objects := $(RustEnclave_C_Files:.c=.o)
RustEnclave_Include_Paths := -I$(CUSTOM_COMMON_PATH)/inc -I$(CUSTOM_EDL_PATH) -I$(SGX_SDK)/include -I$(SGX_SDK)/include/tlibc -I$(SGX_SDK)/include/stlport -I$(SGX_SDK)/include/epid -I ./enclave -I./include
RustEnclave_Link_Libs := -L$(CUSTOM_LIBRARY_PATH) -lenclave
RustEnclave_Compile_Flags := $(SGX_COMMON_CFLAGS) $(ENCLAVE_CFLAGS) $(RustEnclave_Include_Paths)
RustEnclave_Link_Flags := -Wl,--no-undefined -nostdlib -nodefaultlibs -nostartfiles -L$(SGX_LIBRARY_PATH) \
-Wl,--whole-archive -l$(Trts_Library_Name) -Wl,--no-whole-archive \
-Wl,--start-group -lsgx_tstdc -l$(Service_Library_Name) -l$(Crypto_Library_Name) $(RustEnclave_Link_Libs) -Wl,--end-group \
-Wl,--version-script=enclave/Enclave.lds \
$(ENCLAVE_LDFLAGS)
RustEnclave_Name := enclave/enclave.so
Signed_RustEnclave_Name := bin/enclave.signed.so
.PHONY: all
all: $(App_Name) $(Signed_RustEnclave_Name)
######## EDL Objects ########
$(Enclave_EDL_Files): $(SGX_EDGER8R) enclave/Enclave.edl
$(SGX_EDGER8R) --trusted enclave/Enclave.edl --search-path $(OCALLS_DIR) --search-path $(SGX_SDK)/include --search-path $(CUSTOM_EDL_PATH) --trusted-dir enclave
$(SGX_EDGER8R) --untrusted enclave/Enclave.edl --search-path $(OCALLS_DIR) --search-path $(SGX_SDK)/include --search-path $(CUSTOM_EDL_PATH) --untrusted-dir app
@echo "GEN => $(Enclave_EDL_Files)"
######## App Objects ########
app/Enclave_u.o: $(Enclave_EDL_Files)
@$(CC) $(App_C_Flags) -c app/Enclave_u.c -o $@
@echo "CC <= $<"
$(App_Enclave_u_Object): app/Enclave_u.o
$(AR) rcsD $@ $^
$(App_Name): $(App_Enclave_u_Object) $(App_SRC_Files)
@cd app && SGX_SDK=$(SGX_SDK) cargo build $(App_Rust_Flags)
@echo "Cargo => $@"
mkdir -p bin
cp $(App_Rust_Path)/app ./bin
######## Enclave Objects ########
enclave/Enclave_t.o: $(Enclave_EDL_Files)
@$(CC) $(RustEnclave_Compile_Flags) -c enclave/Enclave_t.c -o $@
@echo "CC <= $<"
$(RustEnclave_Name): enclave enclave/Enclave_t.o
@$(CXX) enclave/Enclave_t.o -o $@ $(RustEnclave_Link_Flags)
@echo "LINK => $@"
$(Signed_RustEnclave_Name): $(RustEnclave_Name)
mkdir -p bin
@$(SGX_ENCLAVE_SIGNER) sign -key enclave/Enclave_private.pem -enclave $(RustEnclave_Name) -out $@ -config enclave/Enclave.config.xml
@echo "SIGN => $@"
.PHONY: enclave
enclave:
$(MAKE) -C ./enclave/
.PHONY: clean
clean:
@rm -f $(App_Name) $(RustEnclave_Name) $(Signed_RustEnclave_Name) enclave/*_t.* app/*_u.* lib/*.a
@cd enclave && cargo clean && rm -f Cargo.lock
@cd app && cargo clean && rm -f Cargo.lock

@ -1,14 +0,0 @@
## tcp_echo example for SGX
This is an example of using io-uring-callback in SGX.
This example combines the tcp_echo example of io-uring-callback with the hello-rust example of incubator-teaclave-sgx-sdk.
- ./app : untrusted code
- ./bin : executable program
- ./enclave : trusted code
- ./lib : library
### run tcp_echo example in SGX
1. Prepare the environment.
- Clone the incubator-teaclave-sgx-sdk repo to ```../../../third_parties/``` and check out commit ```d94996```.
- Set up the build environment required by incubator-teaclave-sgx-sdk.
2. ```make```
3. ```cd bin && ./app```

@ -1,11 +0,0 @@
[package]
name = "app"
version = "1.0.0"
build = "build.rs"
[dependencies]
sgx-io-uring-ocalls = { path = "../deps/io-uring/ocalls" }
sgx_types = { path = "../deps/rust-sgx-sdk/sgx_types" }
sgx_urts = { path = "../deps/rust-sgx-sdk/sgx_urts" }
[workspace]

@ -1,33 +0,0 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License..
use std::env;
fn main() {
let sdk_dir = env::var("SGX_SDK").unwrap_or_else(|_| "/opt/sgxsdk".to_string());
let is_sim = env::var("SGX_MODE").unwrap_or_else(|_| "HW".to_string());
println!("cargo:rustc-link-search=native=../lib");
println!("cargo:rustc-link-lib=static=Enclave_u");
println!("cargo:rustc-link-search=native={}/lib64", sdk_dir);
match is_sim.as_ref() {
"SW" => println!("cargo:rustc-link-lib=dylib=sgx_urts_sim"),
"HW" => println!("cargo:rustc-link-lib=dylib=sgx_urts"),
_ => println!("cargo:rustc-link-lib=dylib=sgx_urts"), // Treat undefined as HW
}
}

@ -1 +0,0 @@
nightly-2020-10-25

@ -1,81 +0,0 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License..
extern crate sgx_io_uring_ocalls;
extern crate sgx_types;
extern crate sgx_urts;
use sgx_types::*;
use sgx_urts::SgxEnclave;
pub use sgx_io_uring_ocalls::*;
static ENCLAVE_FILE: &'static str = "enclave.signed.so";
extern "C" {
fn run_sgx_example(eid: sgx_enclave_id_t, retval: *mut sgx_status_t) -> sgx_status_t;
}
fn init_enclave() -> SgxResult<SgxEnclave> {
let mut launch_token: sgx_launch_token_t = [0; 1024];
let mut launch_token_updated: i32 = 0;
// call sgx_create_enclave to initialize an enclave instance
// Debug Support: set 2nd parameter to 1
let debug = 1;
let mut misc_attr = sgx_misc_attribute_t {
secs_attr: sgx_attributes_t { flags: 0, xfrm: 0 },
misc_select: 0,
};
SgxEnclave::create(
ENCLAVE_FILE,
debug,
&mut launch_token,
&mut launch_token_updated,
&mut misc_attr,
)
}
fn main() {
let enclave = match init_enclave() {
Ok(r) => {
println!("[+] Init Enclave Successful {}!", r.geteid());
r
}
Err(x) => {
println!("[-] Init Enclave Failed {}!", x.as_str());
return;
}
};
let mut retval = sgx_status_t::SGX_SUCCESS;
let result = unsafe { run_sgx_example(enclave.geteid(), &mut retval) };
match result {
sgx_status_t::SGX_SUCCESS => {}
_ => {
println!("[-] ECALL Enclave Failed {}!", result.as_str());
return;
}
}
match retval {
sgx_status_t::SGX_SUCCESS => {}
_ => {
println!("[-] ECALL Returned Error {}!", retval.as_str());
return;
}
}
println!("[+] run_sgx_example success...");
enclave.destroy();
}

@ -1,23 +0,0 @@
[package]
name = "Helloworldsampleenclave"
version = "1.0.0"
[lib]
name = "helloworldsampleenclave"
crate-type = ["staticlib"]
[features]
default = []
[dependencies]
io-uring-callback = { path = "../../../../io-uring-callback", features = ["sgx"] }
io-uring = { path = "../../../../../../../deps/io-uring", features = ["sgx"] }
slab = { version = "0.4.5", default-features = false }
lazy_static = { version = "1.4.0", features = ["spin_no_std"] }
[target.'cfg(not(target_env = "sgx"))'.dependencies]
sgx_types = { path = "../../../../../../../deps/rust-sgx-sdk/sgx_types" }
sgx_tstd = { path = "../../../../../../../deps/rust-sgx-sdk/sgx_tstd", features = ["backtrace", "thread"] }
sgx_trts = { path = "../../../../../../../deps/rust-sgx-sdk/sgx_trts" }
[workspace]

@ -1,13 +0,0 @@
<!-- Please refer to User's Guide for the explanation of each field -->
<EnclaveConfiguration>
<ProdID>0</ProdID>
<ISVSVN>0</ISVSVN>
<StackMaxSize>0x40000</StackMaxSize>
<HeapMaxSize>0x400000</HeapMaxSize>
<TCSNum>1</TCSNum>
<TCSMaxNum>1</TCSMaxNum>
<TCSPolicy>0</TCSPolicy>
<DisableDebug>0</DisableDebug>
<MiscSelect>0</MiscSelect>
<MiscMask>0xFFFFFFFF</MiscMask>
</EnclaveConfiguration>

@ -1,36 +0,0 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
enclave {
from "sgx_tstd.edl" import *;
from "sgx_stdio.edl" import *;
from "sgx_backtrace.edl" import *;
from "sgx_tstdc.edl" import *;
from "sgx_net.edl" import *;
from "sgx_thread.edl" import *;
from "sgx_io_uring_ocalls.edl" import *;
trusted {
/* define ECALLs here. */
public sgx_status_t run_sgx_example();
};
untrusted {
/* define OCALLs here. */
};
};

@ -1,9 +0,0 @@
enclave.so
{
global:
g_global_data_sim;
g_global_data;
enclave_entry;
local:
*;
};

@ -1,39 +0,0 @@
-----BEGIN RSA PRIVATE KEY-----
MIIG4gIBAAKCAYEAroOogvsj/fZDZY8XFdkl6dJmky0lRvnWMmpeH41Bla6U1qLZ
AmZuyIF+mQC/cgojIsrBMzBxb1kKqzATF4+XwPwgKz7fmiddmHyYz2WDJfAjIveJ
ZjdMjM4+EytGlkkJ52T8V8ds0/L2qKexJ+NBLxkeQLfV8n1mIk7zX7jguwbCG1Pr
nEMdJ3Sew20vnje+RsngAzdPChoJpVsWi/K7cettX/tbnre1DL02GXc5qJoQYk7b
3zkmhz31TgFrd9VVtmUGyFXAysuSAb3EN+5VnHGr0xKkeg8utErea2FNtNIgua8H
ONfm9Eiyaav1SVKzPHlyqLtcdxH3I8Wg7yqMsaprZ1n5A1v/levxnL8+It02KseD
5HqV4rf/cImSlCt3lpRg8U5E1pyFQ2IVEC/XTDMiI3c+AR+w2jSRB3Bwn9zJtFlW
KHG3m1xGI4ck+Lci1JvWWLXQagQSPtZTsubxTQNx1gsgZhgv1JHVZMdbVlAbbRMC
1nSuJNl7KPAS/VfzAgEDAoIBgHRXxaynbVP5gkO0ug6Qw/E27wzIw4SmjsxG6Wpe
K7kfDeRskKxESdsA/xCrKkwGwhcx1iIgS5+Qscd1Yg+1D9X9asd/P7waPmWoZd+Z
AhlKwhdPsO7PiF3e1AzHhGQwsUTt/Y/aSI1MpHBvy2/s1h9mFCslOUxTmWw0oj/Q
ldIEgWeNR72CE2+jFIJIyml6ftnb6qzPiga8Bm48ubKh0kvySOqnkmnPzgh+JBD6
JnBmtZbfPT97bwTT+N6rnPqOOApvfHPf15kWI8yDbprG1l4OCUaIUH1AszxLd826
5IPM+8gINLRDP1MA6azECPjTyHXhtnSIBZCyWSVkc05vYmNXYUNiXWMajcxW9M02
wKzFELO8NCEAkaTPxwo4SCyIjUxiK1LbQ9h8PSy4c1+gGP4LAMR8xqP4QKg6zdu9
osUGG/xRe/uufgTBFkcjqBHtK5L5VI0jeNIUAgW/6iNbYXjBMJ0GfauLs+g1VsOm
WfdgXzsb9DYdMa0OXXHypmV4GwKBwQDUwQj8RKJ6c8cT4vcWCoJvJF00+RFL+P3i
Gx2DLERxRrDa8AVGfqaCjsR+3vLgG8V/py+z+dxZYSqeB80Qeo6PDITcRKoeAYh9
xlT3LJOS+k1cJcEmlbbO2IjLkTmzSwa80fWexKu8/Xv6vv15gpqYl1ngYoqJM3pd
vzmTIOi7MKSZ0WmEQavrZj8zK4endE3v0eAEeQ55j1GImbypSf7Idh7wOXtjZ7WD
Dg6yWDrri+AP/L3gClMj8wsAxMV4ZR8CgcEA0fzDHkFa6raVOxWnObmRoDhAtE0a
cjUj976NM5yyfdf2MrKy4/RhdTiPZ6b08/lBC/+xRfV3xKVGzacm6QjqjZrUpgHC
0LKiZaMtccCJjLtPwQd0jGQEnKfMFaPsnhOc5y8qVkCzVOSthY5qhz0XNotHHFmJ
gffVgB0iqrMTvSL7IA2yqqpOqNRlhaYhNl8TiFP3gIeMtVa9rZy31JPgT2uJ+kfo
gV7sdTPEjPWZd7OshGxWpT6QfVDj/T9T7L6tAoHBAI3WBf2DFvxNL2KXT2QHAZ9t
k3imC4f7U+wSE6zILaDZyzygA4RUbwG0gv8/TJVn2P/Eynf76DuWHGlaiLWnCbSz
Az2DHBQBBaku409zDQym3j1ugMRjzzSQWzJg0SIyBH3hTmnYcn3+Uqcp/lEBvGW6
O+rsXFt3pukqJmIV8HzLGGaLm62BHUeZf3dyWm+i3p/hQAL7Xvu04QW70xuGqdr5
afV7p5eaeQIJXyGQJ0eylV/90+qxjMKiB1XYg6WYvwKBwQCL/ddpgOdHJGN8uRom
e7Zq0Csi3hGheMKlKbN3vcxT5U7MdyHtTZZOJbTvxKNNUNYH/8uD+PqDGNneb29G
BfGzvI3EASyLIcGZF3OhKwZd0jUrWk2y7Vhob91jwp2+t73vdMbkKyI4mHOuXvGv
fg95si9oO7EBT+Oqvhccd2J+F1IVXncccYnF4u5ZGWt5lLewN/pVr7MjjykeaHqN
t+rfnQam2psA6fL4zS2zTmZPzR2tnY8Y1GBTi0Ko1OKd1HMCgcAb5cB/7/AQlhP9
yQa04PLH9ygQkKKptZp7dy5WcWRx0K/hAHRoi2aw1wZqfm7VBNu2SLcs90kCCCxp
6C5sfJi6b8NpNbIPC+sc9wsFr7pGo9SFzQ78UlcWYK2Gu2FxlMjonhka5hvo4zvg
WxlpXKEkaFt3gLd92m/dMqBrHfafH7VwOJY2zT3WIpjwuk0ZzmRg5p0pG/svVQEH
NZmwRwlopysbR69B/n1nefJ84UO50fLh5s5Zr3gBRwbWNZyzhXk=
-----END RSA PRIVATE KEY-----

@ -1,38 +0,0 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
Rust_Enclave_Name := libenclave.a
Rust_Enclave_Files := $(wildcard src/*.rs)
Rust_Target_Path := $(CURDIR)/../../../../incubator-teaclave-sgx-sdk/xargo
ifeq ($(MITIGATION-CVE-2020-0551), LOAD)
export MITIGATION_CVE_2020_0551=LOAD
else ifeq ($(MITIGATION-CVE-2020-0551), CF)
export MITIGATION_CVE_2020_0551=CF
endif
.PHONY: all
all: $(Rust_Enclave_Name)
$(Rust_Enclave_Name): $(Rust_Enclave_Files)
ifeq ($(XARGO_SGX), 1)
RUST_TARGET_PATH=$(Rust_Target_Path) xargo build --target x86_64-unknown-linux-sgx --release
cp ./target/x86_64-unknown-linux-sgx/release/libhelloworldsampleenclave.a ../lib/libenclave.a
else
cargo build --release
cp ./target/release/libhelloworldsampleenclave.a ../lib/libenclave.a
endif

@ -1 +0,0 @@
nightly-2020-10-25

@ -1,334 +0,0 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License..
#![crate_name = "helloworldsampleenclave"]
#![crate_type = "staticlib"]
#![cfg_attr(not(target_env = "sgx"), no_std)]
#![cfg_attr(target_env = "sgx", feature(rustc_private))]
extern crate sgx_trts;
extern crate sgx_types;
#[cfg(not(target_env = "sgx"))]
#[macro_use]
extern crate sgx_tstd as std;
extern crate io_uring;
extern crate io_uring_callback;
extern crate lazy_static;
extern crate slab;
use sgx_trts::libc;
use sgx_types::*;
use std::collections::VecDeque;
use std::os::unix::io::RawFd;
use std::prelude::v1::*;
use std::ptr;
use std::sync::SgxMutex as Mutex;
use io_uring::opcode::types;
use io_uring_callback::{Builder, Handle, IoUring};
use lazy_static::lazy_static;
use slab::Slab;
lazy_static! {
static ref TOKEN_QUEUE: Mutex<VecDeque<(Token, i32)>> = Mutex::new(VecDeque::new());
static ref HANDLE_SLAB: Mutex<slab::Slab<Handle>> = Mutex::new(slab::Slab::new());
}
#[derive(Clone, Debug)]
enum Token {
Accept,
Poll {
fd: RawFd,
},
Read {
fd: RawFd,
buf_index: usize,
},
Write {
fd: RawFd,
buf_index: usize,
offset: usize,
len: usize,
},
}
pub struct AcceptCount {
fd: types::Fd,
count: usize,
}
impl AcceptCount {
fn new(fd: RawFd, count: usize) -> AcceptCount {
AcceptCount {
fd: types::Fd(fd),
count: count,
}
}
pub fn try_push_accept(&mut self, ring: &IoUring) {
while self.count > 0 {
let to_complete_token = Token::Accept;
let mut handle_slab = HANDLE_SLAB.lock().unwrap();
let slab_entry = handle_slab.vacant_entry();
let slab_key = slab_entry.key();
let complete_fn = move |retval: i32| {
let mut queue = TOKEN_QUEUE.lock().unwrap();
queue.push_back((to_complete_token, retval));
HANDLE_SLAB.lock().unwrap().remove(slab_key);
};
let handle =
unsafe { ring.accept(self.fd, ptr::null_mut(), ptr::null_mut(), 0, complete_fn) };
slab_entry.insert(handle);
self.count -= 1;
}
}
}
#[no_mangle]
pub extern "C" fn run_sgx_example() -> sgx_status_t {
// std::backtrace::enable_backtrace("enclave.signed.so", std::backtrace::PrintFormat::Full);
println!("[ECALL] run_sgx_example");
let ring = Builder::new()
.setup_sqpoll(Some(500/* ms */))
.build(256)
.unwrap();
let socket_fd = {
let socket_fd = unsafe { libc::ocall::socket(libc::AF_INET, libc::SOCK_STREAM, 0) };
if socket_fd < 0 {
println!("[ECALL] create socket failed, ret: {}", socket_fd);
return sgx_status_t::SGX_ERROR_UNEXPECTED;
}
let ret = unsafe {
let servaddr = libc::sockaddr_in {
sin_family: libc::AF_INET as u16,
sin_port: 3456_u16.to_be(),
sin_addr: libc::in_addr { s_addr: 0 },
sin_zero: [0; 8],
};
libc::ocall::bind(
socket_fd,
&servaddr as *const _ as *const libc::sockaddr,
core::mem::size_of::<libc::sockaddr_in>() as u32,
)
};
if ret < 0 {
println!("[ECALL] bind failed, ret: {}", ret);
unsafe {
libc::ocall::close(socket_fd);
}
return sgx_status_t::SGX_ERROR_UNEXPECTED;
}
let ret = unsafe { libc::ocall::listen(socket_fd, 10) };
if ret < 0 {
println!("[ECALL] listen failed, ret: {}", ret);
unsafe {
libc::ocall::close(socket_fd);
}
return sgx_status_t::SGX_ERROR_UNEXPECTED;
}
socket_fd
};
let mut bufpool = Vec::with_capacity(64);
let mut buf_alloc = Slab::with_capacity(64);
println!("[ECALL] listen 127.0.0.1:3456");
let mut accept = AcceptCount::new(socket_fd, 3);
loop {
accept.try_push_accept(&ring);
ring.trigger_callbacks();
let mut queue = TOKEN_QUEUE.lock().unwrap();
while !queue.is_empty() {
let (token, ret) = queue.pop_front().unwrap();
match token {
Token::Accept => {
println!("[ECALL] accept");
accept.count += 1;
let fd = ret;
let to_complete_token = Token::Poll { fd };
let mut handle_slab = HANDLE_SLAB.lock().unwrap();
let slab_entry = handle_slab.vacant_entry();
let slab_key = slab_entry.key();
let complete_fn = move |retval: i32| {
let mut queue = TOKEN_QUEUE.lock().unwrap();
queue.push_back((to_complete_token, retval));
HANDLE_SLAB.lock().unwrap().remove(slab_key);
};
let handle =
unsafe { ring.poll(types::Fd(fd), libc::POLLIN as _, complete_fn) };
slab_entry.insert(handle);
}
Token::Poll { fd } => {
let (buf_index, buf) = match bufpool.pop() {
Some(buf_index) => (buf_index, &mut buf_alloc[buf_index]),
None => {
let buf = Box::new(unsafe {
std::slice::from_raw_parts_mut(
libc::ocall::malloc(2048) as *mut u8,
2048,
)
});
let buf_entry = buf_alloc.vacant_entry();
let buf_index = buf_entry.key();
(buf_index, buf_entry.insert(buf))
}
};
let to_complete_token = Token::Read { fd, buf_index };
let mut handle_slab = HANDLE_SLAB.lock().unwrap();
let slab_entry = handle_slab.vacant_entry();
let slab_key = slab_entry.key();
let complete_fn = move |retval: i32| {
let mut queue = TOKEN_QUEUE.lock().unwrap();
queue.push_back((to_complete_token, retval));
HANDLE_SLAB.lock().unwrap().remove(slab_key);
};
let handle = unsafe {
ring.read(
types::Fd(fd),
buf.as_mut_ptr(),
buf.len() as _,
0,
0,
complete_fn,
)
};
slab_entry.insert(handle);
}
Token::Read { fd, buf_index } => {
if ret == 0 {
bufpool.push(buf_index);
println!("[ECALL] shutdown");
unsafe {
libc::ocall::close(fd);
}
} else {
let len = ret as usize;
let buf = &buf_alloc[buf_index];
let to_complete_token = Token::Write {
fd,
buf_index,
len,
offset: 0,
};
let mut handle_slab = HANDLE_SLAB.lock().unwrap();
let slab_entry = handle_slab.vacant_entry();
let slab_key = slab_entry.key();
let complete_fn = move |retval: i32| {
let mut queue = TOKEN_QUEUE.lock().unwrap();
queue.push_back((to_complete_token, retval));
HANDLE_SLAB.lock().unwrap().remove(slab_key);
};
let handle = unsafe {
ring.write(types::Fd(fd), buf.as_ptr(), len as _, 0, 0, complete_fn)
};
slab_entry.insert(handle);
}
}
Token::Write {
fd,
buf_index,
offset,
len,
} => {
let write_len = ret as usize;
if offset + write_len >= len {
bufpool.push(buf_index);
let to_complete_token = Token::Poll { fd };
let mut handle_slab = HANDLE_SLAB.lock().unwrap();
let slab_entry = handle_slab.vacant_entry();
let slab_key = slab_entry.key();
let complete_fn = move |retval: i32| {
let mut queue = TOKEN_QUEUE.lock().unwrap();
queue.push_back((to_complete_token, retval));
HANDLE_SLAB.lock().unwrap().remove(slab_key);
};
let handle =
unsafe { ring.poll_add(types::Fd(fd), libc::POLLIN as _, complete_fn) };
slab_entry.insert(handle);
} else {
let offset = offset + write_len;
let len = len - offset;
let buf = &buf_alloc[buf_index][offset..];
let to_complete_token = Token::Write {
fd,
buf_index,
offset,
len,
};
let mut handle_slab = HANDLE_SLAB.lock().unwrap();
let slab_entry = handle_slab.vacant_entry();
let slab_key = slab_entry.key();
let complete_fn = move |retval: i32| {
let mut queue = TOKEN_QUEUE.lock().unwrap();
queue.push_back((to_complete_token, retval));
HANDLE_SLAB.lock().unwrap().remove(slab_key);
};
let handle = unsafe {
ring.write(types::Fd(fd), buf.as_ptr(), len as _, 0, 0, complete_fn)
};
slab_entry.insert(handle);
};
}
}
}
}
}

@ -1,31 +0,0 @@
{
"arch": "x86_64",
"cpu": "x86-64",
"data-layout": "e-m:e-i64:64-f80:128-n8:16:32:64-S128",
"dynamic-linking": true,
"env": "sgx",
"exe-allocation-crate": "alloc_system",
"executables": true,
"has-elf-tls": true,
"has-rpath": true,
"linker-flavor": "gcc",
"linker-is-gnu": true,
"llvm-target": "x86_64-unknown-linux-gnu",
"max-atomic-width": 64,
"os": "linux",
"position-independent-executables": true,
"pre-link-args": {
"gcc": [
"-Wl,--as-needed",
"-Wl,-z,noexecstack",
"-m64"
]
},
"relro-level": "full",
"stack-probes": true,
"target-c-int-width": "32",
"target-endian": "little",
"target-family": "unix",
"target-pointer-width": "64",
"vendor": "mesalock"
}

@ -1,251 +0,0 @@
use std::collections::VecDeque;
use std::net::TcpListener;
use std::os::unix::io::{AsRawFd, RawFd};
use std::ptr;
use std::sync::Mutex;
use io_uring::opcode::types;
use io_uring_callback::{Builder, IoHandle, IoUring};
use lazy_static::lazy_static;
lazy_static! {
static ref TOKEN_QUEUE: Mutex<VecDeque<(Token, i32)>> = Mutex::new(VecDeque::new());
static ref HANDLE_SLAB: Mutex<slab::Slab<IoHandle>> = Mutex::new(slab::Slab::new());
}
#[derive(Clone, Debug)]
enum Token {
Accept,
Poll {
fd: RawFd,
},
Read {
fd: RawFd,
buf_index: usize,
},
Write {
fd: RawFd,
buf_index: usize,
offset: usize,
len: usize,
},
}
pub struct AcceptCount {
fd: types::Fd,
count: usize,
}
impl AcceptCount {
fn new(fd: RawFd, count: usize) -> AcceptCount {
AcceptCount {
fd: types::Fd(fd),
count: count,
}
}
pub fn try_push_accept(&mut self, ring: &IoUring) {
while self.count > 0 {
let to_complete_token = Token::Accept;
let mut handle_slab = HANDLE_SLAB.lock().unwrap();
let slab_entry = handle_slab.vacant_entry();
let slab_key = slab_entry.key();
let complete_fn = move |retval: i32| {
let mut queue = TOKEN_QUEUE.lock().unwrap();
queue.push_back((to_complete_token, retval));
HANDLE_SLAB.lock().unwrap().remove(slab_key);
};
let handle =
unsafe { ring.accept(self.fd, ptr::null_mut(), ptr::null_mut(), 0, complete_fn) };
slab_entry.insert(handle);
self.count -= 1;
}
}
}
fn main() {
let ring = Builder::new()
.setup_sqpoll(Some(500 /* ms */))
.build(256)
.unwrap();
let listener = TcpListener::bind(("127.0.0.1", 3456)).unwrap();
let mut bufpool = Vec::with_capacity(64);
let mut buf_alloc = slab::Slab::with_capacity(64);
println!("listen {}", listener.local_addr().unwrap());
let mut accept = AcceptCount::new(listener.as_raw_fd(), 3);
loop {
accept.try_push_accept(&ring);
ring.poll_completions(0, 100);
let mut queue = TOKEN_QUEUE.lock().unwrap();
while !queue.is_empty() {
let (token, ret) = queue.pop_front().unwrap();
match token {
Token::Accept => {
println!("accept");
accept.count += 1;
let fd = ret;
let to_complete_token = Token::Poll { fd };
let mut handle_slab = HANDLE_SLAB.lock().unwrap();
let slab_entry = handle_slab.vacant_entry();
let slab_key = slab_entry.key();
let complete_fn = move |retval: i32| {
let mut queue = TOKEN_QUEUE.lock().unwrap();
queue.push_back((to_complete_token, retval));
HANDLE_SLAB.lock().unwrap().remove(slab_key);
};
let handle =
unsafe { ring.poll(types::Fd(fd), libc::POLLIN as _, complete_fn) };
slab_entry.insert(handle);
}
Token::Poll { fd } => {
let (buf_index, buf) = match bufpool.pop() {
Some(buf_index) => (buf_index, &mut buf_alloc[buf_index]),
None => {
let buf = vec![0u8; 2048].into_boxed_slice();
let buf_entry = buf_alloc.vacant_entry();
let buf_index = buf_entry.key();
(buf_index, buf_entry.insert(buf))
}
};
let to_complete_token = Token::Read { fd, buf_index };
let mut handle_slab = HANDLE_SLAB.lock().unwrap();
let slab_entry = handle_slab.vacant_entry();
let slab_key = slab_entry.key();
let complete_fn = move |retval: i32| {
let mut queue = TOKEN_QUEUE.lock().unwrap();
queue.push_back((to_complete_token, retval));
HANDLE_SLAB.lock().unwrap().remove(slab_key);
};
let handle = unsafe {
ring.read(
types::Fd(fd),
buf.as_mut_ptr(),
buf.len() as _,
0,
0,
complete_fn,
)
};
slab_entry.insert(handle);
}
Token::Read { fd, buf_index } => {
if ret == 0 {
bufpool.push(buf_index);
println!("shutdown");
unsafe {
libc::close(fd);
}
} else {
let len = ret as usize;
let buf = &buf_alloc[buf_index];
let to_complete_token = Token::Write {
fd,
buf_index,
len,
offset: 0,
};
let mut handle_slab = HANDLE_SLAB.lock().unwrap();
let slab_entry = handle_slab.vacant_entry();
let slab_key = slab_entry.key();
let complete_fn = move |retval: i32| {
let mut queue = TOKEN_QUEUE.lock().unwrap();
queue.push_back((to_complete_token, retval));
HANDLE_SLAB.lock().unwrap().remove(slab_key);
};
let handle = unsafe {
ring.write(types::Fd(fd), buf.as_ptr(), len as _, 0, 0, complete_fn)
};
slab_entry.insert(handle);
}
}
Token::Write {
fd,
buf_index,
offset,
len,
} => {
let write_len = ret as usize;
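// A short write leaves `offset + write_len < len`; the remaining bytes
// are re-submitted below with an advanced offset.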
if offset + write_len >= len {
bufpool.push(buf_index);
let to_complete_token = Token::Poll { fd };
let mut handle_slab = HANDLE_SLAB.lock().unwrap();
let slab_entry = handle_slab.vacant_entry();
let slab_key = slab_entry.key();
let complete_fn = move |retval: i32| {
let mut queue = TOKEN_QUEUE.lock().unwrap();
queue.push_back((to_complete_token, retval));
HANDLE_SLAB.lock().unwrap().remove(slab_key);
};
let handle =
unsafe { ring.poll(types::Fd(fd), libc::POLLIN as _, complete_fn) };
slab_entry.insert(handle);
} else {
let offset = offset + write_len;
let len = len - offset;
let buf = &buf_alloc[buf_index][offset..];
let to_complete_token = Token::Write {
fd,
buf_index,
offset,
len,
};
let mut handle_slab = HANDLE_SLAB.lock().unwrap();
let slab_entry = handle_slab.vacant_entry();
let slab_key = slab_entry.key();
let complete_fn = move |retval: i32| {
let mut queue = TOKEN_QUEUE.lock().unwrap();
queue.push_back((to_complete_token, retval));
HANDLE_SLAB.lock().unwrap().remove(slab_key);
};
let handle = unsafe {
ring.write(types::Fd(fd), buf.as_ptr(), len as _, 0, 0, complete_fn)
};
slab_entry.insert(handle);
};
}
}
}
}
}

@@ -1 +0,0 @@
nightly-2022-02-23

@@ -1,192 +0,0 @@
use std::sync::Arc;
cfg_if::cfg_if! {
if #[cfg(feature = "sgx")] {
use std::prelude::v1::*;
use spin::Mutex as Mutex;
} else {
use std::sync::Mutex;
}
}
/// The handle to an I/O request pushed to the submission queue of an io_uring instance.
#[derive(Debug)]
#[repr(transparent)]
pub struct IoHandle(pub(crate) Arc<IoToken>);
/// The state of an I/O request represented by an [`IoHandle`].
/// A request in the `Processed` or `Cancelled` state has been completed.
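///
/// A minimal sketch of inspecting the state through a handle (`handle` is
/// assumed to be an `IoHandle` returned by one of the submission methods):
///
/// ```ignore
/// match handle.state() {
///     IoState::Processed(retval) => println!("completed with {}", retval),
///     IoState::Cancelled => println!("cancelled"),
///     _ => println!("still in flight"),
/// }
/// ```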
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum IoState {
/// The I/O request has been submitted.
Submitted,
/// The I/O request has been processed by the kernel and returns a value.
Processed(i32),
/// The I/O request is being cancelled.
Cancelling,
/// The I/O request has been cancelled by the kernel.
Cancelled,
}
const CANCEL_RETVAL: i32 = -libc::ECANCELED;
impl IoHandle {
pub(crate) fn new(token: Arc<IoToken>) -> Self {
Self(token)
}
/// Returns the state of the I/O request.
pub fn state(&self) -> IoState {
self.0.state()
}
/// Returns the return value of the I/O request if it is completed.
pub fn retval(&self) -> Option<i32> {
self.0.retval()
}
/// Release a handle.
///
/// Normally, a handle is not allowed to be dropped before the I/O is completed.
/// This helps discover memory safety problems due to potential misuse by users.
///
/// But sometimes keeping handles can be pointless. This is when the `release`
/// method can help. The release method explicitly states that a handle is
/// useless and then drops it.
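///
/// A hedged sketch (assuming `handle` is an `IoHandle` whose completion the
/// caller no longer cares about):
///
/// ```ignore
/// handle.release(); // opt out of the drop-time completion check
/// ```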
pub fn release(self) {
// Safety. The representation is transparent.
let token = unsafe { std::mem::transmute::<Self, Arc<IoToken>>(self) };
drop(token);
}
}
impl Unpin for IoHandle {}
impl Drop for IoHandle {
fn drop(&mut self) {
// The user cannot drop a handle if the request isn't completed.
assert!(matches!(
self.state(),
IoState::Processed(_) | IoState::Cancelled
));
}
}
/// A token representing an on-going I/O request.
///
/// Tokens and handles are basically the same thing---an on-going I/O request. The main difference
/// is that handles are used by users, while tokens are used internally.
pub(crate) struct IoToken {
inner: Mutex<Inner>,
}
impl IoToken {
pub fn new(completion_callback: impl FnOnce(i32) + Send + 'static, token_key: u64) -> Self {
let inner = Mutex::new(Inner::new(completion_callback, token_key));
Self { inner }
}
pub fn state(&self) -> IoState {
let inner = self.inner.lock();
inner.state()
}
pub fn retval(&self) -> Option<i32> {
let inner = self.inner.lock();
inner.retval()
}
pub fn complete(&self, retval: i32) {
let mut inner = self.inner.lock();
let callback = inner.complete(retval);
// Must release the lock before invoking the callback function.
// This avoids any deadlock if the IoHandle is accessed inside the callback by
// user.
drop(inner);
(callback)(retval);
}
/// Change the state from submitted to cancelling.
/// If the transition succeeds, return the token_key for the subsequent cancel operation.
pub fn transit_to_cancelling(&self) -> Result<u64, ()> {
let mut inner = self.inner.lock();
inner.transit_to_cancelling()
}
}
impl std::fmt::Debug for IoToken {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("IoToken")
.field("state", &self.state())
.finish()
}
}
struct Inner {
state: IoState,
completion_callback: Option<Callback>,
token_key: u64,
}
type Callback = Box<dyn FnOnce(i32) + Send + 'static>;
impl Inner {
pub fn new(completion_callback: impl FnOnce(i32) + Send + 'static, token_key: u64) -> Self {
let state = IoState::Submitted;
let completion_callback = Some(Box::new(completion_callback) as _);
Self {
state,
completion_callback,
token_key,
}
}
pub fn complete(&mut self, retval: i32) -> Callback {
match self.state {
IoState::Submitted => {
self.state = IoState::Processed(retval);
}
IoState::Cancelling => {
if retval == CANCEL_RETVAL {
// case 1: The request was cancelled successfully.
self.state = IoState::Cancelled;
} else {
// case 2.1: The request was cancelled with error.
// case 2.2: The request was not actually canceled.
self.state = IoState::Processed(retval);
}
}
_ => {
unreachable!("cannot do complete twice");
}
}
self.completion_callback.take().unwrap()
}
pub fn transit_to_cancelling(&mut self) -> Result<u64, ()> {
match self.state {
IoState::Submitted => {
self.state = IoState::Cancelling;
return Ok(self.token_key);
}
_ => {
return Err(());
}
}
}
pub fn retval(&self) -> Option<i32> {
match self.state {
IoState::Processed(retval) => Some(retval),
IoState::Cancelled => Some(CANCEL_RETVAL),
_ => None,
}
}
pub fn state(&self) -> IoState {
self.state
}
}

@@ -1,812 +0,0 @@
//! A more user-friendly io_uring crate.
//!
//! # Overview
//!
//! While the original [io_uring crate](https://github.com/tokio-rs/io-uring) exposes io_uring's API in Rust, it has
//! one big shortcoming: users have to manually pop entries out of the completion queue and map those entries to
//! user requests. This makes the APIs cumbersome to use.
//!
//! This crate provides more user-friendly APIs with the following features:
//!
//! * Callback-based. On the completion of an I/O request, the corresponding user-registered
//! callback will get invoked. No manual dispatching of I/O completions.
//!
//! * Async/await-ready. After submitting an I/O request, the user will get a handle that
//! represents the on-going I/O request. The user can await the handle (as it is a `Future`).
//!
//! * Polling-based I/O. Both I/O submissions and completions can be easily done in polling mode.
//!
//! # Usage
//!
//! Use [`Builder`] to create a new instance of [`IoUring`].
//!
//! ```
//! use io_uring_callback::{Builder, IoUring};
//!
//! let io_uring: IoUring = Builder::new().build(256).unwrap();
//! ```
//!
//! A number of I/O operations are supported, e.g., `read`, `write`, `fsync`, `sendmsg`,
//! `recvmsg`, etc. Requests for such I/O operations can be pushed into the submission
//! queue of the io_uring with the corresponding methods.
//!
//! ```
//! # use io_uring_callback::{Builder};
//! use io_uring_callback::{Fd, RwFlags};
//!
//! # let io_uring = Builder::new().build(256).unwrap();
//! let fd = Fd(1); // use the stdout
//! let msg = "hello world\0";
//! let completion_callback = move |retval: i32| {
//! assert!(retval > 0);
//! };
//! let handle = unsafe {
//! io_uring.write(fd, msg.as_ptr(), msg.len() as u32, 0, RwFlags::default(), completion_callback)
//! };
//! # while handle.retval().is_none() {
//! # io_uring.wait_completions(1);
//! # }
//! ```
//!
//! You have two ways to get notified about the completion of I/O requests. The first
//! is through the registered callback function and the second is by `await`ing the handle
//! (which is a `Future`) obtained as a result of pushing I/O requests.
//!
//! After completing the I/O requests, Linux will push I/O responses into the completion queue of
//! the io_uring instance. You need to _periodically_ poll completions from the queue:
//! ```no_run
//! # use io_uring_callback::{Builder};
//! # let io_uring = Builder::new().build(256).unwrap();
//! let min_complete = 1;
//! let polling_retries = 5000;
//! io_uring.poll_completions(min_complete, polling_retries);
//! ```
//! which will trigger registered callbacks and wake up handles.
//!
//! When the I/O request is completed (the request is processed or cancelled by the kernel),
//! `poll_completions` will trigger the user-registered callback.
//!
//! # I/O Handles
//!
//! After submitting an I/O request, the user will get as the return value
//! an instance of [`IoHandle`], which represents the submitted I/O request.
//!
//! So why bother keeping I/O handles? The reasons are three-fold.
//!
//! - First, as a future, `IoHandle` allows you to await on it, which is quite
//! convenient if you happen to use io_uring with Rust's async/await.
//! - Second, `IoHandle` makes it possible to _cancel_ on-going I/O requests.
//! - Third, it makes the whole APIs less prone to memory safety issues. Recall that all I/O submitting
//! methods (e.g., `write`, `accept`, etc.) are _unsafe_ as there is no guarantee that
//! their arguments---like FDs or buffer pointers---are valid throughout the lifetime of
//! an I/O request. What if a user accidentally releases the in-use resources associated with
//! an on-going I/O request? I/O handles can detect such programming bugs as long as
//! the handles are also released along with other in-use I/O resources (which is most likely).
//! This is because when an `IoHandle` is dropped, we will panic if its state is neither
//! processed (`IoState::Processed`) nor cancelled (`IoState::Cancelled`). That is, dropping
//! an `IoHandle` that is still in-use is forbidden.
//!
//! After pushing an I/O request into the submission queue, you will get an `IoHandle`.
//! With this handle, you can cancel the I/O request.
//! ```
//! # use io_uring_callback::Builder;
//! use io_uring_callback::{Timespec, TimeoutFlags};
//!
//! # let io_uring = Builder::new().build(256).unwrap();
//! let tp = Timespec { tv_sec: 1, tv_nsec: 0, };
//! let completion_callback = move |_retval: i32| {};
//! let handle = unsafe {
//! io_uring.timeout(&tp as *const _, 0, TimeoutFlags::empty(), completion_callback)
//! };
//! unsafe { io_uring.cancel(&handle); }
//! io_uring.wait_completions(1);
//! ```
#![feature(get_mut_unchecked)]
#![cfg_attr(feature = "sgx", no_std)]
#[cfg(feature = "sgx")]
extern crate sgx_libc as libc;
#[cfg(feature = "sgx")]
extern crate sgx_tstd as std;
use core::sync::atomic::AtomicUsize;
use std::sync::Arc;
use std::{collections::HashMap, io};
cfg_if::cfg_if! {
if #[cfg(feature = "sgx")] {
use std::prelude::v1::*;
use spin::Mutex as Mutex;
} else {
use std::sync::Mutex;
}
}
use atomic::Ordering;
use io_uring::opcode;
use io_uring::squeue::Entry as SqEntry;
use io_uring::types;
use slab::Slab;
use spin::RwLock;
use std::os::unix::prelude::RawFd;
use crate::io_handle::IoToken;
mod io_handle;
pub use crate::io_handle::{IoHandle, IoState};
pub use io_uring::types::{Fd, RwFlags, TimeoutFlags, Timespec};
pub type IoUringRef = Arc<IoUring>;
/// An io_uring instance.
///
/// # Safety
///
/// All I/O methods are based on the assumption that the resources (e.g., file descriptors, pointers, etc.)
/// given in their arguments are valid before the completion of the async I/O.
pub struct IoUring {
ring: io_uring::IoUring,
token_table: Mutex<Slab<Arc<IoToken>>>,
sq_lock: Mutex<()>, // For submission queue synchronization
fd_map: RwLock<HashMap<usize, AtomicUsize>>, // (key: fd, value: op num)
}
impl Drop for IoUring {
fn drop(&mut self) {
// By the end of the life of the io_uring instance, its token table should have been emptied.
// This emptiness check prevents handles created by this io_uring from becoming "dangling".
// That is, no user will ever hold a handle whose associated io_uring instance has
// been destroyed.
let token_table = self.token_table.lock();
assert!(token_table.len() == 0);
}
}
impl IoUring {
/// The magic token_key for cancel I/O requests.
/// The magic token_key must be different from all of the token_table's keys.
const CANCEL_TOKEN_KEY: u64 = u64::MAX;
/// Constructor for internal uses.
///
/// Users should use `Builder` instead.
pub(crate) fn new(ring: io_uring::IoUring) -> Self {
let token_table = Mutex::new(Slab::new());
let sq_lock = Mutex::new(());
let fd_map = RwLock::new(HashMap::new());
Self {
ring,
token_table,
sq_lock,
fd_map,
}
}
/// Get the raw io_uring instance for advanced usage.
pub fn raw(&self) -> &io_uring::IoUring {
&self.ring
}
/// Push an accept request into the submission queue of the io_uring.
///
/// # Safety
///
/// See the safety section of the `IoUring`.
pub unsafe fn accept(
&self,
fd: Fd,
addr: *mut libc::sockaddr,
addrlen: *mut libc::socklen_t,
flags: u32,
callback: impl FnOnce(i32) + Send + 'static,
) -> IoHandle {
let entry = opcode::Accept::new(fd, addr, addrlen)
.flags(flags as i32)
.build();
self.op_fetch_add(fd.0 as usize, 1);
self.push_entry(entry, callback)
}
/// Push a connect request into the submission queue of the io_uring.
///
/// # Safety
///
/// See the safety section of the `IoUring`.
pub unsafe fn connect(
&self,
fd: Fd,
addr: *const libc::sockaddr,
addrlen: libc::socklen_t,
callback: impl FnOnce(i32) + Send + 'static,
) -> IoHandle {
let entry = opcode::Connect::new(fd, addr, addrlen).build();
self.op_fetch_add(fd.0 as usize, 1);
self.push_entry(entry, callback)
}
/// Push a poll request into the submission queue of the io_uring.
///
/// # Safety
///
/// See the safety section of the `IoUring`.
pub unsafe fn poll(
&self,
fd: Fd,
flags: u32,
callback: impl FnOnce(i32) + Send + 'static,
) -> IoHandle {
let entry = opcode::PollAdd::new(fd, flags).build();
self.op_fetch_add(fd.0 as usize, 1);
self.push_entry(entry, callback)
}
/// Push a read request into the submission queue of the io_uring.
///
/// # Safety
///
/// See the safety section of the `IoUring`.
pub unsafe fn read(
&self,
fd: Fd,
buf: *mut u8,
len: u32,
offset: libc::off_t,
flags: types::RwFlags,
callback: impl FnOnce(i32) + Send + 'static,
) -> IoHandle {
let entry = opcode::Read::new(fd, buf, len)
.offset(offset)
.rw_flags(flags)
.build();
self.op_fetch_add(fd.0 as usize, 1);
self.push_entry(entry, callback)
}
/// Push a write request into the submission queue of the io_uring.
///
/// # Safety
///
/// See the safety section of the `IoUring`.
pub unsafe fn write(
&self,
fd: Fd,
buf: *const u8,
len: u32,
offset: libc::off_t,
flags: types::RwFlags,
callback: impl FnOnce(i32) + Send + 'static,
) -> IoHandle {
let entry = opcode::Write::new(fd, buf, len)
.offset(offset)
.rw_flags(flags)
.build();
self.op_fetch_add(fd.0 as usize, 1);
self.push_entry(entry, callback)
}
/// Push a readv request into the submission queue of the io_uring.
///
/// # Safety
///
/// See the safety section of the `IoUring`.
pub unsafe fn readv(
&self,
fd: Fd,
iovec: *const libc::iovec,
len: u32,
offset: libc::off_t,
flags: types::RwFlags,
callback: impl FnOnce(i32) + Send + 'static,
) -> IoHandle {
let entry = opcode::Readv::new(fd, iovec, len)
.offset(offset)
.rw_flags(flags)
.build();
self.op_fetch_add(fd.0 as usize, 1);
self.push_entry(entry, callback)
}
/// Push a writev request into the submission queue of the io_uring.
///
/// # Safety
///
/// See the safety section of the `IoUring`.
pub unsafe fn writev(
&self,
fd: Fd,
iovec: *const libc::iovec,
len: u32,
offset: libc::off_t,
flags: types::RwFlags,
callback: impl FnOnce(i32) + Send + 'static,
) -> IoHandle {
let entry = opcode::Writev::new(fd, iovec, len)
.offset(offset)
.rw_flags(flags)
.build();
self.op_fetch_add(fd.0 as usize, 1);
self.push_entry(entry, callback)
}
/// Push a recvmsg request into the submission queue of the io_uring.
///
/// # Safety
///
/// See the safety section of the `IoUring`.
pub unsafe fn recvmsg(
&self,
fd: Fd,
msg: *mut libc::msghdr,
flags: u32,
callback: impl FnOnce(i32) + Send + 'static,
) -> IoHandle {
let entry = opcode::RecvMsg::new(fd, msg).flags(flags).build();
self.op_fetch_add(fd.0 as usize, 1);
self.push_entry(entry, callback)
}
/// Push a sendmsg request into the submission queue of the io_uring.
///
/// # Safety
///
/// See the safety section of the `IoUring`.
pub unsafe fn sendmsg(
&self,
fd: Fd,
msg: *const libc::msghdr,
flags: u32,
callback: impl FnOnce(i32) + Send + 'static,
) -> IoHandle {
let entry = opcode::SendMsg::new(fd, msg).flags(flags).build();
self.op_fetch_add(fd.0 as usize, 1);
self.push_entry(entry, callback)
}
/// Push a fsync request into the submission queue of the io_uring.
///
/// # Safety
///
/// See the safety section of the `IoUring`.
pub unsafe fn fsync(
&self,
fd: Fd,
datasync: bool,
callback: impl FnOnce(i32) + Send + 'static,
) -> IoHandle {
let entry = if datasync {
opcode::Fsync::new(fd)
.flags(types::FsyncFlags::DATASYNC)
.build()
} else {
opcode::Fsync::new(fd).build()
};
self.op_fetch_add(fd.0 as usize, 1);
self.push_entry(entry, callback)
}
/// Push a timeout request into the submission queue of the io_uring.
///
/// # Safety
///
/// See the safety section of the `IoUring`.
pub unsafe fn timeout(
&self,
timespec: *const types::Timespec,
count: u32,
flags: types::TimeoutFlags,
callback: impl FnOnce(i32) + Send + 'static,
) -> IoHandle {
let entry = opcode::Timeout::new(timespec)
.count(count)
.flags(flags)
.build();
self.push_entry(entry, callback)
}
/// Poll new I/O completions in the completions queue of io_uring
/// and return the number of I/O completions.
///
/// Upon receiving completed I/O, the corresponding user-registered callback functions
/// will get invoked and the `IoHandle` (as a `Future`) will become ready.
///
/// The method guarantees that at least the specified number of entries are
/// popped from the completion queue. To do so, it starts by polling the
/// completion queue for at most the specified number of retries.
/// If the number of completion entries popped so far does not reach the
/// specified minimum value, then the method shall block
/// until new completions arrive. After getting unblocked, the method
/// repeats polling.
///
/// If the user does not want the method to block, set `min_complete`
/// to 0. If the user does not want the method to busy poll, set
/// `polling_retries` to 0.
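///
/// A minimal usage sketch (assuming an `IoUring` built via [`Builder`]):
///
/// ```no_run
/// # use io_uring_callback::Builder;
/// # let io_uring = Builder::new().build(256).unwrap();
/// // Non-blocking: pop whatever is ready, retrying up to 100 times.
/// let nr_ready = io_uring.poll_completions(0, 100);
/// // Blocking: return only after at least one completion has been popped.
/// let nr_done = io_uring.poll_completions(1, 100);
/// ```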
pub fn poll_completions(&self, min_complete: usize, polling_retries: usize) -> usize {
let mut cq = unsafe { self.ring.completion_shared() }; // Safety: only the polling thread uses the completion queue
let mut nr_complete = 0;
loop {
// Polling for at most a specified number of times
let mut nr_retries = 0;
while nr_retries <= polling_retries {
// The completion queue must be synchronized when looping for the next entry.
cq.sync();
if let Some(cqe) = cq.next() {
let retval = cqe.result();
let token_key = cqe.user_data();
if token_key != IoUring::CANCEL_TOKEN_KEY {
let io_token = {
let token_idx = token_key as usize;
let mut token_table = self.token_table.lock();
token_table.remove(token_idx)
};
io_token.complete(retval);
nr_complete += 1;
}
} else {
nr_retries += 1;
std::hint::spin_loop();
}
}
if nr_complete >= min_complete {
return nr_complete;
}
// Wait until at least one new completion entry arrives
let _ = self.ring.submit_and_wait(1);
}
}
/// Wait for at least the specified number of I/O completions.
pub fn wait_completions(&self, min_complete: usize) -> usize {
self.poll_completions(min_complete, 10)
}
unsafe fn push(&self, entry: SqEntry) {
// Push the entry into the submission queue.
// No other `SubmissionQueue`s may exist when calling submission_shared(), so the
// submission queue must be locked here. Since the loop below should be very quick,
// holding the lock is cheap.
let sq_guard = self.sq_lock.lock();
loop {
if self.ring.submission_shared().push(&entry).is_err() {
if self.ring.enter(1, 1, 0, None).is_err() {
panic!("sq broken");
}
} else {
break;
}
}
drop(sq_guard);
// Make sure Linux is aware of the new submission
if let Err(e) = self.ring.submit() {
panic!("submit failed, error: {}", e);
}
}
// Push a submission entry to io_uring and return a corresponding handle.
//
// Safety. All resources referenced by the entry must be valid before its completion.
unsafe fn push_entry(
&self,
mut entry: SqEntry,
callback: impl FnOnce(i32) + Send + 'static,
) -> IoHandle {
// Create the user-visible handle that is associated with the submission entry
let io_handle = {
let mut token_table = self.token_table.lock();
let token_slot = token_table.vacant_entry();
let token_key = token_slot.key() as u64;
assert!(token_key != IoUring::CANCEL_TOKEN_KEY);
let token = Arc::new(IoToken::new(callback, token_key));
token_slot.insert(token.clone());
let handle = IoHandle::new(token);
// Associate the entry with the token, which the handle points to.
entry = entry.user_data(token_key);
handle
};
self.push(entry);
io_handle
}
fn op_fetch_add(&self, fd: usize, val: usize) -> usize {
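// Bump the per-fd operation counter; `task_load` below sums these counters as a load metric.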
let fd_map = self.fd_map.upgradeable_read();
match fd_map.get(&fd) {
Some(ops_num) => ops_num.fetch_add(val, Ordering::Relaxed),
None => {
let mut fd_map = fd_map.upgrade();
fd_map.insert(fd, AtomicUsize::new(val));
0
}
}
}
pub fn disattach_fd(&self, fd: usize) -> Option<AtomicUsize> {
let mut fd_map = self.fd_map.write();
fd_map.remove(&fd)
}
// Use the sum of the per-fd operation counts (across all attached raw fds) as a measure of task load.
pub fn task_load(&self) -> usize {
let fd_map = self.fd_map.read();
fd_map
.values()
.fold(0, |acc, val| acc + val.load(Ordering::Relaxed))
}
// The number of registered fds in this io_uring instance
pub fn registered_fds(&self) -> usize {
let fd_map = self.fd_map.read();
fd_map.len()
}
/// Cancel an ongoing I/O request.
///
/// # Safety
///
/// The handle must be generated by this IoUring instance.
pub unsafe fn cancel(&self, handle: &IoHandle) {
let target_token_key = match handle.0.transit_to_cancelling() {
Ok(target_token_key) => target_token_key,
Err(_) => {
return;
}
};
let mut entry = opcode::AsyncCancel::new(target_token_key).build();
entry = entry.user_data(IoUring::CANCEL_TOKEN_KEY);
self.push(entry);
}
}
/// A builder for `IoUring`.
pub struct Builder {
inner: io_uring::Builder,
}
impl Builder {
/// Creates an `IoUring` builder.
pub fn new() -> Self {
let inner = io_uring::IoUring::builder();
Self { inner }
}
/// When this flag is specified, a kernel thread is created to perform submission queue polling.
/// An io_uring instance configured in this way enables an application to issue I/O
/// without ever context switching into the kernel.
pub fn setup_sqpoll(&mut self, idle: u32) -> &mut Self {
self.inner.setup_sqpoll(idle);
self
}
pub fn setup_attach_wq(&mut self, fd: RawFd) -> &mut Self {
self.inner.setup_attach_wq(fd);
self
}
/// If this flag is specified,
/// then the poll thread will be bound to the cpu set in the value.
/// This flag is only meaningful when [Builder::setup_sqpoll] is enabled.
pub fn setup_sqpoll_cpu(&mut self, n: u32) -> &mut Self {
self.inner.setup_sqpoll_cpu(n);
self
}
/// Create the completion queue with struct `io_uring_params.cq_entries` entries.
/// The value must be greater than `entries`, and may be rounded up to the next power-of-two.
pub fn setup_cqsize(&mut self, n: u32) -> &mut Self {
self.inner.setup_cqsize(n);
self
}
/// Build an [`IoUring`].
#[inline]
pub fn build(&self, entries: u32) -> io::Result<IoUring> {
let io_uring_inner = self.inner.build(entries)?;
let io_uring = IoUring::new(io_uring_inner);
Ok(io_uring)
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::fs::File;
use std::io::{IoSlice, IoSliceMut, Write};
use std::os::unix::io::{AsRawFd, FromRawFd};
use std::thread;
use std::time::Duration;
use std::time::Instant;
#[test]
fn test_builder() {
let _io_uring = Builder::new().setup_sqpoll(1000).build(256).unwrap();
}
#[test]
fn test_new() {
let _io_uring = IoUring::new(io_uring::IoUring::new(256).unwrap());
}
#[test]
fn test_writev_readv() {
let io_uring = IoUring::new(io_uring::IoUring::new(256).unwrap());
let fd = tempfile::tempfile().unwrap();
let fd = Fd(fd.as_raw_fd());
let text = b"1234";
let text2 = b"5678";
let mut output = vec![0; text.len()];
let mut output2 = vec![0; text2.len()];
let w_iovecs = vec![IoSlice::new(text), IoSlice::new(text2)];
let r_iovecs = vec![IoSliceMut::new(&mut output), IoSliceMut::new(&mut output2)];
let complete_fn = move |_retval: i32| {};
let handle = unsafe {
io_uring.writev(
fd,
w_iovecs.as_ptr().cast(),
w_iovecs.len() as _,
0,
0,
complete_fn,
)
};
io_uring.wait_completions(1);
let retval = handle.retval().unwrap();
assert_eq!(retval, (text.len() + text2.len()) as i32);
let complete_fn = move |_retval: i32| {};
let handle = unsafe {
io_uring.readv(
fd,
r_iovecs.as_ptr().cast(),
r_iovecs.len() as _,
0,
0,
complete_fn,
)
};
io_uring.wait_completions(1);
let retval = handle.retval().unwrap();
assert_eq!(retval, (text.len() + text2.len()) as i32);
assert_eq!(&output, text);
assert_eq!(&output2, text2);
}
#[test]
fn test_poll() {
let mut fd = unsafe {
let fd = libc::eventfd(0, libc::EFD_CLOEXEC);
assert!(fd != -1);
File::from_raw_fd(fd)
};
let io_uring = IoUring::new(io_uring::IoUring::new(256).unwrap());
let complete_fn = move |_retval: i32| {};
let handle = unsafe { io_uring.poll(Fd(fd.as_raw_fd()), libc::POLLIN as _, complete_fn) };
thread::sleep(Duration::from_millis(100));
assert_eq!(io_uring.poll_completions(0, 10000), 0);
fd.write(&0x1u64.to_ne_bytes()).unwrap();
io_uring.wait_completions(1);
assert_eq!(handle.retval().unwrap(), 1);
}
#[test]
fn test_cancel_poll() {
let mut fd = unsafe {
let fd = libc::eventfd(0, libc::EFD_CLOEXEC);
assert!(fd != -1);
File::from_raw_fd(fd)
};
let io_uring = IoUring::new(io_uring::IoUring::new(256).unwrap());
let complete_fn = move |_retval: i32| {};
let poll_handle =
unsafe { io_uring.poll(Fd(fd.as_raw_fd()), libc::POLLIN as _, complete_fn) };
unsafe {
io_uring.cancel(&poll_handle);
}
thread::sleep(Duration::from_millis(100));
fd.write(&0x1u64.to_ne_bytes()).unwrap();
io_uring.wait_completions(1);
assert_eq!(poll_handle.retval().unwrap(), -libc::ECANCELED);
}
#[test]
fn test_cancel_poll_failed() {
let mut fd = unsafe {
let fd = libc::eventfd(0, libc::EFD_CLOEXEC);
assert!(fd != -1);
File::from_raw_fd(fd)
};
let io_uring = IoUring::new(io_uring::IoUring::new(256).unwrap());
let complete_fn = move |_retval: i32| {};
let poll_handle =
unsafe { io_uring.poll(Fd(fd.as_raw_fd()), libc::POLLIN as _, complete_fn) };
fd.write(&0x1u64.to_ne_bytes()).unwrap();
io_uring.wait_completions(1);
unsafe {
io_uring.cancel(&poll_handle);
}
thread::sleep(Duration::from_millis(100));
assert_eq!(poll_handle.retval().unwrap(), 1);
}
#[test]
fn test_timeout() {
let io_uring = IoUring::new(io_uring::IoUring::new(256).unwrap());
let start = Instant::now();
let secs = 1;
let timespec = types::Timespec::new().sec(secs).nsec(0);
let complete_fn = move |_retval: i32| {};
let handle = unsafe {
io_uring.timeout(
&timespec as *const _,
0,
types::TimeoutFlags::empty(),
complete_fn,
)
};
io_uring.wait_completions(1);
assert_eq!(handle.retval().unwrap(), -libc::ETIME);
assert_eq!(start.elapsed().as_secs(), secs as u64);
}
#[test]
fn test_cancel_timeout() {
let io_uring = IoUring::new(io_uring::IoUring::new(256).unwrap());
let start = Instant::now();
let secs = 1;
let timespec = types::Timespec::new().sec(secs).nsec(0);
let complete_fn = move |_retval: i32| {};
let handle = unsafe {
io_uring.timeout(
&timespec as *const _,
0,
types::TimeoutFlags::empty(),
complete_fn,
)
};
unsafe {
io_uring.cancel(&handle);
}
io_uring.wait_completions(1);
assert_eq!(handle.retval().unwrap(), -libc::ECANCELED);
assert_eq!(start.elapsed().as_secs(), 0);
}
}

@@ -1,10 +0,0 @@
[package]
name = "keyable-arc"
version = "0.1.0"
authors = ["Tate, Hongliang Tian <tate.thl@antgroup.com>"]
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]

@@ -1,348 +0,0 @@
//! Same as the standard `Arc`, except that it can be used as the key type of a hash table.
//!
//! # Motivation
//!
//! A type `K` is _keyable_ if it can be used as the key type for a hash map. Specifically,
//! according to the documentation of `std::collections::HashMap`, the type `K` must satisfy
//! the following properties.
//!
//! 1. It implements the `Eq` and `Hash` traits.
//! 2. Two values `k1` and `k2` of type `K` equal each other
//! if and only if their hash values equal each other.
//! 3. The hash of a value `k` of type `K` cannot change while it
//! is in a map.
//!
//! Sometimes we want to use `Arc<T>` as the key type for a hash map but cannot do so
//! since `T` does not satisfy the properties above. For example, a lot of types
//! do not or cannot implement the `Eq` trait. This is when `KeyableArc<T>` can come
//! to your aid.
//!
//! # Overview
//!
//! For any type `T`, `KeyableArc<T>` satisfies all the properties to be keyable.
//! This can be achieved easily and efficiently as we can simply use the address
//! of the data (of `T` type) of a `KeyableArc<T>` object in the heap to determine the
//! equality and hash of the `KeyableArc<T>` object. As the address won't change for
//! an immutable `KeyableArc<T>` object, the hash and equality also stay the same.
//!
//! This crate is `#[no_std]` compatible, but requires the `alloc` crate.
//!
//! # Usage
//!
//! Here is a basic example to show that `KeyableArc<T>` is keyable even when `T`
//! is not.
//!
//! ```rust
//! use std::collections::HashMap;
//! use std::sync::Arc;
//! use keyable_arc::KeyableArc;
//!
//! struct Dummy; // Does not implement Eq and Hash
//!
//! let map: HashMap<KeyableArc<Dummy>, String> = HashMap::new();
//! ```
//!
//! `KeyableArc` is a reference counter-based smart pointer, just like `Arc`.
//! So you can use `KeyableArc` the same way you would use `Arc`.
//!
//! ```rust
//! use std::sync::atomic::{AtomicU64, Ordering::Relaxed};
//! use keyable_arc::KeyableArc;
//!
//! let key_arc0 = KeyableArc::new(AtomicU64::new(0));
//! let key_arc1 = key_arc0.clone();
//! assert!(key_arc0.load(Relaxed) == 0 && key_arc1.load(Relaxed) == 0);
//!
//! key_arc0.fetch_add(1, Relaxed);
//! assert!(key_arc0.load(Relaxed) == 1 && key_arc1.load(Relaxed) == 1);
//! ```
//!
//! # Differences from `Arc<T>`
//!
//! Notice how `KeyableArc` differs from standard smart pointers in determining equality?
//! Two `KeyableArc` objects are considered different even when their data have the same
//! value.
//!
//! ```rust
//! use keyable_arc::KeyableArc;
//!
//! let key_arc0 = KeyableArc::new(0);
//! let key_arc1 = key_arc0.clone();
//! assert!(key_arc0 == key_arc1);
//! assert!(*key_arc0 == *key_arc1);
//!
//! let key_arc1 = KeyableArc::new(0);
//! assert!(key_arc0 != key_arc1);
//! assert!(*key_arc0 == *key_arc1);
//! ```
//!
//! `KeyableArc<T>` is simply a wrapper of `Arc<T>`. So converting between them
//! through the `From` and `Into` traits is zero cost.
//!
//! ```rust
//! use std::sync::Arc;
//! use keyable_arc::KeyableArc;
//!
//! let key_arc: KeyableArc<u32> = Arc::new(0).into();
//! let arc: Arc<u32> = KeyableArc::new(0).into();
//! ```
//!
//! # The weak version
//!
//! `KeyableWeak<T>` is the weak version of `KeyableArc<T>`, just like `Weak<T>` is
//! that of `Arc<T>`. And of course, `KeyableWeak<T>` is also _keyable_ for any
//! type `T`.
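//!
//! A minimal sketch of the weak version (using this crate's own API):
//!
//! ```rust
//! use keyable_arc::{KeyableArc, KeyableWeak};
//!
//! let arc = KeyableArc::new(42);
//! let weak: KeyableWeak<i32> = KeyableArc::downgrade(&arc);
//! assert!(weak.upgrade().unwrap() == arc);
//! ```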
// TODO: Add `KeyableBox<T>` or other keyable versions of smart pointers.
// If this is needed in the future, this crate should be renamed to `keyable`.
// TODO: Add the missing methods offered by `Arc` or `Weak` but not their
// keyable counterparts.
#![cfg_attr(not(test), no_std)]
#![feature(coerce_unsized)]
#![feature(unsize)]
extern crate alloc;
use alloc::sync::{Arc, Weak};
use core::borrow::Borrow;
use core::convert::AsRef;
use core::fmt;
use core::hash::{Hash, Hasher};
use core::marker::Unsize;
use core::ops::{CoerceUnsized, Deref};
/// Same as the standard `Arc`, except that it can be used as the key type of a hash table.
#[repr(transparent)]
pub struct KeyableArc<T: ?Sized>(Arc<T>);
impl<T> KeyableArc<T> {
/// Constructs a new instance of `KeyableArc<T>`.
#[inline]
pub fn new(data: T) -> Self {
Self(Arc::new(data))
}
}
impl<T: ?Sized> KeyableArc<T> {
/// Returns a raw pointer to the object `T` pointed to by this `KeyableArc<T>`.
#[inline]
pub fn as_ptr(this: &Self) -> *const T {
Arc::as_ptr(&this.0)
}
/// Creates a new `KeyableWeak` pointer to this allocation.
pub fn downgrade(this: &Self) -> KeyableWeak<T> {
Arc::downgrade(&this.0).into()
}
}
impl<T: ?Sized> Deref for KeyableArc<T> {
type Target = T;
#[inline]
fn deref(&self) -> &T {
&*self.0
}
}
impl<T: ?Sized> AsRef<T> for KeyableArc<T> {
#[inline]
fn as_ref(&self) -> &T {
&**self
}
}
impl<T: ?Sized> Borrow<T> for KeyableArc<T> {
#[inline]
fn borrow(&self) -> &T {
&**self
}
}
impl<T: ?Sized> From<Arc<T>> for KeyableArc<T> {
#[inline]
fn from(arc: Arc<T>) -> Self {
Self(arc)
}
}
impl<T: ?Sized> Into<Arc<T>> for KeyableArc<T> {
#[inline]
fn into(self) -> Arc<T> {
self.0
}
}
impl<T: ?Sized> PartialEq for KeyableArc<T> {
fn eq(&self, other: &Self) -> bool {
Arc::as_ptr(&self.0) == Arc::as_ptr(&other.0)
}
}
impl<T: ?Sized> Eq for KeyableArc<T> {}
impl<T: ?Sized> Hash for KeyableArc<T> {
fn hash<H: Hasher>(&self, s: &mut H) {
Arc::as_ptr(&self.0).hash(s)
}
}
impl<T: ?Sized> Clone for KeyableArc<T> {
fn clone(&self) -> Self {
Self(self.0.clone())
}
}
impl<T: ?Sized + fmt::Debug> fmt::Debug for KeyableArc<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::Debug::fmt(&**self, f)
}
}
//=========================================================
// The weak version
//=========================================================
/// The weak counterpart of `KeyableArc<T>`, similar to `Weak<T>`.
///
/// `KeyableWeak<T>` is also _keyable_ for any type `T` just like
/// `KeyableArc<T>`.
#[repr(transparent)]
pub struct KeyableWeak<T: ?Sized>(Weak<T>);
impl<T> KeyableWeak<T> {
/// Constructs a new `KeyableWeak<T>`, without allocating any memory.
/// Calling `upgrade` on the return value always gives `None`.
#[inline]
pub fn new() -> Self {
Self(Weak::new())
}
/// Returns a raw pointer to the object `T` pointed to by this `KeyableWeak<T>`.
///
/// The pointer is valid only if there are some strong references.
/// The pointer may be dangling, unaligned or even null otherwise.
#[inline]
pub fn as_ptr(&self) -> *const T {
self.0.as_ptr()
}
}
impl<T: ?Sized> KeyableWeak<T> {
/// Attempts to upgrade the Weak pointer to an Arc,
/// delaying dropping of the inner value if successful.
///
/// Returns None if the inner value has since been dropped.
#[inline]
pub fn upgrade(&self) -> Option<KeyableArc<T>> {
self.0.upgrade().map(|arc| arc.into())
}
/// Gets the number of strong pointers pointing to this allocation.
#[inline]
pub fn strong_count(&self) -> usize {
self.0.strong_count()
}
/// Gets the number of weak pointers pointing to this allocation.
#[inline]
pub fn weak_count(&self) -> usize {
self.0.weak_count()
}
}
impl<T: ?Sized> Hash for KeyableWeak<T> {
fn hash<H: Hasher>(&self, s: &mut H) {
self.0.as_ptr().hash(s)
}
}
impl<T: ?Sized> From<Weak<T>> for KeyableWeak<T> {
#[inline]
fn from(weak: Weak<T>) -> Self {
Self(weak)
}
}
impl<T: ?Sized> Into<Weak<T>> for KeyableWeak<T> {
#[inline]
fn into(self) -> Weak<T> {
self.0
}
}
impl<T: ?Sized> PartialEq for KeyableWeak<T> {
fn eq(&self, other: &Self) -> bool {
self.0.as_ptr() == other.0.as_ptr()
}
}
impl<T: ?Sized> PartialOrd for KeyableWeak<T> {
fn partial_cmp(&self, other: &Self) -> Option<core::cmp::Ordering> {
Some(self.cmp(other))
}
}
impl<T: ?Sized> Eq for KeyableWeak<T> {}
impl<T: ?Sized> Ord for KeyableWeak<T> {
fn cmp(&self, other: &Self) -> core::cmp::Ordering {
self.0.as_ptr().cmp(&other.0.as_ptr())
}
}
impl<T: ?Sized + fmt::Debug> fmt::Debug for KeyableWeak<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "(KeyableWeak)")
}
}
// Enabling type coercing, e.g., converting from `KeyableArc<T>` to `KeyableArc<dyn S>`,
// where `T` implements `S`.
impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<KeyableArc<U>> for KeyableArc<T> {}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn downgrade_and_upgrade() {
let arc = KeyableArc::new(1);
let weak = KeyableArc::downgrade(&arc);
assert!(arc.clone() == weak.upgrade().unwrap());
assert!(weak == KeyableArc::downgrade(&arc));
}
#[test]
fn debug_format() {
println!("{:?}", KeyableArc::new(1u32));
println!("{:?}", KeyableWeak::<u32>::new());
}
#[test]
fn use_as_key() {
use std::collections::HashMap;
let mut map: HashMap<KeyableArc<u32>, u32> = HashMap::new();
let key = KeyableArc::new(1);
let val = 1;
map.insert(key.clone(), val);
assert!(map.get(&key) == Some(&val));
assert!(map.remove(&key) == Some(val));
assert!(map.keys().count() == 0);
}
#[test]
fn as_trait_object() {
trait DummyTrait {}
struct DummyStruct;
impl DummyTrait for DummyStruct {}
let arc_struct = KeyableArc::new(DummyStruct);
let arc_dyn0: KeyableArc<dyn DummyTrait> = arc_struct.clone();
let arc_dyn1: KeyableArc<dyn DummyTrait> = arc_struct.clone();
assert!(arc_dyn0 == arc_dyn1);
}
}

@@ -1,9 +0,0 @@
[package]
name = "object-id"
version = "0.1.0"
authors = ["Tate, Hongliang Tian <tate.thl@antgroup.com>"]
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]

@@ -1,62 +0,0 @@
//! Assign a unique and immutable ID.
//!
//! Some types do not have a natural implementation for `PartialEq` or `Hash`.
//! In such cases, it can be convenient to assign a unique ID to each instance
//! of such types and use the ID to implement `PartialEq` or `Hash`.
//!
//! An ID is 64 bits long.
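//!
//! A short sketch of the intended pattern (the `Session` type is hypothetical):
//!
//! ```rust
//! use object_id::ObjectId;
//!
//! struct Session {
//!     id: ObjectId,
//! }
//!
//! impl PartialEq for Session {
//!     fn eq(&self, other: &Self) -> bool {
//!         self.id == other.id
//!     }
//! }
//! ```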
#![cfg_attr(not(any(test, doctest)), no_std)]
use core::sync::atomic::{AtomicU64, Ordering};
/// A unique id.
#[derive(PartialEq, Eq, Copy, Clone, Debug, Hash)]
#[repr(transparent)]
pub struct ObjectId(u64);
impl ObjectId {
/// Create a new unique ID.
pub fn new() -> Self {
static NEXT_ID: AtomicU64 = AtomicU64::new(1);
let id = NEXT_ID.fetch_add(1, Ordering::Relaxed);
// Make sure that we can detect the overflow of id even in the face of
// (extremely) concurrent additions on NEXT_ID.
assert!(id <= u64::MAX / 2);
Self(id)
}
/// Return a special "null" ID.
///
/// Note that no ID created by `ObjectId::new()` will be equivalent to the
/// null ID.
pub const fn null() -> Self {
Self(0)
}
/// Get the ID value as `u64`.
pub const fn get(&self) -> u64 {
self.0
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn unique() {
let id0 = ObjectId::new();
let id1 = ObjectId::new();
assert!(id0 != id1);
assert!(id0.get() < id1.get());
}
#[test]
fn non_null() {
let id0 = ObjectId::new();
assert!(id0 != ObjectId::null());
}
}

@@ -1,2 +0,0 @@
target/
Cargo.lock

@@ -1,22 +0,0 @@
[package]
name = "sgx-untrusted-alloc"
version = "0.1.0"
edition = "2021"
[features]
default = ["libc"]
sgx = ["sgx_types", "sgx_tstd", "sgx_trts", "sgx_libc"]
[dependencies]
cfg-if = "1.0.0"
libc = { version = "0.2", optional = true }
lazy_static = { version = "1.4.0", features = ["spin_no_std"] }
intrusive-collections = "0.9"
log = "0.4"
spin = "0.7"
errno = { path = "../errno" }
sgx_types = { path = "../../../../deps/rust-sgx-sdk/sgx_types", optional = true }
sgx_tstd = { path = "../../../../deps/rust-sgx-sdk/sgx_tstd", optional = true, features = ["backtrace"] }
sgx_trts = { path = "../../../../deps/rust-sgx-sdk/sgx_trts", optional = true }
sgx_libc = { path = "../../../../deps/rust-sgx-sdk/sgx_libc", optional = true }

@@ -1,10 +0,0 @@
# sgx-untrusted-alloc
An untrusted memory allocator for SGX.
## Usage
To use sgx-untrusted-alloc, place the following line under the `[dependencies]` section in your `Cargo.toml`:
```toml
sgx-untrusted-alloc = { path = "your_path/sgx-untrusted-alloc" }
```
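
Then shared buffers can be allocated on the untrusted heap. A minimal sketch (`u8` implements `MaybeUntrusted`, as this crate's own examples assume):

```rust
use sgx_untrusted_alloc::UntrustedBox;

// A 4-byte buffer on the untrusted heap, initialized from trusted data.
let buf: UntrustedBox<[u8]> = UntrustedBox::new_slice(&[0u8; 4]);
assert_eq!(buf.len(), 4);
```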

@@ -1,190 +0,0 @@
use crate::untrusted_allocator::Allocator;
use std::fmt::Debug;
use std::mem::{align_of, size_of};
use std::ops::{Deref, DerefMut};
use std::ptr::NonNull;
lazy_static! {
static ref UNTRUSTED_MEM_INSTANCE: Allocator = Allocator::new();
}
use crate::MaybeUntrusted;
/// A memory location on the heap in untrusted memory.
///
/// `UntrustedBox<T>` behaves similarly to the standard `Box<T>`, except that
/// it requires the type bound `T: MaybeUntrusted`. This is a safety
/// measure to avoid potential misuses.
pub struct UntrustedBox<T: ?Sized> {
ptr: NonNull<T>,
}
impl<T> Debug for UntrustedBox<T>
where
T: ?Sized + Debug,
{
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("Untrusted T")
.field("T", unsafe { &self.ptr.as_ref() })
.finish()
}
}
impl<T: MaybeUntrusted> UntrustedBox<T> {
/// Creates a value of `T` on the heap in untrusted memory.
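///
/// A minimal sketch (assuming the pointee type implements `MaybeUntrusted`,
/// as primitive integers do in this crate's examples):
///
/// ```ignore
/// let val = UntrustedBox::new(42u64);
/// assert_eq!(*val, 42);
/// ```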
pub fn new(val: T) -> Self {
let mut new_self = Self::new_uninit();
*new_self = val;
new_self
}
/// Creates an _uninitialized_ value of `T` on the heap in untrusted memory.
pub fn new_uninit() -> Self {
let ptr = {
let raw_ptr = unsafe {
UNTRUSTED_MEM_INSTANCE
.alloc(size_of::<T>(), None)
.expect("memory allocation failure")
} as *mut T;
assert!(!raw_ptr.is_null());
assert!((raw_ptr as usize) % align_of::<T>() == 0);
NonNull::new(raw_ptr).unwrap()
};
Self { ptr }
}
}
impl<T: MaybeUntrusted + Copy> UntrustedBox<[T]> {
/// Creates a slice of `T` on the heap in untrusted memory.
///
/// Note that the pointer and length of the slice are still kept in trusted memory;
/// only the data they refer to resides in untrusted memory. Thus, there is no risk
/// of buffer overflow.
pub fn new_slice(slice: &[T]) -> Self {
let mut uninit_slice = Self::new_uninit_slice(slice.len());
uninit_slice.copy_from_slice(slice);
uninit_slice
}
}
impl<T: MaybeUntrusted> UntrustedBox<[T]> {
/// Creates an uninitialized slice of `T` on the heap in untrusted memory.
pub fn new_uninit_slice(len: usize) -> Self {
let ptr = {
let total_bytes = size_of::<T>() * len;
let raw_slice_ptr = unsafe {
UNTRUSTED_MEM_INSTANCE
.alloc(total_bytes, None)
.expect("memory allocation failure")
} as *mut T;
assert!(!raw_slice_ptr.is_null());
assert!((raw_slice_ptr as usize) % align_of::<T>() == 0);
let untrusted_slice = unsafe { std::slice::from_raw_parts_mut(raw_slice_ptr, len) };
// For DST types like slice, NonNull is now a fat pointer.
NonNull::new(untrusted_slice as _).unwrap()
};
Self { ptr }
}
}
impl<T: ?Sized> UntrustedBox<T> {
/// Gets an immutable pointer of the value on the untrusted memory.
pub fn as_ptr(&self) -> *const T {
self.ptr.as_ptr()
}
/// Gets a mutable pointer of the value on the untrusted memory.
pub fn as_mut_ptr(&self) -> *mut T {
self.ptr.as_ptr()
}
}
impl<T: ?Sized> Drop for UntrustedBox<T> {
fn drop(&mut self) {
unsafe {
UNTRUSTED_MEM_INSTANCE.free(self.as_mut_ptr() as *mut u8);
}
}
}
impl<T: ?Sized> Deref for UntrustedBox<T> {
type Target = T;
fn deref(&self) -> &T {
unsafe { self.ptr.as_ref() }
}
}
impl<T: ?Sized> DerefMut for UntrustedBox<T> {
fn deref_mut(&mut self) -> &mut T {
unsafe { self.ptr.as_mut() }
}
}
impl<T: MaybeUntrusted + Default> Default for UntrustedBox<T> {
fn default() -> Self {
Self::new(T::default())
}
}
impl<T: MaybeUntrusted + Clone> Clone for UntrustedBox<T> {
fn clone(&self) -> Self {
Self::new(self.deref().clone())
}
}
unsafe impl<T: ?Sized + Send> Send for UntrustedBox<T> {}
unsafe impl<T: ?Sized + Sync> Sync for UntrustedBox<T> {}
#[cfg(test)]
mod tests {
use super::*;
struct Point {
x: usize,
y: usize,
}
unsafe impl MaybeUntrusted for Point {}
#[test]
fn with_i32() {
let mut untrusted_i32 = UntrustedBox::new(0i32);
assert!(*untrusted_i32 == 0);
*untrusted_i32 = 1;
assert!(*untrusted_i32 == 1);
drop(untrusted_i32);
}
#[test]
fn with_point() {
let mut untrusted_point = UntrustedBox::new(Point { x: 0, y: 0 });
assert!(untrusted_point.x == 0 && untrusted_point.y == 0);
untrusted_point.x += 10;
untrusted_point.y += 20;
assert!(untrusted_point.x == 10 && untrusted_point.y == 20);
drop(untrusted_point);
}
#[test]
fn with_array() {
let mut untrusted_array = UntrustedBox::new([0u8, 1, 2, 3]);
untrusted_array
.iter()
.enumerate()
.for_each(|(pos, i)| assert!(pos as u8 == *i));
for i in untrusted_array.iter_mut() {
*i = 0;
}
untrusted_array.iter().for_each(|i| assert!(*i == 0));
}
#[test]
fn with_slice() {
let len = 4;
let mut untrusted_slice: UntrustedBox<[i32]> = UntrustedBox::new_uninit_slice(len);
assert!(untrusted_slice.len() == len);
untrusted_slice[1] = 123;
assert!(untrusted_slice[1] == 123);
}
}

@@ -1,95 +0,0 @@
//! Allocation and access of _untrusted_ memory in a _safe_ way.
//!
//! # Usage
//!
//! ## Basics
//!
//! Suppose you have a data structure named `AcceptReq`
//! ```rust
//! struct AcceptReq {
//! addr: libc::sockaddr_storage,
//! addr_len: libc::socklen_t,
//! }
//! ```
//! which is intended to be used as an untrusted buffer shared
//! with the host OS to pass arguments of the accept system call.
//! And we assume that this buffer must be present during the lifetime
//! of a listening socket, so it must be allocated on the heap in
//! untrusted memory. How do we do it?
//!
//! With this crate, it takes two steps.
//!
//! 1. Implement the [`MaybeUntrusted`] marker trait for the data structure.
//!
//! ```rust
//! use sgx_untrusted_alloc::MaybeUntrusted;
//! # struct AcceptReq;
//!
//! unsafe impl MaybeUntrusted for AcceptReq { }
//! ```
//!
//! By implementing this trait, you are claiming: "I am fully aware of the
//! security risks in communicating with the host through _untrusted,
//! shared data structures_. I know that an attacker may peek or tamper with
//! the data structure at any possible timing or in an arbitrary way.
//! I will be very careful. And I am good to go."
//!
//! 2. You can now allocate the data structure in untrusted heap with
//! [`UntrustedBox`], which is similar to the standard `Box` albeit for the
//! untrusted memory.
//!
//! ```rust
//! # use sgx_untrusted_alloc::MaybeUntrusted;
//! # struct AcceptReq;
//! # unsafe impl MaybeUntrusted for AcceptReq { }
//! #
//! use sgx_untrusted_alloc::UntrustedBox;
//!
//! let accept_req: UntrustedBox<AcceptReq> = UntrustedBox::new_uninit();
//! ```
//!
//! Note that the convenient constructor method `UntrustedBox::<T>::new_uninit`
//! creates an _uninitialized_ instance of `T` on untrusted heap.
//! Alternatively, you can create an _initialized_ instance with `UntrustedBox::new`.
//!
//! ## Arrays and slices
//!
//! You can also use `UntrustedBox` to allocate arrays (`[T; N]`) or
//! slices (`[T]`) on untrusted heap as long as the trait bound of `T: MaybeUntrusted`
//! is held.
//!
//! ```rust
//! use sgx_untrusted_alloc::{MaybeUntrusted, UntrustedBox};
//!
//! let untrusted_array: UntrustedBox<[u8; 4]> = UntrustedBox::new_uninit();
//!
//! let untrusted_slice: UntrustedBox<[u8]> = UntrustedBox::new_uninit_slice(4);
//! ```
//!
//! Both `untrusted_array` and `untrusted_slice` above consist of four `u8` integers.
#![cfg_attr(feature = "sgx", no_std)]
#![feature(linked_list_remove)]
#[cfg(feature = "sgx")]
extern crate sgx_libc as libc;
#[cfg(feature = "sgx")]
extern crate sgx_tstd as std;
#[macro_use]
extern crate alloc;
#[macro_use]
extern crate lazy_static;
extern crate intrusive_collections;
#[macro_use]
extern crate log;
extern crate spin;
mod maybe_untrusted;
pub use maybe_untrusted::MaybeUntrusted;
mod box_;
pub use box_::UntrustedBox;
mod prelude;
mod untrusted_allocator;

Some files were not shown because too many files have changed in this diff.