Compare commits

...

59 Commits
0.30.1 ... main

Author SHA1 Message Date
531f9a1241 Merge pull request 'Update README' (#1) from noormohammedb/detee-occlum:fix_readme into main
Some checks failed
SGX Hardware Mode Test / Fish_test ([self-hosted SGX2-HW EDMM]) (push) Has been cancelled
SGX Hardware Mode Test / Fish_test ([self-hosted SGX2-HW]) (push) Has been cancelled
SGX Hardware Mode Test / Xgboost_test ([self-hosted SGX2-HW EDMM]) (push) Has been cancelled
SGX Hardware Mode Test / Python_musl_support_test ([self-hosted SGX2-HW]) (push) Has been cancelled
SGX Hardware Mode Test / Openvino_test ([self-hosted SGX2-HW EDMM]) (push) Has been cancelled
SGX Hardware Mode Test / Openvino_test ([self-hosted SGX2-HW]) (push) Has been cancelled
SGX Hardware Mode Test / Grpc_test ([self-hosted SGX2-HW EDMM]) (push) Has been cancelled
SGX Hardware Mode Test / Grpc_test ([self-hosted SGX2-HW]) (push) Has been cancelled
SGX Hardware Mode Test / Init_RA_grpc ([self-hosted SGX2-HW EDMM]) (push) Has been cancelled
SGX Hardware Mode Test / Gvisor_test ([self-hosted SGX2-HW EDMM]) (push) Has been cancelled
SGX Hardware Mode Test / Gvisor_test ([self-hosted SGX2-HW]) (push) Has been cancelled
SGX Hardware Mode Test / Test_deb_deploy ([self-hosted SGX2-HW]) (push) Has been cancelled
SGX Hardware Mode Test / Tensorflow_serving_test ([self-hosted SGX2-HW EDMM]) (push) Has been cancelled
SGX Hardware Mode Test / Tensorflow_serving_test ([self-hosted SGX2-HW]) (push) Has been cancelled
SGX Hardware Mode Test / Remote_attestation_test ([self-hosted SGX2-HW EDMM]) (push) Has been cancelled
SGX Hardware Mode Test / Init_RA_grpc ([self-hosted SGX2-HW]) (push) Has been cancelled
SGX Hardware Mode Test / Init_RA_AECS ([self-hosted SGX2-HW EDMM]) (push) Has been cancelled
SGX Hardware Mode Test / Init_RA_AECS ([self-hosted SGX2-HW]) (push) Has been cancelled
SGX Hardware Mode Test / MySQL_test ([self-hosted SGX2-HW EDMM]) (push) Has been cancelled
SGX Hardware Mode Test / MySQL_test ([self-hosted SGX2-HW]) (push) Has been cancelled
SGX Hardware Mode Test / Stress_test_with_musl ([self-hosted SGX2-HW EDMM]) (push) Has been cancelled
SGX Hardware Mode Test / Stress_test_with_musl ([self-hosted SGX2-HW]) (push) Has been cancelled
SGX Hardware Mode Test / Stress_test_with_glibc ([self-hosted SGX2-HW EDMM]) (push) Has been cancelled
SGX Hardware Mode Test / Stress_test_with_glibc ([self-hosted SGX2-HW]) (push) Has been cancelled
Benchmarks Test / Sysbench_Test ([self-hosted SGX2-HW benchmark]) (push) Has been cancelled
Benchmarks Test / Iperf3_Test ([self-hosted SGX2-HW benchmark]) (push) Has been cancelled
Benchmarks Test for dev branch / Sysbench_Test ([self-hosted SGX2-HW benchmark]) (push) Has been cancelled
Benchmarks Test for dev branch / Iperf3_Test ([self-hosted SGX2-HW benchmark]) (push) Has been cancelled
Benchmarks Test for dev branch / SEFS_FIO_Test ([self-hosted SGX2-HW benchmark]) (push) Has been cancelled
Benchmarks Test for dev branch / AsyncSFS_FIO_Test ([self-hosted SGX2-HW benchmark]) (push) Has been cancelled
fixed occlum_utils library installation scripts
correct repository clone URL

Reviewed-on: SGX/occlum#1
Reviewed-by: Valentyn Faychuk <valy@detee.ltd>
2024-12-03 10:09:37 +00:00
cd594d293d Update README
fixed occlum_utils library installation scripts
correct repository clone URL
2024-12-02 14:36:13 +05:30
fb93be46a2 added utils_lib 2024-10-27 15:51:53 +02:00
98550a1ebc
sealing key generation 2024-10-27 15:51:53 +02:00
ClawSeven
be4de47940 [Demos] Fix broken mnist source in paddlepaddle demo 2024-06-13 12:00:13 +08:00
Qi Zheng
814b573304 [demos] Specify protoc-gen-go-grpc version for go pingpong demo 2024-06-13 12:00:13 +08:00
Qi Zheng
ea6e33e6f1 [demos] Fix flask demo run failure 2024-06-13 12:00:13 +08:00
Qi Zheng
8f9e8d52cb [demos] Fix golang webserver build error 2024-06-13 12:00:13 +08:00
Hui, Chunyang
0c9a44fc60 Add kernel_heap_monitor as the default feature 2024-05-29 14:03:40 +08:00
Qi Zheng
473eec584e Update cargo lock 2024-03-19 10:19:50 +08:00
LI Qing
f9839299b2 Refine the implementation of NiceValue 2024-03-18 15:26:45 +08:00
LI Qing
db3a31d42e Fix the chown syscall with negative id 2024-03-15 14:44:26 +08:00
Hui, Chunyang
6eaad69941 Bump version to 0.30.1 2024-03-14 16:23:34 +08:00
ClawSeven
fb013a2bcd [time] Fix wrong SGX_CPUID leaf 2024-03-14 16:23:34 +08:00
Qi Zheng
faad595225 [deps] Update rust sgx sdk 2024-03-14 16:23:34 +08:00
Hui, Chunyang
2198d9e395 Add "kernel_heap_monitor" feature 2024-03-14 16:23:34 +08:00
ClawSeven
e48cc13f79 [time] Adapt vdso module to SGX1 platform 2024-03-14 16:23:34 +08:00
ClawSeven
b2f721d1bb [crates] Implement vdso for time precision 2024-03-14 16:23:34 +08:00
ClawSeven
9404da7cf8 [crates] Seperate error module into errno crate 2024-03-14 16:23:34 +08:00
Qi Zheng
f9b53dc410 [toolchains] Support set glibc branch for Occlum glibc build 2024-03-14 16:23:34 +08:00
Hui, Chunyang
d95199ace5 Fix feature configuration for make test 2024-03-14 16:23:34 +08:00
Hui, Chunyang
0b93c187f4 Remove reuse actions for release image 2024-03-14 16:23:34 +08:00
Hui, Chunyang
f46dbbad11 Refine CI actions to reuse old image 2024-03-14 16:23:34 +08:00
Hui, Chunyang
4ce27ae5c9 Fix AECS client version mismatch 2024-03-14 16:23:34 +08:00
Hui, Chunyang
aae9b6d940 Use special exception register and Replace sgx_tprotect_rsrv_mem with low leve API 2024-03-14 16:23:34 +08:00
Hui, Chunyang
ca4bcbf8fe Use low level API to replace sgx_mm_(commit/commit_data/modify_permissions)
Reduce the EMA management overhead and the global lock of emm module of
Intel SGX SDK
2024-03-14 16:23:34 +08:00
ClawSeven
2b1a9b960a [libos] Fix rt_sigaction wrong inargument 2024-03-14 16:23:34 +08:00
ClawSeven
e9f2c09012 [libos] Fix readlinkat with non-positive bufsize 2024-03-14 16:23:34 +08:00
ClawSeven
c2296c13d2 [libos] Fix sendfile with non-writable file 2024-03-14 16:23:34 +08:00
Hui, Chunyang
ee77ee618b Fix permission violation check for mmap and mprotect 2024-03-14 16:23:34 +08:00
Hui, Chunyang
e637ddbdfe Fix mmap file flush exceeding the file length 2024-03-14 16:23:34 +08:00
Hui, Chunyang
bf8d6a65f0 Add AMX and EDMM as feature in Occlum.json 2024-03-14 16:23:34 +08:00
Hui, Chunyang
836513687a Refine log for #UD exception and file open 2024-03-14 16:23:34 +08:00
Hui, Chunyang
ad6cab55f9 Refine log for VM module 2024-03-14 16:23:34 +08:00
ClawSeven
c465e7782a [libos] Fix clock_getres return successfully with wrong clock id 2024-03-14 16:23:34 +08:00
Qi Zheng
41e62ae982 [demos] Update Linux LTP test demo 2024-03-14 16:23:34 +08:00
Qi Zheng
b746fea82b [libos] Add iov buffer check for readv and writev
Signed-off-by: Qi Zheng <huaiqing.zq@antgroup.com>
2024-03-14 16:23:34 +08:00
Qi Zheng
dc060c57b4 Update unsupported syscall table 2024-03-14 16:23:34 +08:00
Qi Zheng
ce2cbccdb8 [readthedocs] Update occlum commands chapter 2024-03-14 16:23:34 +08:00
Qi Zheng
beeffcced7 [libos] faccessat only accepts three parameters 2024-03-14 16:23:34 +08:00
ClawSeven
47ac767886 [libos] Lower sigsuspend error log level 2024-03-14 16:23:34 +08:00
Qi Zheng
bc7096815d [readthedocs] Description for Occlum log config option 2024-03-14 16:23:34 +08:00
Qi Zheng
a7317b0aa9 [libos] Add disable_log cfg option 2024-03-14 16:23:34 +08:00
ClawSeven
065c367b37 [libos] Fix deadlock in signal implementions 2024-03-14 16:23:34 +08:00
ClawSeven
2a801e5fec [test] Implement ut for sigsuspend 2024-03-14 16:23:34 +08:00
ClawSeven
1147e6956f [libos] Implement the rt_sigsuspend syscall 2024-03-14 16:23:34 +08:00
ClawSeven
382bc812f1 [test] Implement unit test for pselect 2024-03-14 16:23:34 +08:00
ClawSeven
56528f67da [libos] Implement Pselect syscall with sigset 2024-03-14 16:23:34 +08:00
Hui, Chunyang
4d2ba8ca01 Refine error level log 2024-03-14 16:23:34 +08:00
Shaowei Song
69a8d078a5 [ci] Enable features on hw ci for EDMM 2024-03-14 16:23:34 +08:00
Shaowei Song
76edc08233 [config] Add "feature" field to Occlum.json 2024-03-14 16:23:34 +08:00
Shaowei Song
5efc54cb81 [vm] Refine shared chunk expansion 2024-03-14 16:23:34 +08:00
ClawSeven
6c8c8fc871 Reduce error log by downgrading unnecessary error! to warn! 2024-03-14 16:23:34 +08:00
Qi Zheng
367fa9c4ce [demos] CUDA torch python packages are not required for CPU inference 2024-03-14 16:23:34 +08:00
ClawSeven
5043797bc8 Change netty ut java version to jdk8 2024-03-14 16:23:34 +08:00
Qi Zheng
37c56c8b81 [test] SIGSTKSZ is not constant in glibc>2.34 2024-03-14 16:23:34 +08:00
Qi Zheng
3fb86f96c4 [libos] Update cpuid leaf table 2024-03-14 16:23:34 +08:00
Qi Zheng
cb1ee85605 [toolchain] Update grpc_ratls toolchain with patching way
Signed-off-by: Qi Zheng <huaiqing.zq@antgroup.com>
2024-03-14 16:23:34 +08:00
LI Qing
a82cfb87f0 Fix the issue about handling the AT_EMPTY_PATH flag 2024-03-14 16:23:34 +08:00
195 changed files with 7724 additions and 19875 deletions

@ -5,7 +5,6 @@ name: Build Image for CI (Manual Trigger)
# GVisor syscall test also compiles slow. It needs its own image. The images
# are stored in "occlumbackup/occlum" dockerhub repo.
# This is a manual trigger.
on:
workflow_dispatch:
@ -14,11 +13,9 @@ on:
description: 'image name (must choose from <grpc, gvisor_test, openvino, python, tf_serving>)'
required: true
default: 'grpc'
tag:
description: 'image tag'
required: true
default: 'latest'
reuse_image:
description: 'admin can choose to reuse an old image as the new CI image. set reuse image name, e.g.:0.30.0, 0.30.1-rc'
required: false
jobs:
Build_grpc_image:
@ -57,7 +54,9 @@ jobs:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_PASSWORD }}
# Rebuild the image
- name: Build and push
if: "${{ github.event.inputs.reuse_image == '' }}"
uses: docker/build-push-action@v2
with:
context: .
@ -67,7 +66,15 @@ jobs:
"OCCLUM_VERSION=${{ env.OCCLUM_VERSION }}"
"OCCLUM_BRANCH=${{ env.OCCLUM_BRANCH }}"
push: true
tags: occlumbackup/occlum:${{ github.event.inputs.tag }}-ubuntu20.04-grpc
tags: occlumbackup/occlum:${{ env.OCCLUM_VERSION }}-ubuntu20.04-grpc
# Reuse the old image
- name: Reuse the old image
if: "${{ github.event.inputs.reuse_image != '' }}"
run: |
docker pull occlumbackup/occlum:${{ github.event.inputs.reuse_image }}-ubuntu20.04-grpc
docker tag occlumbackup/occlum:${{ github.event.inputs.reuse_image }}-ubuntu20.04-grpc occlumbackup/occlum:${{ env.OCCLUM_VERSION }}-ubuntu20.04-grpc
docker push occlumbackup/occlum:${{ env.OCCLUM_VERSION }}-ubuntu20.04-grpc
Build_gvisor_test_image:
@ -100,7 +107,9 @@ jobs:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_PASSWORD }}
# Rebuild the image
- name: Build and push
if: "${{ github.event.inputs.reuse_image == '' }}"
uses: docker/build-push-action@v2
with:
context: .
@ -110,7 +119,15 @@ jobs:
"OCCLUM_VERSION=${{ env.OCCLUM_VERSION }}"
"OCCLUM_BRANCH=${{ env.OCCLUM_BRANCH }}"
push: true
tags: occlumbackup/occlum:${{ github.event.inputs.tag }}-ubuntu20.04-gvisor_test
tags: occlumbackup/occlum:${{ env.OCCLUM_VERSION }}-ubuntu20.04-gvisor_test
# Reuse the old image
- name: Reuse the old image
if: "${{ github.event.inputs.reuse_image != '' }}"
run: |
docker pull occlumbackup/occlum:${{ github.event.inputs.reuse_image }}-ubuntu20.04-gvisor_test
docker tag occlumbackup/occlum:${{ github.event.inputs.reuse_image }}-ubuntu20.04-gvisor_test occlumbackup/occlum:${{ env.OCCLUM_VERSION }}-ubuntu20.04-gvisor_test
docker push occlumbackup/occlum:${{ env.OCCLUM_VERSION }}-ubuntu20.04-gvisor_test
Build_openvino_image:
@ -148,7 +165,9 @@ jobs:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_PASSWORD }}
# Rebuild the image
- name: Build and push
if: "${{ github.event.inputs.reuse_image == '' }}"
uses: docker/build-push-action@v2
with:
context: .
@ -158,7 +177,15 @@ jobs:
"OCCLUM_VERSION=${{ env.OCCLUM_VERSION }}"
"OCCLUM_BRANCH=${{ env.OCCLUM_BRANCH }}"
push: true
tags: occlumbackup/occlum:${{ github.event.inputs.tag }}-ubuntu20.04-openvino
tags: occlumbackup/occlum:${{ env.OCCLUM_VERSION }}-ubuntu20.04-openvino
# Reuse the old image
- name: Reuse the old image
if: "${{ github.event.inputs.reuse_image != '' }}"
run: |
docker pull occlumbackup/occlum:${{ github.event.inputs.reuse_image }}-ubuntu20.04-openvino
docker tag occlumbackup/occlum:${{ github.event.inputs.reuse_image }}-ubuntu20.04-openvino occlumbackup/occlum:${{ env.OCCLUM_VERSION }}-ubuntu20.04-openvino
docker push occlumbackup/occlum:${{ env.OCCLUM_VERSION }}-ubuntu20.04-openvino
Build_python_image:
@ -196,7 +223,9 @@ jobs:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_PASSWORD }}
# Rebuild the image
- name: Build and push
if: "${{ github.event.inputs.reuse_image == '' }}"
uses: docker/build-push-action@v2
with:
context: .
@ -206,7 +235,15 @@ jobs:
"OCCLUM_VERSION=${{ env.OCCLUM_VERSION }}"
"OCCLUM_BRANCH=${{ env.OCCLUM_BRANCH }}"
push: true
tags: occlumbackup/occlum:${{ github.event.inputs.tag }}-ubuntu20.04-python
tags: occlumbackup/occlum:${{ env.OCCLUM_VERSION }}-ubuntu20.04-python
# Reuse the old image
- name: Reuse the old image
if: "${{ github.event.inputs.reuse_image != '' }}"
run: |
docker pull occlumbackup/occlum:${{ github.event.inputs.reuse_image }}-ubuntu20.04-python
docker tag occlumbackup/occlum:${{ github.event.inputs.reuse_image }}-ubuntu20.04-python occlumbackup/occlum:${{ env.OCCLUM_VERSION }}-ubuntu20.04-python
docker push occlumbackup/occlum:${{ env.OCCLUM_VERSION }}-ubuntu20.04-python
# TODO: Add actions to build tf_serving_base image.
@ -255,7 +292,9 @@ jobs:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_PASSWORD }}
# Rebuild the image
- name: Build and push
if: "${{ github.event.inputs.reuse_image == '' }}"
uses: docker/build-push-action@v2
with:
context: .
@ -265,4 +304,12 @@ jobs:
"OCCLUM_VERSION=${{ env.OCCLUM_VERSION }}"
"OCCLUM_BRANCH=${{ env.OCCLUM_BRANCH }}"
push: true
tags: occlumbackup/occlum:${{ github.event.inputs.tag }}-ubuntu20.04-tf_serving
tags: occlumbackup/occlum:${{ env.OCCLUM_VERSION }}-ubuntu20.04-tf_serving
# Reuse the old image
- name: Reuse the old image
if: "${{ github.event.inputs.reuse_image != '' }}"
run: |
docker pull occlumbackup/occlum:${{ github.event.inputs.reuse_image }}-ubuntu20.04-tf_serving
docker tag occlumbackup/occlum:${{ github.event.inputs.reuse_image }}-ubuntu20.04-tf_serving occlumbackup/occlum:${{ env.OCCLUM_VERSION }}-ubuntu20.04-tf_serving
docker push occlumbackup/occlum:${{ env.OCCLUM_VERSION }}-ubuntu20.04-tf_serving

@ -72,6 +72,7 @@ jobs:
push: true
tags: occlum/occlum:${{ env.IMAGE_TAG }}-anolis8.8
generate-ubuntu20-image:
runs-on: ubuntu-20.04
if: github.event.inputs.OS == 'ubuntu20'

@ -98,7 +98,7 @@ jobs:
- name: LTP test
run: docker exec code_coverage bash -c "cd /root/occlum/demos/linux-ltp && ./dl_and_build_ltp.sh && SGX_MODE=SIM ./prepare_ltp.sh;
cd ltp_instance;
cd occlum_instance;
occlum run /opt/ltp/run-ltp.sh -f syscalls-occlum;
cd /root/occlum/demos && rm -rf ./linux-ltp"

@ -42,11 +42,7 @@ runs:
if [[ "${{ matrix.self_runner[1] }}" == "SGX1-HW" ]]; then
docker run -itd --name=${{ env.CONTAINER_NAME }} ${{ inputs.container-run-params }} --privileged --rm --env CARGO_HTTP_MULTIPLEXING=false --device /dev/isgx -v $GITHUB_WORKSPACE:/root/occlum occlum/occlum:${{ env.OCCLUM_VERSION }}-${{ inputs.os }};
elif [[ "${{ matrix.self_runner[1] }}" == "SGX2-HW" ]]; then
if [[ "${{ matrix.self_runner[2] }}" == "EDMM" ]]; then
docker run -itd --name=${{ env.CONTAINER_NAME }} ${{ inputs.container-run-params }} --privileged --rm --env CARGO_HTTP_MULTIPLEXING=false --env ENABLE_EDMM=Y -v /dev/sgx:/dev/sgx -v $GITHUB_WORKSPACE:/root/occlum occlum/occlum:${{ env.OCCLUM_VERSION }}-${{ inputs.os }};
else
docker run -itd --name=${{ env.CONTAINER_NAME }} ${{ inputs.container-run-params }} --privileged --rm --env CARGO_HTTP_MULTIPLEXING=false -v /dev/sgx:/dev/sgx -v $GITHUB_WORKSPACE:/root/occlum occlum/occlum:${{ env.OCCLUM_VERSION }}-${{ inputs.os }};
fi
docker run -itd --name=${{ env.CONTAINER_NAME }} ${{ inputs.container-run-params }} --privileged --rm --env CARGO_HTTP_MULTIPLEXING=false -v /dev/sgx:/dev/sgx -v $GITHUB_WORKSPACE:/root/occlum occlum/occlum:${{ env.OCCLUM_VERSION }}-${{ inputs.os }};
else
echo "Unsupported Hardware"
fi;
@ -87,3 +83,10 @@ runs:
run: docker exec ${{ env.CONTAINER_NAME }} bash -c "source /opt/intel/sgxsdk/environment; cd /root/occlum; ${{ inputs.build-envs}} make install"
shell: bash
# When there comes new features, the configuration should be enabled accordingly
- name: Configure Occlum features
run: |
if [[ "${{ matrix.self_runner[2] }}" == "EDMM" ]]; then
docker exec ${{ env.CONTAINER_NAME }} bash -c "jq '.feature.enable_posix_shm = true | .feature.enable_edmm = true' /opt/occlum/etc/template/Occlum.json > /tmp.json && mv /tmp.json /opt/occlum/etc/template/Occlum.json"
fi;
shell: bash

@ -82,8 +82,7 @@ jobs:
run: docker exec ${{ github.job }} bash -c "cd /root/occlum/tools/toolchains/golang && ./build.sh go1.18.4_for_occlum && cd /root/occlum/demos/golang/go_sqlite/ && SGX_MODE=SIM ./run_go_sqlite_demo.sh"
- name: Go Server set up and run
run: docker exec ${{ github.job }} bash -c "cd /root/occlum/demos/golang/web_server && occlum-go mod init web_server && occlum-go get -u -v github.com/gin-gonic/gin;
occlum-go build -o web_server ./web_server.go;
run: docker exec ${{ github.job }} bash -c "cd /root/occlum/demos/golang/web_server && ./build.sh;
SGX_MODE=SIM ./run_golang_on_occlum.sh" &
- name: Set up Golang grpc pingpong test
@ -473,8 +472,11 @@ jobs:
with:
submodules: true
- name: Get occlum version
run: echo "OCCLUM_VERSION=$(grep 'Version =' src/pal/include/occlum_version.h | awk '{print $4}')" >> $GITHUB_ENV;
- name: Create container
run: docker run -itd --name=${{ github.job }} -v $GITHUB_WORKSPACE:/root/occlum occlumbackup/occlum:latest-ubuntu20.04-grpc
run: docker run -itd --name=${{ github.job }} -v $GITHUB_WORKSPACE:/root/occlum occlumbackup/occlum:${{ env.OCCLUM_VERSION }}-ubuntu20.04-grpc
- uses: ./.github/workflows/composite_action/prebuild
with:
@ -514,8 +516,11 @@ jobs:
with:
submodules: true
- name: Get occlum version
run: echo "OCCLUM_VERSION=$(grep 'Version =' src/pal/include/occlum_version.h | awk '{print $4}')" >> $GITHUB_ENV;
- name: Create container
run: docker run -itd --name=${{ github.job }} -v $GITHUB_WORKSPACE:/root/occlum occlumbackup/occlum:latest-ubuntu20.04-grpc
run: docker run -itd --name=${{ github.job }} -v $GITHUB_WORKSPACE:/root/occlum occlumbackup/occlum:${{ env.OCCLUM_VERSION }}-ubuntu20.04-grpc
- uses: ./.github/workflows/composite_action/prebuild
with:
@ -600,8 +605,11 @@ jobs:
with:
submodules: true
- name: Get occlum version
run: echo "OCCLUM_VERSION=$(grep 'Version =' src/pal/include/occlum_version.h | awk '{print $4}')" >> $GITHUB_ENV;
- name: Create container
run: docker run -itd --name=${{ github.job }} -v $GITHUB_WORKSPACE:/root/occlum occlumbackup/occlum:latest-ubuntu20.04-openvino
run: docker run -itd --name=${{ github.job }} -v $GITHUB_WORKSPACE:/root/occlum occlumbackup/occlum:${{ env.OCCLUM_VERSION }}-ubuntu20.04-openvino
- uses: ./.github/workflows/composite_action/prebuild
with:
@ -633,8 +641,12 @@ jobs:
- uses: actions/checkout@v1
with:
submodules: true
- name: Get occlum version
run: echo "OCCLUM_VERSION=$(grep 'Version =' src/pal/include/occlum_version.h | awk '{print $4}')" >> $GITHUB_ENV;
- name: Create container
run: docker run -itd --name=${{ github.job }} -v $GITHUB_WORKSPACE:/root/occlum occlumbackup/occlum:latest-ubuntu20.04-python
run: docker run -itd --name=${{ github.job }} -v $GITHUB_WORKSPACE:/root/occlum occlumbackup/occlum:${{ env.OCCLUM_VERSION }}-ubuntu20.04-python
- uses: ./.github/workflows/composite_action/prebuild
with:
@ -940,7 +952,7 @@ jobs:
run: docker exec ${{ github.job }} bash -c "cd /root/occlum/demos/netty_ut && ./build.sh"
- name: Run netty unit test demo
run: docker exec ${{ github.job }} bash -c "cd /root/occlum/demos/netty_ut && SGX_MODE=SIM ./run_netty_ut_jdk11.sh"
run: docker exec ${{ github.job }} bash -c "cd /root/occlum/demos/netty_ut && SGX_MODE=SIM ./run_netty_ut_jdk8.sh"
- name: Clean Netty test
run: docker exec ${{ github.job }} bash -c "cd /root/occlum/demos && rm -rf ./netty_ut"
@ -1019,10 +1031,13 @@ jobs:
with:
submodules: true
- name: Get occlum version
run: echo "OCCLUM_VERSION=$(grep 'Version =' src/pal/include/occlum_version.h | awk '{print $4}')" >> $GITHUB_ENV;
- name: Create container
run: |
docker pull occlumbackup/occlum:latest-ubuntu20.04-gvisor_test
gvisor_test=$(docker run -itd -v $GITHUB_WORKSPACE:/root/occlum occlumbackup/occlum:latest-ubuntu20.04-gvisor_test);
docker pull occlumbackup/occlum:${{ env.OCCLUM_VERSION }}-ubuntu20.04-gvisor_test
gvisor_test=$(docker run -itd -v $GITHUB_WORKSPACE:/root/occlum occlumbackup/occlum:${{ env.OCCLUM_VERSION }}-ubuntu20.04-gvisor_test);
echo "gvisor_test=$gvisor_test" >> $GITHUB_ENV
- uses: ./.github/workflows/composite_action/prebuild
@ -1144,7 +1159,7 @@ jobs:
run: docker exec ${{ github.job }} bash -c "cd /root/occlum/demos/linux-ltp && SGX_MODE=SIM ./prepare_ltp.sh"
- name: Run the LTP demo
run: docker exec ${{ github.job }} bash -c "cd /root/occlum/demos/linux-ltp/ltp_instance;
run: docker exec ${{ github.job }} bash -c "cd /root/occlum/demos/linux-ltp/occlum_instance;
occlum run /opt/ltp/run-ltp.sh -f syscalls-occlum"
- name: Clean LTP test

@ -54,6 +54,15 @@ jobs:
container-name: ${{ github.job }}
build-envs: 'OCCLUM_RELEASE_BUILD=1'
# Update the test json file
# When there comes new features, the configuration should be enabled accordingly
- name: Configure Occlum features
run: |
if [[ "${{ matrix.self_runner[2] }}" == "EDMM" ]]; then
docker exec ${{ env.CONTAINER_NAME }} bash -c "jq '.feature.enable_posix_shm = true | .feature.enable_edmm = true' /root/occlum/test/Occlum.json > /tmp.json && mv /tmp.json /root/occlum/test/Occlum.json"
fi;
shell: bash
- name: Integration test
run: docker exec ${{ env.CONTAINER_NAME }} bash -c "cd /root/occlum; OCCLUM_LOG_LEVEL=trace make test"
@ -133,8 +142,7 @@ jobs:
- name: Go server set up and run
run: docker exec ${{ env.CONTAINER_NAME }} bash -c "export GO111MODULE=on;
cd /root/occlum/demos/golang/web_server && occlum-go mod init web_server && occlum-go get -u -v github.com/gin-gonic/gin;
occlum-go build -o web_server ./web_server.go;
cd /root/occlum/demos/golang/web_server && ./build.sh;
./run_golang_on_occlum.sh" &
- name: Set up Golang grpc pingpong test
@ -441,16 +449,19 @@ jobs:
ref: refs/pull/${{ github.event.pull_request.number }}/merge
submodules: true
- name: Get occlum version
run: echo "OCCLUM_VERSION=$(grep 'Version =' src/pal/include/occlum_version.h | awk '{print $4}')" >> $GITHUB_ENV;
- name: Create container
run: |
docker pull occlumbackup/occlum:latest-ubuntu20.04-python
docker pull occlumbackup/occlum:${{ env.OCCLUM_VERSION }}-ubuntu20.04-python
if [[ "${{ matrix.self_runner[1] }}" == "SGX1-HW" ]]; then
python_musl_support_test=$(docker run -itd --privileged --rm --env CARGO_HTTP_MULTIPLEXING=false --net host --device /dev/isgx -v $GITHUB_WORKSPACE:/root/occlum occlumbackup/occlum:latest-ubuntu20.04-python);
python_musl_support_test=$(docker run -itd --privileged --rm --env CARGO_HTTP_MULTIPLEXING=false --net host --device /dev/isgx -v $GITHUB_WORKSPACE:/root/occlum occlumbackup/occlum:${{ env.OCCLUM_VERSION }}-ubuntu20.04-python);
elif [[ "${{ matrix.self_runner[1] }}" == "SGX2-HW" ]]; then
if [[ "${{ matrix.self_runner[2] }}" == "EDMM" ]]; then
python_musl_support_test=$(docker run -itd --privileged --rm --env CARGO_HTTP_MULTIPLEXING=false --env ENABLE_EDMM=Y --net host -v /dev/sgx:/dev/sgx -v $GITHUB_WORKSPACE:/root/occlum occlumbackup/occlum:latest-ubuntu20.04-python);
python_musl_support_test=$(docker run -itd --privileged --rm --env CARGO_HTTP_MULTIPLEXING=false --env ENABLE_EDMM=Y --net host -v /dev/sgx:/dev/sgx -v $GITHUB_WORKSPACE:/root/occlum occlumbackup/occlum:${{ env.OCCLUM_VERSION }}-ubuntu20.04-python);
else
python_musl_support_test=$(docker run -itd --privileged --rm --env CARGO_HTTP_MULTIPLEXING=false --net host -v /dev/sgx:/dev/sgx -v $GITHUB_WORKSPACE:/root/occlum occlumbackup/occlum:latest-ubuntu20.04-python);
python_musl_support_test=$(docker run -itd --privileged --rm --env CARGO_HTTP_MULTIPLEXING=false --net host -v /dev/sgx:/dev/sgx -v $GITHUB_WORKSPACE:/root/occlum occlumbackup/occlum:${{ env.OCCLUM_VERSION }}-ubuntu20.04-python);
fi
else
echo "Unsupported Hardware"
@ -516,16 +527,19 @@ jobs:
ref: refs/pull/${{ github.event.pull_request.number }}/merge
submodules: true
- name: Get occlum version
run: echo "OCCLUM_VERSION=$(grep 'Version =' src/pal/include/occlum_version.h | awk '{print $4}')" >> $GITHUB_ENV;
- name: Create container
run: |
docker pull occlumbackup/occlum:latest-ubuntu20.04-openvino
docker pull occlumbackup/occlum:${{ env.OCCLUM_VERSION }}-ubuntu20.04-openvino
if [[ "${{ matrix.self_runner[1] }}" == "SGX1-HW" ]]; then
openvino_test=$(docker run -itd --privileged --rm --env CARGO_HTTP_MULTIPLEXING=false --net host --device /dev/isgx -v $GITHUB_WORKSPACE:/root/occlum occlumbackup/occlum:latest-ubuntu20.04-openvino);
openvino_test=$(docker run -itd --privileged --rm --env CARGO_HTTP_MULTIPLEXING=false --net host --device /dev/isgx -v $GITHUB_WORKSPACE:/root/occlum occlumbackup/occlum:${{ env.OCCLUM_VERSION }}-ubuntu20.04-openvino);
elif [[ "${{ matrix.self_runner[1] }}" == "SGX2-HW" ]]; then
if [[ "${{ matrix.self_runner[2] }}" == "EDMM" ]]; then
openvino_test=$(docker run -itd --privileged --rm --env CARGO_HTTP_MULTIPLEXING=false --env ENABLE_EDMM=Y --net host -v /dev/sgx:/dev/sgx -v $GITHUB_WORKSPACE:/root/occlum occlumbackup/occlum:latest-ubuntu20.04-openvino);
openvino_test=$(docker run -itd --privileged --rm --env CARGO_HTTP_MULTIPLEXING=false --env ENABLE_EDMM=Y --net host -v /dev/sgx:/dev/sgx -v $GITHUB_WORKSPACE:/root/occlum occlumbackup/occlum:${{ env.OCCLUM_VERSION }}-ubuntu20.04-openvino);
else
openvino_test=$(docker run -itd --privileged --rm --env CARGO_HTTP_MULTIPLEXING=false --net host -v /dev/sgx:/dev/sgx -v $GITHUB_WORKSPACE:/root/occlum occlumbackup/occlum:latest-ubuntu20.04-openvino);
openvino_test=$(docker run -itd --privileged --rm --env CARGO_HTTP_MULTIPLEXING=false --net host -v /dev/sgx:/dev/sgx -v $GITHUB_WORKSPACE:/root/occlum occlumbackup/occlum:${{ env.OCCLUM_VERSION }}-ubuntu20.04-openvino);
fi
else
echo "Unsupported Hardware"
@ -588,16 +602,19 @@ jobs:
ref: refs/pull/${{ github.event.pull_request.number }}/merge
submodules: true
- name: Get occlum version
run: echo "OCCLUM_VERSION=$(grep 'Version =' src/pal/include/occlum_version.h | awk '{print $4}')" >> $GITHUB_ENV;
- name: Create container
run: |
docker pull occlumbackup/occlum:latest-ubuntu20.04-grpc
docker pull occlumbackup/occlum:${{ env.OCCLUM_VERSION }}-ubuntu20.04-grpc
if [[ "${{ matrix.self_runner[1] }}" == "SGX1-HW" ]]; then
grpc_test=$(docker run -itd --privileged --rm --env CARGO_HTTP_MULTIPLEXING=false --net host --device /dev/isgx -v $GITHUB_WORKSPACE:/root/occlum occlumbackup/occlum:latest-ubuntu20.04-grpc);
grpc_test=$(docker run -itd --privileged --rm --env CARGO_HTTP_MULTIPLEXING=false --net host --device /dev/isgx -v $GITHUB_WORKSPACE:/root/occlum occlumbackup/occlum:${{ env.OCCLUM_VERSION }}-ubuntu20.04-grpc);
elif [[ "${{ matrix.self_runner[1] }}" == "SGX2-HW" ]]; then
if [[ "${{ matrix.self_runner[2] }}" == "EDMM" ]]; then
grpc_test=$(docker run -itd --privileged --rm --env CARGO_HTTP_MULTIPLEXING=false --env ENABLE_EDMM=Y --net host -v /dev/sgx:/dev/sgx -v $GITHUB_WORKSPACE:/root/occlum occlumbackup/occlum:latest-ubuntu20.04-grpc);
grpc_test=$(docker run -itd --privileged --rm --env CARGO_HTTP_MULTIPLEXING=false --env ENABLE_EDMM=Y --net host -v /dev/sgx:/dev/sgx -v $GITHUB_WORKSPACE:/root/occlum occlumbackup/occlum:${{ env.OCCLUM_VERSION }}-ubuntu20.04-grpc);
else
grpc_test=$(docker run -itd --privileged --rm --env CARGO_HTTP_MULTIPLEXING=false --net host -v /dev/sgx:/dev/sgx -v $GITHUB_WORKSPACE:/root/occlum occlumbackup/occlum:latest-ubuntu20.04-grpc);
grpc_test=$(docker run -itd --privileged --rm --env CARGO_HTTP_MULTIPLEXING=false --net host -v /dev/sgx:/dev/sgx -v $GITHUB_WORKSPACE:/root/occlum occlumbackup/occlum:${{ env.OCCLUM_VERSION }}-ubuntu20.04-grpc);
fi
else
echo "Unsupported Hardware"
@ -682,16 +699,19 @@ jobs:
ref: refs/pull/${{ github.event.pull_request.number }}/merge
submodules: true
- name: Get occlum version
run: echo "OCCLUM_VERSION=$(grep 'Version =' src/pal/include/occlum_version.h | awk '{print $4}')" >> $GITHUB_ENV;
- name: Create container
run: |
docker pull occlumbackup/occlum:latest-ubuntu20.04-gvisor_test
docker pull occlumbackup/occlum:${{ env.OCCLUM_VERSION }}-ubuntu20.04-gvisor_test
if [[ "${{ matrix.self_runner[1] }}" == "SGX1-HW" ]]; then
gvisor_test=$(docker run -itd --privileged --rm --env CARGO_HTTP_MULTIPLEXING=false --net host --device /dev/isgx -v $GITHUB_WORKSPACE:/root/occlum occlumbackup/occlum:latest-ubuntu20.04-gvisor_test);
gvisor_test=$(docker run -itd --privileged --rm --env CARGO_HTTP_MULTIPLEXING=false --net host --device /dev/isgx -v $GITHUB_WORKSPACE:/root/occlum occlumbackup/occlum:${{ env.OCCLUM_VERSION }}-ubuntu20.04-gvisor_test);
elif [[ "${{ matrix.self_runner[1] }}" == "SGX2-HW" ]]; then
if [[ "${{ matrix.self_runner[2] }}" == "EDMM" ]]; then
gvisor_test=$(docker run -itd --privileged --rm --env CARGO_HTTP_MULTIPLEXING=false --env ENABLE_EDMM=Y --net host -v /dev/sgx:/dev/sgx -v $GITHUB_WORKSPACE:/root/occlum occlumbackup/occlum:latest-ubuntu20.04-gvisor_test);
gvisor_test=$(docker run -itd --privileged --rm --env CARGO_HTTP_MULTIPLEXING=false --env ENABLE_EDMM=Y --net host -v /dev/sgx:/dev/sgx -v $GITHUB_WORKSPACE:/root/occlum occlumbackup/occlum:${{ env.OCCLUM_VERSION }}-ubuntu20.04-gvisor_test);
else
gvisor_test=$(docker run -itd --privileged --rm --env CARGO_HTTP_MULTIPLEXING=false --net host -v /dev/sgx:/dev/sgx -v $GITHUB_WORKSPACE:/root/occlum occlumbackup/occlum:latest-ubuntu20.04-gvisor_test);
gvisor_test=$(docker run -itd --privileged --rm --env CARGO_HTTP_MULTIPLEXING=false --net host -v /dev/sgx:/dev/sgx -v $GITHUB_WORKSPACE:/root/occlum occlumbackup/occlum:${{ env.OCCLUM_VERSION }}-ubuntu20.04-gvisor_test);
fi
else
echo "Unsupported Hardware"
@ -836,16 +856,19 @@ jobs:
ref: refs/pull/${{ github.event.pull_request.number }}/merge
submodules: true
- name: Get occlum version
run: echo "OCCLUM_VERSION=$(grep 'Version =' src/pal/include/occlum_version.h | awk '{print $4}')" >> $GITHUB_ENV;
- name: Create container
run: |
docker pull occlumbackup/occlum:latest-ubuntu20.04-tf_serving
docker pull occlumbackup/occlum:${{ env.OCCLUM_VERSION }}-ubuntu20.04-tf_serving
if [[ "${{ matrix.self_runner[1] }}" == "SGX1-HW" ]]; then
tf_serving_test=$(docker run -itd --privileged --rm --env CARGO_HTTP_MULTIPLEXING=false --net host --device /dev/isgx -v $GITHUB_WORKSPACE:/root/occlum occlumbackup/occlum:latest-ubuntu20.04-tf_serving);
tf_serving_test=$(docker run -itd --privileged --rm --env CARGO_HTTP_MULTIPLEXING=false --net host --device /dev/isgx -v $GITHUB_WORKSPACE:/root/occlum occlumbackup/occlum:${{ env.OCCLUM_VERSION }}-ubuntu20.04-tf_serving);
elif [[ "${{ matrix.self_runner[1] }}" == "SGX2-HW" ]]; then
if [[ "${{ matrix.self_runner[2] }}" == "EDMM" ]]; then
tf_serving_test=$(docker run -itd --privileged --rm --env CARGO_HTTP_MULTIPLEXING=false --env ENABLE_EDMM=Y --net host -v /dev/sgx:/dev/sgx -v $GITHUB_WORKSPACE:/root/occlum occlumbackup/occlum:latest-ubuntu20.04-tf_serving);
tf_serving_test=$(docker run -itd --privileged --rm --env CARGO_HTTP_MULTIPLEXING=false --env ENABLE_EDMM=Y --net host -v /dev/sgx:/dev/sgx -v $GITHUB_WORKSPACE:/root/occlum occlumbackup/occlum:${{ env.OCCLUM_VERSION }}-ubuntu20.04-tf_serving);
else
tf_serving_test=$(docker run -itd --privileged --rm --env CARGO_HTTP_MULTIPLEXING=false --net host -v /dev/sgx:/dev/sgx -v $GITHUB_WORKSPACE:/root/occlum occlumbackup/occlum:latest-ubuntu20.04-tf_serving);
tf_serving_test=$(docker run -itd --privileged --rm --env CARGO_HTTP_MULTIPLEXING=false --net host -v /dev/sgx:/dev/sgx -v $GITHUB_WORKSPACE:/root/occlum occlumbackup/occlum:${{ env.OCCLUM_VERSION }}-ubuntu20.04-tf_serving);
fi
else
echo "Unsupported Hardware"
@ -1128,7 +1151,7 @@ jobs:
run: docker exec ${{ env.CONTAINER_NAME }} bash -c "cd /root/occlum/demos/netty_ut && ./build.sh"
- name: Run netty unit test demo
run: docker exec ${{ env.CONTAINER_NAME }} bash -c "cd /root/occlum/demos/netty_ut && ./run_netty_ut_jdk11.sh"
run: docker exec ${{ env.CONTAINER_NAME }} bash -c "cd /root/occlum/demos/netty_ut && ./run_netty_ut_jdk8.sh"
- name: Clean the environment
if: ${{ always() }}

@ -68,6 +68,12 @@ jobs:
if: ${{ failure() }}
run: docker exec ${{ github.job }} bash -c "cat /root/occlum/build/test/.fail"
- name: Integration test with optional Occlum features
run: |
docker exec ${{ github.job }} bash -c 'source /opt/intel/sgxsdk/environment; cd /root/occlum; make clean && LIBOS_FEATURES="kernel_heap_monitor" make install'
docker exec ${{ github.job }} bash -c "cd /root/occlum; SGX_MODE=SIM make test-glibc"
shell: bash
# Make_test_on_centos:
# runs-on: ubuntu-18.04

6
.gitignore vendored

@ -6,3 +6,9 @@ build/
.DS_Store
src/libos/target/
tools/toolchains/dcap_lib/target/
# Added by DeTEE
**/target
**/Cargo.lock
.idea

@ -64,3 +64,27 @@ Thanks go to [all these wonderful contributors to this project](CONTRIBUTORS.md)
## License
Occlum is released under BSD License. See the copyright information [here](LICENSE).
## DeTEE
Occlum is a part of the DeTEE project. DeTEE is a research project that aims to provide a secure and efficient computing environment for data-intensive applications.
```bash
# Run the occlum ubuntu 20.04 docker container
docker run --device /dev/sgx/enclave --device /dev/sgx/provision --rm --name valytest -it -v /home/vfaychuk:/root/vfaychuk occlum/occlum:latest-ubuntu20.04
# inside the container run the following commands
apt update && apt install -y ssh-client
mkdir -p /root/.ssh && vim /root/.ssh/config
#Host gitea.detee.cloud
# IdentityFile ~/.ssh/gitea_ed25519
vim /root/.ssh/gitea_ed25519
# put the server private key to download the repo
chown -R root:root /root/.ssh
chmod 600 /root/.ssh/gitea_ed25519
ssh-keyscan -H gitea.detee.cloud > ~/.ssh/known_hosts
git clone git@gitea.detee.cloud:SGX/occlum.git
cd occlum && make submodule
cd tools/toolchains/utils_lib/
# following command installs the utils library which can derive sealing key of the enclave
./build.sh
```

@ -70,7 +70,7 @@ BigDL-LLM also support FastChat with using BigDL-LLM as a serving backend in the
For this demo, below commands show how to run an inference service in Occlum with webui interface.
In order to load models using BigDL-LLM, the model name should include "bigdl". In our case, first create a soft link **chatglm2-6b-bigdl** to **chatglm2-6b**.
In order to load models using BigDL-LLM, the model name should include "bigdl". For example, model **vicuna-7b** should be renamed to **bigdl-7b**. A special case is **ChatGLM** models. For these models, you do not need to do any changes after downloading the model and the BigDL-LLM backend will be used automatically. Details please refer to [Models](https://github.com/intel-analytics/BigDL/tree/main/python/llm/src/bigdl/llm/serving#models).
### Serving with WebGUI
@ -87,7 +87,7 @@ This controller manages the distributed workers.
```bash
cd occlum_instance
occlum start
HF_DATASETS_CACHE=/root/cache occlum exec /bin/python3 -m bigdl.llm.serving.model_worker --model-path /models/chatglm2-6b-bigdl --device cpu --host 0.0.0.0
HF_DATASETS_CACHE=/root/cache occlum exec /bin/python3 -m bigdl.llm.serving.model_worker --model-path /models/chatglm2-6b --device cpu --host 0.0.0.0
```
Wait until the process finishes loading the model and you see "Uvicorn running on ...". The model worker will register itself to the controller.

@ -10,4 +10,5 @@ $script_dir/miniconda/bin/conda create \
python=3.9.11
# Install BigDL LLM
$script_dir/python-occlum/bin/pip install torch==2.1.0 --index-url https://download.pytorch.org/whl/cpu
$script_dir/python-occlum/bin/pip install --pre --upgrade bigdl-llm[all] bigdl-llm[serving]

@ -54,9 +54,9 @@ fi
if ! type "protoc-gen-go-grpc" > /dev/null 2>&1; then
if [[ $GOVERSION != 'go1.16.3' ]];then
occlum-go get google.golang.org/grpc/cmd/protoc-gen-go-grpc
occlum-go install google.golang.org/grpc/cmd/protoc-gen-go-grpc
occlum-go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@v1.3.0
else
occlum-go get google.golang.org/grpc/cmd/protoc-gen-go-grpc
occlum-go get google.golang.org/grpc/cmd/protoc-gen-go-grpc@v1.3.0
fi
fi

@ -19,10 +19,7 @@ occlum init
new_json="$(jq '.resource_limits.user_space_size = "1MB" |
.resource_limits.user_space_max_size = "800MB" |
.resource_limits.kernel_space_heap_size="1MB" |
.resource_limits.kernel_space_heap_max_size="40MB" |
.resource_limits.kernel_space_stack_size="1MB" |
.process.default_stack_size = "1MB" |
.process.default_heap_size = "20MB" ' Occlum.json)" && \
.resource_limits.kernel_space_heap_max_size="80MB" ' Occlum.json)" && \
echo "${new_json}" > Occlum.json
# 2. Copy program into Occlum Workspace and build

@ -19,10 +19,7 @@ occlum init
new_json="$(jq '.resource_limits.user_space_size = "1MB" |
.resource_limits.user_space_max_size = "800MB" |
.resource_limits.kernel_space_heap_size="1MB" |
.resource_limits.kernel_space_heap_max_size="40MB" |
.resource_limits.kernel_space_stack_size="1MB" |
.process.default_stack_size = "1MB" |
.process.default_heap_size = "20MB" ' Occlum.json)" && \
.resource_limits.kernel_space_heap_max_size="80MB" ' Occlum.json)" && \
echo "${new_json}" > Occlum.json
# 2. Copy program into Occlum Workspace and build

@ -2,24 +2,18 @@
This project demonstrates how Occlum enables [Golang](https://golang.org) programs running in SGX enclaves, the demo program is a HTTP web server based on a widely used web framework([Gin](https://gin-gonic.com)) for Go.
Step 1: Install Gin with `occlum-go`, it may take a few minutes
Step 1: Install Gin and build Golang web server with `occlum-go`
```
occlum-go mod init web_server && \
occlum-go get -u -v github.com/gin-gonic/gin
./build.sh
```
Step 2: Build the Golang web server using the Occlum Golang toolchain(i.e., `occlum-go`)
```
occlum-go build -o web_server ./web_server.go
```
Step 3: You can run the web server demo on Occlum via
Step 2: You can run the web server demo on Occlum via
```
./run_golang_on_occlum.sh
```
The HTTP web server should now start to listen on port 8090 and serve HTTP requests.
Step 4: To check whether the HTTP server works, run
Step 3: To check whether the HTTP server works, run
```
curl http://127.0.0.1:8090/ping
```

@ -0,0 +1,10 @@
#!/bin/bash
# Build the Gin-based Golang web server demo using the Occlum Go toolchain
# (occlum-go). Produces the `web_server` binary in the current directory.
set -e
# Remove any stale module file so repeated builds start from a clean state.
rm -f go.mod
occlum-go mod init web_server
occlum-go mod tidy
occlum-go get -u -v github.com/gin-gonic/gin
# NOTE(review): golang.org/x/crypto is pinned to v0.23.0 — presumably for
# compatibility with the occlum-go toolchain's Go version; confirm before bumping.
occlum-go get -u -v golang.org/x/crypto@v0.23.0
occlum-go build -o web_server ./web_server.go

@ -36,7 +36,7 @@ init_instance() {
}
update_pku_config() {
new_json="$(jq '.metadata.pkru = 1' Occlum.json)" && echo "${new_json}" > Occlum.json
new_json="$(jq '.feature.pkru = 1' Occlum.json)" && echo "${new_json}" > Occlum.json
}
build_web() {

@ -18,7 +18,7 @@ Some test cases are failed due to multiple reasons, such as syscall is not imple
* Some default LTP test cases may make the Occlum crash or hang (Only checked the cases in syscalls for now).
* Occlum runable syscall test cases are defined in [`syscalls-occlum`](./syscalls-occlum). It may be updated with Occlum development.
The original [`syscalls`] test cases could be found in the built demo `ltp_instance/image/opt/ltp/runtest/syscalls`.
The original [`syscalls`] test cases could be found in the built demo `occlum_instance/image/opt/ltp/runtest/syscalls`.
Panic/Segfault/hang test cases could be listed by a simple diff of these two files.
## Prepare the Occlum instance for LTP demo
@ -39,8 +39,13 @@ The script `run-ltp.sh` supports two optional arguments as below.
example: run-ltp.sh -f syscalls-occlum -s timerfd
```
To run in Occlum, use the following command.
```
occlum run /opt/ltp/run-ltp.sh -f syscalls-occlum -s timerfd
```
If no options provided, all the test cases in default LTP syscalls will be run one by one.
Note:
* The `CMDFILES` are defined in the LTP install path, such as `ltp_instance/image/opt/ltp/runtest/` in this demo.
* The `CMDFILES` are defined in the LTP install path, such as `occlum_instance/image/opt/ltp/runtest/` in this demo.

@ -1,10 +1,10 @@
#! /bin/bash
set -e
rm -rf ltp_instance
occlum new ltp_instance
rm -rf occlum_instance
occlum new occlum_instance
cd ltp_instance
cd occlum_instance
rm -rf image
copy_bom -f ../ltp.yaml --root image --include-dir /opt/occlum/etc/template

@ -1004,8 +1004,8 @@ preadv03_64 preadv03_64
preadv201 preadv201
preadv201_64 preadv201_64
preadv202 preadv202
preadv202_64 preadv202_64
# preadv202 preadv202
# preadv202_64 preadv202_64
preadv203 preadv203
preadv203_64 preadv203_64
@ -1052,15 +1052,15 @@ pwrite04_64 pwrite04_64
pwritev01 pwritev01
pwritev01_64 pwritev01_64
# pwritev02 pwritev02
# pwritev02_64 pwritev02_64
pwritev02 pwritev02
pwritev02_64 pwritev02_64
pwritev03 pwritev03
pwritev03_64 pwritev03_64
pwritev201 pwritev201
pwritev201_64 pwritev201_64
pwritev202 pwritev202
pwritev202_64 pwritev202_64
# pwritev202 pwritev202
# pwritev202_64 pwritev202_64
quotactl01 quotactl01
quotactl02 quotactl02
@ -1090,7 +1090,7 @@ readlinkat01 readlinkat01
readlinkat02 readlinkat02
readv01 readv01
readv02 readv02
# readv02 readv02
realpath01 realpath01
@ -1152,7 +1152,6 @@ rt_sigaction03 rt_sigaction03
rt_sigprocmask01 rt_sigprocmask01
#rt_sigprocmask02 rt_sigprocmask02
rt_sigqueueinfo01 rt_sigqueueinfo01
rt_sigsuspend01 rt_sigsuspend01
rt_sigtimedwait01 rt_sigtimedwait01
rt_tgsigqueueinfo01 rt_tgsigqueueinfo01
@ -1448,8 +1447,6 @@ sigprocmask01 sigprocmask01
sigrelse01 sigrelse01
sigsuspend01 sigsuspend01
sigtimedwait01 sigtimedwait01
sigwait01 sigwait01
@ -1698,12 +1695,12 @@ write02 write02
write04 write04
#write05 write05
#writev01 writev01
#writev02 writev02
writev01 writev01
# writev02 writev02
writev03 writev03
#writev05 writev05
# writev05 writev05
writev06 writev06
#writev07 writev07
# writev07 writev07
perf_event_open01 perf_event_open01
perf_event_open02 perf_event_open02
@ -1732,7 +1729,7 @@ copy_file_range03 copy_file_range03
statx01 statx01
statx02 statx02
statx03 statx03
# statx03 statx03
statx04 statx04
statx05 statx05
statx06 statx06

@ -3,7 +3,7 @@
This demo demonstrates how to verify the completeness of Netty on Occlum.
### 1. Preinstall dependencies
Related dependencies: OpenJDK 11 (Glibc)
Related dependencies: OpenJDK 8 (Glibc)
```
./preinstall_deps.sh
```
@ -15,5 +15,5 @@ Related dependencies: OpenJDK 11 (Glibc)
### 3. Run `netty unit test ` on Occlum
```
./run_netty_ut_jdk11.sh
./run_netty_ut_jdk8.sh
```

@ -1,6 +1,6 @@
includes:
- base.yaml
- java-11-openjdk-amd64.yaml
- java-8-openjdk-amd64.yaml
targets:
# copy sofaboot jar
- target: /usr/lib

@ -3,8 +3,7 @@ set -e
OS=`awk -F= '/^NAME/{print $2}' /etc/os-release`
if [ "$OS" == "\"Ubuntu\"" ]; then
apt-get update -y && apt-get install -y openjdk-11-jdk
rm -rf /usr/lib/jvm/java-11-openjdk-amd64/lib/security/blacklisted.certs
apt-get update -y && apt-get install -y openjdk-8-jdk
else
echo "Unsupported OS: $OS"
exit 1

@ -23,15 +23,15 @@ init_instance() {
.resource_limits.kernel_space_heap_max_size="64MB" |
.resource_limits.max_num_of_threads = 128 |
.process.default_heap_size = "512MB" |
.entry_points = [ "/usr/lib/jvm/java-11-openjdk-amd64/bin" ] |
.env.default = [ "LD_LIBRARY_PATH=/usr/lib/jvm/java-11-openjdk-amd64/lib/server:/usr/lib/jvm/java-11-openjdk-amd64/lib:/usr/lib/jvm/java-11-openjdk-amd64/../lib:/lib" ]' Occlum.json)" && \
.entry_points = [ "/usr/lib/jvm/java-8-openjdk-amd64/bin" ] |
.env.default = [ "LD_LIBRARY_PATH=/usr/lib/jvm/java-8-openjdk-amd64/lib/server:/usr/lib/jvm/java-8-openjdk-amd64/lib:/usr/lib/jvm/java-8-openjdk-amd64/../lib:/lib" ]' Occlum.json)" && \
echo "${new_json}" > Occlum.json
}
build_netty_ut() {
# Copy JVM and JAR file into Occlum instance and build
rm -rf image
copy_bom -f ../netty-ut-jdk11.yaml --root image --include-dir /opt/occlum/etc/template
copy_bom -f ../netty-ut-jdk8.yaml --root image --include-dir /opt/occlum/etc/template
occlum build
}
@ -42,7 +42,7 @@ run_netty_ut() {
init_instance
build_netty_ut
echo -e "${BLUE}occlum run netty ut${NC}"
occlum run /usr/lib/jvm/java-11-openjdk-amd64/bin/java \
occlum run /usr/lib/jvm/java-8-openjdk-amd64/bin/java \
-Xmx1048m -XX:-UseCompressedOops -XX:MaxMetaspaceSize=128m \
-XX:ActiveProcessorCount=2 \
-Dos.name=Linux \
@ -50,7 +50,7 @@ run_netty_ut() {
-cp /usr/lib/netty/netty-testsuite-4.1.51.Final.jar:/usr/lib/netty/netty-all-4.1.51.Final.jar:/usr/lib/netty/xz-1.5.jar:/usr/lib/netty/hamcrest-library-1.3.jar:/usr/lib/netty/logback-classic-1.1.7.jar \
--scan-class-path > netty-test-heap512m.log || true
cat netty-test-heap512m.log
cat netty-test-heap512m.log | grep "141 containers successful"
cat netty-test-heap512m.log | grep "190 tests successful"
}
run_netty_ut

@ -21,19 +21,7 @@ sed -i "186 i \ elif sysstr == 'occlum':\n return True" $CORE_PY
sed -ie "37,64d" $IMAGE_PY
sed -i "37 i \try:\n import cv2\nexcept ImportError:\n cv2 = None" $IMAGE_PY
# Download the dataset
DATASET=$script_dir/mnist
git clone https://github.com/fgnt/mnist.git
[ -d $DATASET ] && exit 0
TRAIN_IMAGE=train-images-idx3-ubyte.gz
TRAIN_LABEL=train-labels-idx1-ubyte.gz
TEST_IMAGE=t10k-images-idx3-ubyte.gz
TEST_LABEL=t10k-labels-idx1-ubyte.gz
URL=http://yann.lecun.com/exdb/mnist
mkdir $DATASET
wget $URL/$TRAIN_IMAGE -P $DATASET
wget $URL/$TRAIN_LABEL -P $DATASET
wget $URL/$TEST_IMAGE -P $DATASET
wget $URL/$TEST_LABEL -P $DATASET

@ -8,7 +8,7 @@ script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
# 2. Install python and dependencies to specified position
[ -f Miniconda3-latest-Linux-x86_64.sh ] || wget https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh
[ -d miniconda ] || bash ./Miniconda3-latest-Linux-x86_64.sh -b -p $script_dir/miniconda
$script_dir/miniconda/bin/conda create --prefix $script_dir/python-occlum -y python=3.9.11 flask=2.2.2 flask-restful=0.3.9 jinja2=3.1.2 werkzeug
$script_dir/miniconda/bin/conda create --prefix $script_dir/python-occlum -y python=3.9.11 flask=2.2.2 flask-restful=0.3.9 jinja2=3.1.2 werkzeug=2.3
# 3. Remove miniconda and installation scripts
rm -rf ./Miniconda3-latest-Linux-x86_64.sh $script_dir/miniconda

@ -19,6 +19,7 @@ fi
new_json="$(jq '.resource_limits.user_space_size = "1000MB" |
.resource_limits.kernel_space_heap_size = "300MB" |
.feature.enable_posix_shm = true |
.env.default += ["PYTHONHOME=/opt/python-occlum", "PATH=/bin"]' Occlum.json)" && \
echo "${new_json}" > Occlum.json
occlum build

2
demos/sofaboot/.gitignore vendored Normal file

@ -0,0 +1,2 @@
occlum_instance
sofa-boot-guides

2
deps/rust-sgx-sdk vendored

@ -1 +1 @@
Subproject commit e28e25ce718cc21b232a1bf37bb6446535d7ea00
Subproject commit 81384ce4d10c67eea5e1ba4ea332087940c1836b

@ -106,7 +106,7 @@ PKU feature can only be enabled in `HW` mode. Whether to turn on PKU feature is
```js
{
// ...
"metadata": {
"feature": {
// "pkru" = 0: PKU feature must be disabled
// "pkru" = 1: PKU feature must be enabled
// "pkru" = 2: PKU feature is enabled if the platform supports it
@ -118,8 +118,8 @@ PKU feature can only be enabled in `HW` mode. Whether to turn on PKU feature is
Users have three options for PKU feature:
1. If `metadata.pkru` == 0: The PKU feature must disabled in Occlum. It is the default value. Since docker has not supported PKU related syscalls in container environment by default, Occlum turns off PKU feature in default configuration.
1. If `feature.pkru` == 0: The PKU feature must be disabled in Occlum. It is the default value. Since docker does not support PKU-related syscalls in container environments by default, Occlum turns off the PKU feature in its default configuration.
2. If `metadata.pkru` == 1: The PKU feature must enabled in Occlum. If CPU on the platform does not support PKU, then enclave cannot be successfully initialized.
2. If `feature.pkru` == 1: The PKU feature must be enabled in Occlum. If the CPU on the platform does not support PKU, then the enclave cannot be successfully initialized.
3. If `metadata.pkru` == 2: If CPU supports PKU, also OS enables PKU feature, PKU feature is turned on in Occlum.
3. If `feature.pkru` == 2: If the CPU supports PKU and the OS has the PKU feature enabled, then the PKU feature is turned on in Occlum.

@ -4,4 +4,6 @@ To debug an app running upon Occlum, one can harness Occlum's builtin support fo
Meanwhile, one can use `occlum mount` command to access and manipulate the secure filesystem for debug purpose.
If the cause of a problem does not seem to be the app but Occlum itself, then one can take a glimpse into the inner workings of Occlum by checking out its log. Occlum's log level can be adjusted through `OCCLUM_LOG_LEVEL` environment variable. It has six levels: `off`, `error`, `warn`, `debug`, `info`, and `trace`. The default value is `off`, i.e., showing no log messages at all. The most verbose level is `trace`.
If the cause of a problem does not seem to be the app but Occlum itself, then one can take a glimpse into the inner workings of Occlum by checking out its log. Occlum's log level can be adjusted through `OCCLUM_LOG_LEVEL` environment variable. It has six levels: `off`, `error`, `warn`, `debug`, `info`, and `trace`. The default value is `off`, i.e., showing no log messages at all. The most verbose level is `trace`.
The Occlum log output could be disabled totally for better security by setting `metadata.disable_log=true` in `Occlum.json` before building the Occlum instance. For detail please refer [Occlum Configuration](https://occlum.readthedocs.io/en/latest/occlum_configuration.html).

@ -19,15 +19,25 @@ occlum init
Initialize a directory as the Occlum instance.
```bash
occlum build [--sign-key <key_path>] [--sign-tool <tool_path>] [--image-key <key_path>] [-f/--force]
occlum build [--sign-key <key_path>] [--sign-tool <tool_path>] [--image-key <key_path>] [-f/--force] [--enable-edmm <Y/N>]
```
Build and sign an Occlum SGX enclave (.so) and generate its associated secure FS image according to the user-provided image directory and Occlum.json config file.
The whole building process is incremental: the building artifacts are built only
when needed.
To force rebuilding all artifacts, give the [-f/--force] flag.
EDMM feature is not enabled by default. To enable it, set ENABLE_EDMM during the Occlum build phase as below:
```
ENABLE_EDMM=Y occlum build
```
Or
```
occlum build --enable-edmm Y
```
Details please refer to [doc](https://occlum.readthedocs.io/en/latest/edmm_config_guide.html).
```bash
occlum run [--cpus <num_of_cpus>] <program_name> <program_args>
occlum run <program_name> <program_args>
```
Run the user program inside an SGX enclave.

@ -62,6 +62,23 @@ The template of `Occlum.json` is shown below.
// Whether the enclave is debuggable through special SGX instructions.
// For production enclave, it is IMPORTANT to set this value to false.
"debuggable": true,
// Whether the enclave is allowable to print Occlum log.
// Optional, if not set, in default it is false for debuggable enclave
// but true for production/release enclave for better security.
// Production/release enclave could explicitly set it false to have log
// output for debugging purpose. In this case, error log level is the
// only allowed log level.
"disable_log": false,
},
// Features
"feature": {
// Whether to turn on AMX feature in Occlum
// Occlum supports AMX instruction running inside the enclave when user enables it
//
// "amx" = 0: AMX feature must be disabled
// "amx" = 1: AMX feature must be enabled
// "amx" = 2: AMX feature is enabled if the platform supports it
"amx": 0,
// Whether to turn on PKU feature in Occlum
// Occlum uses PKU for isolation between LibOS and userspace program,
// It is useful for developers to detect potential bugs.
@ -69,7 +86,22 @@ The template of `Occlum.json` is shown below.
// "pkru" = 0: PKU feature must be disabled
// "pkru" = 1: PKU feature must be enabled
// "pkru" = 2: PKU feature is enabled if the platform supports it
"pkru": 0
"pkru": 0,
// Whether to enable the EDMM feature
// Enabling EDMM feature can make the enclave initialize faster and sometimes can also
// bring performance benefit for the entire application
//
// Enabling EDMM feature will need more configuration on the memory related fields, for more information,
// please visit [EDMM Configuration Guide](https://github.com/occlum/occlum/blob/master/docs/edmm/edmm_config_guide.md)
"enable_edmm": false,
// Whether to enable POSIX shared memory feature
// Enabling POSIX shm allows processes to communicate by sharing a region of memory
//
// Set "enable_posix_shm" to true, the syscall `mmap` with flag `MAP_SHARED`
// is supported more comprehensively, implies that the file-backed memory mapping
// become shared among processes.
// More API information of POSIX shm is listed in [shm_overview](https://man7.org/linux/man-pages/man7/shm_overview.7.html).
"enable_posix_shm": false
},
// Mount points and their file systems
//

@ -33,9 +33,13 @@
"ext_prod_id": {
"high": "0x0",
"low": "0x0"
},
}
},
"feature": {
"amx": 0,
"pkru": 0,
"amx": 0
"enable_edmm": false,
"enable_posix_shm": false
},
"mount": [
{

@ -6,6 +6,7 @@ enclave {
from "sgx_tprotected_fs.edl" import *;
from "sgx_net.edl" import *;
from "sgx_occlum_utils.edl" import *;
from "sgx_vdso_time_ocalls.edl" import *;
include "sgx_quote.h"
include "occlum_edl_types.h"
@ -148,9 +149,6 @@ enclave {
int occlum_ocall_thread_getcpuclock([out] struct timespec* ts) propagate_errno;
void occlum_ocall_gettimeofday([out] struct timeval* tv);
void occlum_ocall_clock_gettime(clockid_t clockid, [out] struct timespec* ts);
void occlum_ocall_clock_getres(clockid_t clockid, [out] struct timespec* res);
void occlum_ocall_rdtsc([out] uint32_t* low, [out] uint32_t* high);
void occlum_ocall_get_timerslack([out] int *timer_slack);

2
src/exec/Cargo.lock generated

@ -542,7 +542,7 @@ dependencies = [
[[package]]
name = "occlum_exec"
version = "0.30.0"
version = "0.30.1"
dependencies = [
"chrono",
"clap",

@ -1,6 +1,6 @@
[package]
name = "occlum_exec"
version = "0.30.0"
version = "0.30.1"
edition = "2021"
[lib]

28
src/libos/Cargo.lock generated

@ -4,7 +4,7 @@ version = 3
[[package]]
name = "Occlum"
version = "0.30.0"
version = "0.30.1"
dependencies = [
"aligned",
"atomic",
@ -12,6 +12,7 @@ dependencies = [
"bitvec 1.0.1",
"ctor",
"derive_builder",
"errno",
"goblin",
"intrusive-collections",
"itertools",
@ -38,6 +39,7 @@ dependencies = [
"sgx_tstd",
"sgx_types",
"spin 0.7.1",
"vdso-time",
]
[[package]]
@ -207,6 +209,16 @@ version = "1.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "90e5c1c8368803113bf0c9584fc495a58b86dc8a29edbf8fe877d21d9507e797"
[[package]]
name = "errno"
version = "0.1.0"
dependencies = [
"log",
"rcore-fs",
"serde_json",
"sgx_tstd",
]
[[package]]
name = "fnv"
version = "1.0.7"
@ -809,6 +821,20 @@ dependencies = [
"rand",
]
[[package]]
name = "vdso-time"
version = "0.1.0"
dependencies = [
"cfg-if",
"errno",
"lazy_static",
"log",
"sgx_libc",
"sgx_trts",
"sgx_tstd",
"sgx_types",
]
[[package]]
name = "winapi"
version = "0.3.9"

@ -1,6 +1,6 @@
[package]
name = "Occlum"
version = "0.30.0"
version = "0.30.1"
edition = "2021"
[lib]
@ -25,6 +25,8 @@ rcore-fs-devfs = { path = "../../deps/sefs/rcore-fs-devfs" }
resolv-conf = { path = "../../deps/resolv-conf" }
serde = { path = "../../deps/serde-sgx/serde", features = ["derive"] }
serde_json = { path = "../../deps/serde-json-sgx" }
errno = { path = "crates/errno", features = ["occlum"] }
vdso-time = { path = "crates/vdso-time", default-features = false, features = ["sgx"] }
memoffset = "0.6.1"
scroll = { version = "0.11.0", default-features = false }
itertools = { version = "0.10.0", default-features = false, features = ["use_alloc"] }
@ -39,7 +41,7 @@ modular-bitfield = "0.11.2"
sgx_tstd = { path = "../../deps/rust-sgx-sdk/sgx_tstd" }
[features]
default = ["integrity_only_opt", "sgx_file_cache", "sgx1_exception_sim"]
default = ["integrity_only_opt", "sgx_file_cache", "sgx1_exception_sim", "kernel_heap_monitor"]
syscall_timing = [] # Timing for each syscall. But it has cost from more ocall.
integrity_only_opt = [] # Clear bss only. It should be disabled if checking memory reads.
sgx_file_cache = [] # Cache SgxFile objects. Invalidation is unimplemented.
@ -48,6 +50,7 @@ dcap = [] # DCAP support. The compilation relies on DCAP package.
cov = ["sgx_cov"] # Enable coverage colletcion.
hyper_mode = [] # For running in hyper mode.
pku = [] # PKU Support
kernel_heap_monitor = []# Kernel heap usage tracking. With overhead.
[target.'cfg(not(target_env = "sgx"))'.dependencies]
sgx_types = { path = "../../deps/rust-sgx-sdk/sgx_types" }

@ -45,7 +45,7 @@ LIBOS_LOG ?= error
LIBOS_SONAME := libocclum-libos.so.$(MAJOR_VER_NUM)
LIBOS_FEATURES :=
LIBOS_FEATURES := $(LIBOS_FEATURES)
ifeq ($(SGX_MODE), HW)
LIBOS_CORE_LIB_NAME := occlum-libos-core
@ -162,7 +162,11 @@ $(OBJ_DIR)/libos/$(SRC_OBJ)/Enclave_t.o: $(OBJ_DIR)/libos/$(SRC_OBJ)/Enclave_t.c
@echo "CC <= $@"
$(OBJ_DIR)/libos/$(SRC_OBJ)/Enclave_t.c: $(SGX_EDGER8R) ../Enclave.edl
@cd $(OBJ_DIR)/libos/$(SRC_OBJ) && $(SGX_EDGER8R) $(SGX_EDGER8R_MODE) --trusted $(CUR_DIR)/../Enclave.edl --search-path $(SGX_SDK)/include --search-path $(RUST_SGX_SDK_DIR)/edl
@cd $(OBJ_DIR)/libos/$(SRC_OBJ) && \
$(SGX_EDGER8R) $(SGX_EDGER8R_MODE) --trusted $(CUR_DIR)/../Enclave.edl \
--search-path $(SGX_SDK)/include \
--search-path $(RUST_SGX_SDK_DIR)/edl \
--search-path $(CRATES_DIR)/vdso-time/ocalls
@echo "GEN <= $@"
$(C_OBJS):$(OBJ_DIR)/libos/$(SRC_OBJ)/%.o: src/%.c

265
src/libos/crates/errno/Cargo.lock generated Normal file

@ -0,0 +1,265 @@
# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
version = 3
[[package]]
name = "bitflags"
version = "1.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"
[[package]]
name = "cc"
version = "1.0.83"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f1174fb0b6ec23863f8b971027804a42614e347eafb0a95bf0b12cdae21fc4d0"
dependencies = [
"libc",
]
[[package]]
name = "errno"
version = "0.1.0"
dependencies = [
"log",
"rcore-fs",
"serde_json",
"sgx_tstd 1.1.6",
]
[[package]]
name = "hashbrown_tstd"
version = "0.12.0"
[[package]]
name = "itoa"
version = "0.4.5"
dependencies = [
"sgx_tstd 1.1.0",
]
[[package]]
name = "libc"
version = "0.2.149"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a08173bc88b7955d1b3145aa561539096c421ac8debde8cbc3612ec635fee29b"
[[package]]
name = "log"
version = "0.4.20"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f"
[[package]]
name = "rcore-fs"
version = "0.1.0"
dependencies = [
"bitflags",
"spin",
]
[[package]]
name = "ryu"
version = "1.0.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1ad4cc8da4ef723ed60bced201181d83791ad433213d8c24efffda1eec85d741"
[[package]]
name = "serde"
version = "1.0.104"
dependencies = [
"sgx_tstd 1.1.0",
]
[[package]]
name = "serde_json"
version = "1.0.40"
dependencies = [
"itoa",
"ryu",
"serde",
"sgx_tstd 1.1.0",
]
[[package]]
name = "sgx_alloc"
version = "1.1.0"
source = "git+https://github.com/apache/teaclave-sgx-sdk.git?rev=v1.1.0#71a88b647bb76a16cbc5c3e29403e2afb67f82fd"
[[package]]
name = "sgx_alloc"
version = "1.1.6"
[[package]]
name = "sgx_backtrace_sys"
version = "1.1.0"
source = "git+https://github.com/apache/teaclave-sgx-sdk.git?rev=v1.1.0#71a88b647bb76a16cbc5c3e29403e2afb67f82fd"
dependencies = [
"cc",
"sgx_build_helper 0.1.0",
"sgx_libc 1.1.0",
]
[[package]]
name = "sgx_backtrace_sys"
version = "1.1.6"
dependencies = [
"cc",
"sgx_build_helper 1.1.6",
"sgx_libc 1.1.6",
]
[[package]]
name = "sgx_build_helper"
version = "0.1.0"
source = "git+https://github.com/apache/teaclave-sgx-sdk.git?rev=v1.1.0#71a88b647bb76a16cbc5c3e29403e2afb67f82fd"
[[package]]
name = "sgx_build_helper"
version = "1.1.6"
[[package]]
name = "sgx_demangle"
version = "1.1.0"
source = "git+https://github.com/apache/teaclave-sgx-sdk.git?rev=v1.1.0#71a88b647bb76a16cbc5c3e29403e2afb67f82fd"
[[package]]
name = "sgx_demangle"
version = "1.1.6"
[[package]]
name = "sgx_libc"
version = "1.1.0"
source = "git+https://github.com/apache/teaclave-sgx-sdk.git?rev=v1.1.0#71a88b647bb76a16cbc5c3e29403e2afb67f82fd"
dependencies = [
"sgx_types 1.1.0",
]
[[package]]
name = "sgx_libc"
version = "1.1.6"
dependencies = [
"sgx_types 1.1.6",
]
[[package]]
name = "sgx_tcrypto"
version = "1.1.0"
source = "git+https://github.com/apache/teaclave-sgx-sdk.git?rev=v1.1.0#71a88b647bb76a16cbc5c3e29403e2afb67f82fd"
dependencies = [
"sgx_types 1.1.0",
]
[[package]]
name = "sgx_tprotected_fs"
version = "1.1.0"
source = "git+https://github.com/apache/teaclave-sgx-sdk.git?rev=v1.1.0#71a88b647bb76a16cbc5c3e29403e2afb67f82fd"
dependencies = [
"sgx_trts 1.1.0",
"sgx_types 1.1.0",
]
[[package]]
name = "sgx_tprotected_fs"
version = "1.1.6"
dependencies = [
"sgx_trts 1.1.6",
"sgx_types 1.1.6",
]
[[package]]
name = "sgx_trts"
version = "1.1.0"
source = "git+https://github.com/apache/teaclave-sgx-sdk.git?rev=v1.1.0#71a88b647bb76a16cbc5c3e29403e2afb67f82fd"
dependencies = [
"sgx_libc 1.1.0",
"sgx_types 1.1.0",
]
[[package]]
name = "sgx_trts"
version = "1.1.6"
dependencies = [
"sgx_libc 1.1.6",
"sgx_types 1.1.6",
]
[[package]]
name = "sgx_tse"
version = "1.1.0"
source = "git+https://github.com/apache/teaclave-sgx-sdk.git?rev=v1.1.0#71a88b647bb76a16cbc5c3e29403e2afb67f82fd"
dependencies = [
"sgx_types 1.1.0",
]
[[package]]
name = "sgx_tseal"
version = "1.1.0"
source = "git+https://github.com/apache/teaclave-sgx-sdk.git?rev=v1.1.0#71a88b647bb76a16cbc5c3e29403e2afb67f82fd"
dependencies = [
"sgx_tcrypto",
"sgx_trts 1.1.0",
"sgx_tse",
"sgx_types 1.1.0",
]
[[package]]
name = "sgx_tstd"
version = "1.1.0"
source = "git+https://github.com/apache/teaclave-sgx-sdk.git?rev=v1.1.0#71a88b647bb76a16cbc5c3e29403e2afb67f82fd"
dependencies = [
"sgx_alloc 1.1.0",
"sgx_backtrace_sys 1.1.0",
"sgx_demangle 1.1.0",
"sgx_libc 1.1.0",
"sgx_tprotected_fs 1.1.0",
"sgx_trts 1.1.0",
"sgx_tseal",
"sgx_types 1.1.0",
"sgx_unwind 0.1.0",
]
[[package]]
name = "sgx_tstd"
version = "1.1.6"
dependencies = [
"hashbrown_tstd",
"sgx_alloc 1.1.6",
"sgx_backtrace_sys 1.1.6",
"sgx_demangle 1.1.6",
"sgx_libc 1.1.6",
"sgx_tprotected_fs 1.1.6",
"sgx_trts 1.1.6",
"sgx_types 1.1.6",
"sgx_unwind 1.1.6",
]
[[package]]
name = "sgx_types"
version = "1.1.0"
source = "git+https://github.com/apache/teaclave-sgx-sdk.git?rev=v1.1.0#71a88b647bb76a16cbc5c3e29403e2afb67f82fd"
[[package]]
name = "sgx_types"
version = "1.1.6"
[[package]]
name = "sgx_unwind"
version = "0.1.0"
source = "git+https://github.com/apache/teaclave-sgx-sdk.git?rev=v1.1.0#71a88b647bb76a16cbc5c3e29403e2afb67f82fd"
dependencies = [
"sgx_build_helper 0.1.0",
]
[[package]]
name = "sgx_unwind"
version = "1.1.6"
dependencies = [
"sgx_build_helper 1.1.6",
]
[[package]]
name = "spin"
version = "0.5.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d"

@ -0,0 +1,19 @@
[package]
name = "errno"
version = "0.1.0"
authors = ["Tate, Hongliang Tian <tate.thl@antgroup.com>"]
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[features]
default = []
std = []
occlum = ["sgx", "serde_json", "rcore-fs"]
sgx = ["sgx_tstd"]
[dependencies]
log = "0.4"
serde_json = { path = "../../../../deps/serde-json-sgx", optional = true }
sgx_tstd = { path = "../../../../deps/rust-sgx-sdk/sgx_tstd", optional = true }
rcore-fs = { path = "../../../../deps/sefs/rcore-fs", optional = true }

@ -1,4 +1,9 @@
use super::*;
use alloc::boxed::Box;
use alloc::string::String;
use alloc::vec::Vec;
use core::fmt;
use super::Error;
#[derive(Debug, Clone)]
pub struct ErrorBacktrace<'a> {
@ -15,7 +20,7 @@ impl<'a> ErrorBacktrace<'a> {
impl<'a> fmt::Display for ErrorBacktrace<'a> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let error_strings: Vec<String> = self.clone().map(|e| e.to_string()).collect();
let error_strings: Vec<String> = self.clone().map(|e| alloc::format!("{}", e)).collect();
let error_backtrace = error_strings.join("\n Caused by ");
write!(f, "{}", error_backtrace)
}
@ -48,18 +53,3 @@ impl Error {
ErrorBacktrace::new(self)
}
}
pub trait ResultExt<T> {
fn cause_err<F>(self, f: F) -> Result<T>
where
F: FnOnce(&Error) -> Error;
}
impl<T> ResultExt<T> for Result<T> {
fn cause_err<F>(self, f: F) -> Result<T>
where
F: FnOnce(&Error) -> Error,
{
self.map_err(|old_e| old_e.cause_err(f))
}
}

@ -1,4 +1,4 @@
use super::*;
use core::fmt;
/// POSIX errno
#[derive(Clone, Copy, Debug, PartialEq)]
@ -46,7 +46,6 @@ pub enum Errno {
ENOSYS = 38,
ENOTEMPTY = 39,
ELOOP = 40,
EWOULDBLOCK = 41,
ENOMSG = 42,
EIDRM = 43,
ECHRNG = 44,
@ -145,6 +144,9 @@ const ERRNO_MIN: u32 = Errno::EPERM as u32;
const ERRNO_MAX: u32 = Errno::EHWPOISON as u32;
impl Errno {
// EWOULDBLOCK was used on BSD/Sun variants of Unix, and EAGAIN was the AT&T System V error code.
// Here we keep same with linux, define EWOULDBLOCK with EAGAIN.
pub const EWOULDBLOCK: Errno = Errno::EAGAIN;
pub(crate) fn as_str(&self) -> &'static str {
use self::Errno::*;
match *self {

@ -1,4 +1,7 @@
use super::*;
use alloc::boxed::Box;
use core::fmt;
use super::{Errno, ToErrno};
#[derive(Debug)]
pub struct Error {
@ -10,7 +13,7 @@ pub struct Error {
#[derive(Debug)]
enum Error__ {
Embedded((Errno, &'static str)),
Boxed(Box<dyn ToErrno + 'static>),
Boxed(Box<dyn ToErrno + Send + 'static>),
}
#[derive(Debug, Clone, Copy)]
@ -30,7 +33,7 @@ impl Error {
pub fn boxed<T>(inner: T, location: Option<ErrorLocation>) -> Error
where
T: ToErrno + 'static,
T: ToErrno + Send + 'static,
{
Error {
inner: Error__::Boxed(Box::new(inner)),
@ -64,23 +67,6 @@ impl ErrorLocation {
}
}
impl std::error::Error for Error {
fn description(&self) -> &str {
self.errno().as_str()
}
fn cause(&self) -> Option<&dyn std::error::Error> {
self.cause.as_ref().map(|e| e as &dyn std::error::Error)
}
/*
fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
self.cause
.as_ref()
.map(|e| e as &(dyn std::error::Error + 'static))
}
*/
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.inner)?;
@ -105,3 +91,25 @@ impl fmt::Display for ErrorLocation {
write!(f, "[line = {}, file = {}]", self.line, self.file)
}
}
// Implement the standard `Error` trait only when a `std` (or the SGX port of
// `std`) is available; the crate itself stays `no_std`-compatible otherwise.
#[cfg(any(feature = "std", feature = "sgx", test, doctest))]
mod if_std {
    use super::*;

    impl std::error::Error for Error {
        // Deprecated in std but kept for compatibility: a static description
        // derived from the error's errno.
        fn description(&self) -> &str {
            self.errno().as_str()
        }

        // Deprecated in favor of `source`; exposes the chained cause, if any.
        fn cause(&self) -> Option<&dyn std::error::Error> {
            self.cause.as_ref().map(|e| e as &dyn std::error::Error)
        }

        /*
        fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
            self.cause
                .as_ref()
                .map(|e| e as &(dyn std::error::Error + 'static))
        }
        */
    }
}

@ -0,0 +1,204 @@
//! User-friendly error handling with build-in support for POSIX errno.
//!
//! This crate extends Rust's standard error handling with the abilities of
//! reporting error locations, providing backtrace information, and unifying
//! all types of errors with POSIX errno.
//!
//! # Motivation
//!
//! While the built-in error handling mechanism of Rust is undoubtedly superior
//! to that of a traditional system programming language (e.g., C/C++), it
//! is _not perfect_.
//!
//! First, trait `std::error::Error` does not provide any means to
//! record the location of the source code that triggers an error, leading to
//! a slow process of diagnosing errors or bugs.
//!
//! Second, while the `Error` trait (which has a `cause` method)
//! supports backtrace in theory, it is inconvenient---in practice---to implement
//! backtrace. This is because the users still need to manually write the concrete
//! implementation that stores the cause for every error struct.
//!
//! Third, one challenging aspect of error handling in Rust is
//! dealing with the various types of errors. The standard library
//! defines errors like `std::io::Error`, `std::fmt::Error`, `std::str::Utf8Error`, etc.
//! Not to mention the error types defined by third-party libraries.
//! To make it even worse, we, as OS writers, have to convert all these errors
//! into POSIX errno eventually.
//!
//! To cope with the issues above, this crate extends Rust's standard error
//! handling mechanism. Specifically, it aims at the following design goals:
//!
//! * **Fast diagnose** (e.g., reporting the backtrace and the code location of an error).
//! * **First-class POSIX errno** (e.g., every error has an errno).
//! * **Zero-overhead abstraction** (e.g., no heap allocation unless absolutely necessary).
//! * **Ergonomic grammar** (e.g., use macros to avoid writing code manually).
//! * **Compatibility with `no_std`**.
//!
//! # How to Use
//!
//! ## Basic Usage
//!
//! The simplest usage involves just one macro---`errno!`.
//! See the sample code below:
//! ```rust
//! use errno::prelude::*;
//!
//! fn return_err() -> Result<()> {
//! Err(errno!(EINVAL, "the root error"))
//! }
//!
//! # fn main() {
//! if let Err(e) = return_err() {
//! println!("{}", e);
//! }
//! # }
//! ```
//! which prints something like
//! ```text
//! EINVAL (#22, Invalid argument): the root error [line = 45, file = src/lib.rs]
//! ```
//! Note that the specific line and file of source code that generates the error
//! is printed. This facilitates diagnosing errors.
//!
//! ## Backtrace
//!
//! A more interesting usage is to print the backtrace of an error. To create
//! the chains of errors, `std::result::Result` is extended with a new method
//! named `cause_err`. If the result is `Ok`, the method does nothing; otherwise,
//! this method executes a user-given closure to output a new error whose cause
//! is the error contained in the result. The method consumes the current result
//! and generates a new result that contains the new error. The two errors are
//! chained. More calls to `cause_err` form deeper backtraces.
//!
//! See the sample code below:
//! ```rust
//! use errno::prelude::*;
//!
//! fn return_err() -> Result<()> {
//! Err(errno!(EINVAL, "the root error"))
//! }
//!
//! fn cause_err() -> Result<()> {
//! return_err()
//! .cause_err(|_e| errno!(EIO, "another error"))
//! }
//!
//! # fn main() {
//! if let Err(e) = cause_err() {
//! println!("{}", e.backtrace());
//! }
//! # }
//! ```
//! which prints something like
//! ```text
//! EIO (#5, I/O error): another error [line = 71, file = src/lib.rs]
//! Caused by EINVAL (#22, Invalid argument): the root error [line = 68, file = src/lib.rs]
//! ```
//!
#![feature(allocator_api)]
// Use no_std and alloc crate except when given std feature or during test.
#![cfg_attr(not(any(feature = "std", test, doctest)), no_std)]
extern crate alloc;
// Use Rust SGX SDK's std when given SGX feature.
#[cfg(feature = "sgx")]
extern crate sgx_tstd as std;
#[macro_use]
extern crate log;
mod backtrace;
mod errno;
mod error;
pub mod prelude;
mod result;
mod to_errno;
pub use self::backtrace::ErrorBacktrace;
pub use self::errno::*;
pub use self::errno::Errno::*;
pub use self::error::{Error, ErrorLocation};
pub use self::result::{Result, ResultExt};
pub use self::to_errno::ToErrno;
/// Creates an `Error` that records the source location of the call site.
///
/// Two forms are supported:
/// * `errno!(EINVAL, "msg")` — builds a lightweight, non-allocating error
///   from an `Errno` and a `&'static str` message.
/// * `errno!(some_error)` — boxes any `ToErrno` value into an `Error`.
#[macro_export]
macro_rules! errno {
    ($errno_expr: expr, $error_msg: expr) => {{
        // Embedded form: an (Errno, &'static str) pair, no heap allocation.
        let inner_error = {
            let errno: Errno = $errno_expr;
            let msg: &'static str = $error_msg;
            (errno, msg)
        };
        let error =
            $crate::Error::embedded(inner_error, Some($crate::ErrorLocation::new(file!(), line!())));
        error
    }};
    ($error_expr: expr) => {{
        // Boxed form: wraps an arbitrary ToErrno error (heap-allocated).
        let inner_error = $error_expr;
        let error = $crate::Error::boxed(inner_error, Some($crate::ErrorLocation::new(file!(), line!())));
        error
    }};
}
/// Returns early from the enclosing function with `Err(errno!(...))`.
///
/// Accepts the same two argument forms as the `errno!` macro.
#[macro_export]
macro_rules! return_errno {
    ($errno_expr: expr, $error_msg: expr) => {{
        return Err(errno!($errno_expr, $error_msg));
    }};
    ($error_expr: expr) => {{
        return Err(errno!($error_expr));
    }};
}
// return Err(errno) if libc return -1
/// Evaluates a libc call inside `unsafe` and propagates failures as errors.
///
/// If the call returns a negative value, the thread's errno is fetched via
/// `libc::errno()` and the enclosing function returns early with a matching
/// `Error`; otherwise the macro evaluates to the (non-negative) return value.
#[macro_export]
macro_rules! try_libc {
    ($ret: expr) => {{
        let ret = unsafe { $ret };
        if ret < 0 {
            let errno = unsafe { libc::errno() };
            return_errno!(Errno::from(errno as u32), "libc error");
        }
        ret
    }};
}
// return Err(errno) if libc return -1
// raise SIGPIPE if errno == EPIPE
/// Like `try_libc!`, but additionally delivers SIGPIPE to the current thread
/// when the failure is EPIPE, mimicking the kernel's behavior for writes to a
/// closed pipe.
///
/// NOTE(review): `crate::signal::do_tkill` and `current!` are not defined in
/// this crate — they resolve at the macro's expansion site, presumably inside
/// the Occlum LibOS crate. Confirm before using elsewhere.
#[macro_export]
macro_rules! try_libc_may_epipe {
    ($ret: expr) => {{
        let ret = unsafe { $ret };
        if ret < 0 {
            let errno = unsafe { libc::errno() };
            if errno == Errno::EPIPE as i32 {
                crate::signal::do_tkill(current!().tid(), crate::signal::SIGPIPE.as_u8() as i32);
            }
            return_errno!(Errno::from(errno as u32), "libc error");
        }
        ret
    }};
}
#[cfg(test)]
mod tests {
    use crate::prelude::*;

    // Verifies that the blanket `From<T: ToErrno>` impl lets `?` convert
    // standard-library errors into this crate's `Error`.
    #[test]
    fn convert_std_io_error() -> Result<()> {
        use std::io::{BufWriter, Write};
        let mut buf_writer = BufWriter::new(Vec::<u8>::new());
        // std::io::Error can be converted to crate::Error implicitly
        buf_writer.write("foo".as_bytes())?;
        Ok(())
    }

    #[test]
    fn convert_std_ffi_nul_error() -> Result<()> {
        use std::ffi::CString;
        // std::ffi::NulError can be converted to crate::Error implicitly
        let _ = CString::new(b"foo".to_vec())?;
        Ok(())
    }
}

@ -0,0 +1,3 @@
// The crate's prelude: glob-import this module to pull in the error macros,
// the `Errno` variants, and the core error-handling types in one line.
pub use crate::{
    errno, return_errno, Errno, Errno::*, Error, ErrorLocation, Result, ResultExt, ToErrno,
};

@ -0,0 +1,34 @@
use super::{Errno, Error};

/// A specialized `Result` whose error type is this crate's [`Error`].
pub type Result<T> = core::result::Result<T, Error>;

/// Extending `Result` with extra functionalities.
pub trait ResultExt<T> {
    fn cause_err<F>(self, f: F) -> Result<T>
    where
        F: FnOnce(&Error) -> Error;

    fn errno(&self) -> Option<Errno>;

    fn has_errno(&self, errno: Errno) -> bool;
}

impl<T> ResultExt<T> for Result<T> {
    fn cause_err<F>(self, f: F) -> Result<T>
    where
        F: FnOnce(&Error) -> Error,
    {
        // Build a new error from the old one (via `f`), chaining the old
        // error as its cause; `Ok` values pass through untouched.
        self.map_err(|prev_err| prev_err.cause_err(f))
    }

    fn errno(&self) -> Option<Errno> {
        // Borrow the error side, if present, and extract its errno.
        self.as_ref().err().map(|e| e.errno())
    }

    fn has_errno(&self, errno: Errno) -> bool {
        matches!(self.errno(), Some(e) if e == errno)
    }
}

@ -0,0 +1,138 @@
use core::fmt;

use super::{Errno, Error};

/// A trait for error types that can be mapped to a POSIX errno.
///
/// Implementors must also be `Display` and `Debug` so the error can be
/// printed for users and logged for diagnosis.
pub trait ToErrno: fmt::Display + fmt::Debug {
    /// Returns the POSIX errno that best describes this error.
    fn errno(&self) -> Errno;
}

// Any `Send + 'static` `ToErrno` value converts into this crate's `Error`
// implicitly (e.g., through the `?` operator). No source location is recorded
// here; use the `errno!` macro when the call site should be captured.
impl<T> From<T> for Error
where
    T: ToErrno + Send + 'static,
{
    fn from(t: T) -> Error {
        Error::boxed(t, None)
    }
}

// An `Errno` is trivially its own errno.
impl ToErrno for Errno {
    fn errno(&self) -> Errno {
        *self
    }
}

// Allocation failure maps to "out of memory".
impl ToErrno for core::alloc::AllocError {
    fn errno(&self) -> Errno {
        Errno::ENOMEM
    }
}

// An invalid memory layout is treated as a caller error.
impl ToErrno for core::alloc::LayoutError {
    fn errno(&self) -> Errno {
        Errno::EINVAL
    }
}

// An unparsable integer is treated as invalid input.
impl ToErrno for core::num::ParseIntError {
    fn errno(&self) -> Errno {
        Errno::EINVAL
    }
}

// Conversions that require `std` (or the SGX port of `std`); the crate
// itself stays `no_std`-compatible when these features are off.
#[cfg(any(feature = "std", feature = "sgx", test, doctest))]
mod if_std {
    use super::*;

    // Map each `std::io::ErrorKind` onto the closest POSIX errno.
    impl From<std::io::ErrorKind> for Errno {
        fn from(kind: std::io::ErrorKind) -> Errno {
            use std::io::ErrorKind::*;
            use Errno::*;
            match kind {
                NotFound => ENOENT,
                PermissionDenied => EPERM,
                ConnectionRefused => ECONNREFUSED,
                ConnectionReset => ECONNRESET,
                ConnectionAborted => ECONNABORTED,
                NotConnected => ENOTCONN,
                AddrInUse => EADDRINUSE,
                AddrNotAvailable => EADDRNOTAVAIL,
                BrokenPipe => EPIPE,
                AlreadyExists => EEXIST,
                // EWOULDBLOCK is defined as an alias of EAGAIN in this crate.
                WouldBlock => Errno::EWOULDBLOCK,
                InvalidInput => EINVAL,
                InvalidData => EBADMSG, /* TODO: correct? */
                TimedOut => ETIMEDOUT,
                Interrupted => EINTR,
                WriteZero => EINVAL,
                UnexpectedEof => EIO,
                Other => EIO,
                // `ErrorKind` is non-exhaustive; default unknown kinds to EIO.
                _ => EIO,
            }
        }
    }

    impl ToErrno for std::io::Error {
        fn errno(&self) -> Errno {
            Errno::from(self.kind())
        }
    }

    // An interior NUL byte in a would-be C string is invalid input.
    impl ToErrno for std::ffi::NulError {
        fn errno(&self) -> Errno {
            Errno::EINVAL
        }
    }
}

// Conversions used only inside the Occlum LibOS (serde_json and the
// rcore-fs file-system error types).
#[cfg(feature = "occlum")]
mod if_occlum {
    use rcore_fs::dev::DevError;
    use rcore_fs::vfs::FsError;

    use super::*;

    impl ToErrno for serde_json::Error {
        fn errno(&self) -> Errno {
            Errno::EINVAL
        }
    }

    // Map each rcore-fs error onto the closest POSIX errno.
    impl ToErrno for FsError {
        fn errno(&self) -> Errno {
            use Errno::*;
            match *self {
                FsError::NotSupported => ENOSYS,
                FsError::NotFile => EISDIR,
                FsError::IsDir => EISDIR,
                FsError::NotDir => ENOTDIR,
                FsError::EntryNotFound => ENOENT,
                FsError::EntryExist => EEXIST,
                FsError::NotSameFs => EXDEV,
                FsError::InvalidParam => EINVAL,
                FsError::NoDeviceSpace => ENOMEM,
                FsError::DirRemoved => ENOENT,
                FsError::DirNotEmpty => ENOTEMPTY,
                FsError::WrongFs => EINVAL,
                FsError::DeviceError(_err) => EIO,
                FsError::SymLoop => ELOOP,
                FsError::NoDevice => ENXIO,
                FsError::IOCTLError => EINVAL,
                FsError::Again => EAGAIN,
                FsError::Busy => EBUSY,
                FsError::WrProtected => EROFS,
                FsError::NoIntegrity => EIO,
                FsError::PermError => EPERM,
                FsError::NameTooLong => ENAMETOOLONG,
                FsError::FileTooBig => EFBIG,
                FsError::OpNotSupported => EOPNOTSUPP,
                FsError::NotMountPoint => EINVAL,
            }
        }
    }

    // Going the other way: degrade a LibOS error to a bare device errno.
    impl From<Error> for DevError {
        fn from(e: Error) -> Self {
            DevError(e.errno() as i32)
        }
    }
}

10
src/libos/crates/vdso-time/.gitignore vendored Normal file

@ -0,0 +1,10 @@
# Generated by Cargo
# will have compiled files and executables
/target/
# Remove Cargo.lock from gitignore if creating an executable, leave it for libraries
# More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html
Cargo.lock
# These are backup files generated by rustfmt
**/*.rs.bk

@ -0,0 +1,32 @@
[package]
name = "vdso-time"
version = "0.1.0"
authors = ["Shuocheng Wang <shuocheng.wsc@antgroup.com>"]
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[features]
default = ["std"]
std = ["libc"]
sgx = ["sgx_types", "sgx_tstd", "sgx_libc", "sgx_trts"]
[dependencies]
cfg-if = "1.0"
errno = { path = "../errno" }
lazy_static = { version = "1.4.0", features = ["spin_no_std"] }
libc = { version = "0.2", optional = true }
log = "0.4"
sgx_types = { path = "../../../../deps/rust-sgx-sdk/sgx_types", optional = true }
sgx_tstd = { path = "../../../../deps/rust-sgx-sdk/sgx_tstd", optional = true, features = ["backtrace"] }
sgx_libc = { path = "../../../../deps/rust-sgx-sdk/sgx_libc", optional = true }
sgx_trts = { path = "../../../../deps/rust-sgx-sdk/sgx_trts", optional = true }
[dev-dependencies]
criterion = "0.3"
lazy_static = { version = "1.4.0", features = ["spin_no_std"] }
ctor = "0.1"
[[bench]]
name = "bench"
harness = false

@ -0,0 +1,40 @@
# vdso-time
A Rust crate for getting time using the vDSO. This crate supports both the host environment and SGX enclaves (based on Rust-SGX-SDK).
## Getting Started
Add the following dependency to your Cargo manifest:
```
vdso-time = { path = "yourpath/vdso-time" }
```
If you want to use in SGX environment, add the following dependency to your Cargo manifest:
```
vdso-time = { path = "yourpath/vdso-time", default-features = false, features = ["sgx"] }
```
## API examples
```
use vdso_time::ClockId;
let time = vdso_time::clock_gettime(ClockId::CLOCK_MONOTONIC).unwrap();
println!("vdso_time::clock_gettime: {:?}", time);
let res = vdso_time::clock_getres(ClockId::CLOCK_MONOTONIC).unwrap();
println!("vdso_time::clock_getres: {:?}", res);
```
```
use vdso_time::{Vdso, ClockId};
let vdso = Vdso::new().unwrap();
let time = vdso.clock_gettime(ClockId::CLOCK_MONOTONIC).unwrap();
println!("vdso.clock_gettime: {:?}", time);
let res = vdso.clock_getres(ClockId::CLOCK_MONOTONIC).unwrap();
println!("vdso.clock_getres: {:?}", res);
```

@ -0,0 +1,42 @@
use criterion::{black_box, criterion_group, criterion_main, Criterion};
use vdso_time::ClockId;
/// Reads the current CLOCK_MONOTONIC time through the libc wrapper,
/// returning the raw timespec. Used as the baseline for the vDSO benchmarks.
fn libc_clock_gettime() -> libc::timespec {
    let mut ts = libc::timespec {
        tv_sec: 0,
        tv_nsec: 0,
    };
    // SAFETY: `ts` is a valid, writable timespec for libc to fill in.
    unsafe {
        libc::clock_gettime(ClockId::CLOCK_MONOTONIC as _, &mut ts as *mut _);
    }
    ts
}
/// Queries the CLOCK_MONOTONIC resolution through the libc wrapper,
/// returning the raw timespec. Used as the baseline for the vDSO benchmarks.
fn libc_clock_getres() -> libc::timespec {
    let mut ts = libc::timespec {
        tv_sec: 0,
        tv_nsec: 0,
    };
    // SAFETY: `ts` is a valid, writable timespec for libc to fill in.
    unsafe {
        libc::clock_getres(ClockId::CLOCK_MONOTONIC as _, &mut ts as *mut _);
    }
    ts
}
/// Registers the libc-vs-vDSO comparison benchmarks with Criterion.
/// Each pair (gettime, getres) is benchmarked through both paths so the
/// report shows them side by side.
fn criterion_benchmark(c: &mut Criterion) {
    c.bench_function("libc clock_gettime", |bencher| {
        bencher.iter(|| black_box(libc_clock_gettime()))
    });
    c.bench_function("vdso clock_gettime", |bencher| {
        bencher.iter(|| black_box(vdso_time::clock_gettime(ClockId::CLOCK_MONOTONIC).unwrap()))
    });
    c.bench_function("libc clock_getres", |bencher| {
        bencher.iter(|| black_box(libc_clock_getres()))
    });
    c.bench_function("vdso clock_getres", |bencher| {
        bencher.iter(|| black_box(vdso_time::clock_getres(ClockId::CLOCK_MONOTONIC).unwrap()))
    });
}
// Register the benchmark group and generate the Criterion harness `main`.
criterion_group!(benches, criterion_benchmark);
criterion_main!(benches);

@ -0,0 +1,33 @@
include!("common/bench.rs");
/// Reads CLOCK_MONOTONIC via libc and converts the result to a `Duration`.
fn libc_clock_gettime() -> Duration {
    let mut ts = libc::timespec {
        tv_sec: 0,
        tv_nsec: 0,
    };
    // SAFETY: `ts` is a valid, writable timespec out-parameter.
    unsafe {
        libc::clock_gettime(ClockId::CLOCK_MONOTONIC as _, &mut ts as *mut _);
    }
    Duration::new(ts.tv_sec as u64, ts.tv_nsec as u32)
}
/// Queries the CLOCK_MONOTONIC resolution via libc as a `Duration`.
fn libc_clock_getres() -> Duration {
    let mut ts = libc::timespec {
        tv_sec: 0,
        tv_nsec: 0,
    };
    // SAFETY: `ts` is a valid, writable timespec out-parameter.
    unsafe {
        libc::clock_getres(ClockId::CLOCK_MONOTONIC as _, &mut ts as *mut _);
    }
    Duration::new(ts.tv_sec as u64, ts.tv_nsec as u32)
}
/// Runs the libc-based time queries as a baseline for the vDSO benchmarks.
fn libc_benchmarks() {
    benchmark("Libc clock_gettime()", libc_clock_gettime);
    benchmark("Libc clock_getres()", libc_clock_getres);
}
/// Runs the libc baselines first, then the vDSO benchmarks, so the two
/// sets of numbers print one after the other for comparison.
fn main() {
    libc_benchmarks();
    vdso_benchmarks();
}

@ -0,0 +1,31 @@
use std::time::Duration;
use vdso_time::{ClockId, clock_getres, clock_gettime};
/// from criterion crate:
/// A function that is opaque to the optimizer, used to prevent the compiler from
/// optimizing away computations in a benchmark.
///
/// This variant is stable-compatible, but it may cause some performance overhead
/// or fail to prevent code from being eliminated.
fn black_box<T>(dummy: T) -> T {
    // SAFETY: `dummy` is a valid, initialized value owned by this frame.
    // We take a bitwise copy via a volatile read (which the optimizer may
    // not elide) and then forget the original so its destructor does not
    // run twice.
    unsafe {
        let copied = std::ptr::read_volatile(&dummy);
        std::mem::forget(dummy);
        copied
    }
}
/// Calls `func` one million times and prints its average latency in
/// nanoseconds, measuring the elapsed wall time with the vDSO monotonic clock.
fn benchmark(name: &str, func: impl Fn() -> Duration) {
    const LOOPS: u128 = 1000000;
    let start = clock_gettime(ClockId::CLOCK_MONOTONIC).unwrap();
    for _ in 0..LOOPS {
        // black_box keeps the optimizer from discarding the result.
        black_box(func());
    }
    let end = clock_gettime(ClockId::CLOCK_MONOTONIC).unwrap();
    println!("[{}] avg_time: {:?} ns", name, (end - start).as_nanos() / LOOPS);
}
/// Benchmarks the vDSO-based time queries.
fn vdso_benchmarks() {
    benchmark("vdso clock_gettime()", || clock_gettime(ClockId::CLOCK_MONOTONIC).unwrap());
    benchmark("vdso clock_getres()", || clock_getres(ClockId::CLOCK_MONOTONIC).unwrap());
}

@ -0,0 +1,26 @@
/// Demonstrates the free-function API: query time and resolution directly.
fn first_example() {
    use vdso_time::ClockId;

    let now = vdso_time::clock_gettime(ClockId::CLOCK_MONOTONIC).unwrap();
    println!("vdso_time::clock_gettime: {:?}", now);

    let resolution = vdso_time::clock_getres(ClockId::CLOCK_MONOTONIC).unwrap();
    println!("vdso_time::clock_getres: {:?}", resolution);
}
/// Demonstrates the `Vdso` object API: construct a handle once, then query it.
fn second_example() {
    use vdso_time::{Vdso, ClockId};

    let vdso = Vdso::new().unwrap();

    let now = vdso.clock_gettime(ClockId::CLOCK_MONOTONIC).unwrap();
    println!("vdso.clock_gettime: {:?}", now);

    let resolution = vdso.clock_getres(ClockId::CLOCK_MONOTONIC).unwrap();
    println!("vdso.clock_getres: {:?}", resolution);
}
/// Runs both usage examples in sequence.
fn example() {
    first_example();
    second_example();
}

@ -0,0 +1,5 @@
// Pull in the shared example code so host and SGX builds stay in sync.
include!("common/example.rs");

/// Entry point: runs both usage examples.
fn main() {
    example();
}

@ -0,0 +1,11 @@
Cargo.lock
Enclave_u.c
Enclave_u.h
Enclave_t.c
Enclave_t.h
app/target
enclave/target
bin/app
*.o
*.a
*.so

@ -0,0 +1,167 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
######## SGX SDK Settings ########
SGX_SDK ?= /opt/sgxsdk
SGX_MODE ?= HW
SGX_ARCH ?= x64
include ../../../common.mk
include $(RUST_SGX_SDK_DIR)/buildenv.mk
OCALLS_DIR := ../../ocalls
ifeq ($(shell getconf LONG_BIT), 32)
SGX_ARCH := x86
else ifeq ($(findstring -m32, $(CXXFLAGS)), -m32)
SGX_ARCH := x86
endif
ifeq ($(SGX_ARCH), x86)
SGX_COMMON_CFLAGS := -m32
SGX_LIBRARY_PATH := $(SGX_SDK)/lib
SGX_ENCLAVE_SIGNER := $(SGX_SDK)/bin/x86/sgx_sign
SGX_EDGER8R := $(SGX_SDK)/bin/x86/sgx_edger8r
else
SGX_COMMON_CFLAGS := -m64
SGX_LIBRARY_PATH := $(SGX_SDK)/lib64
SGX_ENCLAVE_SIGNER := $(SGX_SDK)/bin/x64/sgx_sign
SGX_EDGER8R := $(SGX_SDK)/bin/x64/sgx_edger8r
endif
ifeq ($(SGX_DEBUG), 1)
ifeq ($(SGX_PRERELEASE), 1)
$(error Cannot set SGX_DEBUG and SGX_PRERELEASE at the same time!!)
endif
endif
ifeq ($(SGX_DEBUG), 1)
SGX_COMMON_CFLAGS += -O0 -g
else
SGX_COMMON_CFLAGS += -O2
endif
SGX_COMMON_CFLAGS += -fstack-protector
######## CUSTOM Settings ########
CUSTOM_LIBRARY_PATH := ./lib
CUSTOM_BIN_PATH := ./bin
CUSTOM_EDL_PATH := $(RUST_SGX_SDK_DIR)/edl
CUSTOM_COMMON_PATH := $(RUST_SGX_SDK_DIR)/common
######## EDL Settings ########
Enclave_EDL_Files := enclave/Enclave_t.c enclave/Enclave_t.h app/Enclave_u.c app/Enclave_u.h
######## APP Settings ########
App_Rust_Flags := --release
App_SRC_Files := $(shell find app/ -type f -name '*.rs') $(shell find app/ -type f -name 'Cargo.toml')
App_Include_Paths := -I ./app -I./include -I$(SGX_SDK)/include -I$(CUSTOM_EDL_PATH)
App_C_Flags := $(SGX_COMMON_CFLAGS) -fPIC -Wno-attributes $(App_Include_Paths)
App_Rust_Path := ./app/target/release
App_Enclave_u_Object := lib/libEnclave_u.a
App_Ocall_Object_Name := libvdso_time_ocalls.a
App_Ocall_Object := lib/$(App_Ocall_Object_Name)
App_Name := bin/app
######## Enclave Settings ########
ifneq ($(SGX_MODE), HW)
Trts_Library_Name := sgx_trts_sim
Service_Library_Name := sgx_tservice_sim
else
Trts_Library_Name := sgx_trts
Service_Library_Name := sgx_tservice
endif
Crypto_Library_Name := sgx_tcrypto
KeyExchange_Library_Name := sgx_tkey_exchange
ProtectedFs_Library_Name := sgx_tprotected_fs
RustEnclave_C_Files := $(wildcard ./enclave/*.c)
RustEnclave_C_Objects := $(RustEnclave_C_Files:.c=.o)
RustEnclave_Include_Paths := -I$(CUSTOM_COMMON_PATH)/inc -I$(CUSTOM_EDL_PATH) -I$(SGX_SDK)/include -I$(SGX_SDK)/include/tlibc -I$(SGX_SDK)/include/stlport -I$(SGX_SDK)/include/epid -I ./enclave -I./include
RustEnclave_Link_Libs := -L$(CUSTOM_LIBRARY_PATH) -lenclave
RustEnclave_Compile_Flags := $(SGX_COMMON_CFLAGS) $(ENCLAVE_CFLAGS) $(RustEnclave_Include_Paths)
RustEnclave_Link_Flags := -Wl,--no-undefined -nostdlib -nodefaultlibs -nostartfiles -L$(SGX_LIBRARY_PATH) \
-Wl,--whole-archive -l$(Trts_Library_Name) -Wl,--no-whole-archive \
-Wl,--start-group -lsgx_tstdc -l$(Service_Library_Name) -l$(Crypto_Library_Name) $(RustEnclave_Link_Libs) -Wl,--end-group \
-Wl,--version-script=enclave/Enclave.lds \
$(ENCLAVE_LDFLAGS)
RustEnclave_Name := enclave/enclave.so
Signed_RustEnclave_Name := bin/enclave.signed.so
.PHONY: all
all: $(App_Name) $(Signed_RustEnclave_Name)
######## EDL Objects ########
$(Enclave_EDL_Files): $(SGX_EDGER8R) enclave/Enclave.edl
$(SGX_EDGER8R) --trusted enclave/Enclave.edl --search-path $(OCALLS_DIR) --search-path $(SGX_SDK)/include --search-path $(CUSTOM_EDL_PATH) --trusted-dir enclave
$(SGX_EDGER8R) --untrusted enclave/Enclave.edl --search-path $(OCALLS_DIR) --search-path $(SGX_SDK)/include --search-path $(CUSTOM_EDL_PATH) --untrusted-dir app
@echo "GEN => $(Enclave_EDL_Files)"
######## App Objects ########
app/Enclave_u.o: $(Enclave_EDL_Files)
@$(CC) $(App_C_Flags) -c app/Enclave_u.c -o $@
@echo "CC <= $<"
$(App_Enclave_u_Object): app/Enclave_u.o
$(AR) rcsD $@ $^
$(App_Ocall_Object):
@$(MAKE) -C $(OCALLS_DIR)
@cp $(OCALLS_DIR)/$(App_Ocall_Object_Name) $@
$(App_Name): $(App_Ocall_Object) $(App_Enclave_u_Object) $(App_SRC_Files)
@cd app && SGX_SDK=$(SGX_SDK) cargo build $(App_Rust_Flags)
@echo "Cargo => $@"
mkdir -p bin
cp $(App_Rust_Path)/app ./bin
######## Enclave Objects ########
enclave/Enclave_t.o: $(Enclave_EDL_Files)
@$(CC) $(RustEnclave_Compile_Flags) -c enclave/Enclave_t.c -o $@
@echo "CC <= $<"
$(RustEnclave_Name): enclave enclave/Enclave_t.o
@$(CXX) enclave/Enclave_t.o -o $@ $(RustEnclave_Link_Flags)
@echo "LINK => $@"
$(Signed_RustEnclave_Name): $(RustEnclave_Name)
mkdir -p bin
@$(SGX_ENCLAVE_SIGNER) sign -key enclave/Enclave_private.pem -enclave $(RustEnclave_Name) -out $@ -config enclave/Enclave.config.xml
@echo "SIGN => $@"
.PHONY: enclave
enclave:
$(MAKE) -C ./enclave/
.PHONY: clean
clean:
@rm -f $(App_Name) $(RustEnclave_Name) $(Signed_RustEnclave_Name) enclave/*_t.* app/*_u.* lib/*.a
@cd enclave && cargo clean && rm -f Cargo.lock
@cd app && cargo clean && rm -f Cargo.lock

@ -0,0 +1,11 @@
## example for SGX
This is an example of using vdso-time in SGX.
This example combines the vdso-time usage example from io_uring with the hello-rust example from incubator-teaclave-sgx-sdk.
- ./app : untrusted code
- ./bin : executable program
- ./enclave : trusted code
- ./lib : library
### run example in SGX
1. ```make```
2. ```cd bin && ./app```

@ -0,0 +1,10 @@
[package]
name = "app"
version = "1.0.0"
build = "build.rs"
[dependencies]
sgx_types = { path = "../../../../../../../deps/rust-sgx-sdk/sgx_types" }
sgx_urts = { path = "../../../../../../../deps/rust-sgx-sdk/sgx_urts" }
[workspace]

@ -0,0 +1,34 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License..
use std::env;
/// Cargo build script for the untrusted SGX app: emits the linker search
/// paths and libraries (generated EDL stubs, ocall library, and the uRTS).
fn main() {
    // SGX SDK install prefix; fall back to the default install location.
    let sdk_dir = env::var("SGX_SDK").unwrap_or_else(|_| "/opt/sgxsdk".to_string());
    // SGX_MODE selects simulation ("SW") vs. hardware; unset means HW.
    let is_sim = env::var("SGX_MODE").unwrap_or_else(|_| "HW".to_string());

    println!("cargo:rustc-link-search=native=../lib");
    println!("cargo:rustc-link-lib=static=Enclave_u");
    println!("cargo:rustc-link-lib=static=vdso_time_ocalls");
    println!("cargo:rustc-link-search=native={}/lib64", sdk_dir);

    let urts_lib = if is_sim.as_str() == "SW" {
        "sgx_urts_sim"
    } else {
        // Treat undefined as HW
        "sgx_urts"
    };
    println!("cargo:rustc-link-lib=dylib={}", urts_lib);
}

@ -0,0 +1 @@
nightly-2020-10-25

@ -0,0 +1,78 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License..
extern crate sgx_types;
extern crate sgx_urts;
use sgx_types::*;
use sgx_urts::SgxEnclave;
static ENCLAVE_FILE: &'static str = "enclave.signed.so";
extern "C" {
fn run_sgx_example(eid: sgx_enclave_id_t, retval: *mut sgx_status_t) -> sgx_status_t;
}
/// Creates and initializes the SGX enclave from `enclave.signed.so`.
///
/// A zeroed launch token is passed in because the API still requires one;
/// any update reported through `launch_token_updated` is ignored here.
fn init_enclave() -> SgxResult<SgxEnclave> {
    let mut launch_token: sgx_launch_token_t = [0; 1024];
    let mut launch_token_updated: i32 = 0;
    // call sgx_create_enclave to initialize an enclave instance
    // Debug Support: set 2nd parameter to 1
    let debug = 1;
    let mut misc_attr = sgx_misc_attribute_t {
        secs_attr: sgx_attributes_t { flags: 0, xfrm: 0 },
        misc_select: 0,
    };
    SgxEnclave::create(
        ENCLAVE_FILE,
        debug,
        &mut launch_token,
        &mut launch_token_updated,
        &mut misc_attr,
    )
}
/// Untrusted entry point: creates the enclave, performs the
/// `run_sgx_example` ECALL, and checks both the ECALL transport status and
/// the status produced inside the enclave before destroying it.
///
/// NOTE(review): the early-return error paths skip `enclave.destroy()`;
/// presumably acceptable for a sample that exits immediately — confirm.
fn main() {
    let enclave = match init_enclave() {
        Ok(r) => {
            println!("[+] Init Enclave Successful {}!", r.geteid());
            r
        }
        Err(x) => {
            println!("[-] Init Enclave Failed {}!", x.as_str());
            return;
        }
    };

    // `retval` receives the status computed inside the enclave; `result`
    // reports whether the ECALL itself was delivered successfully.
    let mut retval = sgx_status_t::SGX_SUCCESS;
    let result = unsafe { run_sgx_example(enclave.geteid(), &mut retval) };
    match result {
        sgx_status_t::SGX_SUCCESS => {}
        _ => {
            println!("[-] ECALL Enclave Failed {}!", result.as_str());
            return;
        }
    }
    match retval {
        sgx_status_t::SGX_SUCCESS => {}
        _ => {
            println!("[-] ECALL Returned Error {}!", retval.as_str());
            return;
        }
    }
    println!("[+] run_sgx_example success...");
    enclave.destroy();
}

@ -0,0 +1 @@
bin

@ -0,0 +1,22 @@
[package]
name = "Helloworldsampleenclave"
version = "1.0.0"
[lib]
name = "helloworldsampleenclave"
crate-type = ["staticlib"]
[features]
default = []
[dependencies]
vdso-time = { path = "../../../../vdso-time", default-features = false, features = ["sgx"] }
lazy_static = { version = "1.4.0", features = ["spin_no_std"] }
[target.'cfg(not(target_env = "sgx"))'.dependencies]
sgx_types = { path = "../../../../../../../deps/rust-sgx-sdk/sgx_types" }
sgx_tstd = { path = "../../../../../../../deps/rust-sgx-sdk/sgx_tstd", features = ["backtrace", "thread"] }
sgx_trts = { path = "../../../../../../../deps/rust-sgx-sdk/sgx_trts" }
sgx_libc = { path = "../../../../../../../deps/rust-sgx-sdk/sgx_libc" }
[workspace]

@ -0,0 +1,13 @@
<!-- Please refer to User's Guide for the explanation of each field -->
<EnclaveConfiguration>
<ProdID>0</ProdID>
<ISVSVN>0</ISVSVN>
<StackMaxSize>0x40000</StackMaxSize>
<HeapMaxSize>0x400000</HeapMaxSize>
<TCSNum>1</TCSNum>
<TCSMaxNum>1</TCSMaxNum>
<TCSPolicy>0</TCSPolicy>
<DisableDebug>0</DisableDebug>
<MiscSelect>0</MiscSelect>
<MiscMask>0xFFFFFFFF</MiscMask>
</EnclaveConfiguration>

@ -0,0 +1,36 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
enclave {
from "sgx_tstd.edl" import *;
from "sgx_stdio.edl" import *;
from "sgx_backtrace.edl" import *;
from "sgx_tstdc.edl" import *;
from "sgx_net.edl" import *;
from "sgx_thread.edl" import *;
from "sgx_vdso_time_ocalls.edl" import *;
trusted {
/* define ECALLs here. */
public sgx_status_t run_sgx_example();
};
untrusted {
/* define OCALLs here. */
};
};

@ -0,0 +1,9 @@
enclave.so
{
global:
g_global_data_sim;
g_global_data;
enclave_entry;
local:
*;
};

@ -0,0 +1,39 @@
-----BEGIN RSA PRIVATE KEY-----
MIIG4gIBAAKCAYEAroOogvsj/fZDZY8XFdkl6dJmky0lRvnWMmpeH41Bla6U1qLZ
AmZuyIF+mQC/cgojIsrBMzBxb1kKqzATF4+XwPwgKz7fmiddmHyYz2WDJfAjIveJ
ZjdMjM4+EytGlkkJ52T8V8ds0/L2qKexJ+NBLxkeQLfV8n1mIk7zX7jguwbCG1Pr
nEMdJ3Sew20vnje+RsngAzdPChoJpVsWi/K7cettX/tbnre1DL02GXc5qJoQYk7b
3zkmhz31TgFrd9VVtmUGyFXAysuSAb3EN+5VnHGr0xKkeg8utErea2FNtNIgua8H
ONfm9Eiyaav1SVKzPHlyqLtcdxH3I8Wg7yqMsaprZ1n5A1v/levxnL8+It02KseD
5HqV4rf/cImSlCt3lpRg8U5E1pyFQ2IVEC/XTDMiI3c+AR+w2jSRB3Bwn9zJtFlW
KHG3m1xGI4ck+Lci1JvWWLXQagQSPtZTsubxTQNx1gsgZhgv1JHVZMdbVlAbbRMC
1nSuJNl7KPAS/VfzAgEDAoIBgHRXxaynbVP5gkO0ug6Qw/E27wzIw4SmjsxG6Wpe
K7kfDeRskKxESdsA/xCrKkwGwhcx1iIgS5+Qscd1Yg+1D9X9asd/P7waPmWoZd+Z
AhlKwhdPsO7PiF3e1AzHhGQwsUTt/Y/aSI1MpHBvy2/s1h9mFCslOUxTmWw0oj/Q
ldIEgWeNR72CE2+jFIJIyml6ftnb6qzPiga8Bm48ubKh0kvySOqnkmnPzgh+JBD6
JnBmtZbfPT97bwTT+N6rnPqOOApvfHPf15kWI8yDbprG1l4OCUaIUH1AszxLd826
5IPM+8gINLRDP1MA6azECPjTyHXhtnSIBZCyWSVkc05vYmNXYUNiXWMajcxW9M02
wKzFELO8NCEAkaTPxwo4SCyIjUxiK1LbQ9h8PSy4c1+gGP4LAMR8xqP4QKg6zdu9
osUGG/xRe/uufgTBFkcjqBHtK5L5VI0jeNIUAgW/6iNbYXjBMJ0GfauLs+g1VsOm
WfdgXzsb9DYdMa0OXXHypmV4GwKBwQDUwQj8RKJ6c8cT4vcWCoJvJF00+RFL+P3i
Gx2DLERxRrDa8AVGfqaCjsR+3vLgG8V/py+z+dxZYSqeB80Qeo6PDITcRKoeAYh9
xlT3LJOS+k1cJcEmlbbO2IjLkTmzSwa80fWexKu8/Xv6vv15gpqYl1ngYoqJM3pd
vzmTIOi7MKSZ0WmEQavrZj8zK4endE3v0eAEeQ55j1GImbypSf7Idh7wOXtjZ7WD
Dg6yWDrri+AP/L3gClMj8wsAxMV4ZR8CgcEA0fzDHkFa6raVOxWnObmRoDhAtE0a
cjUj976NM5yyfdf2MrKy4/RhdTiPZ6b08/lBC/+xRfV3xKVGzacm6QjqjZrUpgHC
0LKiZaMtccCJjLtPwQd0jGQEnKfMFaPsnhOc5y8qVkCzVOSthY5qhz0XNotHHFmJ
gffVgB0iqrMTvSL7IA2yqqpOqNRlhaYhNl8TiFP3gIeMtVa9rZy31JPgT2uJ+kfo
gV7sdTPEjPWZd7OshGxWpT6QfVDj/T9T7L6tAoHBAI3WBf2DFvxNL2KXT2QHAZ9t
k3imC4f7U+wSE6zILaDZyzygA4RUbwG0gv8/TJVn2P/Eynf76DuWHGlaiLWnCbSz
Az2DHBQBBaku409zDQym3j1ugMRjzzSQWzJg0SIyBH3hTmnYcn3+Uqcp/lEBvGW6
O+rsXFt3pukqJmIV8HzLGGaLm62BHUeZf3dyWm+i3p/hQAL7Xvu04QW70xuGqdr5
afV7p5eaeQIJXyGQJ0eylV/90+qxjMKiB1XYg6WYvwKBwQCL/ddpgOdHJGN8uRom
e7Zq0Csi3hGheMKlKbN3vcxT5U7MdyHtTZZOJbTvxKNNUNYH/8uD+PqDGNneb29G
BfGzvI3EASyLIcGZF3OhKwZd0jUrWk2y7Vhob91jwp2+t73vdMbkKyI4mHOuXvGv
fg95si9oO7EBT+Oqvhccd2J+F1IVXncccYnF4u5ZGWt5lLewN/pVr7MjjykeaHqN
t+rfnQam2psA6fL4zS2zTmZPzR2tnY8Y1GBTi0Ko1OKd1HMCgcAb5cB/7/AQlhP9
yQa04PLH9ygQkKKptZp7dy5WcWRx0K/hAHRoi2aw1wZqfm7VBNu2SLcs90kCCCxp
6C5sfJi6b8NpNbIPC+sc9wsFr7pGo9SFzQ78UlcWYK2Gu2FxlMjonhka5hvo4zvg
WxlpXKEkaFt3gLd92m/dMqBrHfafH7VwOJY2zT3WIpjwuk0ZzmRg5p0pG/svVQEH
NZmwRwlopysbR69B/n1nefJ84UO50fLh5s5Zr3gBRwbWNZyzhXk=
-----END RSA PRIVATE KEY-----

@ -0,0 +1,38 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Name of the static library produced for the enclave build.
Rust_Enclave_Name := libenclave.a
# All Rust sources; any change triggers a rebuild of the enclave library.
Rust_Enclave_Files := $(wildcard src/*.rs)
# Custom target specs (x86_64-unknown-linux-sgx) shipped with the Rust SGX SDK.
Rust_Target_Path := $(CURDIR)/../../../../../../deps/rust-sgx-sdk/xargo

# Propagate the CVE-2020-0551 (LVI) mitigation mode into the cargo/xargo build.
ifeq ($(MITIGATION-CVE-2020-0551), LOAD)
export MITIGATION_CVE_2020_0551=LOAD
else ifeq ($(MITIGATION-CVE-2020-0551), CF)
export MITIGATION_CVE_2020_0551=CF
endif

.PHONY: all
all: $(Rust_Enclave_Name)

# Build with xargo (custom SGX target) when XARGO_SGX=1, otherwise plain cargo.
# Recipe lines are tab-indented as make requires.
$(Rust_Enclave_Name): $(Rust_Enclave_Files)
ifeq ($(XARGO_SGX), 1)
	RUST_TARGET_PATH=$(Rust_Target_Path) xargo build --target x86_64-unknown-linux-sgx --release
	cp ./target/x86_64-unknown-linux-sgx/release/libhelloworldsampleenclave.a ../lib/libenclave.a
else
	cargo build --release
	cp ./target/release/libhelloworldsampleenclave.a ../lib/libenclave.a
endif

@ -0,0 +1 @@
nightly-2020-10-25

@ -0,0 +1,49 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License..
#![crate_name = "helloworldsampleenclave"]
#![crate_type = "staticlib"]
#![cfg_attr(not(target_env = "sgx"), no_std)]
#![cfg_attr(target_env = "sgx", feature(rustc_private))]
extern crate sgx_trts;
extern crate sgx_types;
#[cfg(not(target_env = "sgx"))]
#[macro_use]
extern crate sgx_tstd as std;
extern crate sgx_libc as libc;
extern crate vdso_time;
extern crate lazy_static;
use sgx_types::*;
use std::prelude::v1::*;
include!("../../../common/example.rs");
include!("../../../common/bench.rs");
/// ECALL entry point: runs the shared vdso-time example and benchmarks
/// (brought in via the `include!`s above) inside the enclave, then reports
/// success to the untrusted caller.
#[no_mangle]
pub extern "C" fn run_sgx_example() -> sgx_status_t {
    // Uncomment to get full backtraces out of the signed enclave binary.
    // std::backtrace::enable_backtrace("enclave.signed.so", std::backtrace::PrintFormat::Full);
    println!("[ECALL] run_sgx_example");
    example();
    vdso_benchmarks();
    sgx_status_t::SGX_SUCCESS
}

@ -0,0 +1,31 @@
{
"arch": "x86_64",
"cpu": "x86-64",
"data-layout": "e-m:e-i64:64-f80:128-n8:16:32:64-S128",
"dynamic-linking": true,
"env": "sgx",
"exe-allocation-crate": "alloc_system",
"executables": true,
"has-elf-tls": true,
"has-rpath": true,
"linker-flavor": "gcc",
"linker-is-gnu": true,
"llvm-target": "x86_64-unknown-linux-gnu",
"max-atomic-width": 64,
"os": "linux",
"position-independent-executables": true,
"pre-link-args": {
"gcc": [
"-Wl,--as-needed",
"-Wl,-z,noexecstack",
"-m64"
]
},
"relro-level": "full",
"stack-probes": true,
"target-c-int-width": "32",
"target-endian": "little",
"target-family": "unix",
"target-pointer-width": "64",
"vendor": "mesalock"
}

@ -0,0 +1 @@
lib

@ -0,0 +1,3 @@
*.o
*.a
*.so

@ -0,0 +1,12 @@
# Build the untrusted OCALL helper library for vdso-time.
# Declare the non-file targets phony so stray files named "all"/"libs"/"clean"
# cannot shadow them.
.PHONY: all libs clean

all: libs

libs: libvdso_time_ocalls.a

# Archive the OCALL object into a static library.
libvdso_time_ocalls.a: vdso_time_ocalls.o
	ar rcs $@ $^

# Note: the object name (underscores) intentionally differs from the C source
# file name (dashes).
vdso_time_ocalls.o: vdso-time-ocalls.c
	gcc -O3 -c -o $@ $<

clean:
	rm -f *.o *.a

@ -0,0 +1,14 @@
enclave {
include "time.h"
untrusted {
// Fetches the host's vDSO base address (from AT_SYSINFO_EHDR; 0 when the
// auxv entry is absent) and the kernel release string, NUL-terminated and
// truncated to release_len bytes. Returns 0 on success, -1 on failure.
int vdso_ocall_get_vdso_info(
[out] unsigned long* vdso_addr,
[out, size = release_len] char* release,
int release_len
);
// Host-syscall fallbacks used when the vDSO path is unavailable or fails.
// Both return the raw libc return value (0 on success).
int vdso_ocall_clock_gettime(int clockid, [out] struct timespec* ts);
int vdso_ocall_clock_getres(int clockid, [out] struct timespec* res);
};
};

@ -0,0 +1,30 @@
#include <sys/auxv.h>
#include <sys/utsname.h>
#include <time.h>
#include <string.h>
// Fetch the host's vDSO base address and kernel release string.
//
// *vdso_addr receives the value of AT_SYSINFO_EHDR (0 when the auxv entry is
// absent). `release` receives the kernel release (e.g. "5.9.6-050906-generic"),
// always NUL-terminated, truncated to release_len bytes.
// Returns 0 on success, -1 on bad arguments or uname() failure.
int vdso_ocall_get_vdso_info(
    unsigned long *vdso_addr,
    char *release,
    int release_len) {
    // Guard against NULL buffers and non-positive lengths: with
    // release_len <= 0 the terminating write below would land at
    // release[-1] (out of bounds).
    if (vdso_addr == NULL || release == NULL || release_len <= 0) {
        return -1;
    }
    // If AT_SYSINFO_EHDR isn't found, getauxval will return 0.
    *vdso_addr = getauxval(AT_SYSINFO_EHDR);

    struct utsname buf;
    // uname should always succeed here, since uname only fails when buf is invalid.
    if (uname(&buf) != 0) {
        return -1;
    }
    // strncpy does not NUL-terminate on truncation; terminate explicitly.
    strncpy(release, buf.release, release_len);
    release[release_len - 1] = '\0';
    return 0;
}
int vdso_ocall_clock_gettime(int clockid, struct timespec *tp) {
return clock_gettime(clockid, tp);
}
int vdso_ocall_clock_getres(int clockid, struct timespec *res) {
return clock_getres(clockid, res);
}

@ -0,0 +1,616 @@
#![cfg_attr(feature = "sgx", no_std)]
#[cfg(feature = "sgx")]
extern crate sgx_types;
#[cfg(feature = "sgx")]
#[macro_use]
extern crate sgx_tstd as std;
#[cfg(feature = "sgx")]
extern crate sgx_libc as libc;
#[cfg(feature = "sgx")]
extern crate sgx_trts;
mod sys;
use errno::prelude::*;
use lazy_static::lazy_static;
use log::trace;
use std::convert::TryFrom;
use std::time::Duration;
use std::{hint, str};
use sys::*;
/// Nanoseconds per second.
pub const NANOS_PER_SEC: u32 = 1_000_000_000;
/// Nanoseconds per millisecond.
pub const NANOS_PER_MILLI: u32 = 1_000_000;
/// Nanoseconds per microsecond.
pub const NANOS_PER_MICRO: u32 = 1_000;
/// Milliseconds per second.
pub const MILLIS_PER_SEC: u64 = 1_000;
/// Microseconds per second.
pub const MICROS_PER_SEC: u64 = 1_000_000;
/// Clocks supported by the linux kernel, corresponding to clockid_t in Linux.
/// Discriminant values match the kernel's clockid_t numbering exactly; they
/// are used directly as indices into the vdso data (see `VdsoData::sec/nsec`).
#[derive(Debug, Eq, PartialEq, Copy, Clone)]
#[allow(non_camel_case_types)]
pub enum ClockId {
    CLOCK_REALTIME = 0,
    CLOCK_MONOTONIC = 1,
    // vDSO doesn't support CLOCK_PROCESS_CPUTIME_ID.
    CLOCK_PROCESS_CPUTIME_ID = 2,
    // vDSO doesn't support CLOCK_THREAD_CPUTIME_ID.
    CLOCK_THREAD_CPUTIME_ID = 3,
    CLOCK_MONOTONIC_RAW = 4,
    CLOCK_REALTIME_COARSE = 5,
    CLOCK_MONOTONIC_COARSE = 6,
    CLOCK_BOOTTIME = 7,
}
impl TryFrom<i32> for ClockId {
type Error = Error;
fn try_from(clockid: i32) -> Result<Self> {
Ok(match clockid {
0 => ClockId::CLOCK_REALTIME,
1 => ClockId::CLOCK_MONOTONIC,
2 => ClockId::CLOCK_PROCESS_CPUTIME_ID,
3 => ClockId::CLOCK_THREAD_CPUTIME_ID,
4 => ClockId::CLOCK_MONOTONIC_RAW,
5 => ClockId::CLOCK_REALTIME_COARSE,
6 => ClockId::CLOCK_MONOTONIC_COARSE,
7 => ClockId::CLOCK_BOOTTIME,
_ => return_errno!(EINVAL, "Unsupported clockid"),
})
}
}
/// An abstraction of Linux vDSO provides the clock and time interface through Linux vDSO.
pub struct Vdso {
    // Version-tagged pointer into the kernel's vdso data page(s).
    vdso_data_ptr: VdsoDataPtr,
    // hres resolution for clock_getres, cached at construction time.
    hres_resolution: Option<Duration>,
    // coarse resolution for clock_getres, cached at construction time.
    coarse_resolution: Option<Duration>,
}
impl Vdso {
    /// Try to create a new Vdso by libc or SGX OCALL.
    ///
    /// # Examples
    ///
    /// ```
    /// use vdso_time::Vdso;
    /// let vdso = Vdso::new().unwrap();
    /// ```
    pub fn new() -> Result<Self> {
        let vdso_data_ptr = Self::get_vdso_data_ptr_from_host()?;
        // Cache both resolutions once via the slow (syscall/OCALL) path;
        // clock_getres() later answers from these cached values.
        let hres_resolution = clock_getres_slow(ClockId::CLOCK_MONOTONIC).ok();
        let coarse_resolution = clock_getres_slow(ClockId::CLOCK_MONOTONIC_COARSE).ok();
        let vdso = Self {
            vdso_data_ptr,
            hres_resolution,
            coarse_resolution,
        };
        vdso.check_accuracy()?;
        Ok(vdso)
    }

    // SGX build: ask the untrusted host for the vDSO base address and the
    // kernel release string via an OCALL, then pick the matching layout.
    #[cfg(feature = "sgx")]
    fn get_vdso_data_ptr_from_host() -> Result<VdsoDataPtr> {
        extern "C" {
            fn vdso_ocall_get_vdso_info(
                ret: *mut libc::c_int,
                vdso_addr: *mut libc::c_ulong,
                release: *mut libc::c_char,
                release_len: libc::c_int,
            ) -> sgx_types::sgx_status_t;
        }

        let mut vdso_addr: libc::c_ulong = 0;
        // 65 bytes matches Linux utsname.release (64 chars + NUL).
        let mut release = [0 as libc::c_char; 65];
        let mut ret: libc::c_int = 0;
        unsafe {
            vdso_ocall_get_vdso_info(
                &mut ret as *mut _,
                &mut vdso_addr as *mut _,
                release.as_mut_ptr(),
                release.len() as _,
            );
        }
        if ret != 0 {
            return_errno!(EINVAL, "Vdso vdso_ocall_get_vdso_info() failed")
        }
        Self::match_kernel_version(vdso_addr, &release)
    }

    // Non-SGX build: read AT_SYSINFO_EHDR from the auxiliary vector and the
    // kernel release from uname() directly.
    #[cfg(not(feature = "sgx"))]
    fn get_vdso_data_ptr_from_host() -> Result<VdsoDataPtr> {
        const AT_SYSINFO_EHDR: u64 = 33;
        let vdso_addr = unsafe { libc::getauxval(AT_SYSINFO_EHDR) };

        let mut utsname: libc::utsname = unsafe { std::mem::zeroed() };
        let ret = unsafe { libc::uname(&mut utsname as *mut _) };
        if ret != 0 {
            return_errno!(EINVAL, "Vdso get utsname failed");
        }
        let release = utsname.release;

        Self::match_kernel_version(vdso_addr, &release)
    }

    // Sanity-check the vDSO base address: it must be non-zero, leave room for
    // the vdso data pages that sit just below it, and (under SGX) those pages
    // must lie outside the enclave.
    fn check_vdso_addr(vdso_addr: &u64) -> Result<()> {
        let vdso_addr = *vdso_addr;
        if vdso_addr == 0 {
            return_errno!(EFAULT, "Vdso vdso_addr is 0")
        }
        // The data pages precede the vDSO code; 4 pages covers every layout
        // handled below (the largest offset used is 4 * PAGE_SIZE).
        const VDSO_DATA_MAX_SIZE: u64 = 4 * PAGE_SIZE;
        if vdso_addr < VDSO_DATA_MAX_SIZE {
            return_errno!(EFAULT, "Vdso vdso_addr is less than vdso data size");
        }
        #[cfg(feature = "sgx")]
        if !sgx_trts::trts::rsgx_raw_is_outside_enclave(
            (vdso_addr - VDSO_DATA_MAX_SIZE) as *const u8,
            VDSO_DATA_MAX_SIZE as _,
        ) {
            return_errno!(EFAULT, "Vdso vdso_addr we got is not outside enclave")
        }
        Ok(())
    }

    // Parse "major.minor" out of the kernel release string and select the
    // vdso data layout that matches that kernel range.
    fn match_kernel_version(vdso_addr: u64, release: &[libc::c_char]) -> Result<VdsoDataPtr> {
        Self::check_vdso_addr(&vdso_addr)?;

        // release, e.g., "5.9.6-050906-generic"
        let release = unsafe { &*(release as *const [i8] as *const [u8]) };
        let release = str::from_utf8(release);
        if release.is_err() {
            return_errno!(EINVAL, "Vdso get kernel release failed")
        }
        let mut release = release.unwrap().split(&['-', '.', ' '][..]);
        let version_big: u8 = release
            .next()
            .ok_or(errno!(EINVAL, "Vdso get kernel big version failed"))?
            .parse()?;
        let version_little: u8 = release
            .next()
            .ok_or(errno!(EINVAL, "Vdso get kernel little version failed"))?
            .parse()?;
        Ok(match (version_big, version_little) {
            (4, 0..=4) | (4, 7..=11) => VdsoDataPtr::V4_0(vdso_data_v4_0::vdsodata_ptr(vdso_addr)),
            (4, 5..=6) | (4, 12..=19) => VdsoDataPtr::V4_5(vdso_data_v4_5::vdsodata_ptr(vdso_addr)),
            (5, 0..=2) => VdsoDataPtr::V5_0(vdso_data_v5_0::vdsodata_ptr(vdso_addr)),
            (5, 3..=5) => VdsoDataPtr::V5_3(vdso_data_v5_3::vdsodata_ptr(vdso_addr)),
            (5, 6..=8) => VdsoDataPtr::V5_6(vdso_data_v5_6::vdsodata_ptr(vdso_addr)),
            (5, 9..=19) | (6, 0..=2) => VdsoDataPtr::V5_9(vdso_data_v5_9::vdsodata_ptr(vdso_addr)),
            (_, _) => return_errno!(EINVAL, "Vdso match kernel release failed"),
        })
    }

    /// Compare the results of Linux syscall and vdso to check whether vdso can support the clockid correctly.
    fn check_accuracy(&self) -> Result<()> {
        let vdso_supported_clockids = [
            ClockId::CLOCK_REALTIME,
            ClockId::CLOCK_MONOTONIC,
            ClockId::CLOCK_MONOTONIC_RAW,
            ClockId::CLOCK_REALTIME_COARSE,
            ClockId::CLOCK_MONOTONIC_COARSE,
            ClockId::CLOCK_BOOTTIME,
        ];
        const MAX_INACCURACY: Duration = Duration::from_millis(1);
        const MAX_RETRY_NUM: u32 = 3;

        for &clockid in vdso_supported_clockids.iter() {
            for retry_num in 0..MAX_RETRY_NUM {
                // If either path fails for this clockid, skip it rather than
                // failing construction: clock_gettime() falls back at runtime.
                let time = match self.do_clock_gettime(clockid) {
                    Ok(time) => time,
                    Err(_) => break,
                };
                let host_time = match clock_gettime_slow(clockid) {
                    Ok(host_time) => host_time,
                    Err(_) => break,
                };

                // The vdso reading was taken first, so the host reading must
                // not be earlier; a None here means vdso time ran ahead.
                let estimated_inaccuracy = match host_time.checked_sub(time) {
                    Some(diff) => diff,
                    None => return_errno!(EOPNOTSUPP, "Vdso can not provide valid time"),
                };
                if estimated_inaccuracy > MAX_INACCURACY {
                    if retry_num == MAX_RETRY_NUM - 1 {
                        return_errno!(EOPNOTSUPP, "Vdso reached max retry number");
                    }
                    continue;
                }
                break;
            }
        }
        Ok(())
    }

    /// Try to get time according to ClockId.
    /// Firstly try to get time through vDSO, if failed, then try fallback.
    ///
    /// # Examples
    ///
    /// ```
    /// use vdso_time::{Vdso, ClockId};
    /// let vdso = Vdso::new().unwrap();
    /// let time = vdso.clock_gettime(ClockId::CLOCK_MONOTONIC).unwrap();
    /// println!("{:?}", time);
    /// ```
    pub fn clock_gettime(&self, clockid: ClockId) -> Result<Duration> {
        self.do_clock_gettime(clockid)
            .or_else(|_| clock_gettime_slow(clockid))
    }

    /// Try to get time resolution according to ClockId.
    /// Firstly try to return resolution inside self, if failed, then try fallback.
    ///
    /// # Examples
    ///
    /// ```
    /// use vdso_time::{Vdso, ClockId};
    /// let vdso = Vdso::new().unwrap();
    /// let res = vdso.clock_getres(ClockId::CLOCK_MONOTONIC).unwrap();
    /// println!("{:?}", res);
    /// ```
    pub fn clock_getres(&self, clockid: ClockId) -> Result<Duration> {
        self.do_clock_getres(clockid)
            .or_else(|_| clock_getres_slow(clockid))
    }

    // Dispatch a clockid to the vdso read strategy the kernel uses for it:
    // high-resolution (TSC-based) vs coarse (tick-based) reads.
    fn do_clock_gettime(&self, clockid: ClockId) -> Result<Duration> {
        match clockid {
            ClockId::CLOCK_REALTIME | ClockId::CLOCK_MONOTONIC | ClockId::CLOCK_BOOTTIME => {
                self.do_hres(ClockSource::CS_HRES_COARSE, clockid)
            }
            ClockId::CLOCK_MONOTONIC_RAW => self.do_hres(ClockSource::CS_RAW, clockid),
            ClockId::CLOCK_REALTIME_COARSE | ClockId::CLOCK_MONOTONIC_COARSE => {
                self.do_coarse(ClockSource::CS_HRES_COARSE, clockid)
            }
            // TODO: support CLOCK_PROCESS_CPUTIME_ID and CLOCK_THREAD_CPUTIME_ID.
            _ => return_errno!(EINVAL, "Unsupported clockid in do_clock_gettime()"),
        }
    }

    // Serve resolution queries from the values cached in new().
    fn do_clock_getres(&self, clockid: ClockId) -> Result<Duration> {
        match clockid {
            ClockId::CLOCK_REALTIME
            | ClockId::CLOCK_MONOTONIC
            | ClockId::CLOCK_BOOTTIME
            | ClockId::CLOCK_MONOTONIC_RAW => self
                .hres_resolution
                .ok_or(errno!(EOPNOTSUPP, "hres_resolution is none")),
            ClockId::CLOCK_REALTIME_COARSE | ClockId::CLOCK_MONOTONIC_COARSE => self
                .coarse_resolution
                .ok_or(errno!(EOPNOTSUPP, "coarse_resolution is none")),
            // TODO: support CLOCK_PROCESS_CPUTIME_ID and CLOCK_THREAD_CPUTIME_ID.
            _ => return_errno!(EINVAL, "Unsupported clockid in do_clock_getres()"),
        }
    }

    // Resolve the version-tagged pointer to a VdsoData trait object.
    // Since v5.3 the kernel keeps an array of vdso_data (HRES_COARSE, RAW),
    // hence the `add(cs as _)`; older layouts have a single instance.
    fn vdso_data(&self, cs: ClockSource) -> &'static dyn VdsoData {
        match self.vdso_data_ptr {
            VdsoDataPtr::V4_0(ptr) => unsafe { &*(ptr) },
            VdsoDataPtr::V4_5(ptr) => unsafe { &*(ptr) },
            VdsoDataPtr::V5_0(ptr) => unsafe { &*(ptr) },
            VdsoDataPtr::V5_3(ptr) => unsafe { &*(ptr.add(cs as _)) },
            VdsoDataPtr::V5_6(ptr) => unsafe { &*(ptr.add(cs as _)) },
            VdsoDataPtr::V5_9(ptr) => unsafe { &*(ptr.add(cs as _)) },
        }
    }

    // High-resolution read: seqlock-protected snapshot of the conversion
    // parameters plus a TSC read, following the kernel's own vdso algorithm.
    fn do_hres(&self, cs: ClockSource, clockid: ClockId) -> Result<Duration> {
        let vdso_data = self.vdso_data(cs);
        loop {
            let seq = vdso_data.seq();
            // if seq is odd, it might mean that a concurrent update is in progress.
            // Hence, we do some instructions to spin waiting for seq to become even again.
            if seq & 1 != 0 {
                hint::spin_loop();
                continue;
            }

            // Make sure that all prior load-from-memory instructions have completed locally,
            // and no later instruction begins execution until LFENCE completes.
            // We want to make sure the execution order as following:
            // seq -> [cycles, cycle_last, mult, shift, sec, secs] -> seq
            // This LFENCE can ensure that the first seq is before [cycles, cycle_last, mult, shift, sec, secs]
            lfence();

            // Get hardware counter according to vdso_data's clock_mode.
            let cycles = Self::get_hw_counter(vdso_data)?;
            let cycle_last = vdso_data.cycle_last();
            let mult = vdso_data.mult();
            let shift = vdso_data.shift();
            let secs = vdso_data.sec(clockid as _)?;
            let mut nanos = vdso_data.nsec(clockid as _)?;

            if !Self::vdso_read_retry(vdso_data, seq) {
                // On x86 arch, the TSC can be slightly off across sockets,
                // which might cause cycles < cycle_last. Since they are u64 type,
                // cycles - cycle_last will panic in this case.
                // Hence we need to verify that cycles is greater than cycle_last.
                // If not then just use cycle_last, which is the base time of the
                // current conversion period.
                // And the vdso mask is always u64_MAX on x86, we don't need use mask.
                if cycles > cycle_last {
                    nanos += (cycles - cycle_last) * mult as u64
                }
                // nsec is stored left-shifted by `shift`; scale it back down.
                nanos = nanos >> shift;
                return Ok(Duration::new(secs, nanos as u32));
            }
            // seq changed while we were reading: a concurrent update raced us,
            // so retry the whole snapshot.
        }
    }

    // Coarse read: seqlock-protected snapshot of the pre-computed tick time;
    // no TSC involved, so it is cheaper but only tick-granular.
    fn do_coarse(&self, cs: ClockSource, clockid: ClockId) -> Result<Duration> {
        let vdso_data = self.vdso_data(cs);
        loop {
            let seq = vdso_data.seq();
            // see comments in do_hres
            if seq & 1 != 0 {
                hint::spin_loop();
                continue;
            }

            // see comments in do_hres
            lfence();

            let secs = vdso_data.sec(clockid as _)?;
            let nanos = vdso_data.nsec(clockid as _)?;
            if !Self::vdso_read_retry(vdso_data, seq) {
                return Ok(Duration::new(secs, nanos as u32));
            }
        }
    }

    // Returns true when the sequence counter changed during the read,
    // i.e. the snapshot is torn and must be retried.
    fn vdso_read_retry(vdso_data: &dyn VdsoData, old_seq: u32) -> bool {
        // Make sure that all prior load-from-memory instructions have completed locally,
        // and no later instruction begins execution until LFENCE completes
        lfence();
        old_seq != vdso_data.seq()
    }

    // Read the hardware counter selected by the vdso's clock_mode.
    // Only TSC is implemented; paravirt/Hyper-V/timens modes are rejected.
    fn get_hw_counter(vdso_data: &dyn VdsoData) -> Result<u64> {
        let clock_mode = vdso_data.clock_mode();
        if clock_mode == VdsoClockMode::VDSO_CLOCKMODE_TSC as i32 {
            return Ok(rdtsc_ordered());
        } else if clock_mode == VdsoClockMode::VDSO_CLOCKMODE_PVCLOCK as i32 {
            // TODO: support pvclock
            return_errno!(
                EOPNOTSUPP,
                "VDSO_CLOCKMODE_PVCLOCK support is not implemented"
            );
        } else if clock_mode == VdsoClockMode::VDSO_CLOCKMODE_HVCLOCK as i32 {
            // TODO: support hvclock
            return_errno!(
                EOPNOTSUPP,
                "VDSO_CLOCKMODE_HVCLOCK support is not implemented"
            );
        } else if clock_mode == VdsoClockMode::VDSO_CLOCKMODE_TIMENS as i32 {
            // TODO: support timens
            return_errno!(
                EOPNOTSUPP,
                "VDSO_CLOCKMODE_TIMENS support is not implemented"
            );
        } else if clock_mode == VdsoClockMode::VDSO_CLOCKMODE_NONE as i32 {
            // In x86 Linux, the clock_mode will never be VDSO_CLOCKMODE_NONE.
            return_errno!(EINVAL, "The clock_mode must not be VDSO_CLOCKMODE_NONE");
        }
        return_errno!(EINVAL, "Unsupported clock_mode")
    }
}
// SAFETY: Vdso only holds raw pointers into the kernel's vdso data pages,
// which stay valid for the whole process lifetime and are written only by the
// kernel under the seqlock protocol handled in do_hres/do_coarse, so sharing
// or sending the handle across threads introduces no additional hazard.
unsafe impl Sync for Vdso {}
unsafe impl Send for Vdso {}
lazy_static! {
    // Process-wide singleton; None when the vdso is unusable on this host,
    // in which case the free functions below fall back to the slow path.
    static ref VDSO: Option<Vdso> = Vdso::new().ok();
}
/// Try to get time according to ClockId.
/// Firstly try to get time through vDSO, if failed, then try fallback.
///
/// # Examples
///
/// ```
/// use vdso_time::ClockId;
///
/// let time = vdso_time::clock_gettime(ClockId::CLOCK_MONOTONIC).unwrap();
/// println!("{:?}", time);
/// ```
pub fn clock_gettime(clockid: ClockId) -> Result<Duration> {
    match VDSO.as_ref() {
        Some(vdso) => vdso.clock_gettime(clockid),
        None => clock_gettime_slow(clockid),
    }
}
/// Try to get time resolution according to ClockId.
/// Firstly try to get time through vDSO, if failed, then try fallback.
///
/// # Examples
///
/// ```
/// use vdso_time::ClockId;
///
/// let time = vdso_time::clock_getres(ClockId::CLOCK_MONOTONIC).unwrap();
/// println!("{:?}", time);
/// ```
pub fn clock_getres(clockid: ClockId) -> Result<Duration> {
    match VDSO.as_ref() {
        Some(vdso) => vdso.clock_getres(clockid),
        None => clock_getres_slow(clockid),
    }
}
/// Slow-path clock read: performs the host syscall directly (non-SGX build)
/// or an OCALL out of the enclave (SGX build). Used as the fallback when the
/// vDSO fast path is unavailable or fails.
pub fn clock_gettime_slow(clockid: ClockId) -> Result<Duration> {
    let mut ts = libc::timespec {
        tv_sec: 0,
        tv_nsec: 0,
    };
    cfg_if::cfg_if! {
        if #[cfg(feature = "sgx")] {
            extern "C" {
                fn vdso_ocall_clock_gettime(
                    ret: *mut libc::c_int,
                    clockid: libc::c_int,
                    ts: *mut libc::timespec,
                ) -> sgx_types::sgx_status_t;
            }
            let mut ret: libc::c_int = 0;
            unsafe {
                vdso_ocall_clock_gettime(&mut ret as *mut _, clockid as _, &mut ts as *mut _);
            }
        } else {
            let ret = unsafe { libc::clock_gettime(clockid as _, &mut ts as *mut _) };
        }
    }
    // `ret` is bound inside whichever cfg_if branch was compiled in.
    if ret == 0 {
        Ok(Duration::new(ts.tv_sec as u64, ts.tv_nsec as u32))
    } else {
        return_errno!(EINVAL, "clock_gettime_slow failed")
    }
}
/// Slow-path resolution query: host syscall directly (non-SGX build) or an
/// OCALL out of the enclave (SGX build). Used at construction time to cache
/// resolutions, and as the runtime fallback for clock_getres().
pub fn clock_getres_slow(clockid: ClockId) -> Result<Duration> {
    let mut res = libc::timespec {
        tv_sec: 0,
        tv_nsec: 0,
    };
    cfg_if::cfg_if! {
        if #[cfg(feature = "sgx")] {
            extern "C" {
                fn vdso_ocall_clock_getres(
                    ret: *mut libc::c_int,
                    clockid: libc::c_int,
                    res: *mut libc::timespec,
                ) -> sgx_types::sgx_status_t;
            }
            let mut ret: libc::c_int = 0;
            unsafe {
                vdso_ocall_clock_getres(&mut ret as *mut _, clockid as _, &mut res as *mut _);
            }
        } else {
            let ret = unsafe { libc::clock_getres(clockid as _, &mut res as *mut _) };
        }
    }
    // `ret` is bound inside whichever cfg_if branch was compiled in.
    if ret == 0 {
        Ok(Duration::new(res.tv_sec as u64, res.tv_nsec as u32))
    } else {
        return_errno!(EINVAL, "clock_getres_slow failed")
    }
}
// All unit tests
#[cfg(test)]
#[allow(deprecated)]
mod tests {
    use super::*;
    use std::thread;

    const LOOPS: usize = 3;
    // Sleep between iterations, in milliseconds.
    const SLEEP_DURATION: u64 = 10;
    // Max tolerated |vdso - libc| gap for high-resolution clocks (50 us).
    const HRES_MAX_DIFF_NANOS: u64 = 50_000;
    // Max tolerated gap for coarse (tick-granular) clocks (4 ms).
    const COARSE_MAX_DIFF_NANOS: u64 = 4_000_000;

    #[test]
    fn test_clock_gettime() {
        test_single_clock_gettime(ClockId::CLOCK_REALTIME_COARSE, COARSE_MAX_DIFF_NANOS);
        test_single_clock_gettime(ClockId::CLOCK_MONOTONIC_COARSE, COARSE_MAX_DIFF_NANOS);
        test_single_clock_gettime(ClockId::CLOCK_REALTIME, HRES_MAX_DIFF_NANOS);
        test_single_clock_gettime(ClockId::CLOCK_MONOTONIC, HRES_MAX_DIFF_NANOS);
        test_single_clock_gettime(ClockId::CLOCK_BOOTTIME, HRES_MAX_DIFF_NANOS);
        test_single_clock_gettime(ClockId::CLOCK_MONOTONIC_RAW, HRES_MAX_DIFF_NANOS);
    }

    // Checks that a vdso reading agrees with a libc reading taken just before
    // it, to within `max_diff_nanos`.
    fn test_single_clock_gettime(clockid: ClockId, max_diff_nanos: u64) {
        for _ in 0..LOOPS {
            let mut libc_tp = libc::timespec {
                tv_sec: 0,
                tv_nsec: 0,
            };
            unsafe { libc::clock_gettime(clockid as _, &mut libc_tp as *mut _) };
            let libc_time = Duration::new(libc_tp.tv_sec as u64, libc_tp.tv_nsec as u32);

            let vdso_time = clock_gettime(clockid).unwrap();

            // Use an order-independent difference: `vdso_time - libc_time`
            // would panic on Duration underflow if the clock stepped backward
            // (e.g., a CLOCK_REALTIME adjustment) between the two reads,
            // instead of failing the assertion below.
            let diff = if vdso_time >= libc_time {
                vdso_time - libc_time
            } else {
                libc_time - vdso_time
            };
            assert!(diff <= Duration::from_nanos(max_diff_nanos));

            thread::sleep(Duration::from_millis(SLEEP_DURATION));
        }
    }

    #[test]
    fn test_clock_getres() {
        test_single_clock_getres(ClockId::CLOCK_REALTIME_COARSE);
        test_single_clock_getres(ClockId::CLOCK_MONOTONIC_COARSE);
        test_single_clock_getres(ClockId::CLOCK_REALTIME);
        test_single_clock_getres(ClockId::CLOCK_MONOTONIC);
        test_single_clock_getres(ClockId::CLOCK_BOOTTIME);
        test_single_clock_getres(ClockId::CLOCK_MONOTONIC_RAW);
    }

    // Resolution is a static property, so the vdso answer must match libc's
    // answer exactly.
    fn test_single_clock_getres(clockid: ClockId) {
        for _ in 0..LOOPS {
            let mut libc_tp = libc::timespec {
                tv_sec: 0,
                tv_nsec: 0,
            };
            unsafe { libc::clock_getres(clockid as _, &mut libc_tp as *mut _) };

            let res = clock_getres(clockid).unwrap();
            assert_eq!(res.as_secs(), libc_tp.tv_sec as u64);
            assert_eq!(res.subsec_nanos(), libc_tp.tv_nsec as u32);
        }
    }

    // CLOCK_MONOTONIC must never go backwards across repeated reads.
    #[test]
    fn test_monotonic() {
        let mut last_now = Duration::new(0, 0);
        for _ in 0..1_000_000 {
            let now = clock_gettime(ClockId::CLOCK_MONOTONIC).unwrap();
            assert!(now >= last_now);
            last_now = now;
        }
    }

    mod logger {
        use log::{Level, LevelFilter, Metadata, Record};

        // Install the logger before tests run so `trace!` output is visible.
        #[ctor::ctor]
        fn auto_init() {
            log::set_logger(&LOGGER)
                .map(|()| log::set_max_level(LevelFilter::Trace))
                .expect("failed to init the logger");
        }

        static LOGGER: SimpleLogger = SimpleLogger;

        struct SimpleLogger;

        impl log::Log for SimpleLogger {
            fn enabled(&self, metadata: &Metadata) -> bool {
                metadata.level() <= Level::Trace
            }

            fn log(&self, record: &Record) {
                if self.enabled(record.metadata()) {
                    println!("[{}] {}", record.level(), record.args());
                }
            }

            fn flush(&self) {}
        }
    }
}

@ -0,0 +1,595 @@
use super::*;
use std::sync::atomic::{AtomicU32, Ordering};
/// Page size assumed by the vdso data-page offset computations below.
pub const PAGE_SIZE: u64 = 4096;
/// clockid of CLOCK_TAI, the highest clock the kernel keeps a vdso base for.
pub const CLOCK_TAI: usize = 11;
/// Number of per-clock time bases in the kernel's vdso data arrays.
pub const VDSO_BASES: usize = CLOCK_TAI + 1;
// Fail fast at compile time on unsupported architectures: the TSC and vdso
// layout logic in this module is x86-specific.
// (Fixed typo: `arget_arch` is never a set cfg key, so the original guard
// wrongly fired on 32-bit x86 builds.)
#[cfg(not(any(target_arch = "x86", target_arch = "x86_64")))]
compile_error!("Only support x86 or x86_64 architecture now.");
/// Reads the current value of the processor's time-stamp counter.
///
/// The processor monotonically increments the time-stamp counter MSR every clock cycle
/// and resets it to 0 whenever the processor is reset.
///
/// The RDTSC instruction is not a serializing instruction. It does not necessarily
/// wait until all previous instructions have been executed before reading the counter.
/// Similarly, subsequent instructions may begin execution before the read operation is performed.
pub fn rdtsc() -> u64 {
    cfg_if::cfg_if! {
        if #[cfg(target_arch = "x86_64")] {
            unsafe { core::arch::x86_64::_rdtsc() as u64 }
        } else if #[cfg(target_arch = "x86")] {
            unsafe { core::arch::x86::_rdtsc() as u64 }
        }
    }
}
/// Reads the current value of the processor's time-stamp counter.
///
/// The processor monotonically increments the time-stamp counter MSR every clock cycle
/// and resets it to 0 whenever the processor is reset.
/// The RDTSCP instruction waits until all previous instructions have been executed before
/// reading the counter. However, subsequent instructions may begin execution before
/// the read operation is performed.
#[allow(dead_code)]
pub fn rdtscp() -> u64 {
    cfg_if::cfg_if! {
        if #[cfg(target_arch = "x86_64")] {
            // The IA32_TSC_AUX value (processor id) is not needed; discard it.
            let mut aux: u32 = 0;
            unsafe { core::arch::x86_64::__rdtscp(&mut aux) as u64 }
        } else if #[cfg(target_arch = "x86")] {
            let mut aux: u32 = 0;
            unsafe { core::arch::x86::__rdtscp(&mut aux) as u64 }
        }
    }
}
/// Performs a serializing operation on all load-from-memory instructions
/// that were issued prior to this instruction.
///
/// Guarantees that every load instruction that precedes, in program order,
/// is globally visible before any load instruction which follows the fence in program order.
pub fn lfence() {
    cfg_if::cfg_if! {
        if #[cfg(target_arch = "x86_64")] {
            unsafe { core::arch::x86_64::_mm_lfence() }
        } else if #[cfg(target_arch = "x86")] {
            unsafe { core::arch::x86::_mm_lfence() }
        }
    }
}
/// Read the current TSC in program order.
///
/// The RDTSC instruction might not be ordered relative to memory access.
/// But an RDTSC immediately after an appropriate barrier appears to be ordered as a normal load.
/// Hence, we could use a barrier before RDTSC to get ordered TSC.
///
/// We also can just use RDTSCP, which is also ordered.
pub fn rdtsc_ordered() -> u64 {
    lfence();
    rdtsc()
}
/// The timers are divided in 3 sets (HRES, COARSE, RAW) since Linux v5.3.
/// CS_HRES_COARSE refers to the first two and CS_RAW to the third.
/// The discriminant is used as an array index into the kernel's vdso_data[].
#[derive(Debug, Copy, Clone)]
#[allow(non_camel_case_types)]
pub enum ClockSource {
    CS_HRES_COARSE = 0,
    CS_RAW = 1,
}
// Mirrors the kernel's vdso clock_mode values; compared against the raw i32
// stored in the vdso data (see Vdso::get_hw_counter).
#[derive(Debug, Copy, Clone)]
#[allow(non_camel_case_types)]
pub enum VdsoClockMode {
    VDSO_CLOCKMODE_NONE = 0,
    VDSO_CLOCKMODE_TSC = 1,
    VDSO_CLOCKMODE_PVCLOCK = 2,
    VDSO_CLOCKMODE_HVCLOCK = 3,
    // The kernel defines VDSO_CLOCKMODE_TIMENS as INT_MAX.
    VDSO_CLOCKMODE_TIMENS = i32::MAX as isize,
}
// Each version-specific vdso data struct must impl this trait to unify the
// vdso_data interface across different linux versions.
pub trait VdsoData {
    /// Seconds part of the time base for `clockid`.
    fn sec(&self, clockid: ClockId) -> Result<u64>;
    /// Nanoseconds part of the time base for `clockid` (left-shifted by
    /// `shift()` for the high-resolution clocks).
    fn nsec(&self, clockid: ClockId) -> Result<u64>;
    /// Seqlock counter guarding the data; odd while an update is in progress.
    fn seq(&self) -> u32;
    /// Raw clock mode value (see `VdsoClockMode`).
    fn clock_mode(&self) -> i32;
    /// Hardware counter value at the start of the current conversion period.
    fn cycle_last(&self) -> u64;
    /// Counter mask (always u64::MAX on x86).
    fn mask(&self) -> u64;
    /// Cycle-to-nanosecond multiplier.
    fn mult(&self) -> u32;
    /// Right-shift applied after multiplying cycles by `mult`.
    fn shift(&self) -> u32;
    fn tz_minuteswest(&self) -> i32;
    fn tz_dsttime(&self) -> i32;

    /// Computes the address of this version's vdso data from the vDSO base.
    fn vdsodata_ptr(vdso_addr: u64) -> *const Self
    where
        Self: Sized;
}
// Version-tagged raw pointer to the kernel's vdso data, selected by
// Vdso::match_kernel_version from the running kernel's release string.
pub enum VdsoDataPtr {
    // === Linux 4.0 - 4.4, 4.7 - 4.11 ===
    V4_0(*const vdso_data_v4_0),
    // === Linux 4.5 - 4.6, 4.12 - 4.19 ===
    V4_5(*const vdso_data_v4_5),
    // === Linux 5.0 - 5.2 ===
    V5_0(*const vdso_data_v5_0),
    // === Linux 5.3 - 5.5 ===
    V5_3(*const vdso_data_v5_3),
    // === Linux 5.6 - 5.8 ===
    V5_6(*const vdso_data_v5_6),
    // === Linux 5.9 - 6.2 ===
    V5_9(*const vdso_data_v5_9),
}
// === Linux 4.0 - 4.4, 4.7 - 4.11 ===
// Mirrors the kernel's `struct vsyscall_gtod_data` for these releases.
// Layout-sensitive: field order and types must match the kernel exactly.
#[repr(C)]
pub struct vdso_data_v4_0 {
    pub seq: AtomicU32,
    pub vclock_mode: i32,
    pub cycle_last: u64,
    pub mask: u64,
    pub mult: u32,
    pub shift: u32,
    pub wall_time_snsec: u64,
    pub wall_time_sec: u64,
    pub monotonic_time_sec: u64,
    pub monotonic_time_snsec: u64,
    pub wall_time_coarse_sec: u64,
    pub wall_time_coarse_nsec: u64,
    pub monotonic_time_coarse_sec: u64,
    pub monotonic_time_coarse_nsec: u64,
    pub tz_minuteswest: i32,
    pub tz_dsttime: i32,
}

impl VdsoData for vdso_data_v4_0 {
    // For these kernels the gtod data sits 2 pages below the vDSO base,
    // at byte offset 128 within that page.
    fn vdsodata_ptr(vdso_addr: u64) -> *const Self {
        (vdso_addr - 2 * PAGE_SIZE + 128) as *const Self
    }

    fn sec(&self, clockid: ClockId) -> Result<u64> {
        match clockid {
            ClockId::CLOCK_REALTIME => Ok(self.wall_time_sec),
            ClockId::CLOCK_MONOTONIC => Ok(self.monotonic_time_sec),
            ClockId::CLOCK_REALTIME_COARSE => Ok(self.wall_time_coarse_sec),
            ClockId::CLOCK_MONOTONIC_COARSE => Ok(self.monotonic_time_coarse_sec),
            _ => return_errno!(EINVAL, "Unsupported clockid in sec()"),
        }
    }

    fn nsec(&self, clockid: ClockId) -> Result<u64> {
        match clockid {
            ClockId::CLOCK_REALTIME => Ok(self.wall_time_snsec),
            ClockId::CLOCK_MONOTONIC => Ok(self.monotonic_time_snsec),
            ClockId::CLOCK_REALTIME_COARSE => Ok(self.wall_time_coarse_nsec),
            ClockId::CLOCK_MONOTONIC_COARSE => Ok(self.monotonic_time_coarse_nsec),
            _ => return_errno!(EINVAL, "Unsupported clockid in nsec()"),
        }
    }

    fn seq(&self) -> u32 {
        self.seq.load(Ordering::Relaxed)
    }

    fn clock_mode(&self) -> i32 {
        self.vclock_mode
    }

    fn cycle_last(&self) -> u64 {
        self.cycle_last
    }

    fn mask(&self) -> u64 {
        self.mask
    }

    fn mult(&self) -> u32 {
        self.mult
    }

    fn shift(&self) -> u32 {
        self.shift
    }

    fn tz_minuteswest(&self) -> i32 {
        self.tz_minuteswest
    }

    fn tz_dsttime(&self) -> i32 {
        self.tz_dsttime
    }
}
// === Linux 4.5 - 4.6, 4.12 - 4.19 ===
// Mirrors the kernel's `struct vsyscall_gtod_data` for these releases.
// Same field layout as v4_0; only the data-page offset differs (see
// vdsodata_ptr below).
#[repr(C)]
pub struct vdso_data_v4_5 {
    pub seq: AtomicU32,
    pub vclock_mode: i32,
    pub cycle_last: u64,
    pub mask: u64,
    pub mult: u32,
    pub shift: u32,
    pub wall_time_snsec: u64,
    pub wall_time_sec: u64,
    pub monotonic_time_sec: u64,
    pub monotonic_time_snsec: u64,
    pub wall_time_coarse_sec: u64,
    pub wall_time_coarse_nsec: u64,
    pub monotonic_time_coarse_sec: u64,
    pub monotonic_time_coarse_nsec: u64,
    pub tz_minuteswest: i32,
    pub tz_dsttime: i32,
}

impl VdsoData for vdso_data_v4_5 {
    // For these kernels the gtod data sits 3 pages below the vDSO base,
    // at byte offset 128 within that page.
    fn vdsodata_ptr(vdso_addr: u64) -> *const Self {
        (vdso_addr - 3 * PAGE_SIZE + 128) as *const Self
    }

    fn sec(&self, clockid: ClockId) -> Result<u64> {
        match clockid {
            ClockId::CLOCK_REALTIME => Ok(self.wall_time_sec),
            ClockId::CLOCK_MONOTONIC => Ok(self.monotonic_time_sec),
            ClockId::CLOCK_REALTIME_COARSE => Ok(self.wall_time_coarse_sec),
            ClockId::CLOCK_MONOTONIC_COARSE => Ok(self.monotonic_time_coarse_sec),
            _ => return_errno!(EINVAL, "Unsupported clockid in sec()"),
        }
    }

    fn nsec(&self, clockid: ClockId) -> Result<u64> {
        match clockid {
            ClockId::CLOCK_REALTIME => Ok(self.wall_time_snsec),
            ClockId::CLOCK_MONOTONIC => Ok(self.monotonic_time_snsec),
            ClockId::CLOCK_REALTIME_COARSE => Ok(self.wall_time_coarse_nsec),
            ClockId::CLOCK_MONOTONIC_COARSE => Ok(self.monotonic_time_coarse_nsec),
            _ => return_errno!(EINVAL, "Unsupported clockid in nsec()"),
        }
    }

    fn seq(&self) -> u32 {
        self.seq.load(Ordering::Relaxed)
    }

    fn clock_mode(&self) -> i32 {
        self.vclock_mode
    }

    fn cycle_last(&self) -> u64 {
        self.cycle_last
    }

    fn mask(&self) -> u64 {
        self.mask
    }

    fn mult(&self) -> u32 {
        self.mult
    }

    fn shift(&self) -> u32 {
        self.shift
    }

    fn tz_minuteswest(&self) -> i32 {
        self.tz_minuteswest
    }

    fn tz_dsttime(&self) -> i32 {
        self.tz_dsttime
    }
}
// === Linux 5.0 - 5.2 ===
// Mirrors the kernel's `struct vsyscall_gtod_data`: per-clock bases moved
// into an indexed array, so sec()/nsec() use the clockid directly.
// Layout-sensitive: field order and types must match the kernel exactly.
#[repr(C)]
pub struct vdso_data_v5_0 {
    pub seq: AtomicU32,
    pub vclock_mode: i32,
    pub cycle_last: u64,
    pub mask: u64,
    pub mult: u32,
    pub shift: u32,
    pub basetime: [vgtod_ts; VDSO_BASES],
    pub tz_minuteswest: i32,
    pub tz_dsttime: i32,
}

// Kernel `struct vgtod_ts`: one (sec, nsec) base per clock.
#[repr(C)]
#[derive(Debug, Default, Copy, Clone)]
pub struct vgtod_ts {
    pub sec: u64,
    pub nsec: u64,
}

impl VdsoData for vdso_data_v5_0 {
    // The gtod data sits 3 pages below the vDSO base, at byte offset 128.
    fn vdsodata_ptr(vdso_addr: u64) -> *const Self {
        (vdso_addr - 3 * PAGE_SIZE + 128) as *const Self
    }

    fn sec(&self, clockid: ClockId) -> Result<u64> {
        Ok(self.basetime[clockid as usize].sec)
    }

    fn nsec(&self, clockid: ClockId) -> Result<u64> {
        Ok(self.basetime[clockid as usize].nsec)
    }

    fn seq(&self) -> u32 {
        self.seq.load(Ordering::Relaxed)
    }

    fn clock_mode(&self) -> i32 {
        self.vclock_mode
    }

    fn cycle_last(&self) -> u64 {
        self.cycle_last
    }

    fn mask(&self) -> u64 {
        self.mask
    }

    fn mult(&self) -> u32 {
        self.mult
    }

    fn shift(&self) -> u32 {
        self.shift
    }

    fn tz_minuteswest(&self) -> i32 {
        self.tz_minuteswest
    }

    fn tz_dsttime(&self) -> i32 {
        self.tz_dsttime
    }
}
// === Linux 5.3 - 5.5 ===
// Mirrors the kernel's generic `struct vdso_data` (the vdso was unified
// across architectures in v5.3). From this version on the kernel keeps an
// array of two of these (HRES_COARSE and RAW) — see Vdso::vdso_data().
// Layout-sensitive: field order and types must match the kernel exactly.
#[repr(C)]
pub struct vdso_data_v5_3 {
    pub seq: AtomicU32,
    pub clock_mode: i32,
    pub cycle_last: u64,
    pub mask: u64,
    pub mult: u32,
    pub shift: u32,
    pub basetime: [vdso_timestamp; VDSO_BASES],
    pub tz_minuteswest: i32,
    pub tz_dsttime: i32,
    pub hrtimer_res: u32,
    pub __unused: u32,
}

// Kernel `struct vdso_timestamp`: one (sec, nsec) base per clock.
#[repr(C)]
#[derive(Debug, Default, Copy, Clone)]
pub struct vdso_timestamp {
    pub sec: u64,
    pub nsec: u64,
}

impl VdsoData for vdso_data_v5_3 {
    // The vdso data sits 3 pages below the vDSO base, at byte offset 128.
    fn vdsodata_ptr(vdso_addr: u64) -> *const Self {
        (vdso_addr - 3 * PAGE_SIZE + 128) as *const Self
    }

    fn sec(&self, clockid: ClockId) -> Result<u64> {
        Ok(self.basetime[clockid as usize].sec)
    }

    fn nsec(&self, clockid: ClockId) -> Result<u64> {
        Ok(self.basetime[clockid as usize].nsec)
    }

    fn seq(&self) -> u32 {
        self.seq.load(Ordering::Relaxed)
    }

    fn clock_mode(&self) -> i32 {
        self.clock_mode
    }

    fn cycle_last(&self) -> u64 {
        self.cycle_last
    }

    fn mask(&self) -> u64 {
        self.mask
    }

    fn mult(&self) -> u32 {
        self.mult
    }

    fn shift(&self) -> u32 {
        self.shift
    }

    fn tz_minuteswest(&self) -> i32 {
        self.tz_minuteswest
    }

    fn tz_dsttime(&self) -> i32 {
        self.tz_dsttime
    }
}
// === Linux 5.6 - 5.8 ===
// struct vdso_data
// Linux 5.6 - 5.8 `struct vdso_data`. Differs from 5.3 only in that the
// basetime array becomes a union with time-namespace offsets.
// #[repr(C)]: layout must match the kernel byte-for-byte.
#[repr(C)]
pub struct vdso_data_v5_6 {
    pub seq: AtomicU32,  // seqlock counter guarding the snapshot below
    pub clock_mode: i32, // clocksource mode
    pub cycle_last: u64, // last cycle counter value at snapshot time
    pub mask: u64,       // cycle counter mask
    pub mult: u32,       // cycles -> ns multiplier
    pub shift: u32,      // cycles -> ns shift
    pub union_1: vdso_data_v5_6_union_1, // basetime, or timens offsets in a time-namespace page
    pub tz_minuteswest: i32, // timezone: minutes west of Greenwich
    pub tz_dsttime: i32,     // timezone: DST flag
    pub hrtimer_res: u32,    // hrtimer resolution
    pub __unused: u32,       // padding
}
// Time-namespace offset entry (kernel `struct timens_offset`): signed
// seconds plus nanoseconds. Layout must match the kernel, hence #[repr(C)].
#[repr(C)]
#[derive(Debug, Default, Copy, Clone)]
pub struct timens_offset {
    pub sec: i64,  // signed seconds offset
    pub nsec: u64, // nanoseconds offset
}
// The 5.6+ kernel overlays per-clock base timestamps and time-namespace
// offsets in the same storage; which view is valid depends on whether the
// page is a timens data page. This code only reads `basetime` (see the
// VdsoData impls below).
#[repr(C)]
#[derive(Copy, Clone)]
pub union vdso_data_v5_6_union_1 {
    pub basetime: [vdso_timestamp; VDSO_BASES], // per-clock base timestamps
    pub offset: [timens_offset; VDSO_BASES],    // time-namespace offsets
}
// Maps the generic `VdsoData` accessors onto the Linux 5.6 - 5.8 layout.
// `sec`/`nsec` read the `basetime` arm of the union; the timens `offset`
// view is never read here.
impl VdsoData for vdso_data_v5_6 {
    // Data page moved to 4 pages below the vDSO code base in 5.6,
    // struct still at byte offset 128.
    // NOTE(review): offsets mirror this kernel range's vvar layout — confirm
    // against the matching kernel release.
    fn vdsodata_ptr(vdso_addr: u64) -> *const Self {
        (vdso_addr - 4 * PAGE_SIZE + 128) as *const Self
    }
    // SAFETY relies on the mapped page being a regular vdso data page so
    // the `basetime` union arm is the live one.
    // NOTE(review): unchecked index — clockid must be < VDSO_BASES.
    fn sec(&self, clockid: ClockId) -> Result<u64> {
        unsafe { Ok(self.union_1.basetime[clockid as usize].sec) }
    }
    fn nsec(&self, clockid: ClockId) -> Result<u64> {
        unsafe { Ok(self.union_1.basetime[clockid as usize].nsec) }
    }
    // Seqlock counter; retry loop lives in the caller.
    // NOTE(review): Relaxed load — verify the caller supplies the needed
    // Acquire ordering/fences.
    fn seq(&self) -> u32 {
        self.seq.load(Ordering::Relaxed)
    }
    fn clock_mode(&self) -> i32 {
        self.clock_mode
    }
    fn cycle_last(&self) -> u64 {
        self.cycle_last
    }
    fn mask(&self) -> u64 {
        self.mask
    }
    fn mult(&self) -> u32 {
        self.mult
    }
    fn shift(&self) -> u32 {
        self.shift
    }
    fn tz_minuteswest(&self) -> i32 {
        self.tz_minuteswest
    }
    fn tz_dsttime(&self) -> i32 {
        self.tz_dsttime
    }
}
// === Linux 5.9 - 5.19, 6.0 - 6.2 ===
// struct vdso_data
// Linux 5.9 - 6.2 `struct vdso_data`: same as the 5.6 layout plus a
// trailing arch-specific blob (empty on x86, so the layout is unchanged
// in practice). #[repr(C)]: must match the kernel byte-for-byte.
#[repr(C)]
pub struct vdso_data_v5_9 {
    pub seq: AtomicU32,  // seqlock counter guarding the snapshot below
    pub clock_mode: i32, // clocksource mode
    pub cycle_last: u64, // last cycle counter value at snapshot time
    pub mask: u64,       // cycle counter mask
    pub mult: u32,       // cycles -> ns multiplier
    pub shift: u32,      // cycles -> ns shift
    pub union_1: vdso_data_v5_6_union_1, // basetime / timens offsets (shared with 5.6 layout)
    pub tz_minuteswest: i32, // timezone: minutes west of Greenwich
    pub tz_dsttime: i32,     // timezone: DST flag
    pub hrtimer_res: u32,    // hrtimer resolution
    pub __unused: u32,       // padding
    pub arch_data: arch_vdso_data, // arch-specific tail; zero-sized here
}
// Zero-sized stand-in for the kernel's arch-specific vdso data tail;
// empty because no arch fields are consumed by this reader.
#[repr(C)]
#[derive(Debug, Default, Copy, Clone)]
pub struct arch_vdso_data {}
// Maps the generic `VdsoData` accessors onto the Linux 5.9 - 6.2 layout.
// Field accesses are identical to the v5.6 impl; only the struct tail differs.
impl VdsoData for vdso_data_v5_9 {
    // Data page is 4 pages below the vDSO code base, struct at offset 128.
    // NOTE(review): offsets mirror this kernel range's vvar layout — confirm
    // against the matching kernel release.
    fn vdsodata_ptr(vdso_addr: u64) -> *const Self {
        (vdso_addr - 4 * PAGE_SIZE + 128) as *const Self
    }
    // SAFETY relies on the mapped page being a regular vdso data page so
    // the `basetime` union arm is the live one.
    // NOTE(review): unchecked index — clockid must be < VDSO_BASES.
    fn sec(&self, clockid: ClockId) -> Result<u64> {
        unsafe { Ok(self.union_1.basetime[clockid as usize].sec) }
    }
    fn nsec(&self, clockid: ClockId) -> Result<u64> {
        unsafe { Ok(self.union_1.basetime[clockid as usize].nsec) }
    }
    // Seqlock counter; retry loop lives in the caller.
    // NOTE(review): Relaxed load — verify the caller supplies the needed
    // Acquire ordering/fences.
    fn seq(&self) -> u32 {
        self.seq.load(Ordering::Relaxed)
    }
    fn clock_mode(&self) -> i32 {
        self.clock_mode
    }
    fn cycle_last(&self) -> u64 {
        self.cycle_last
    }
    fn mask(&self) -> u64 {
        self.mask
    }
    fn mult(&self) -> u32 {
        self.mult
    }
    fn shift(&self) -> u32 {
        self.shift
    }
    fn tz_minuteswest(&self) -> i32 {
        self.tz_minuteswest
    }
    fn tz_dsttime(&self) -> i32 {
        self.tz_dsttime
    }
}

@ -1,5 +1,6 @@
use super::*;
use crate::std::untrusted::path::PathEx;
use crate::util::sgx::allow_debug as sgx_allow_debug;
use serde::{Deserialize, Serialize};
use std::collections::HashSet;
use std::ffi::CString;
@ -10,6 +11,8 @@ use std::sgxfs::SgxFile;
use crate::util::mem_util::from_user;
use log::{set_max_level, LevelFilter};
lazy_static! {
pub static ref LIBOS_CONFIG: Config = {
let config_path =
@ -104,6 +107,7 @@ pub struct Config {
pub process: ConfigProcess,
pub env: ConfigEnv,
pub app: Vec<ConfigApp>,
pub feature: ConfigFeature,
}
#[derive(Debug)]
@ -140,6 +144,14 @@ pub struct ConfigApp {
pub mount: Vec<ConfigMount>,
}
// Validated form of the `feature` section of the user configuration
// (deserialized from `InputConfigFeature`).
#[derive(Clone, Debug)]
pub struct ConfigFeature {
    pub amx: u32,  // AMX control value — presumably 0 = disabled; TODO confirm semantics
    pub pkru: u32, // PKRU control value — presumably 0 = disabled; TODO confirm semantics
    pub enable_edmm: bool,      // enable SGX2 EDMM support
    pub enable_posix_shm: bool, // enable POSIX shared memory support
}
#[derive(Clone, Debug, PartialEq)]
#[allow(non_camel_case_types)]
pub enum ConfigMountFsType {
@ -184,7 +196,6 @@ impl Config {
let resource_limits = ConfigResourceLimits::from_input(&input.resource_limits)?;
let process = ConfigProcess::from_input(&input.process)?;
let env = ConfigEnv::from_input(&input.env)?;
let app = {
let mut app = Vec::new();
for input_app in &input.app {
@ -192,12 +203,28 @@ impl Config {
}
app
};
let feature = ConfigFeature::from_input(&input.feature)?;
if input.disable_log {
log::set_max_level(LevelFilter::Off);
} else if !sgx_allow_debug() {
if log::max_level() != LevelFilter::Off {
// Release enclave can only set error level log
log::set_max_level(LevelFilter::Error);
}
eprintln!("Warnning: Occlum Log is enabled for release enclave!");
eprintln!(
"Uses can disable Occlum Log by setting metadata.disable_log=true \
in Occlum.json and rebuild Occlum instance.\n"
);
}
Ok(Config {
resource_limits,
process,
env,
app,
feature,
})
}
@ -275,6 +302,17 @@ impl ConfigApp {
}
}
impl ConfigFeature {
fn from_input(input: &InputConfigFeature) -> Result<ConfigFeature> {
Ok(ConfigFeature {
amx: input.amx,
pkru: input.pkru,
enable_edmm: input.enable_edmm,
enable_posix_shm: input.enable_posix_shm,
})
}
}
impl ConfigMount {
fn from_input(input: &InputConfigMount) -> Result<ConfigMount> {
let type_ = ConfigMountFsType::from_input(input.type_.as_str())?;
@ -368,7 +406,11 @@ struct InputConfig {
#[serde(default)]
pub env: InputConfigEnv,
#[serde(default)]
pub disable_log: bool,
#[serde(default)]
pub app: Vec<InputConfigApp>,
#[serde(default)]
pub feature: InputConfigFeature,
}
#[derive(Deserialize, Debug)]
@ -488,6 +530,30 @@ struct InputConfigApp {
pub mount: Vec<InputConfigMount>,
}
// Raw `feature` section as parsed from the JSON configuration.
// `deny_unknown_fields` makes typos in user config a hard error; every
// field falls back to its serde default when omitted.
#[derive(Deserialize, Debug)]
#[serde(deny_unknown_fields)]
struct InputConfigFeature {
    #[serde(default)]
    pub amx: u32, // AMX control value; defaults to 0
    #[serde(default)]
    pub pkru: u32, // PKRU control value; defaults to 0
    #[serde(default)]
    pub enable_edmm: bool, // enable SGX2 EDMM; defaults to false
    #[serde(default)]
    pub enable_posix_shm: bool, // enable POSIX shared memory; defaults to false
}
// Default used when the whole `feature` section is absent from the config.
// NOTE(review): every field is the type's own default, so this manual impl
// is equivalent to `#[derive(Default)]`; kept explicit to document values.
impl Default for InputConfigFeature {
    fn default() -> InputConfigFeature {
        InputConfigFeature {
            amx: 0,
            pkru: 0,
            enable_edmm: false,
            enable_posix_shm: false,
        }
    }
}
#[repr(C)]
#[derive(Debug, Copy, Clone)]
#[allow(non_camel_case_types)]

@ -10,7 +10,7 @@ use crate::interrupt;
use crate::process::idle_reap_zombie_children;
use crate::process::{ProcessFilter, SpawnAttr};
use crate::signal::SigNum;
use crate::time::up_time::init;
use crate::time::init;
use crate::util::host_file_util::{host_file_buffer, parse_host_file, write_host_file, HostFile};
use crate::util::log::LevelFilter;
use crate::util::mem_util::from_untrusted::*;
@ -60,24 +60,17 @@ pub extern "C" fn occlum_ecall_init(
assert!(!instance_dir.is_null());
let log_level = {
let input_log_level = match parse_log_level(log_level) {
Err(e) => {
eprintln!("invalid log level: {}", e.backtrace());
return ecall_errno!(EINVAL);
}
Ok(log_level) => log_level,
};
// Use the input log level if and only if the enclave allows debug
if sgx_allow_debug() {
input_log_level
} else {
LevelFilter::Off
let log_level = match parse_log_level(log_level) {
Err(e) => {
eprintln!("invalid log level: {}", e.backtrace());
return ecall_errno!(EINVAL);
}
Ok(log_level) => log_level,
};
INIT_ONCE.call_once(|| {
// Init the log infrastructure first so that log messages will be printed afterwards
// The log level may be set to off later if disable_log is true in user configuration
util::log::init(log_level);
let report = rsgx_self_report();
@ -94,9 +87,6 @@ pub extern "C" fn occlum_ecall_init(
}
}
// Register exception handlers (support cpuid & rdtsc for now)
register_exception_handlers();
unsafe {
let dir_str: &str = CStr::from_ptr(instance_dir).to_str().unwrap();
INSTANCE_DIR.push_str(dir_str);
@ -106,10 +96,15 @@ pub extern "C" fn occlum_ecall_init(
interrupt::init();
HAS_INIT.store(true, Ordering::Release);
// Init vdso and boot up time stamp here.
time::init();
// Init boot up time stamp here.
time::up_time::init();
vm::init_user_space();
// Register exception handlers (support cpuid & rdtsc for now)
register_exception_handlers();
HAS_INIT.store(true, Ordering::Release);
// Enable global backtrace
unsafe { backtrace::enable_backtrace(&ENCLAVE_PATH, PrintFormat::Short) };
@ -437,12 +432,12 @@ fn parse_host_files(file_buffer: *const host_file_buffer) -> Result<i32> {
let resolv_conf_ptr = unsafe { (*file_buffer).resolv_conf_buf };
match parse_host_file(HostFile::ResolvConf, resolv_conf_ptr) {
Err(e) => {
error!("failed to parse /etc/resolv.conf: {}", e.backtrace());
warn!("failed to parse /etc/resolv.conf: {}", e.backtrace());
}
Ok(resolv_conf_str) => {
*RESOLV_CONF_STR.write().unwrap() = Some(resolv_conf_str);
if let Err(e) = write_host_file(HostFile::ResolvConf) {
error!("failed to write /etc/resolv.conf: {}", e.backtrace());
warn!("failed to write /etc/resolv.conf: {}", e.backtrace());
}
}
}
@ -450,13 +445,13 @@ fn parse_host_files(file_buffer: *const host_file_buffer) -> Result<i32> {
let hostname_ptr = unsafe { (*file_buffer).hostname_buf };
match parse_host_file(HostFile::HostName, hostname_ptr) {
Err(e) => {
error!("failed to parse /etc/hostname: {}", e.backtrace());
warn!("failed to parse /etc/hostname: {}", e.backtrace());
}
Ok(hostname_str) => {
misc::init_nodename(&hostname_str);
*HOSTNAME_STR.write().unwrap() = Some(hostname_str);
if let Err(e) = write_host_file(HostFile::HostName) {
error!("failed to write /etc/hostname: {}", e.backtrace());
warn!("failed to write /etc/hostname: {}", e.backtrace());
}
}
}
@ -464,12 +459,12 @@ fn parse_host_files(file_buffer: *const host_file_buffer) -> Result<i32> {
let hosts_ptr = unsafe { (*file_buffer).hosts_buf };
match parse_host_file(HostFile::Hosts, hosts_ptr) {
Err(e) => {
error!("failed to parse /etc/hosts: {}", e.backtrace());
warn!("failed to parse /etc/hosts: {}", e.backtrace());
}
Ok(hosts_str) => {
*HOSTS_STR.write().unwrap() = Some(hosts_str);
if let Err(e) = write_host_file(HostFile::Hosts) {
error!("failed to write /etc/hosts: {}", e.backtrace());
warn!("failed to write /etc/hosts: {}", e.backtrace());
}
}
}

@ -1,69 +1 @@
use super::*;
use std::fmt;
mod backtrace;
mod errno;
mod error;
mod to_errno;
pub use self::backtrace::{ErrorBacktrace, ResultExt};
pub use self::errno::Errno;
pub use self::errno::Errno::*;
pub use self::error::{Error, ErrorLocation};
pub use self::to_errno::ToErrno;
pub type Result<T> = std::result::Result<T, Error>;
// Construct an `Error` value stamped with the file/line of the invocation
// site. Two forms:
// - `errno!(ERRNO, "msg")`: pairs an `Errno` code with a static message
//   using the lightweight embedded representation (no allocation).
// - `errno!(expr)`: boxes any error value convertible via `ToErrno`.
macro_rules! errno {
    ($errno_expr: expr, $error_msg: expr) => {{
        let inner_error = {
            // Bindings coerce/type-check the arguments before pairing them.
            let errno: Errno = $errno_expr;
            let msg: &'static str = $error_msg;
            (errno, msg)
        };
        let error = Error::embedded(inner_error, Some(ErrorLocation::new(file!(), line!())));
        error
    }};
    ($error_expr: expr) => {{
        let inner_error = $error_expr;
        let error = Error::boxed(inner_error, Some(ErrorLocation::new(file!(), line!())));
        error
    }};
}
// Early-return with `Err(errno!(...))` from the enclosing function.
// Mirrors both forms of `errno!` above.
macro_rules! return_errno {
    ($errno_expr: expr, $error_msg: expr) => {{
        return Err(errno!($errno_expr, $error_msg));
    }};
    ($error_expr: expr) => {{
        return Err(errno!($error_expr));
    }};
}
// return Err(errno) if libc return -1
// Evaluate a libc call (inside `unsafe`) and early-return an `Err`
// carrying the host `errno` when the call reports failure (< 0).
// On success the raw return value is the macro's result.
macro_rules! try_libc {
    ($ret: expr) => {{
        let ret = unsafe { $ret };
        if ret < 0 {
            // Fetch errno immediately so no other call can clobber it.
            let errno = unsafe { libc::errno() };
            return_errno!(Errno::from(errno as u32), "libc error");
        }
        ret
    }};
}
// return Err(errno) if libc return -1
// raise SIGPIPE if errno == EPIPE
// Like `try_libc!`, but additionally emulates POSIX semantics for EPIPE:
// a failed write to a broken pipe must also deliver SIGPIPE, which is done
// here by tkill-ing the current thread before returning the error.
macro_rules! try_libc_may_epipe {
    ($ret: expr) => {{
        let ret = unsafe { $ret };
        if ret < 0 {
            let errno = unsafe { libc::errno() };
            if errno == Errno::EPIPE as i32 {
                // Raise SIGPIPE on the calling thread, as the kernel would.
                crate::signal::do_tkill(current!().tid(), crate::signal::SIGPIPE.as_u8() as i32);
            }
            return_errno!(Errno::from(errno as u32), "libc error");
        }
        ret
    }};
}
pub use errno::{Errno::*, *};

@ -1,116 +0,0 @@
use super::*;
/// Conversion of an error value into its POSIX `Errno` code.
///
/// Implementors must also be printable (`Display` + `Debug`) so errors can
/// be rendered in logs and backtraces.
pub trait ToErrno: fmt::Display + fmt::Debug {
    fn errno(&self) -> Errno;
}
// An `Errno` trivially is its own errno code.
impl ToErrno for Errno {
    fn errno(&self) -> Errno {
        *self
    }
}
// Blanket conversion: any `ToErrno` error type can become the crate's
// `Error` (boxed; no source location is captured here — use the `errno!`
// macro when the call site should be recorded).
impl<T> From<T> for Error
where
    T: ToErrno + 'static,
{
    fn from(t: T) -> Error {
        Error::boxed(t, None)
    }
}
// Map std I/O error kinds onto POSIX errno codes. Kinds without a clear
// POSIX counterpart fall back to EIO via the catch-all arm (which also
// keeps this compiling when new `ErrorKind` variants are added).
impl From<std::io::ErrorKind> for Errno {
    fn from(kind: std::io::ErrorKind) -> Errno {
        use std::io::ErrorKind::*;
        match kind {
            NotFound => ENOENT,
            PermissionDenied => EPERM,
            ConnectionRefused => ECONNREFUSED,
            ConnectionReset => ECONNRESET,
            ConnectionAborted => ECONNABORTED,
            NotConnected => ENOTCONN,
            AddrInUse => EADDRINUSE,
            AddrNotAvailable => EADDRNOTAVAIL,
            BrokenPipe => EPIPE,
            AlreadyExists => EEXIST,
            WouldBlock => EWOULDBLOCK,
            InvalidInput => EINVAL,
            InvalidData => EBADMSG, /* TODO: correct? */
            TimedOut => ETIMEDOUT,
            Interrupted => EINTR,
            WriteZero => EINVAL,
            UnexpectedEof => EIO,
            Other => EIO,
            _ => EIO,
        }
    }
}
// std::io::Error delegates to the ErrorKind -> Errno mapping above.
impl ToErrno for std::io::Error {
    fn errno(&self) -> Errno {
        Errno::from(self.kind())
    }
}
// An interior NUL in a would-be C string is an invalid argument.
impl ToErrno for std::ffi::NulError {
    fn errno(&self) -> Errno {
        EINVAL
    }
}
// Unparseable integer input is an invalid argument.
impl ToErrno for std::num::ParseIntError {
    fn errno(&self) -> Errno {
        EINVAL
    }
}
// Malformed JSON (e.g. in config parsing) is an invalid argument.
impl ToErrno for serde_json::Error {
    fn errno(&self) -> Errno {
        EINVAL
    }
}
// Map filesystem-layer errors from rcore-fs onto POSIX errno codes.
// The match is exhaustive on purpose: adding an `FsError` variant upstream
// should force a conscious mapping decision here.
impl ToErrno for rcore_fs::vfs::FsError {
    fn errno(&self) -> Errno {
        use rcore_fs::vfs::FsError;
        match *self {
            FsError::NotSupported => ENOSYS,
            FsError::NotFile => EISDIR,
            FsError::IsDir => EISDIR,
            FsError::NotDir => ENOTDIR,
            FsError::EntryNotFound => ENOENT,
            FsError::EntryExist => EEXIST,
            FsError::NotSameFs => EXDEV,
            FsError::InvalidParam => EINVAL,
            FsError::NoDeviceSpace => ENOMEM,
            FsError::DirRemoved => ENOENT,
            FsError::DirNotEmpty => ENOTEMPTY,
            FsError::WrongFs => EINVAL,
            // The device error payload is not inspected; bind with `_` to
            // avoid an unused-variable warning (was `DeviceError(err)`).
            FsError::DeviceError(_) => EIO,
            FsError::SymLoop => ELOOP,
            FsError::NoDevice => ENXIO,
            FsError::IOCTLError => EINVAL,
            FsError::Again => EAGAIN,
            FsError::Busy => EBUSY,
            FsError::WrProtected => EROFS,
            FsError::NoIntegrity => EIO,
            FsError::PermError => EPERM,
            FsError::NameTooLong => ENAMETOOLONG,
            FsError::FileTooBig => EFBIG,
            FsError::OpNotSupported => EOPNOTSUPP,
            FsError::NotMountPoint => EINVAL,
        }
    }
}
// Failed allocation maps to out-of-memory.
impl ToErrno for std::alloc::AllocError {
    fn errno(&self) -> Errno {
        ENOMEM
    }
}
// An invalid memory layout request is an invalid argument.
impl ToErrno for std::alloc::LayoutError {
    fn errno(&self) -> Errno {
        EINVAL
    }
}

@ -18,7 +18,7 @@ struct CpuIdInput {
#[repr(C)]
#[derive(Eq, PartialEq, Hash, Clone, Copy, Debug)]
struct CpuIdResult {
pub struct CpuIdResult {
eax: u32,
ebx: u32,
ecx: u32,
@ -57,6 +57,7 @@ impl CpuIdCache {
fn generate_cpuid_cache(&mut self, max_basic_leaf: u32, max_extend_leaf: u32) {
let mut sgx_support: bool = false;
let mut pconfig_support: bool = false;
// Generate basic leaf cpuid cache
for leaf in CPUID_MIN_BASIC_LEAF..=max_basic_leaf {
// Intel SGX Capability Enumeration Leaf,
@ -64,6 +65,11 @@ impl CpuIdCache {
if leaf == 0x12 && !sgx_support {
continue;
}
// Intel PCONFIG Enumeration Leaf,
// Leaf 1BH is supported if CPUID.(EAX=07H, ECX=0H):EDX[18] = 1.
if leaf == 0x1B && !pconfig_support {
continue;
}
let mut max_subleaf = 0;
for subleaf in (0..) {
let cpuid_input = CpuIdInput { leaf, subleaf };
@ -96,13 +102,15 @@ impl CpuIdCache {
0xD => 63,
// (Sub-leaf == 0) can not decide max_subleaf for these leaf,
// later match expression will decide the max_subleaf.
0x4 | 0xB | 0x12 | 0x1F => CPUID_MAX_SUBLEAF,
0x4 | 0xB | 0x12 | 0x1B | 0x1F => CPUID_MAX_SUBLEAF,
// Default max_subleaf is 0.
_ => 0,
};
if leaf == 0x7 {
// EBX Bit 02: Supports Intel® SGX Extensions if 1.
sgx_support = (cpuid_result.ebx & 0x0000_0004) != 0;
// EDX Bit 18: Supports PCONFIG if 1.
pconfig_support = (cpuid_result.edx & 0x40000) != 0;
}
}
// These leafs determine the maximum supported sub-leaf according to
@ -120,6 +128,9 @@ impl CpuIdCache {
// EAX Bit 03 - 00: Sub-leaf Type.
// 0000b: Indicates this sub-leaf is invalid.
0x12 if subleaf >= 2 && (cpuid_result.eax & 0x0000000F) == 0 => subleaf,
// If a sub-leaf type (EAX) is 0, the sub-leaf is invalid and zero is returned
// in EBX, ECX, and EDX.
0x1B if (cpuid_result.eax == 0) => subleaf,
// V2 Extended Topology Enumeration Leaf
// CPUID leaf 0x1F is a preferred superset to leaf 0xB.
0x1F if (cpuid_result.ecx & 0x0000_FF00) == 0 => subleaf,
@ -207,6 +218,15 @@ impl CpuId {
};
cpuid_result
}
/// Returns true iff CPUID leaf 0x12 (sub-leaf 0) reports both the SGX1
/// and SGX2 features as enabled.
pub fn support_sgx2(&self) -> bool {
    const SGX_CPUID: u32 = 0x12;
    let cpuid = self.get_cpuid_info(SGX_CPUID, 0);
    // The 0th bit set to 1 in `cpuid.eax` indicates that the SGX feature is enabled.
    // The 1st bit set to 1 in `cpuid.eax` indicates that the SGX2 feature is enabled.
    (cpuid.eax & 0b11) == 0b11
}
}
lazy_static! {
@ -214,8 +234,9 @@ lazy_static! {
}
fn is_cpuid_leaf_has_subleaves(leaf: u32) -> bool {
const CPUID_LEAF_WITH_SUBLEAF: [u32; 11] =
[0x4, 0x7, 0xB, 0xD, 0xF, 0x10, 0x12, 0x14, 0x17, 0x18, 0x1F];
const CPUID_LEAF_WITH_SUBLEAF: [u32; 12] = [
0x4, 0x7, 0xB, 0xD, 0xF, 0x10, 0x12, 0x14, 0x17, 0x18, 0x1B, 0x1F,
];
CPUID_LEAF_WITH_SUBLEAF.contains(&leaf)
}
@ -235,15 +256,26 @@ fn get_cpuid_info_via_ocall(cpuid_input: CpuIdInput) -> CpuIdResult {
cpuid_result
}
/// Module-level convenience wrapper over the global CPUID cache's SGX2 check.
pub fn is_cpu_support_sgx2() -> bool {
    CPUID.support_sgx2()
}
/// Look up a (leaf, subleaf) CPUID result from the global cache.
pub fn get_cpuid_info(leaf: u32, subleaf: u32) -> CpuIdResult {
    CPUID.get_cpuid_info(leaf, subleaf)
}
/// Force construction of the lazily-initialized global CPUID cache.
///
/// `lazy_static` values are built on first access; touching `CPUID` here
/// makes the (potentially slow, ocall-based) cache generation happen at a
/// controlled point during boot rather than inside the first CPUID
/// exception handler invocation.
pub fn setup_cpuid_info() {
    // Make lazy_static to be executed at runtime in order to be initialized.
    // The returned value is irrelevant; `let _ =` keeps the forced access
    // without an unused-variable warning (was `let max_basic_leaf = ...`).
    let _ = CPUID.get_max_basic_leaf();
}
pub fn handle_cpuid_exception(user_context: &mut CpuContext) -> Result<isize> {
debug!("handle CPUID exception");
let leaf = user_context.rax as u32;
let subleaf = user_context.rcx as u32;
debug!(
"handle CPUID exception: leaf = 0x{:x?}, subleaf = 0x{:x?}",
leaf, subleaf
);
let cpuid_result = CPUID.get_cpuid_info(leaf, subleaf);
trace!("cpuid result: {:?}", cpuid_result);
user_context.rax = cpuid_result.eax as u64;

@ -7,10 +7,12 @@ use super::*;
use crate::signal::{FaultSignal, SigSet};
use crate::syscall::exception_interrupt_syscall_c_abi;
use crate::syscall::{CpuContext, ExtraContext, SyscallNum};
use crate::vm::{enclave_page_fault_handler, USER_SPACE_VM_MANAGER};
use crate::vm::{enclave_page_fault_handler, is_page_committed, VMRange, USER_SPACE_VM_MANAGER};
use sgx_types::*;
use sgx_types::{sgx_exception_type_t, sgx_exception_vector_t};
pub use self::cpuid::{get_cpuid_info, is_cpu_support_sgx2};
const ENCLU: u32 = 0xd7010f;
const EACCEPT: u32 = 0x5;
const EACCEPTCOPY: u32 = 0x7;
@ -21,12 +23,22 @@ mod rdtsc;
mod syscall;
pub fn register_exception_handlers() {
setup_cpuid_info();
// Register handlers whose priorities go from low to high
unsafe {
let is_first = 1;
sgx_register_exception_handler(is_first, handle_exception);
extern "C" {
fn sgx_register_exception_handler_for_occlum_user_space(
user_space_ranges: *const [VMRange; 2],
handler: sgx_exception_handler_t,
) -> sgx_status_t;
}
setup_cpuid_info();
let user_space_ranges: [VMRange; 2] = USER_SPACE_VM_MANAGER.get_user_space_ranges();
let ret = unsafe {
sgx_register_exception_handler_for_occlum_user_space(
&user_space_ranges as *const _,
handle_exception,
)
};
assert!(ret == sgx_status_t::SGX_SUCCESS);
}
fn try_handle_kernel_exception(info: &sgx_exception_info_t) -> i32 {
@ -51,6 +63,12 @@ fn try_handle_kernel_exception(info: &sgx_exception_info_t) -> i32 {
return SGX_MM_EXCEPTION_CONTINUE_EXECUTION;
}
// Check spurious #PF
// FIXME: We can re-consider this check when we know the root cause
if is_page_committed(pf_addr) {
return SGX_MM_EXCEPTION_CONTINUE_EXECUTION;
}
// If the triggered code is not user's code and the #PF address is in the userspace, then it is a
// kernel-triggered #PF that we can handle. This can happen e.g. when read syscall triggers user buffer #PF
info!("kernel code triggers #PF");
@ -131,7 +149,7 @@ pub fn do_handle_exception(
return Ok(0);
}
warn!(
error!(
"#PF not handled. Turn to signal. user context = {:?}",
user_context
);

@ -5,8 +5,6 @@ use sgx_types::*;
pub const SYSCALL_OPCODE: u16 = 0x050F;
pub fn handle_syscall_exception(user_context: &mut CpuContext) -> ! {
debug!("handle SYSCALL exception");
// SYSCALL instruction saves RIP into RCX and RFLAGS into R11. This is to
// comply with hardware's behavoir. Not useful for us.
user_context.rcx = user_context.rip;
@ -19,6 +17,7 @@ pub fn handle_syscall_exception(user_context: &mut CpuContext) -> ! {
let num = user_context.rax as u32;
assert!(num != SyscallNum::HandleException as u32);
debug!("handle SYSCALL exception: syscall number = {:?}", num);
// FIXME: occlum syscall must use Linux ABI
occlum_syscall(user_context);

@ -22,6 +22,7 @@ impl AccessibilityCheckMode {
}
}
#[allow(dead_code)]
bitflags! {
pub struct AccessibilityCheckFlags : u32 {
/// If path is a symbolic link, do not dereference it
@ -31,31 +32,21 @@ bitflags! {
}
}
#[allow(dead_code)]
impl AccessibilityCheckFlags {
pub fn from_u32(bits: u32) -> Result<Self> {
AccessibilityCheckFlags::from_bits(bits).ok_or_else(|| errno!(EINVAL, "invalid flags"))
}
}
pub fn do_faccessat(
fs_path: &FsPath,
mode: AccessibilityCheckMode,
flags: AccessibilityCheckFlags,
) -> Result<()> {
debug!(
"faccessat: fs_path: {:?}, mode: {:?}, flags: {:?}",
fs_path, mode, flags
);
pub fn do_faccessat(fs_path: &FsPath, mode: AccessibilityCheckMode) -> Result<()> {
debug!("faccessat: fs_path: {:?}, mode: {:?}", fs_path, mode);
let inode = {
let path = fs_path.to_abs_path()?;
let current = current!();
let fs = current.fs().read().unwrap();
if flags.contains(AccessibilityCheckFlags::AT_SYMLINK_NOFOLLOW) {
fs.lookup_inode_no_follow(&path)?
} else {
fs.lookup_inode(&path)?
}
fs.lookup_inode(&path)?
};
if mode.test_for_exist() {
return Ok(());

@ -7,12 +7,19 @@ bitflags! {
}
}
pub fn do_fchownat(fs_path: &FsPath, uid: u32, gid: u32, flags: ChownFlags) -> Result<()> {
pub fn do_fchownat(fs_path: &FsPath, uid: i32, gid: i32, flags: ChownFlags) -> Result<()> {
debug!(
"fchownat: fs_path: {:?}, uid: {}, gid: {}, flags: {:?}",
fs_path, uid, gid, flags
);
let uid = to_opt(uid)?;
let gid = to_opt(gid)?;
// Return early if owner and group are -1
if uid.is_none() && gid.is_none() {
return Ok(());
}
let inode = {
let path = fs_path.to_abs_path()?;
let current = current!();
@ -24,19 +31,47 @@ pub fn do_fchownat(fs_path: &FsPath, uid: u32, gid: u32, flags: ChownFlags) -> R
}
};
let mut info = inode.metadata()?;
info.uid = uid as usize;
info.gid = gid as usize;
if let Some(uid) = uid {
info.uid = uid as usize;
}
if let Some(gid) = gid {
info.gid = gid as usize;
}
inode.set_metadata(&info)?;
Ok(())
}
pub fn do_fchown(fd: FileDesc, uid: u32, gid: u32) -> Result<()> {
pub fn do_fchown(fd: FileDesc, uid: i32, gid: i32) -> Result<()> {
debug!("fchown: fd: {}, uid: {}, gid: {}", fd, uid, gid);
let uid = to_opt(uid)?;
let gid = to_opt(gid)?;
// Return early if owner and group are -1
if uid.is_none() && gid.is_none() {
return Ok(());
}
let file_ref = current!().file(fd)?;
let mut info = file_ref.metadata()?;
info.uid = uid as usize;
info.gid = gid as usize;
if let Some(uid) = uid {
info.uid = uid as usize;
}
if let Some(gid) = gid {
info.gid = gid as usize;
}
file_ref.set_metadata(&info)?;
Ok(())
}
fn to_opt(id: i32) -> Result<Option<u32>> {
let id = if id >= 0 {
Some(id as u32)
} else if id == -1 {
// If the ID is specified as -1, then that ID is not changed
None
} else {
return_errno!(EINVAL, "invalid id");
};
Ok(id)
}

@ -91,6 +91,16 @@ impl<'a> FsPath<'a> {
}
Ok(abs_path)
}
/// Returns `Some(fd)` if the `FsPath` is exactly a file descriptor.
///
/// This is useful for system calls honoring the `AT_EMPTY_PATH` flag,
/// where a bare descriptor stands in for a path.
pub fn as_fd(&self) -> Option<FileDesc> {
    if let FsPathInner::Fd(fd) = &self.inner {
        Some(*fd)
    } else {
        None
    }
}
}
impl<'a> Debug for FsPath<'a> {

@ -1,7 +1,7 @@
use super::*;
use process::Process;
pub use self::access::{do_faccessat, AccessibilityCheckFlags, AccessibilityCheckMode};
pub use self::access::{do_faccessat, AccessibilityCheckMode};
pub use self::chmod::{do_fchmod, do_fchmodat, FileMode};
pub use self::chown::{do_fchown, do_fchownat, ChownFlags};
pub use self::close::do_close;

@ -19,5 +19,7 @@ pub fn do_openat(fs_path: &FsPath, flags: u32, mode: FileMode) -> Result<FileDes
let creation_flags = CreationFlags::from_bits_truncate(flags);
current.add_file(file_ref, creation_flags.must_close_on_spawn())
};
debug!("openat: result fd: {:?}", fd);
Ok(fd)
}

@ -15,6 +15,17 @@ pub fn do_sendfile(
let current = current!();
let in_file = current.file(in_fd)?;
let out_file = current.file(out_fd)?;
let in_file_access = in_file.access_mode()?;
if !in_file_access.readable() {
return_errno!(EBADF, "The in file is non-readable");
}
let out_file_access = out_file.access_mode()?;
if !out_file_access.writable() {
return_errno!(EBADF, "The out file is non-writable");
}
let mut buffer: [u8; 1024 * 11] = unsafe { MaybeUninit::uninit().assume_init() };
let mut read_offset = match offset {

@ -1,4 +1,5 @@
use super::*;
use crate::util::kernel_alloc::KernelAlloc;
use crate::vm::USER_SPACE_VM_MANAGER;
pub struct MemInfoINode;
@ -15,13 +16,26 @@ impl ProcINode for MemInfoINode {
fn generate_data_in_bytes(&self) -> vfs::Result<Vec<u8>> {
let total_ram = USER_SPACE_VM_MANAGER.get_total_size();
let free_ram = USER_SPACE_VM_MANAGER.get_precise_free_size();
let kernel_heap_total = KernelAlloc::get_kernel_heap_config();
let kernel_heap_peak_used = KernelAlloc::get_kernel_heap_peak_used();
let kernel_heap_in_use = if let Some(bytes) = KernelAlloc::get_kernel_mem_size() {
format!("{} kB", bytes / KB)
} else {
"Feature not enabled".to_string()
};
Ok(format!(
"MemTotal: {} kB\n\
MemFree: {} kB\n\
MemAvailable: {} kB\n",
"MemTotal: {} kB\n\
MemFree: {} kB\n\
MemAvailable: {} kB\n\
KernelHeapTotal: {} kB\n\
KernelHeapPeakUsed: {} kB\n\
KernelHeapInUse: {}\n",
total_ram / KB,
free_ram / KB,
free_ram / KB,
kernel_heap_total / KB,
kernel_heap_peak_used / KB,
kernel_heap_in_use,
)
.into_bytes())
}

@ -36,8 +36,9 @@ impl ProcINode for ProcStatINode {
let stime = 0;
let cutime = 0;
let cstime = 0;
let priority = main_thread.nice().read().unwrap().to_priority_val();
let nice = main_thread.nice().read().unwrap().raw_val();
// Convert [19,-20] to [39,0].
let priority = main_thread.nice().read().unwrap().to_raw_val() + 20;
let nice = main_thread.nice().read().unwrap().to_raw_val();
let num_threads = self.0.threads().len();
let itrealvalue = 0;
let starttime = self.0.start_time();

@ -321,10 +321,3 @@ impl File for LockedFile {
Ok(SefsMac(file.get_mac().unwrap()))
}
}
// Bridge the crate's `Error` into the storage device layer's `DevError`:
// log the failure (with backtrace) at the boundary, then carry only the
// numeric errno across.
impl From<Error> for DevError {
    fn from(e: Error) -> Self {
        error!("SGX protected file I/O error: {}", e.backtrace());
        DevError(e.errno() as i32)
    }
}

@ -1,12 +1,12 @@
use super::event_file::EventCreationFlags;
use super::file_ops;
use super::file_ops::{
get_abs_path_by_fd, get_utimes, AccessibilityCheckFlags, AccessibilityCheckMode, ChownFlags,
FcntlCmd, FsPath, LinkFlags, StatFlags, UnlinkFlags, Utime, UtimeFlags, AT_FDCWD, UTIME_OMIT,
get_abs_path_by_fd, get_utimes, AccessibilityCheckMode, ChownFlags, FcntlCmd, FsPath,
LinkFlags, StatFlags, UnlinkFlags, Utime, UtimeFlags, AT_FDCWD, UTIME_OMIT,
};
use super::fs_ops;
use super::fs_ops::{MountFlags, MountOptions, UmountFlags};
use super::time::{clockid_t, itimerspec_t, timespec_t, timeval_t, ClockID};
use super::time::{clockid_t, itimerspec_t, timespec_t, timeval_t, ClockId};
use super::timer_file::{TimerCreationFlags, TimerSetFlags};
use super::*;
use crate::config::{user_rootfs_config, ConfigApp, ConfigMountFsType};
@ -42,9 +42,9 @@ pub fn do_eventfd2(init_val: u32, flags: i32) -> Result<isize> {
pub fn do_timerfd_create(clockid: clockid_t, flags: i32) -> Result<isize> {
debug!("timerfd: clockid {}, flags {} ", clockid, flags);
let clockid = ClockID::from_raw(clockid)?;
let clockid = ClockId::try_from(clockid)?;
match clockid {
ClockID::CLOCK_REALTIME | ClockID::CLOCK_MONOTONIC => {}
ClockId::CLOCK_REALTIME | ClockId::CLOCK_MONOTONIC => {}
_ => {
return_errno!(EINVAL, "invalid clockid");
}
@ -167,8 +167,11 @@ fn do_writev_offset(
for iov_i in 0..count {
let iov_ptr = unsafe { iov.offset(iov_i as isize) };
let iov = unsafe { &*iov_ptr };
let buf = unsafe { std::slice::from_raw_parts(iov.base as *const u8, iov.len) };
bufs_vec.push(buf);
if iov.len != 0 {
from_user::check_array(iov.base as *const u8, iov.len)?;
let buf = unsafe { std::slice::from_raw_parts(iov.base as *const u8, iov.len) };
bufs_vec.push(buf);
}
}
bufs_vec
};
@ -206,8 +209,11 @@ fn do_readv_offset(
for iov_i in 0..count {
let iov_ptr = unsafe { iov.offset(iov_i as isize) };
let iov = unsafe { &*iov_ptr };
let buf = unsafe { std::slice::from_raw_parts_mut(iov.base as *mut u8, iov.len) };
bufs_vec.push(buf);
if iov.len != 0 {
from_user::check_mut_array(iov.base as *mut u8, iov.len)?;
let buf = unsafe { std::slice::from_raw_parts_mut(iov.base as *mut u8, iov.len) };
bufs_vec.push(buf);
}
}
bufs_vec
};
@ -289,6 +295,12 @@ pub fn do_fstatat(dirfd: i32, path: *const i8, stat_buf: *mut Stat, flags: u32)
.into_owned();
let flags = StatFlags::from_bits(flags).ok_or_else(|| errno!(EINVAL, "invalid flags"))?;
let fs_path = FsPath::new(&path, dirfd, flags.contains(StatFlags::AT_EMPTY_PATH))?;
// In this case, the behavior of fstatat() is similar to that of fstat().
if let Some(fd) = fs_path.as_fd() {
return self::do_fstat(fd, stat_buf);
}
from_user::check_mut_ptr(stat_buf)?;
let stat = file_ops::do_fstatat(&fs_path, flags)?;
unsafe {
@ -298,17 +310,16 @@ pub fn do_fstatat(dirfd: i32, path: *const i8, stat_buf: *mut Stat, flags: u32)
}
pub fn do_access(path: *const i8, mode: u32) -> Result<isize> {
self::do_faccessat(AT_FDCWD, path, mode, 0)
self::do_faccessat(AT_FDCWD, path, mode)
}
pub fn do_faccessat(dirfd: i32, path: *const i8, mode: u32, flags: u32) -> Result<isize> {
pub fn do_faccessat(dirfd: i32, path: *const i8, mode: u32) -> Result<isize> {
let path = from_user::clone_cstring_safely(path)?
.to_string_lossy()
.into_owned();
let fs_path = FsPath::new(&path, dirfd, false)?;
let mode = AccessibilityCheckMode::from_u32(mode)?;
let flags = AccessibilityCheckFlags::from_u32(flags)?;
file_ops::do_faccessat(&fs_path, mode, flags).map(|_| 0)
file_ops::do_faccessat(&fs_path, mode).map(|_| 0)
}
pub fn do_lseek(fd: FileDesc, offset: off_t, whence: i32) -> Result<isize> {
@ -517,6 +528,7 @@ pub fn do_linkat(
.to_string_lossy()
.into_owned();
let flags = LinkFlags::from_bits(flags).ok_or_else(|| errno!(EINVAL, "invalid flags"))?;
// The oldpath must be an inode.
let old_fs_path = FsPath::new(&oldpath, olddirfd, flags.contains(LinkFlags::AT_EMPTY_PATH))?;
let new_fs_path = FsPath::new(&newpath, newdirfd, false)?;
file_ops::do_linkat(&old_fs_path, &new_fs_path, flags)?;
@ -547,6 +559,9 @@ pub fn do_readlinkat(dirfd: i32, path: *const i8, buf: *mut u8, size: usize) ->
.to_string_lossy()
.into_owned();
let buf = {
if size == 0 {
return_errno!(EINVAL, "bufsiz is not a positive number");
}
from_user::check_array(buf, size)?;
unsafe { std::slice::from_raw_parts_mut(buf, size) }
};
@ -591,26 +606,32 @@ pub fn do_fchmodat(dirfd: i32, path: *const i8, mode: u16) -> Result<isize> {
Ok(0)
}
pub fn do_chown(path: *const i8, uid: u32, gid: u32) -> Result<isize> {
pub fn do_chown(path: *const i8, uid: i32, gid: i32) -> Result<isize> {
self::do_fchownat(AT_FDCWD, path, uid, gid, 0)
}
pub fn do_fchown(fd: FileDesc, uid: u32, gid: u32) -> Result<isize> {
pub fn do_fchown(fd: FileDesc, uid: i32, gid: i32) -> Result<isize> {
file_ops::do_fchown(fd, uid, gid)?;
Ok(0)
}
pub fn do_fchownat(dirfd: i32, path: *const i8, uid: u32, gid: u32, flags: i32) -> Result<isize> {
pub fn do_fchownat(dirfd: i32, path: *const i8, uid: i32, gid: i32, flags: i32) -> Result<isize> {
let path = from_user::clone_cstring_safely(path)?
.to_string_lossy()
.into_owned();
let flags = ChownFlags::from_bits(flags).ok_or_else(|| errno!(EINVAL, "invalid flags"))?;
let fs_path = FsPath::new(&path, dirfd, flags.contains(ChownFlags::AT_EMPTY_PATH))?;
// In this case, the behavior of fchownat() is similar to that of fchown().
if let Some(fd) = fs_path.as_fd() {
return self::do_fchown(fd, uid, gid);
}
file_ops::do_fchownat(&fs_path, uid, gid, flags)?;
Ok(0)
}
pub fn do_lchown(path: *const i8, uid: u32, gid: u32) -> Result<isize> {
pub fn do_lchown(path: *const i8, uid: i32, gid: i32) -> Result<isize> {
self::do_fchownat(
AT_FDCWD,
path,

@ -1,6 +1,6 @@
use super::*;
use crate::time::{clockid_t, itimerspec_t, timespec_t, ClockID};
use crate::time::{clockid_t, itimerspec_t, timespec_t, ClockId};
use atomic::{Atomic, Ordering};
use std::time::Duration;
@ -13,7 +13,7 @@ pub struct TimerFile {
}
impl TimerFile {
pub fn new(clockid: ClockID, flags: TimerCreationFlags) -> Result<Self> {
pub fn new(clockid: ClockId, flags: TimerCreationFlags) -> Result<Self> {
let raw_host_fd = try_libc!({
let mut ret: i32 = 0;
let status = occlum_ocall_timerfd_create(&mut ret, clockid as clockid_t, flags.bits());

@ -38,11 +38,16 @@ pub fn do_handle_interrupt(
/// Broadcast interrupts to threads by sending POSIX signals.
pub fn broadcast_interrupts() -> Result<usize> {
let should_interrupt_thread = |thread: &&ThreadRef| -> bool {
// TODO: check Thread::sig_mask to reduce false positives
thread.process().is_forced_to_exit()
|| thread.is_forced_to_stop()
|| !thread.sig_queues().read().unwrap().empty()
|| !thread.process().sig_queues().read().unwrap().empty()
if thread.process().is_forced_to_exit() || thread.is_forced_to_stop() {
return true;
}
let interested = !*thread.sig_mask().read().unwrap();
// In the nightly-2022-10-22 Rust compiler, this expression holds two nested read locks.
// However, in the stable-2023-12-21 Rust compiler, the expression drops the temporary variables
// (including: read lock guard) after each division code completes.
!((thread.process().sig_queues().read().unwrap().pending() & interested).empty())
|| !((thread.sig_queues().read().unwrap().pending() & interested).empty())
};
let num_signaled_threads = crate::process::table::get_all_threads()

Some files were not shown because too many files have changed in this diff Show More