
Commit 8466436

Sean Christopherson authored and suryasaimadhu committed
x86/vdso: Implement a vDSO for Intel SGX enclave call
Enclaves encounter exceptions for lots of reasons: everything from enclave
page faults to NULL pointer dereferences, to system calls that must be
“proxied” to the kernel from outside the enclave.

In addition to the code contained inside an enclave, there is also
supporting code outside the enclave called an “SGX runtime”, which is
virtually always implemented inside a shared library. The runtime helps
build the enclave and handles things like *re*building the enclave if it
got destroyed by something like a suspend/resume cycle.

The rebuilding has traditionally been handled in SIGSEGV handlers,
registered by the library. But, being process-wide, shared state, signal
handling and shared libraries do not mix well.

Introduce a vDSO function call that wraps the enclave entry functions
(the EENTER/ERESUME functions of the ENCLU instruction) and returns
information about any exceptions to the caller in the SGX runtime.

Instead of generating a signal, the kernel places exception information in
RDI, RSI and RDX. The kernel-provided userspace portion of the vDSO handler
will place this information in a user-provided buffer or trigger a
user-provided callback at the time of the exception.

The vDSO function calling convention uses the standard RDI, RSI, RDX, RCX,
R8 and R9 registers. This makes it possible to declare the vDSO function as
a C prototype, but other than that there is no specific support for the
SystemV ABI. Things like storing XSAVE state are the responsibility of the
enclave and the runtime.

  [ bp: Change vsgx.o build dependency to CONFIG_X86_SGX. ]

Suggested-by: Andy Lutomirski <[email protected]>
Signed-off-by: Sean Christopherson <[email protected]>
Co-developed-by: Cedric Xing <[email protected]>
Signed-off-by: Cedric Xing <[email protected]>
Co-developed-by: Jarkko Sakkinen <[email protected]>
Signed-off-by: Jarkko Sakkinen <[email protected]>
Signed-off-by: Borislav Petkov <[email protected]>
Tested-by: Jethro Beekman <[email protected]>
Link: https://lkml.kernel.org/r/[email protected]
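As a rough illustration of that calling convention (not part of this commit),
the entry point can be invoked through an ordinary C function pointer once its
address has been located in the vDSO; the symbol lookup is sketched further
down this page, and the enter_enclave pointer and tcs argument below are
assumptions.

/*
 * Illustrative sketch only, not part of this patch.  Assumes @enter_enclave
 * has already been resolved from the vDSO and that @tcs points at a valid,
 * initialized TCS page.
 */
#include <string.h>
#include <asm/sgx.h>    /* struct sgx_enclave_run, vdso_sgx_enter_enclave_t */

#define EENTER  0x02    /* ENCLU leaf; asm/enclu.h is not exported to userspace */

static int enter_once(vdso_sgx_enter_enclave_t enter_enclave, void *tcs)
{
        struct sgx_enclave_run run;

        memset(&run, 0, sizeof(run));           /* reserved[] must be all zeros */
        run.tcs = (__u64)(unsigned long)tcs;

        /* On an exception the vDSO fills run's exception fields; no signal. */
        return enter_enclave(0, 0, 0, EENTER, 0, 0, &run);
}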
1 parent 334872a commit 8466436

File tree: 5 files changed, 254 insertions(+), 0 deletions(-)

arch/x86/entry/vdso/Makefile
Lines changed: 2 additions & 0 deletions

@@ -27,6 +27,7 @@ VDSO32-$(CONFIG_IA32_EMULATION) := y
 vobjs-y := vdso-note.o vclock_gettime.o vgetcpu.o
 vobjs32-y := vdso32/note.o vdso32/system_call.o vdso32/sigreturn.o
 vobjs32-y += vdso32/vclock_gettime.o
+vobjs-$(CONFIG_X86_SGX) += vsgx.o

 # files to link into kernel
 obj-y += vma.o extable.o
@@ -98,6 +99,7 @@ $(vobjs): KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS) $(RETPOLINE_CFLAGS
 CFLAGS_REMOVE_vclock_gettime.o = -pg
 CFLAGS_REMOVE_vdso32/vclock_gettime.o = -pg
 CFLAGS_REMOVE_vgetcpu.o = -pg
+CFLAGS_REMOVE_vsgx.o = -pg

 #
 # X32 processes use x32 vDSO to access 64bit kernel data.

arch/x86/entry/vdso/vdso.lds.S
Lines changed: 1 addition & 0 deletions

@@ -27,6 +27,7 @@ VERSION {
 		__vdso_time;
 		clock_getres;
 		__vdso_clock_getres;
+		__vdso_sgx_enter_enclave;
 	local: *;
 	};
 }
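The newly exported symbol has to be located at runtime by walking the vDSO
image. A minimal sketch, assuming the parse_vdso.c helper from the kernel's
tools/testing/selftests/vDSO/ directory is built into the program (any vDSO
ELF parser would do):

/* Sketch only; vdso_init_from_sysinfo_ehdr() and vdso_sym() are provided by
 * tools/testing/selftests/vDSO/parse_vdso.c, assumed to be linked in. */
#include <stddef.h>
#include <stdint.h>
#include <sys/auxv.h>

extern void vdso_init_from_sysinfo_ehdr(uintptr_t base);
extern void *vdso_sym(const char *version, const char *name);

static void *find_sgx_enter_enclave(void)
{
        uintptr_t vdso_base = getauxval(AT_SYSINFO_EHDR);

        if (!vdso_base)
                return NULL;

        vdso_init_from_sysinfo_ehdr(vdso_base);

        /* "LINUX_2.6" is the version node wrapping the symbol list above. */
        return vdso_sym("LINUX_2.6", "__vdso_sgx_enter_enclave");
}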

arch/x86/entry/vdso/vsgx.S (new file)
Lines changed: 151 additions & 0 deletions

@@ -0,0 +1,151 @@
/* SPDX-License-Identifier: GPL-2.0 */

#include <linux/linkage.h>
#include <asm/export.h>
#include <asm/errno.h>
#include <asm/enclu.h>

#include "extable.h"

/* Relative to %rbp. */
#define SGX_ENCLAVE_OFFSET_OF_RUN		16

/* The offsets relative to struct sgx_enclave_run. */
#define SGX_ENCLAVE_RUN_TCS			0
#define SGX_ENCLAVE_RUN_LEAF			8
#define SGX_ENCLAVE_RUN_EXCEPTION_VECTOR	12
#define SGX_ENCLAVE_RUN_EXCEPTION_ERROR_CODE	14
#define SGX_ENCLAVE_RUN_EXCEPTION_ADDR		16
#define SGX_ENCLAVE_RUN_USER_HANDLER		24
#define SGX_ENCLAVE_RUN_USER_DATA		32	/* not used */
#define SGX_ENCLAVE_RUN_RESERVED_START		40
#define SGX_ENCLAVE_RUN_RESERVED_END		256

.code64
.section .text, "ax"

SYM_FUNC_START(__vdso_sgx_enter_enclave)
	/* Prolog */
	.cfi_startproc
	push	%rbp
	.cfi_adjust_cfa_offset	8
	.cfi_rel_offset		%rbp, 0
	mov	%rsp, %rbp
	.cfi_def_cfa_register	%rbp
	push	%rbx
	.cfi_rel_offset		%rbx, -8

	mov	%ecx, %eax
.Lenter_enclave:
	/* EENTER <= function <= ERESUME */
	cmp	$EENTER, %eax
	jb	.Linvalid_input
	cmp	$ERESUME, %eax
	ja	.Linvalid_input

	mov	SGX_ENCLAVE_OFFSET_OF_RUN(%rbp), %rcx

	/* Validate that the reserved area contains only zeros. */
	mov	$SGX_ENCLAVE_RUN_RESERVED_START, %rbx
1:
	cmpq	$0, (%rcx, %rbx)
	jne	.Linvalid_input
	add	$8, %rbx
	cmpq	$SGX_ENCLAVE_RUN_RESERVED_END, %rbx
	jne	1b

	/* Load TCS and AEP */
	mov	SGX_ENCLAVE_RUN_TCS(%rcx), %rbx
	lea	.Lasync_exit_pointer(%rip), %rcx

	/* Single ENCLU serving as both EENTER and AEP (ERESUME) */
.Lasync_exit_pointer:
.Lenclu_eenter_eresume:
	enclu

	/* EEXIT jumps here unless the enclave is doing something fancy. */
	mov	SGX_ENCLAVE_OFFSET_OF_RUN(%rbp), %rbx

	/* Set exit_reason. */
	movl	$EEXIT, SGX_ENCLAVE_RUN_LEAF(%rbx)

	/* Invoke userspace's exit handler if one was provided. */
.Lhandle_exit:
	cmpq	$0, SGX_ENCLAVE_RUN_USER_HANDLER(%rbx)
	jne	.Linvoke_userspace_handler

	/* Success, in the sense that ENCLU was attempted. */
	xor	%eax, %eax

.Lout:
	pop	%rbx
	leave
	.cfi_def_cfa		%rsp, 8
	ret

	/* The out-of-line code runs with the pre-leave stack frame. */
	.cfi_def_cfa		%rbp, 16

.Linvalid_input:
	mov	$(-EINVAL), %eax
	jmp	.Lout

.Lhandle_exception:
	mov	SGX_ENCLAVE_OFFSET_OF_RUN(%rbp), %rbx

	/* Set the exception info. */
	mov	%eax, (SGX_ENCLAVE_RUN_LEAF)(%rbx)
	mov	%di,  (SGX_ENCLAVE_RUN_EXCEPTION_VECTOR)(%rbx)
	mov	%si,  (SGX_ENCLAVE_RUN_EXCEPTION_ERROR_CODE)(%rbx)
	mov	%rdx, (SGX_ENCLAVE_RUN_EXCEPTION_ADDR)(%rbx)
	jmp	.Lhandle_exit

.Linvoke_userspace_handler:
	/* Pass the untrusted RSP (at exit) to the callback via %rcx. */
	mov	%rsp, %rcx

	/* Save the struct sgx_enclave_run pointer; %rbx is about to be clobbered. */
	mov	%rbx, %rax

	/* Save the untrusted RSP offset in %rbx (non-volatile register). */
	mov	%rsp, %rbx
	and	$0xf, %rbx

	/*
	 * Align stack per x86_64 ABI. Note, %rsp needs to be 16-byte aligned
	 * _after_ pushing the parameters on the stack, hence the bonus push.
	 */
	and	$-0x10, %rsp
	push	%rax

	/* Push the struct sgx_enclave_run pointer as a param to the callback. */
	push	%rax

	/* Clear RFLAGS.DF per x86_64 ABI */
	cld

	/*
	 * Load the callback pointer to %rax and lfence for LVI (load value
	 * injection) protection before making the call.
	 */
	mov	SGX_ENCLAVE_RUN_USER_HANDLER(%rax), %rax
	lfence
	call	*%rax

	/* Undo the post-exit %rsp adjustment. */
	lea	0x10(%rsp, %rbx), %rsp

	/*
	 * If the return from the callback is zero or negative, return
	 * immediately, else re-execute ENCLU with the positive return value
	 * interpreted as the requested ENCLU function.
	 */
	cmp	$0, %eax
	jle	.Lout
	jmp	.Lenter_enclave

	.cfi_endproc

_ASM_VDSO_EXTABLE_HANDLE(.Lenclu_eenter_eresume, .Lhandle_exception)

SYM_FUNC_END(__vdso_sgx_enter_enclave)
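For reference, here is a sketch (not from the patch) of a user exit handler
matching the callback convention implemented above; the handler name and the
"ocall"-style protocol are purely illustrative. The fourth argument is the
untrusted RSP at enclave exit, passed by the assembly in %rcx, and the run
pointer arrives as the stack argument pushed before the call.

#include <asm/sgx.h>    /* struct sgx_enclave_run, sgx_enclave_user_handler_t */

#define EENTER  0x02    /* ENCLU leaves; asm/enclu.h is kernel-internal */
#define EEXIT   0x04

/* Returning <= 0 hands that value straight back to the caller of the vDSO
 * function; returning EENTER or ERESUME makes the vDSO re-execute ENCLU. */
static int sgx_exit_handler(long rdi, long rsi, long rdx, long untrusted_rsp,
                            long r8, long r9, struct sgx_enclave_run *run)
{
        if (run->function != EEXIT)
                return -1;              /* exception: report an error to the caller */

        /* Illustrative protocol only: a non-zero RDI at EEXIT means the
         * enclave wants its runtime to do some work and re-enter it. */
        if (rdi)
                return EENTER;

        return 0;                       /* clean exit, return 0 to the caller */
}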

arch/x86/include/asm/enclu.h (new file)
Lines changed: 9 additions & 0 deletions

@@ -0,0 +1,9 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_ENCLU_H
#define _ASM_X86_ENCLU_H

#define EENTER	0x02
#define ERESUME	0x03
#define EEXIT	0x04

#endif /* _ASM_X86_ENCLU_H */

arch/x86/include/uapi/asm/sgx.h
Lines changed: 91 additions & 0 deletions

@@ -74,4 +74,95 @@ struct sgx_enclave_provision {
 	__u64 fd;
 };

+struct sgx_enclave_run;
+
+/**
+ * typedef sgx_enclave_user_handler_t - Exit handler function accepted by
+ *					__vdso_sgx_enter_enclave()
+ * @run:	The run instance given by the caller
+ *
+ * The register parameters contain the snapshot of their values at enclave
+ * exit. Returning an invalid ENCLU function number will cause -EINVAL to be
+ * returned to the caller.
+ *
+ * Return:
+ * - <= 0:	The given value is returned back to the caller.
+ * - > 0:	ENCLU function to invoke, either EENTER or ERESUME.
+ */
+typedef int (*sgx_enclave_user_handler_t)(long rdi, long rsi, long rdx,
+					  long rsp, long r8, long r9,
+					  struct sgx_enclave_run *run);
+
+/**
+ * struct sgx_enclave_run - the execution context of __vdso_sgx_enter_enclave()
+ * @tcs:			TCS used to enter the enclave
+ * @function:			The last seen ENCLU function (EENTER, ERESUME or EEXIT)
+ * @exception_vector:		The interrupt vector of the exception
+ * @exception_error_code:	The exception error code pulled out of the stack
+ * @exception_addr:		The address that triggered the exception
+ * @user_handler:		User provided callback run on exception
+ * @user_data:			Data passed to the user handler
+ * @reserved:			Reserved for future extensions
+ *
+ * If @user_handler is provided, the handler will be invoked on all return paths
+ * of the normal flow. The user handler may transfer control, e.g. via a
+ * longjmp() call or a C++ exception, without returning to
+ * __vdso_sgx_enter_enclave().
+ */
+struct sgx_enclave_run {
+	__u64 tcs;
+	__u32 function;
+	__u16 exception_vector;
+	__u16 exception_error_code;
+	__u64 exception_addr;
+	__u64 user_handler;
+	__u64 user_data;
+	__u8  reserved[216];
+};
+
+/**
+ * typedef vdso_sgx_enter_enclave_t - Prototype for __vdso_sgx_enter_enclave(),
+ *				      a vDSO function to enter an SGX enclave.
+ * @rdi:	Pass-through value for RDI
+ * @rsi:	Pass-through value for RSI
+ * @rdx:	Pass-through value for RDX
+ * @function:	ENCLU function, must be EENTER or ERESUME
+ * @r8:		Pass-through value for R8
+ * @r9:		Pass-through value for R9
+ * @run:	struct sgx_enclave_run, must be non-NULL
+ *
+ * NOTE: __vdso_sgx_enter_enclave() does not ensure full compliance with the
+ * x86-64 ABI, e.g. doesn't handle XSAVE state. Except for non-volatile
+ * general purpose registers, EFLAGS.DF, and RSP alignment, preserving/setting
+ * state in accordance with the x86-64 ABI is the responsibility of the enclave
+ * and its runtime, i.e. __vdso_sgx_enter_enclave() cannot be called from C
+ * code without careful consideration by both the enclave and its runtime.
+ *
+ * All general purpose registers except RAX, RBX and RCX are passed as-is to
+ * the enclave. RAX, RBX and RCX are consumed by EENTER and ERESUME and are
+ * loaded with @function, the asynchronous exit pointer, and @run.tcs
+ * respectively.
+ *
+ * RBP and the stack are used to anchor __vdso_sgx_enter_enclave() to the
+ * pre-enclave state, e.g. to retrieve the exception fields of @run and
+ * @run.user_handler after an enclave exit. All other registers are available
+ * for use by the enclave and its runtime, e.g. an enclave can push additional
+ * data onto the stack (and modify RSP) to pass information to the optional
+ * user handler (see below).
+ *
+ * Most exceptions reported on ENCLU, including those that occur within the
+ * enclave, are fixed up and reported synchronously instead of being delivered
+ * via a standard signal. Debug Exceptions (#DB) and Breakpoints (#BP) are
+ * never fixed up and are always delivered via standard signals. On
+ * synchronously reported exceptions, the details are recorded in
+ * @run.exception_vector, @run.exception_error_code and @run.exception_addr,
+ * and @run.function is left holding the attempted ENCLU function rather than
+ * EEXIT.
+ *
+ * Return:
+ * - 0:		ENCLU function was attempted; check @run for the outcome.
+ * - -EINVAL:	Invalid ENCLU function number (neither EENTER nor ERESUME) or
+ *		non-zero bytes in the reserved area of @run.
+ * - <= 0:	A non-positive value returned by the user handler, if one ran.
+ */
+typedef int (*vdso_sgx_enter_enclave_t)(unsigned long rdi, unsigned long rsi,
+					unsigned long rdx, unsigned int function,
+					unsigned long r8, unsigned long r9,
+					struct sgx_enclave_run *run);
+
 #endif /* _UAPI_ASM_X86_SGX_H */
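When no user handler is installed, the caller classifies the outcome from the
run struct after the vDSO function returns 0. A brief sketch, under the same
assumptions as the earlier examples (EEXIT matches asm/enclu.h, which is not
exported, so it is redefined locally):

#include <asm/sgx.h>

#define EEXIT   0x04

/* Returns 0 for a normal EEXIT, or the trap vector (e.g. 14 for #PF) of a
 * synchronously reported exception recorded by the vDSO. */
static int classify_exit(const struct sgx_enclave_run *run)
{
        if (run->function == EEXIT)
                return 0;

        /* run->exception_error_code and run->exception_addr hold the rest. */
        return run->exception_vector;
}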
