@@ -101,6 +101,287 @@ static long sgx_ioc_enclave_create(struct sgx_encl *encl, void __user *arg)
101
101
return ret ;
102
102
}
103
103
104
+ static struct sgx_encl_page * sgx_encl_page_alloc (struct sgx_encl * encl ,
105
+ unsigned long offset ,
106
+ u64 secinfo_flags )
107
+ {
108
+ struct sgx_encl_page * encl_page ;
109
+ unsigned long prot ;
110
+
111
+ encl_page = kzalloc (sizeof (* encl_page ), GFP_KERNEL );
112
+ if (!encl_page )
113
+ return ERR_PTR (- ENOMEM );
114
+
115
+ encl_page -> desc = encl -> base + offset ;
116
+ encl_page -> encl = encl ;
117
+
118
+ prot = _calc_vm_trans (secinfo_flags , SGX_SECINFO_R , PROT_READ ) |
119
+ _calc_vm_trans (secinfo_flags , SGX_SECINFO_W , PROT_WRITE ) |
120
+ _calc_vm_trans (secinfo_flags , SGX_SECINFO_X , PROT_EXEC );
121
+
122
+ /*
123
+ * TCS pages must always RW set for CPU access while the SECINFO
124
+ * permissions are *always* zero - the CPU ignores the user provided
125
+ * values and silently overwrites them with zero permissions.
126
+ */
127
+ if ((secinfo_flags & SGX_SECINFO_PAGE_TYPE_MASK ) == SGX_SECINFO_TCS )
128
+ prot |= PROT_READ | PROT_WRITE ;
129
+
130
+ /* Calculate maximum of the VM flags for the page. */
131
+ encl_page -> vm_max_prot_bits = calc_vm_prot_bits (prot , 0 );
132
+
133
+ return encl_page ;
134
+ }
135
+
136
+ static int sgx_validate_secinfo (struct sgx_secinfo * secinfo )
137
+ {
138
+ u64 perm = secinfo -> flags & SGX_SECINFO_PERMISSION_MASK ;
139
+ u64 pt = secinfo -> flags & SGX_SECINFO_PAGE_TYPE_MASK ;
140
+
141
+ if (pt != SGX_SECINFO_REG && pt != SGX_SECINFO_TCS )
142
+ return - EINVAL ;
143
+
144
+ if ((perm & SGX_SECINFO_W ) && !(perm & SGX_SECINFO_R ))
145
+ return - EINVAL ;
146
+
147
+ /*
148
+ * CPU will silently overwrite the permissions as zero, which means
149
+ * that we need to validate it ourselves.
150
+ */
151
+ if (pt == SGX_SECINFO_TCS && perm )
152
+ return - EINVAL ;
153
+
154
+ if (secinfo -> flags & SGX_SECINFO_RESERVED_MASK )
155
+ return - EINVAL ;
156
+
157
+ if (memchr_inv (secinfo -> reserved , 0 , sizeof (secinfo -> reserved )))
158
+ return - EINVAL ;
159
+
160
+ return 0 ;
161
+ }
162
+
163
/*
 * Copy one page from user memory at @src into @epc_page with the EADD
 * instruction, using @secinfo as the page metadata.
 *
 * Called with encl->lock and mmap_read_lock(current->mm) held (taken by
 * sgx_encl_add_page()).
 *
 * Return:
 *   0       on success
 *   -EFAULT @src is not mapped or could not be pinned
 *   -EACCES @src lies in a noexec mapping
 *   -EIO    EADD failed
 */
static int __sgx_encl_add_page(struct sgx_encl *encl,
			       struct sgx_encl_page *encl_page,
			       struct sgx_epc_page *epc_page,
			       struct sgx_secinfo *secinfo, unsigned long src)
{
	struct sgx_pageinfo pginfo;
	struct vm_area_struct *vma;
	struct page *src_page;
	int ret;

	/* Deny noexec. */
	vma = find_vma(current->mm, src);
	if (!vma)
		return -EFAULT;

	if (!(vma->vm_flags & VM_MAYEXEC))
		return -EACCES;

	/* Pin the source page so its contents are stable during EADD. */
	ret = get_user_pages(src, 1, 0, &src_page, NULL);
	if (ret < 1)
		return -EFAULT;

	/* EADD takes the SECS address, target linear address, SECINFO and
	 * a kernel mapping of the source contents via PAGEINFO. */
	pginfo.secs = (unsigned long)sgx_get_epc_virt_addr(encl->secs.epc_page);
	pginfo.addr = encl_page->desc & PAGE_MASK;
	pginfo.metadata = (unsigned long)secinfo;
	pginfo.contents = (unsigned long)kmap_atomic(src_page);

	ret = __eadd(&pginfo, sgx_get_epc_virt_addr(epc_page));

	kunmap_atomic((void *)pginfo.contents);
	put_page(src_page);

	/* Any ENCLS failure here is collapsed to -EIO for the caller. */
	return ret ? -EIO : 0;
}
197
+
198
+ /*
199
+ * If the caller requires measurement of the page as a proof for the content,
200
+ * use EEXTEND to add a measurement for 256 bytes of the page. Repeat this
201
+ * operation until the entire page is measured."
202
+ */
203
+ static int __sgx_encl_extend (struct sgx_encl * encl ,
204
+ struct sgx_epc_page * epc_page )
205
+ {
206
+ unsigned long offset ;
207
+ int ret ;
208
+
209
+ for (offset = 0 ; offset < PAGE_SIZE ; offset += SGX_EEXTEND_BLOCK_SIZE ) {
210
+ ret = __eextend (sgx_get_epc_virt_addr (encl -> secs .epc_page ),
211
+ sgx_get_epc_virt_addr (epc_page ) + offset );
212
+ if (ret ) {
213
+ if (encls_failed (ret ))
214
+ ENCLS_WARN (ret , "EEXTEND" );
215
+
216
+ return - EIO ;
217
+ }
218
+ }
219
+
220
+ return 0 ;
221
+ }
222
+
223
/*
 * Add a single page at @offset within the enclave: allocate tracking and
 * EPC pages, EADD the contents from @src, and optionally (SGX_PAGE_MEASURE
 * in @flags) extend the enclave measurement with the page contents.
 *
 * Takes mmap_read_lock(current->mm) and encl->lock around the
 * EADD/EEXTEND sequence.  On failure all allocations are unwound and the
 * error from the failing step is returned.
 */
static int sgx_encl_add_page(struct sgx_encl *encl, unsigned long src,
			     unsigned long offset, struct sgx_secinfo *secinfo,
			     unsigned long flags)
{
	struct sgx_encl_page *encl_page;
	struct sgx_epc_page *epc_page;
	int ret;

	encl_page = sgx_encl_page_alloc(encl, offset, secinfo->flags);
	if (IS_ERR(encl_page))
		return PTR_ERR(encl_page);

	epc_page = __sgx_alloc_epc_page();
	if (IS_ERR(epc_page)) {
		kfree(encl_page);
		return PTR_ERR(epc_page);
	}

	mmap_read_lock(current->mm);
	mutex_lock(&encl->lock);

	/*
	 * Insert prior to EADD in case of OOM. EADD modifies MRENCLAVE, i.e.
	 * can't be gracefully unwound, while failure on EADD/EXTEND is limited
	 * to userspace errors (or kernel/hardware bugs).
	 */
	ret = xa_insert(&encl->page_array, PFN_DOWN(encl_page->desc),
			encl_page, GFP_KERNEL);
	if (ret)
		goto err_out_unlock;

	ret = __sgx_encl_add_page(encl, encl_page, epc_page, secinfo,
				  src);
	if (ret)
		goto err_out;

	/*
	 * Complete the "add" before doing the "extend" so that the "add"
	 * isn't in a half-baked state in the extremely unlikely scenario
	 * the enclave will be destroyed in response to EEXTEND failure.
	 */
	/* NOTE(review): encl_page->encl was already set by
	 * sgx_encl_page_alloc(); this store is redundant but harmless. */
	encl_page->encl = encl;
	encl_page->epc_page = epc_page;
	encl->secs_child_cnt++;

	if (flags & SGX_PAGE_MEASURE) {
		ret = __sgx_encl_extend(encl, epc_page);
		if (ret)
			goto err_out;
	}

	mutex_unlock(&encl->lock);
	mmap_read_unlock(current->mm);
	return ret;

err_out:
	/* Remove the page from the enclave's tracking before freeing it. */
	xa_erase(&encl->page_array, PFN_DOWN(encl_page->desc));

err_out_unlock:
	mutex_unlock(&encl->lock);
	mmap_read_unlock(current->mm);

	sgx_free_epc_page(epc_page);
	kfree(encl_page);

	return ret;
}
290
+
291
+ /**
292
+ * sgx_ioc_enclave_add_pages() - The handler for %SGX_IOC_ENCLAVE_ADD_PAGES
293
+ * @encl: an enclave pointer
294
+ * @arg: a user pointer to a struct sgx_enclave_add_pages instance
295
+ *
296
+ * Add one or more pages to an uninitialized enclave, and optionally extend the
297
+ * measurement with the contents of the page. The SECINFO and measurement mask
298
+ * are applied to all pages.
299
+ *
300
+ * A SECINFO for a TCS is required to always contain zero permissions because
301
+ * CPU silently zeros them. Allowing anything else would cause a mismatch in
302
+ * the measurement.
303
+ *
304
+ * mmap()'s protection bits are capped by the page permissions. For each page
305
+ * address, the maximum protection bits are computed with the following
306
+ * heuristics:
307
+ *
308
+ * 1. A regular page: PROT_R, PROT_W and PROT_X match the SECINFO permissions.
309
+ * 2. A TCS page: PROT_R | PROT_W.
310
+ *
311
+ * mmap() is not allowed to surpass the minimum of the maximum protection bits
312
+ * within the given address range.
313
+ *
314
+ * The function deinitializes kernel data structures for enclave and returns
315
+ * -EIO in any of the following conditions:
316
+ *
317
+ * - Enclave Page Cache (EPC), the physical memory holding enclaves, has
318
+ * been invalidated. This will cause EADD and EEXTEND to fail.
319
+ * - If the source address is corrupted somehow when executing EADD.
320
+ *
321
+ * Return:
322
+ * - 0: Success.
323
+ * - -EACCES: The source page is located in a noexec partition.
324
+ * - -ENOMEM: Out of EPC pages.
325
+ * - -EINTR: The call was interrupted before data was processed.
326
+ * - -EIO: Either EADD or EEXTEND failed because invalid source address
327
+ * or power cycle.
328
+ * - -errno: POSIX error.
329
+ */
330
+ static long sgx_ioc_enclave_add_pages (struct sgx_encl * encl , void __user * arg )
331
+ {
332
+ struct sgx_enclave_add_pages add_arg ;
333
+ struct sgx_secinfo secinfo ;
334
+ unsigned long c ;
335
+ int ret ;
336
+
337
+ if (!test_bit (SGX_ENCL_CREATED , & encl -> flags ))
338
+ return - EINVAL ;
339
+
340
+ if (copy_from_user (& add_arg , arg , sizeof (add_arg )))
341
+ return - EFAULT ;
342
+
343
+ if (!IS_ALIGNED (add_arg .offset , PAGE_SIZE ) ||
344
+ !IS_ALIGNED (add_arg .src , PAGE_SIZE ))
345
+ return - EINVAL ;
346
+
347
+ if (add_arg .length & (PAGE_SIZE - 1 ))
348
+ return - EINVAL ;
349
+
350
+ if (add_arg .offset + add_arg .length - PAGE_SIZE >= encl -> size )
351
+ return - EINVAL ;
352
+
353
+ if (copy_from_user (& secinfo , (void __user * )add_arg .secinfo ,
354
+ sizeof (secinfo )))
355
+ return - EFAULT ;
356
+
357
+ if (sgx_validate_secinfo (& secinfo ))
358
+ return - EINVAL ;
359
+
360
+ for (c = 0 ; c < add_arg .length ; c += PAGE_SIZE ) {
361
+ if (signal_pending (current )) {
362
+ if (!c )
363
+ ret = - EINTR ;
364
+
365
+ break ;
366
+ }
367
+
368
+ if (need_resched ())
369
+ cond_resched ();
370
+
371
+ ret = sgx_encl_add_page (encl , add_arg .src + c , add_arg .offset + c ,
372
+ & secinfo , add_arg .flags );
373
+ if (ret )
374
+ break ;
375
+ }
376
+
377
+ add_arg .count = c ;
378
+
379
+ if (copy_to_user (arg , & add_arg , sizeof (add_arg )))
380
+ return - EFAULT ;
381
+
382
+ return ret ;
383
+ }
384
+
104
385
long sgx_ioctl (struct file * filep , unsigned int cmd , unsigned long arg )
105
386
{
106
387
struct sgx_encl * encl = filep -> private_data ;
@@ -113,6 +394,9 @@ long sgx_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
113
394
case SGX_IOC_ENCLAVE_CREATE :
114
395
ret = sgx_ioc_enclave_create (encl , (void __user * )arg );
115
396
break ;
397
+ case SGX_IOC_ENCLAVE_ADD_PAGES :
398
+ ret = sgx_ioc_enclave_add_pages (encl , (void __user * )arg );
399
+ break ;
116
400
default :
117
401
ret = - ENOIOCTLCMD ;
118
402
break ;
0 commit comments