Skip to content

Commit 09184ae

Browse files
toddkjos authored and gregkh committed
binder: defer copies of pre-patched txn data
BINDER_TYPE_PTR objects point to memory areas in the source process to be copied into the target buffer as part of a transaction. This implements a scatter-gather model where non-contiguous memory in a source process is "gathered" into a contiguous region in the target buffer. The data can include pointers that must be fixed up to correctly point to the copied data. To avoid making source process pointers visible to the target process, this patch defers the copy until the fixups are known, and then the copies and fixups are done together. There is a special case of BINDER_TYPE_FDA, which applies the fixup later, in the target process context. In this case the user data is skipped (so no untranslated fds become visible to the target). Reviewed-by: Martijn Coenen <[email protected]> Signed-off-by: Todd Kjos <[email protected]> Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Greg Kroah-Hartman <[email protected]>
1 parent 656e01f commit 09184ae

File tree

1 file changed

+274
-25
lines changed

1 file changed

+274
-25
lines changed

drivers/android/binder.c

Lines changed: 274 additions & 25 deletions
Original file line numberDiff line numberDiff line change
@@ -2233,7 +2233,246 @@ static int binder_translate_fd(u32 fd, binder_size_t fd_offset,
22332233
return ret;
22342234
}
22352235

2236-
static int binder_translate_fd_array(struct binder_fd_array_object *fda,
2236+
/**
2237+
* struct binder_ptr_fixup - data to be fixed-up in target buffer
2238+
* @offset offset in target buffer to fixup
2239+
* @skip_size bytes to skip in copy (fixup will be written later)
2240+
* @fixup_data data to write at fixup offset
2241+
* @node list node
2242+
*
2243+
* This is used for the pointer fixup list (pf) which is created and consumed
2244+
* during binder_transaction() and is only accessed locally. No
2245+
* locking is necessary.
2246+
*
2247+
* The list is ordered by @offset.
2248+
*/
2249+
struct binder_ptr_fixup {
2250+
binder_size_t offset;
2251+
size_t skip_size;
2252+
binder_uintptr_t fixup_data;
2253+
struct list_head node;
2254+
};
2255+
2256+
/**
2257+
* struct binder_sg_copy - scatter-gather data to be copied
2258+
* @offset offset in target buffer
2259+
* @sender_uaddr user address in source buffer
2260+
* @length bytes to copy
2261+
* @node list node
2262+
*
2263+
* This is used for the sg copy list (sgc) which is created and consumed
2264+
* during binder_transaction() and is only accessed locally. No
2265+
* locking is necessary.
2266+
*
2267+
* The list is ordered by @offset.
2268+
*/
2269+
struct binder_sg_copy {
2270+
binder_size_t offset;
2271+
const void __user *sender_uaddr;
2272+
size_t length;
2273+
struct list_head node;
2274+
};
2275+
2276+
/**
 * binder_do_deferred_txn_copies() - copy and fixup scatter-gather data
 * @alloc:	binder_alloc associated with @buffer
 * @buffer:	binder buffer in target process
 * @sgc_head:	list_head of scatter-gather copy list
 * @pf_head:	list_head of pointer fixup list
 *
 * Processes all elements of @sgc_head, applying fixups from @pf_head
 * and copying the scatter-gather data from the source process' user
 * buffer to the target's buffer. It is expected that the list creation
 * and processing all occurs during binder_transaction() so these lists
 * are only accessed in local context.
 *
 * Both lists are ordered by offset, and both are fully consumed and
 * freed here: even after an error, the loops keep walking the lists
 * (skipping further copies) so no elements leak.
 *
 * Return: 0=success, else -errno
 */
static int binder_do_deferred_txn_copies(struct binder_alloc *alloc,
					 struct binder_buffer *buffer,
					 struct list_head *sgc_head,
					 struct list_head *pf_head)
{
	int ret = 0;
	struct binder_sg_copy *sgc, *tmpsgc;
	/* next pending fixup; NULL once all fixups have been applied */
	struct binder_ptr_fixup *pf =
		list_first_entry_or_null(pf_head, struct binder_ptr_fixup,
					 node);

	list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) {
		size_t bytes_copied = 0;

		while (bytes_copied < sgc->length) {
			size_t copy_size;
			size_t bytes_left = sgc->length - bytes_copied;
			size_t offset = sgc->offset + bytes_copied;

			/*
			 * We copy up to the fixup (pointed to by pf)
			 */
			copy_size = pf ? min(bytes_left, (size_t)pf->offset - offset)
				       : bytes_left;
			/* once ret is set, stop copying but keep consuming lists */
			if (!ret && copy_size)
				ret = binder_alloc_copy_user_to_buffer(
						alloc, buffer,
						offset,
						sgc->sender_uaddr + bytes_copied,
						copy_size);
			bytes_copied += copy_size;
			if (copy_size != bytes_left) {
				/* a fixup must exist, or copy_size == bytes_left */
				BUG_ON(!pf);
				/* we stopped at a fixup offset */
				if (pf->skip_size) {
					/*
					 * we are just skipping. This is for
					 * BINDER_TYPE_FDA where the translated
					 * fds will be fixed up when we get
					 * to target context.
					 */
					bytes_copied += pf->skip_size;
				} else {
					/* apply the fixup indicated by pf */
					if (!ret)
						ret = binder_alloc_copy_to_buffer(
							alloc, buffer,
							pf->offset,
							&pf->fixup_data,
							sizeof(pf->fixup_data));
					bytes_copied += sizeof(pf->fixup_data);
				}
				/* fixup consumed; advance to the next one */
				list_del(&pf->node);
				kfree(pf);
				pf = list_first_entry_or_null(pf_head,
						struct binder_ptr_fixup, node);
			}
		}
		list_del(&sgc->node);
		kfree(sgc);
	}
	/* every fixup must lie inside some sg copy range */
	BUG_ON(!list_empty(pf_head));
	BUG_ON(!list_empty(sgc_head));

	/*
	 * NOTE(review): the copy helpers appear to return a positive value
	 * (bytes not copied) on fault; normalize that to -EINVAL — confirm
	 * against binder_alloc_copy_user_to_buffer().
	 */
	return ret > 0 ? -EINVAL : ret;
}
2357+
2358+
/**
2359+
* binder_cleanup_deferred_txn_lists() - free specified lists
2360+
* @sgc_head: list_head of scatter-gather copy list
2361+
* @pf_head: list_head of pointer fixup list
2362+
*
2363+
* Called to clean up @sgc_head and @pf_head if there is an
2364+
* error.
2365+
*/
2366+
static void binder_cleanup_deferred_txn_lists(struct list_head *sgc_head,
2367+
struct list_head *pf_head)
2368+
{
2369+
struct binder_sg_copy *sgc, *tmpsgc;
2370+
struct binder_ptr_fixup *pf, *tmppf;
2371+
2372+
list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) {
2373+
list_del(&sgc->node);
2374+
kfree(sgc);
2375+
}
2376+
list_for_each_entry_safe(pf, tmppf, pf_head, node) {
2377+
list_del(&pf->node);
2378+
kfree(pf);
2379+
}
2380+
}
2381+
2382+
/**
2383+
* binder_defer_copy() - queue a scatter-gather buffer for copy
2384+
* @sgc_head: list_head of scatter-gather copy list
2385+
* @offset: binder buffer offset in target process
2386+
* @sender_uaddr: user address in source process
2387+
* @length: bytes to copy
2388+
*
2389+
* Specify a scatter-gather block to be copied. The actual copy must
2390+
* be deferred until all the needed fixups are identified and queued.
2391+
* Then the copy and fixups are done together so un-translated values
2392+
* from the source are never visible in the target buffer.
2393+
*
2394+
* We are guaranteed that repeated calls to this function will have
2395+
* monotonically increasing @offset values so the list will naturally
2396+
* be ordered.
2397+
*
2398+
* Return: 0=success, else -errno
2399+
*/
2400+
static int binder_defer_copy(struct list_head *sgc_head, binder_size_t offset,
2401+
const void __user *sender_uaddr, size_t length)
2402+
{
2403+
struct binder_sg_copy *bc = kzalloc(sizeof(*bc), GFP_KERNEL);
2404+
2405+
if (!bc)
2406+
return -ENOMEM;
2407+
2408+
bc->offset = offset;
2409+
bc->sender_uaddr = sender_uaddr;
2410+
bc->length = length;
2411+
INIT_LIST_HEAD(&bc->node);
2412+
2413+
/*
2414+
* We are guaranteed that the deferred copies are in-order
2415+
* so just add to the tail.
2416+
*/
2417+
list_add_tail(&bc->node, sgc_head);
2418+
2419+
return 0;
2420+
}
2421+
2422+
/**
2423+
* binder_add_fixup() - queue a fixup to be applied to sg copy
2424+
* @pf_head: list_head of binder ptr fixup list
2425+
* @offset: binder buffer offset in target process
2426+
* @fixup: bytes to be copied for fixup
2427+
* @skip_size: bytes to skip when copying (fixup will be applied later)
2428+
*
2429+
* Add the specified fixup to a list ordered by @offset. When copying
2430+
* the scatter-gather buffers, the fixup will be copied instead of
2431+
* data from the source buffer. For BINDER_TYPE_FDA fixups, the fixup
2432+
* will be applied later (in target process context), so we just skip
2433+
* the bytes specified by @skip_size. If @skip_size is 0, we copy the
2434+
* value in @fixup.
2435+
*
2436+
* This function is called *mostly* in @offset order, but there are
2437+
* exceptions. Since out-of-order inserts are relatively uncommon,
2438+
* we insert the new element by searching backward from the tail of
2439+
* the list.
2440+
*
2441+
* Return: 0=success, else -errno
2442+
*/
2443+
static int binder_add_fixup(struct list_head *pf_head, binder_size_t offset,
2444+
binder_uintptr_t fixup, size_t skip_size)
2445+
{
2446+
struct binder_ptr_fixup *pf = kzalloc(sizeof(*pf), GFP_KERNEL);
2447+
struct binder_ptr_fixup *tmppf;
2448+
2449+
if (!pf)
2450+
return -ENOMEM;
2451+
2452+
pf->offset = offset;
2453+
pf->fixup_data = fixup;
2454+
pf->skip_size = skip_size;
2455+
INIT_LIST_HEAD(&pf->node);
2456+
2457+
/* Fixups are *mostly* added in-order, but there are some
2458+
* exceptions. Look backwards through list for insertion point.
2459+
*/
2460+
list_for_each_entry_reverse(tmppf, pf_head, node) {
2461+
if (tmppf->offset < pf->offset) {
2462+
list_add(&pf->node, &tmppf->node);
2463+
return 0;
2464+
}
2465+
}
2466+
/*
2467+
* if we get here, then the new offset is the lowest so
2468+
* insert at the head
2469+
*/
2470+
list_add(&pf->node, pf_head);
2471+
return 0;
2472+
}
2473+
2474+
static int binder_translate_fd_array(struct list_head *pf_head,
2475+
struct binder_fd_array_object *fda,
22372476
const void __user *sender_ubuffer,
22382477
struct binder_buffer_object *parent,
22392478
struct binder_buffer_object *sender_uparent,
@@ -2245,6 +2484,7 @@ static int binder_translate_fd_array(struct binder_fd_array_object *fda,
22452484
binder_size_t fda_offset;
22462485
const void __user *sender_ufda_base;
22472486
struct binder_proc *proc = thread->proc;
2487+
int ret;
22482488

22492489
fd_buf_size = sizeof(u32) * fda->num_fds;
22502490
if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
@@ -2276,9 +2516,12 @@ static int binder_translate_fd_array(struct binder_fd_array_object *fda,
22762516
proc->pid, thread->pid);
22772517
return -EINVAL;
22782518
}
2519+
ret = binder_add_fixup(pf_head, fda_offset, 0, fda->num_fds * sizeof(u32));
2520+
if (ret)
2521+
return ret;
2522+
22792523
for (fdi = 0; fdi < fda->num_fds; fdi++) {
22802524
u32 fd;
2281-
int ret;
22822525
binder_size_t offset = fda_offset + fdi * sizeof(fd);
22832526
binder_size_t sender_uoffset = fdi * sizeof(fd);
22842527

@@ -2292,7 +2535,8 @@ static int binder_translate_fd_array(struct binder_fd_array_object *fda,
22922535
return 0;
22932536
}
22942537

2295-
static int binder_fixup_parent(struct binder_transaction *t,
2538+
static int binder_fixup_parent(struct list_head *pf_head,
2539+
struct binder_transaction *t,
22962540
struct binder_thread *thread,
22972541
struct binder_buffer_object *bp,
22982542
binder_size_t off_start_offset,
@@ -2338,14 +2582,7 @@ static int binder_fixup_parent(struct binder_transaction *t,
23382582
}
23392583
buffer_offset = bp->parent_offset +
23402584
(uintptr_t)parent->buffer - (uintptr_t)b->user_data;
2341-
if (binder_alloc_copy_to_buffer(&target_proc->alloc, b, buffer_offset,
2342-
&bp->buffer, sizeof(bp->buffer))) {
2343-
binder_user_error("%d:%d got transaction with invalid parent offset\n",
2344-
proc->pid, thread->pid);
2345-
return -EINVAL;
2346-
}
2347-
2348-
return 0;
2585+
return binder_add_fixup(pf_head, buffer_offset, bp->buffer, 0);
23492586
}
23502587

23512588
/**
@@ -2487,8 +2724,12 @@ static void binder_transaction(struct binder_proc *proc,
24872724
int t_debug_id = atomic_inc_return(&binder_last_id);
24882725
char *secctx = NULL;
24892726
u32 secctx_sz = 0;
2727+
struct list_head sgc_head;
2728+
struct list_head pf_head;
24902729
const void __user *user_buffer = (const void __user *)
24912730
(uintptr_t)tr->data.ptr.buffer;
2731+
INIT_LIST_HEAD(&sgc_head);
2732+
INIT_LIST_HEAD(&pf_head);
24922733

24932734
e = binder_transaction_log_add(&binder_transaction_log);
24942735
e->debug_id = t_debug_id;
@@ -3005,8 +3246,8 @@ static void binder_transaction(struct binder_proc *proc,
30053246
return_error_line = __LINE__;
30063247
goto err_bad_parent;
30073248
}
3008-
ret = binder_translate_fd_array(fda, user_buffer,
3009-
parent,
3249+
ret = binder_translate_fd_array(&pf_head, fda,
3250+
user_buffer, parent,
30103251
&user_object.bbo, t,
30113252
thread, in_reply_to);
30123253
if (!ret)
@@ -3038,19 +3279,14 @@ static void binder_transaction(struct binder_proc *proc,
30383279
return_error_line = __LINE__;
30393280
goto err_bad_offset;
30403281
}
3041-
if (binder_alloc_copy_user_to_buffer(
3042-
&target_proc->alloc,
3043-
t->buffer,
3044-
sg_buf_offset,
3045-
(const void __user *)
3046-
(uintptr_t)bp->buffer,
3047-
bp->length)) {
3048-
binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3049-
proc->pid, thread->pid);
3050-
return_error_param = -EFAULT;
3282+
ret = binder_defer_copy(&sgc_head, sg_buf_offset,
3283+
(const void __user *)(uintptr_t)bp->buffer,
3284+
bp->length);
3285+
if (ret) {
30513286
return_error = BR_FAILED_REPLY;
3287+
return_error_param = ret;
30523288
return_error_line = __LINE__;
3053-
goto err_copy_data_failed;
3289+
goto err_translate_failed;
30543290
}
30553291
/* Fixup buffer pointer to target proc address space */
30563292
bp->buffer = (uintptr_t)
@@ -3059,7 +3295,8 @@ static void binder_transaction(struct binder_proc *proc,
30593295

30603296
num_valid = (buffer_offset - off_start_offset) /
30613297
sizeof(binder_size_t);
3062-
ret = binder_fixup_parent(t, thread, bp,
3298+
ret = binder_fixup_parent(&pf_head, t,
3299+
thread, bp,
30633300
off_start_offset,
30643301
num_valid,
30653302
last_fixup_obj_off,
@@ -3099,6 +3336,17 @@ static void binder_transaction(struct binder_proc *proc,
30993336
return_error_line = __LINE__;
31003337
goto err_copy_data_failed;
31013338
}
3339+
3340+
ret = binder_do_deferred_txn_copies(&target_proc->alloc, t->buffer,
3341+
&sgc_head, &pf_head);
3342+
if (ret) {
3343+
binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3344+
proc->pid, thread->pid);
3345+
return_error = BR_FAILED_REPLY;
3346+
return_error_param = ret;
3347+
return_error_line = __LINE__;
3348+
goto err_copy_data_failed;
3349+
}
31023350
if (t->buffer->oneway_spam_suspect)
31033351
tcomplete->type = BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT;
31043352
else
@@ -3172,6 +3420,7 @@ static void binder_transaction(struct binder_proc *proc,
31723420
err_bad_offset:
31733421
err_bad_parent:
31743422
err_copy_data_failed:
3423+
binder_cleanup_deferred_txn_lists(&sgc_head, &pf_head);
31753424
binder_free_txn_fixups(t);
31763425
trace_binder_transaction_failed_buffer_release(t->buffer);
31773426
binder_transaction_buffer_release(target_proc, NULL, t->buffer,

0 commit comments

Comments
 (0)