Skip to content

Commit 65e51e3

Browse files
Steven Price authored and robherring committed
drm/panfrost: Prevent race when handling page fault
When handling a GPU page fault, addr_to_drm_mm_node() is used to translate the GPU address to a buffer object. However, it is possible for the buffer object to be freed after the function has returned, resulting in a use-after-free of the BO.

Change addr_to_drm_mm_node() to return the panfrost_gem_object with an extra reference on it, preventing the BO from being freed until after the page fault has been handled.

Signed-off-by: Steven Price <[email protected]>
Signed-off-by: Rob Herring <[email protected]>
Link: https://patchwork.freedesktop.org/patch/msgid/[email protected]
1 parent d18a966 commit 65e51e3

File tree

1 file changed

+36
-19
lines changed

1 file changed

+36
-19
lines changed

drivers/gpu/drm/panfrost/panfrost_mmu.c

Lines changed: 36 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -386,58 +386,69 @@ void panfrost_mmu_pgtable_free(struct panfrost_file_priv *priv)
386386
free_io_pgtable_ops(mmu->pgtbl_ops);
387387
}
388388

389-
static struct drm_mm_node *addr_to_drm_mm_node(struct panfrost_device *pfdev, int as, u64 addr)
389+
static struct panfrost_gem_object *
390+
addr_to_drm_mm_node(struct panfrost_device *pfdev, int as, u64 addr)
390391
{
391-
struct drm_mm_node *node = NULL;
392+
struct panfrost_gem_object *bo = NULL;
393+
struct panfrost_file_priv *priv;
394+
struct drm_mm_node *node;
392395
u64 offset = addr >> PAGE_SHIFT;
393396
struct panfrost_mmu *mmu;
394397

395398
spin_lock(&pfdev->as_lock);
396399
list_for_each_entry(mmu, &pfdev->as_lru_list, list) {
397-
struct panfrost_file_priv *priv;
398-
if (as != mmu->as)
399-
continue;
400+
if (as == mmu->as)
401+
break;
402+
}
403+
if (as != mmu->as)
404+
goto out;
405+
406+
priv = container_of(mmu, struct panfrost_file_priv, mmu);
400407

401-
priv = container_of(mmu, struct panfrost_file_priv, mmu);
402-
drm_mm_for_each_node(node, &priv->mm) {
403-
if (offset >= node->start && offset < (node->start + node->size))
404-
goto out;
408+
spin_lock(&priv->mm_lock);
409+
410+
drm_mm_for_each_node(node, &priv->mm) {
411+
if (offset >= node->start &&
412+
offset < (node->start + node->size)) {
413+
bo = drm_mm_node_to_panfrost_bo(node);
414+
drm_gem_object_get(&bo->base.base);
415+
break;
405416
}
406417
}
407418

419+
spin_unlock(&priv->mm_lock);
408420
out:
409421
spin_unlock(&pfdev->as_lock);
410-
return node;
422+
return bo;
411423
}
412424

413425
#define NUM_FAULT_PAGES (SZ_2M / PAGE_SIZE)
414426

415427
int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as, u64 addr)
416428
{
417429
int ret, i;
418-
struct drm_mm_node *node;
419430
struct panfrost_gem_object *bo;
420431
struct address_space *mapping;
421432
pgoff_t page_offset;
422433
struct sg_table *sgt;
423434
struct page **pages;
424435

425-
node = addr_to_drm_mm_node(pfdev, as, addr);
426-
if (!node)
436+
bo = addr_to_drm_mm_node(pfdev, as, addr);
437+
if (!bo)
427438
return -ENOENT;
428439

429-
bo = drm_mm_node_to_panfrost_bo(node);
430440
if (!bo->is_heap) {
431441
dev_WARN(pfdev->dev, "matching BO is not heap type (GPU VA = %llx)",
432-
node->start << PAGE_SHIFT);
433-
return -EINVAL;
442+
bo->node.start << PAGE_SHIFT);
443+
ret = -EINVAL;
444+
goto err_bo;
434445
}
435446
WARN_ON(bo->mmu->as != as);
436447

437448
/* Assume 2MB alignment and size multiple */
438449
addr &= ~((u64)SZ_2M - 1);
439450
page_offset = addr >> PAGE_SHIFT;
440-
page_offset -= node->start;
451+
page_offset -= bo->node.start;
441452

442453
mutex_lock(&bo->base.pages_lock);
443454

@@ -446,7 +457,8 @@ int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as, u64 addr)
446457
sizeof(struct sg_table), GFP_KERNEL | __GFP_ZERO);
447458
if (!bo->sgts) {
448459
mutex_unlock(&bo->base.pages_lock);
449-
return -ENOMEM;
460+
ret = -ENOMEM;
461+
goto err_bo;
450462
}
451463

452464
pages = kvmalloc_array(bo->base.base.size >> PAGE_SHIFT,
@@ -455,7 +467,8 @@ int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as, u64 addr)
455467
kfree(bo->sgts);
456468
bo->sgts = NULL;
457469
mutex_unlock(&bo->base.pages_lock);
458-
return -ENOMEM;
470+
ret = -ENOMEM;
471+
goto err_bo;
459472
}
460473
bo->base.pages = pages;
461474
bo->base.pages_use_count = 1;
@@ -493,12 +506,16 @@ int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as, u64 addr)
493506

494507
dev_dbg(pfdev->dev, "mapped page fault @ AS%d %llx", as, addr);
495508

509+
drm_gem_object_put_unlocked(&bo->base.base);
510+
496511
return 0;
497512

498513
err_map:
499514
sg_free_table(sgt);
500515
err_pages:
501516
drm_gem_shmem_put_pages(&bo->base);
517+
err_bo:
518+
drm_gem_object_put_unlocked(&bo->base.base);
502519
return ret;
503520
}
504521

0 commit comments

Comments (0)