Skip to content

Commit 267501e

Browse files
drm/amdgpu: switch the VRAM backend to self alloc
Similar to the TTM range manager.

Signed-off-by: Christian König <[email protected]>
Reviewed-by: Matthew Auld <[email protected]>
Link: https://patchwork.freedesktop.org/patch/msgid/[email protected]
1 parent f700b18 commit 267501e

File tree

1 file changed

+30
-21
lines changed

1 file changed

+30
-21
lines changed

drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c

Lines changed: 30 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -23,6 +23,8 @@
2323
*/
2424

2525
#include <linux/dma-mapping.h>
26+
#include <drm/ttm/ttm_range_manager.h>
27+
2628
#include "amdgpu.h"
2729
#include "amdgpu_vm.h"
2830
#include "amdgpu_res_cursor.h"
@@ -371,9 +373,9 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
371373
struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
372374
struct amdgpu_device *adev = to_amdgpu_device(mgr);
373375
uint64_t vis_usage = 0, mem_bytes, max_bytes;
376+
struct ttm_range_mgr_node *node;
374377
struct drm_mm *mm = &mgr->mm;
375378
enum drm_mm_insert_mode mode;
376-
struct drm_mm_node *nodes;
377379
unsigned i;
378380
int r;
379381

@@ -388,8 +390,8 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
388390
/* bail out quickly if there's likely not enough VRAM for this BO */
389391
mem_bytes = (u64)mem->num_pages << PAGE_SHIFT;
390392
if (atomic64_add_return(mem_bytes, &mgr->usage) > max_bytes) {
391-
atomic64_sub(mem_bytes, &mgr->usage);
392-
return -ENOSPC;
393+
r = -ENOSPC;
394+
goto error_sub;
393395
}
394396

395397
if (place->flags & TTM_PL_FLAG_CONTIGUOUS) {
@@ -407,13 +409,15 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
407409
num_nodes = DIV_ROUND_UP(mem->num_pages, pages_per_node);
408410
}
409411

410-
nodes = kvmalloc_array((uint32_t)num_nodes, sizeof(*nodes),
411-
GFP_KERNEL | __GFP_ZERO);
412-
if (!nodes) {
413-
atomic64_sub(mem_bytes, &mgr->usage);
414-
return -ENOMEM;
412+
node = kvmalloc(struct_size(node, mm_nodes, num_nodes),
413+
GFP_KERNEL | __GFP_ZERO);
414+
if (!node) {
415+
r = -ENOMEM;
416+
goto error_sub;
415417
}
416418

419+
ttm_resource_init(tbo, place, &node->base);
420+
417421
mode = DRM_MM_INSERT_BEST;
418422
if (place->flags & TTM_PL_FLAG_TOPDOWN)
419423
mode = DRM_MM_INSERT_HIGH;
@@ -432,8 +436,9 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
432436
if (pages >= pages_per_node)
433437
alignment = pages_per_node;
434438

435-
r = drm_mm_insert_node_in_range(mm, &nodes[i], pages, alignment,
436-
0, place->fpfn, lpfn, mode);
439+
r = drm_mm_insert_node_in_range(mm, &node->mm_nodes[i], pages,
440+
alignment, 0, place->fpfn,
441+
lpfn, mode);
437442
if (unlikely(r)) {
438443
if (pages > pages_per_node) {
439444
if (is_power_of_2(pages))
@@ -442,11 +447,11 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
442447
pages = rounddown_pow_of_two(pages);
443448
continue;
444449
}
445-
goto error;
450+
goto error_free;
446451
}
447452

448-
vis_usage += amdgpu_vram_mgr_vis_size(adev, &nodes[i]);
449-
amdgpu_vram_mgr_virt_start(mem, &nodes[i]);
453+
vis_usage += amdgpu_vram_mgr_vis_size(adev, &node->mm_nodes[i]);
454+
amdgpu_vram_mgr_virt_start(mem, &node->mm_nodes[i]);
450455
pages_left -= pages;
451456
++i;
452457

@@ -459,16 +464,17 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
459464
mem->placement |= TTM_PL_FLAG_CONTIGUOUS;
460465

461466
atomic64_add(vis_usage, &mgr->vis_usage);
462-
mem->mm_node = nodes;
467+
mem->mm_node = &node->mm_nodes[0];
463468
return 0;
464469

465-
error:
470+
error_free:
466471
while (i--)
467-
drm_mm_remove_node(&nodes[i]);
472+
drm_mm_remove_node(&node->mm_nodes[i]);
468473
spin_unlock(&mgr->lock);
469-
atomic64_sub(mem->num_pages << PAGE_SHIFT, &mgr->usage);
474+
kvfree(node);
470475

471-
kvfree(nodes);
476+
error_sub:
477+
atomic64_sub(mem_bytes, &mgr->usage);
472478
return r;
473479
}
474480

@@ -485,13 +491,17 @@ static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man,
485491
{
486492
struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
487493
struct amdgpu_device *adev = to_amdgpu_device(mgr);
488-
struct drm_mm_node *nodes = mem->mm_node;
494+
struct ttm_range_mgr_node *node;
489495
uint64_t usage = 0, vis_usage = 0;
490496
unsigned pages = mem->num_pages;
497+
struct drm_mm_node *nodes;
491498

492499
if (!mem->mm_node)
493500
return;
494501

502+
node = to_ttm_range_mgr_node(mem);
503+
nodes = &node->mm_nodes[0];
504+
495505
spin_lock(&mgr->lock);
496506
while (pages) {
497507
pages -= nodes->size;
@@ -506,8 +516,7 @@ static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man,
506516
atomic64_sub(usage, &mgr->usage);
507517
atomic64_sub(vis_usage, &mgr->vis_usage);
508518

509-
kvfree(mem->mm_node);
510-
mem->mm_node = NULL;
519+
kvfree(node);
511520
}
512521

513522
/**

0 commit comments

Comments (0)