Commit d3bcb4b

drm/vmwgfx: switch the TTM backends to self alloc
Similar to the TTM range manager.

Signed-off-by: Christian König <[email protected]>
Reviewed-by: Matthew Auld <[email protected]>
Link: https://patchwork.freedesktop.org/patch/msgid/[email protected]
1 parent beb4c86 commit d3bcb4b
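
In the self-alloc scheme the backend's get_node() allocates the resource node itself and initializes it with ttm_resource_init(), and put_node() frees it unconditionally, which is why the NULL checks on mem->mm_node disappear below. A minimal sketch of the shape both backends take after this patch; the vmw_dummy_* names are illustrative, not part of the patch:

static int vmw_dummy_man_get_node(struct ttm_resource_manager *man,
				  struct ttm_buffer_object *bo,
				  const struct ttm_place *place,
				  struct ttm_resource *mem)
{
	/* The backend allocates its own node instead of relying on TTM. */
	mem->mm_node = kmalloc(sizeof(*mem), GFP_KERNEL);
	if (!mem->mm_node)
		return -ENOMEM;

	/* Initialize the resource before any backend-specific setup. */
	ttm_resource_init(bo, place, mem->mm_node);

	/* Backend-specific allocation (IDA, drm_mm, ...) goes here. */
	return 0;
}

static void vmw_dummy_man_put_node(struct ttm_resource_manager *man,
				   struct ttm_resource *mem)
{
	/* get_node() always allocated mm_node, so no NULL check is needed. */
	kfree(mem->mm_node);
}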

File tree

2 files changed: +31 -24 lines changed

drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c

Lines changed: 11 additions & 7 deletions

@@ -57,6 +57,12 @@ static int vmw_gmrid_man_get_node(struct ttm_resource_manager *man,
 	struct vmwgfx_gmrid_man *gman = to_gmrid_manager(man);
 	int id;
 
+	mem->mm_node = kmalloc(sizeof(*mem), GFP_KERNEL);
+	if (!mem->mm_node)
+		return -ENOMEM;
+
+	ttm_resource_init(bo, place, mem->mm_node);
+
 	id = ida_alloc_max(&gman->gmr_ida, gman->max_gmr_ids - 1, GFP_KERNEL);
 	if (id < 0)
 		return id;
@@ -87,13 +93,11 @@ static void vmw_gmrid_man_put_node(struct ttm_resource_manager *man,
 {
 	struct vmwgfx_gmrid_man *gman = to_gmrid_manager(man);
 
-	if (mem->mm_node) {
-		ida_free(&gman->gmr_ida, mem->start);
-		spin_lock(&gman->lock);
-		gman->used_gmr_pages -= mem->num_pages;
-		spin_unlock(&gman->lock);
-		mem->mm_node = NULL;
-	}
+	ida_free(&gman->gmr_ida, mem->start);
+	spin_lock(&gman->lock);
+	gman->used_gmr_pages -= mem->num_pages;
+	spin_unlock(&gman->lock);
+	kfree(mem->mm_node);
 }
 
 static const struct ttm_resource_manager_func vmw_gmrid_manager_func;

drivers/gpu/drm/vmwgfx/vmwgfx_thp.c

Lines changed: 20 additions & 17 deletions

@@ -7,6 +7,7 @@
 #include "vmwgfx_drv.h"
 #include <drm/ttm/ttm_bo_driver.h>
 #include <drm/ttm/ttm_placement.h>
+#include <drm/ttm/ttm_range_manager.h>
 
 /**
  * struct vmw_thp_manager - Range manager implementing huge page alignment
@@ -54,16 +55,18 @@ static int vmw_thp_get_node(struct ttm_resource_manager *man,
 {
 	struct vmw_thp_manager *rman = to_thp_manager(man);
 	struct drm_mm *mm = &rman->mm;
-	struct drm_mm_node *node;
+	struct ttm_range_mgr_node *node;
 	unsigned long align_pages;
 	unsigned long lpfn;
 	enum drm_mm_insert_mode mode = DRM_MM_INSERT_BEST;
 	int ret;
 
-	node = kzalloc(sizeof(*node), GFP_KERNEL);
+	node = kzalloc(struct_size(node, mm_nodes, 1), GFP_KERNEL);
 	if (!node)
 		return -ENOMEM;
 
+	ttm_resource_init(bo, place, &node->base);
+
 	lpfn = place->lpfn;
 	if (!lpfn)
 		lpfn = man->size;
@@ -76,32 +79,34 @@ static int vmw_thp_get_node(struct ttm_resource_manager *man,
 	if (IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)) {
 		align_pages = (HPAGE_PUD_SIZE >> PAGE_SHIFT);
 		if (mem->num_pages >= align_pages) {
-			ret = vmw_thp_insert_aligned(bo, mm, node, align_pages,
-						     place, mem, lpfn, mode);
+			ret = vmw_thp_insert_aligned(bo, mm, &node->mm_nodes[0],
+						     align_pages, place, mem,
+						     lpfn, mode);
 			if (!ret)
 				goto found_unlock;
 		}
 	}
 
 	align_pages = (HPAGE_PMD_SIZE >> PAGE_SHIFT);
 	if (mem->num_pages >= align_pages) {
-		ret = vmw_thp_insert_aligned(bo, mm, node, align_pages, place,
-					     mem, lpfn, mode);
+		ret = vmw_thp_insert_aligned(bo, mm, &node->mm_nodes[0],
+					     align_pages, place, mem, lpfn,
+					     mode);
 		if (!ret)
 			goto found_unlock;
 	}
 
-	ret = drm_mm_insert_node_in_range(mm, node, mem->num_pages,
-					  bo->page_alignment, 0,
+	ret = drm_mm_insert_node_in_range(mm, &node->mm_nodes[0],
+					  mem->num_pages, bo->page_alignment, 0,
					  place->fpfn, lpfn, mode);
 found_unlock:
 	spin_unlock(&rman->lock);
 
 	if (unlikely(ret)) {
 		kfree(node);
 	} else {
-		mem->mm_node = node;
-		mem->start = node->start;
+		mem->mm_node = &node->mm_nodes[0];
+		mem->start = node->mm_nodes[0].start;
 	}
 
 	return ret;
@@ -113,15 +118,13 @@ static void vmw_thp_put_node(struct ttm_resource_manager *man,
 			     struct ttm_resource *mem)
 {
 	struct vmw_thp_manager *rman = to_thp_manager(man);
+	struct ttm_range_mgr_node *node = mem->mm_node;
 
-	if (mem->mm_node) {
-		spin_lock(&rman->lock);
-		drm_mm_remove_node(mem->mm_node);
-		spin_unlock(&rman->lock);
+	spin_lock(&rman->lock);
+	drm_mm_remove_node(&node->mm_nodes[0]);
+	spin_unlock(&rman->lock);
 
-		kfree(mem->mm_node);
-		mem->mm_node = NULL;
-	}
+	kfree(node);
 }
 
 int vmw_thp_init(struct vmw_private *dev_priv)
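
The struct_size(node, mm_nodes, 1) allocation and the &node->mm_nodes[0] accesses above rely on the node layout used by the TTM range manager. Roughly, as <drm/ttm/ttm_range_manager.h> looked around this series (quoted from memory, so treat it as a sketch):

struct ttm_range_mgr_node {
	/* Embedded resource; ttm_resource_init(bo, place, &node->base)
	 * initializes this member directly. */
	struct ttm_resource base;

	/* Flexible array holding the drm_mm allocation(s); struct_size()
	 * sizes the kzalloc() for one entry here. */
	struct drm_mm_node mm_nodes[];
};

With the resource embedded as base, the backend owns the whole node lifetime: it allocates the node in get_node() and frees it in put_node(), which is what "self alloc" refers to in the subject line.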
