Skip to content

Commit f700b18

Browse files
drm/amdgpu: switch the GTT backend to self alloc

Similar to the TTM range manager.

Signed-off-by: Christian König <[email protected]>
Reviewed-by: Matthew Auld <[email protected]>
Link: https://patchwork.freedesktop.org/patch/msgid/[email protected]
1 parent 2fdcb55 commit f700b18

File tree

1 file changed

+22
-14
lines changed

1 file changed

+22
-14
lines changed

drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c

Lines changed: 22 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -22,11 +22,13 @@
2222
* Authors: Christian König
2323
*/
2424

25+
#include <drm/ttm/ttm_range_manager.h>
26+
2527
#include "amdgpu.h"
2628

2729
struct amdgpu_gtt_node {
28-
struct drm_mm_node node;
2930
struct ttm_buffer_object *tbo;
31+
struct ttm_range_mgr_node base;
3032
};
3133

3234
static inline struct amdgpu_gtt_mgr *
@@ -38,7 +40,8 @@ to_gtt_mgr(struct ttm_resource_manager *man)
3840
static inline struct amdgpu_gtt_node *
3941
to_amdgpu_gtt_node(struct ttm_resource *res)
4042
{
41-
return container_of(res->mm_node, struct amdgpu_gtt_node, node);
43+
return container_of(res->mm_node, struct amdgpu_gtt_node,
44+
base.mm_nodes[0]);
4245
}
4346

4447
/**
@@ -107,7 +110,7 @@ bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_resource *mem)
107110
{
108111
struct amdgpu_gtt_node *node = to_amdgpu_gtt_node(mem);
109112

110-
return drm_mm_node_allocated(&node->node);
113+
return drm_mm_node_allocated(&node->base.mm_nodes[0]);
111114
}
112115

113116
/**
@@ -138,16 +141,19 @@ static int amdgpu_gtt_mgr_new(struct ttm_resource_manager *man,
138141
atomic64_sub(mem->num_pages, &mgr->available);
139142
spin_unlock(&mgr->lock);
140143

141-
node = kzalloc(sizeof(*node), GFP_KERNEL);
144+
node = kzalloc(struct_size(node, base.mm_nodes, 1), GFP_KERNEL);
142145
if (!node) {
143146
r = -ENOMEM;
144147
goto err_out;
145148
}
146149

147150
node->tbo = tbo;
151+
ttm_resource_init(tbo, place, &node->base.base);
152+
148153
if (place->lpfn) {
149154
spin_lock(&mgr->lock);
150-
r = drm_mm_insert_node_in_range(&mgr->mm, &node->node,
155+
r = drm_mm_insert_node_in_range(&mgr->mm,
156+
&node->base.mm_nodes[0],
151157
mem->num_pages,
152158
tbo->page_alignment, 0,
153159
place->fpfn, place->lpfn,
@@ -156,14 +162,14 @@ static int amdgpu_gtt_mgr_new(struct ttm_resource_manager *man,
156162
if (unlikely(r))
157163
goto err_free;
158164

159-
mem->start = node->node.start;
165+
mem->start = node->base.mm_nodes[0].start;
160166
} else {
161-
node->node.start = 0;
162-
node->node.size = mem->num_pages;
167+
node->base.mm_nodes[0].start = 0;
168+
node->base.mm_nodes[0].size = mem->num_pages;
163169
mem->start = AMDGPU_BO_INVALID_OFFSET;
164170
}
165171

166-
mem->mm_node = &node->node;
172+
mem->mm_node = &node->base.mm_nodes[0];
167173
return 0;
168174

169175
err_free:
@@ -186,15 +192,17 @@ static int amdgpu_gtt_mgr_new(struct ttm_resource_manager *man,
186192
static void amdgpu_gtt_mgr_del(struct ttm_resource_manager *man,
187193
struct ttm_resource *mem)
188194
{
189-
struct amdgpu_gtt_node *node = to_amdgpu_gtt_node(mem);
190195
struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man);
196+
struct amdgpu_gtt_node *node;
191197

192-
if (!node)
198+
if (!mem->mm_node)
193199
return;
194200

201+
node = to_amdgpu_gtt_node(mem);
202+
195203
spin_lock(&mgr->lock);
196-
if (drm_mm_node_allocated(&node->node))
197-
drm_mm_remove_node(&node->node);
204+
if (drm_mm_node_allocated(&node->base.mm_nodes[0]))
205+
drm_mm_remove_node(&node->base.mm_nodes[0]);
198206
spin_unlock(&mgr->lock);
199207
atomic64_add(mem->num_pages, &mgr->available);
200208

@@ -232,7 +240,7 @@ int amdgpu_gtt_mgr_recover(struct ttm_resource_manager *man)
232240

233241
spin_lock(&mgr->lock);
234242
drm_mm_for_each_node(mm_node, &mgr->mm) {
235-
node = container_of(mm_node, struct amdgpu_gtt_node, node);
243+
node = container_of(mm_node, typeof(*node), base.mm_nodes[0]);
236244
r = amdgpu_ttm_recover_gart(node->tbo);
237245
if (r)
238246
break;

0 commit comments

Comments (0)