  * Authors: Christian König
  */
 
+#include <drm/ttm/ttm_range_manager.h>
+
 #include "amdgpu.h"
 
 struct amdgpu_gtt_node {
-	struct drm_mm_node node;
 	struct ttm_buffer_object *tbo;
+	struct ttm_range_mgr_node base;
 };
 
 static inline struct amdgpu_gtt_mgr *
@@ -38,7 +40,8 @@ to_gtt_mgr(struct ttm_resource_manager *man)
 static inline struct amdgpu_gtt_node *
 to_amdgpu_gtt_node(struct ttm_resource *res)
 {
-	return container_of(res->mm_node, struct amdgpu_gtt_node, node);
+	return container_of(res->mm_node, struct amdgpu_gtt_node,
+			    base.mm_nodes[0]);
 }
 
 /**
@@ -107,7 +110,7 @@ bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_resource *mem)
 {
 	struct amdgpu_gtt_node *node = to_amdgpu_gtt_node(mem);
 
-	return drm_mm_node_allocated(&node->node);
+	return drm_mm_node_allocated(&node->base.mm_nodes[0]);
 }
 
 /**
@@ -138,16 +141,19 @@ static int amdgpu_gtt_mgr_new(struct ttm_resource_manager *man,
 	atomic64_sub(mem->num_pages, &mgr->available);
 	spin_unlock(&mgr->lock);
 
-	node = kzalloc(sizeof(*node), GFP_KERNEL);
+	node = kzalloc(struct_size(node, base.mm_nodes, 1), GFP_KERNEL);
 	if (!node) {
 		r = -ENOMEM;
 		goto err_out;
 	}
 
 	node->tbo = tbo;
+	ttm_resource_init(tbo, place, &node->base.base);
+
 	if (place->lpfn) {
 		spin_lock(&mgr->lock);
-		r = drm_mm_insert_node_in_range(&mgr->mm, &node->node,
+		r = drm_mm_insert_node_in_range(&mgr->mm,
+						&node->base.mm_nodes[0],
 						mem->num_pages,
 						tbo->page_alignment, 0,
 						place->fpfn, place->lpfn,
@@ -156,14 +162,14 @@ static int amdgpu_gtt_mgr_new(struct ttm_resource_manager *man,
 		if (unlikely(r))
 			goto err_free;
 
-		mem->start = node->node.start;
+		mem->start = node->base.mm_nodes[0].start;
 	} else {
-		node->node.start = 0;
-		node->node.size = mem->num_pages;
+		node->base.mm_nodes[0].start = 0;
+		node->base.mm_nodes[0].size = mem->num_pages;
 		mem->start = AMDGPU_BO_INVALID_OFFSET;
 	}
 
-	mem->mm_node = &node->node;
+	mem->mm_node = &node->base.mm_nodes[0];
 	return 0;
 
 err_free:
@@ -186,15 +192,17 @@ static int amdgpu_gtt_mgr_new(struct ttm_resource_manager *man,
 static void amdgpu_gtt_mgr_del(struct ttm_resource_manager *man,
 			       struct ttm_resource *mem)
 {
-	struct amdgpu_gtt_node *node = to_amdgpu_gtt_node(mem);
 	struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man);
+	struct amdgpu_gtt_node *node;
 
-	if (!node)
+	if (!mem->mm_node)
 		return;
 
+	node = to_amdgpu_gtt_node(mem);
+
 	spin_lock(&mgr->lock);
-	if (drm_mm_node_allocated(&node->node))
-		drm_mm_remove_node(&node->node);
+	if (drm_mm_node_allocated(&node->base.mm_nodes[0]))
+		drm_mm_remove_node(&node->base.mm_nodes[0]);
 	spin_unlock(&mgr->lock);
 	atomic64_add(mem->num_pages, &mgr->available);
 
@@ -232,7 +240,7 @@ int amdgpu_gtt_mgr_recover(struct ttm_resource_manager *man)
 
 	spin_lock(&mgr->lock);
 	drm_mm_for_each_node(mm_node, &mgr->mm) {
-		node = container_of(mm_node, struct amdgpu_gtt_node, node);
+		node = container_of(mm_node, typeof(*node), base.mm_nodes[0]);
 		r = amdgpu_ttm_recover_gart(node->tbo);
 		if (r)
 			break;
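
For reference, here is a minimal, compilable sketch of the layout this patch switches to. The stand-in types below only mirror the shape of the real DRM/TTM structures (struct ttm_range_mgr_node is assumed to be a struct ttm_resource followed by a flexible array of struct drm_mm_node, as declared in drm/ttm/ttm_range_manager.h); it is an illustration of the pointer math, not driver code. It shows why kzalloc(struct_size(node, base.mm_nodes, 1), GFP_KERNEL) reserves room for exactly one range node, and why container_of(..., base.mm_nodes[0]) in to_amdgpu_gtt_node() walks back from that embedded node to the wrapping amdgpu_gtt_node.

/* Sketch only: stand-in types so the layout and container_of() round trip
 * can be compiled and checked in user space. Field names mirror the patch;
 * the real definitions live in the DRM/TTM headers. */
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct drm_mm_node { unsigned long start, size; };	/* stand-in */
struct ttm_resource { unsigned long start; };		/* stand-in */
struct ttm_buffer_object { int dummy; };		/* stand-in */

/* Assumed shape of struct ttm_range_mgr_node: a ttm_resource followed by
 * a flexible array of drm_mm_nodes. */
struct ttm_range_mgr_node {
	struct ttm_resource base;
	struct drm_mm_node mm_nodes[];
};

/* Mirrors the patched amdgpu_gtt_node: base must stay the last member
 * because it ends in a flexible array. */
struct amdgpu_gtt_node {
	struct ttm_buffer_object *tbo;
	struct ttm_range_mgr_node base;
};

int main(void)
{
	/* Equivalent of struct_size(node, base.mm_nodes, 1): header plus
	 * room for exactly one drm_mm_node. */
	struct amdgpu_gtt_node *node =
		calloc(1, sizeof(*node) + 1 * sizeof(node->base.mm_nodes[0]));
	if (!node)
		return 1;

	/* What to_amdgpu_gtt_node() does after the patch: walk back from
	 * the first embedded mm_node to the containing amdgpu_gtt_node. */
	struct drm_mm_node *mm_node = &node->base.mm_nodes[0];
	struct amdgpu_gtt_node *back =
		container_of(mm_node, struct amdgpu_gtt_node, base.mm_nodes[0]);

	printf("round trip ok: %d\n", back == node);
	free(node);
	return 0;
}

Embedding a structure that ends in a flexible array member, as amdgpu_gtt_node now does with base, relies on a GCC/Clang extension that the kernel uses throughout, which is why base has to remain the last member of amdgpu_gtt_node.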