@@ -22,6 +22,7 @@
  */
 
 #include <linux/firmware.h>
+#include <drm/drm_exec.h>
 
 #include "amdgpu_mes.h"
 #include "amdgpu.h"
@@ -1168,34 +1169,31 @@ int amdgpu_mes_ctx_map_meta_data(struct amdgpu_device *adev,
 				 struct amdgpu_mes_ctx_data *ctx_data)
 {
 	struct amdgpu_bo_va *bo_va;
-	struct ww_acquire_ctx ticket;
-	struct list_head list;
-	struct amdgpu_bo_list_entry pd;
-	struct ttm_validate_buffer csa_tv;
 	struct amdgpu_sync sync;
+	struct drm_exec exec;
 	int r;
 
 	amdgpu_sync_create(&sync);
-	INIT_LIST_HEAD(&list);
-	INIT_LIST_HEAD(&csa_tv.head);
 
-	csa_tv.bo = &ctx_data->meta_data_obj->tbo;
-	csa_tv.num_shared = 1;
-
-	list_add(&csa_tv.head, &list);
-	amdgpu_vm_get_pd_bo(vm, &list, &pd);
-
-	r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
-	if (r) {
-		DRM_ERROR("failed to reserve meta data BO: err=%d\n", r);
-		return r;
+	drm_exec_init(&exec, 0);
+	drm_exec_until_all_locked(&exec) {
+		r = drm_exec_lock_obj(&exec,
+				      &ctx_data->meta_data_obj->tbo.base);
+		drm_exec_retry_on_contention(&exec);
+		if (unlikely(r))
+			goto error_fini_exec;
+
+		r = amdgpu_vm_lock_pd(vm, &exec, 0);
+		drm_exec_retry_on_contention(&exec);
+		if (unlikely(r))
+			goto error_fini_exec;
 	}
 
 	bo_va = amdgpu_vm_bo_add(adev, vm, ctx_data->meta_data_obj);
 	if (!bo_va) {
-		ttm_eu_backoff_reservation(&ticket, &list);
 		DRM_ERROR("failed to create bo_va for meta data BO\n");
-		return -ENOMEM;
+		r = -ENOMEM;
+		goto error_fini_exec;
 	}
 
 	r = amdgpu_vm_bo_map(adev, bo_va, ctx_data->meta_data_gpu_addr, 0,
@@ -1205,33 +1203,35 @@ int amdgpu_mes_ctx_map_meta_data(struct amdgpu_device *adev,
 
 	if (r) {
 		DRM_ERROR("failed to do bo_map on meta data, err=%d\n", r);
-		goto error;
+		goto error_del_bo_va;
 	}
 
 	r = amdgpu_vm_bo_update(adev, bo_va, false);
 	if (r) {
 		DRM_ERROR("failed to do vm_bo_update on meta data\n");
-		goto error;
+		goto error_del_bo_va;
 	}
 	amdgpu_sync_fence(&sync, bo_va->last_pt_update);
 
 	r = amdgpu_vm_update_pdes(adev, vm, false);
 	if (r) {
 		DRM_ERROR("failed to update pdes on meta data\n");
-		goto error;
+		goto error_del_bo_va;
 	}
 	amdgpu_sync_fence(&sync, vm->last_update);
 
 	amdgpu_sync_wait(&sync, false);
-	ttm_eu_backoff_reservation(&ticket, &list);
+	drm_exec_fini(&exec);
 
 	amdgpu_sync_free(&sync);
 	ctx_data->meta_data_va = bo_va;
 	return 0;
 
-error:
+error_del_bo_va:
 	amdgpu_vm_bo_del(adev, bo_va);
-	ttm_eu_backoff_reservation(&ticket, &list);
+
+error_fini_exec:
+	drm_exec_fini(&exec);
 	amdgpu_sync_free(&sync);
 	return r;
 }
@@ -1242,34 +1242,30 @@ int amdgpu_mes_ctx_unmap_meta_data(struct amdgpu_device *adev,
 	struct amdgpu_bo_va *bo_va = ctx_data->meta_data_va;
 	struct amdgpu_bo *bo = ctx_data->meta_data_obj;
 	struct amdgpu_vm *vm = bo_va->base.vm;
-	struct amdgpu_bo_list_entry vm_pd;
-	struct list_head list, duplicates;
-	struct dma_fence *fence = NULL;
-	struct ttm_validate_buffer tv;
-	struct ww_acquire_ctx ticket;
-	long r = 0;
-
-	INIT_LIST_HEAD(&list);
-	INIT_LIST_HEAD(&duplicates);
-
-	tv.bo = &bo->tbo;
-	tv.num_shared = 2;
-	list_add(&tv.head, &list);
-
-	amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);
-
-	r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates);
-	if (r) {
-		dev_err(adev->dev, "leaking bo va because "
-			"we fail to reserve bo (%ld)\n", r);
-		return r;
+	struct dma_fence *fence;
+	struct drm_exec exec;
+	long r;
+
+	drm_exec_init(&exec, 0);
+	drm_exec_until_all_locked(&exec) {
+		r = drm_exec_lock_obj(&exec,
+				      &ctx_data->meta_data_obj->tbo.base);
+		drm_exec_retry_on_contention(&exec);
+		if (unlikely(r))
+			goto out_unlock;
+
+		r = amdgpu_vm_lock_pd(vm, &exec, 0);
+		drm_exec_retry_on_contention(&exec);
+		if (unlikely(r))
+			goto out_unlock;
 	}
 
 	amdgpu_vm_bo_del(adev, bo_va);
 	if (!amdgpu_vm_ready(vm))
 		goto out_unlock;
 
-	r = dma_resv_get_singleton(bo->tbo.base.resv, DMA_RESV_USAGE_BOOKKEEP, &fence);
+	r = dma_resv_get_singleton(bo->tbo.base.resv, DMA_RESV_USAGE_BOOKKEEP,
+				   &fence);
 	if (r)
 		goto out_unlock;
 	if (fence) {
@@ -1288,7 +1284,7 @@ int amdgpu_mes_ctx_unmap_meta_data(struct amdgpu_device *adev,
 out_unlock:
 	if (unlikely(r < 0))
 		dev_err(adev->dev, "failed to clear page tables (%ld)\n", r);
-	ttm_eu_backoff_reservation(&ticket, &list);
+	drm_exec_fini(&exec);
 
 	return r;
 }
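
For context, both hunks above replace the old ttm_eu_reserve_buffers()/ww_acquire_ctx reservation list with the drm_exec helper, which retries the whole locking sequence on ww-mutex contention. Below is a minimal sketch of that pattern outside the amdgpu code; the lock_single_obj() helper and its single GEM object are hypothetical placeholders for illustration, and the two-argument drm_exec_init() matches the API version used in this patch.

/* Sketch only: hypothetical helper showing the drm_exec pattern used above. */
#include <drm/drm_exec.h>
#include <drm/drm_gem.h>

static int lock_single_obj(struct drm_gem_object *obj)
{
	struct drm_exec exec;
	int r;

	drm_exec_init(&exec, 0);
	drm_exec_until_all_locked(&exec) {
		/* Take the object's reservation (dma_resv) lock. */
		r = drm_exec_lock_obj(&exec, obj);
		/*
		 * On ww-mutex contention this drops every lock taken so far
		 * and restarts the drm_exec_until_all_locked() loop.
		 */
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto out;
	}

	/* All requested locks are held here; do the real work. */

out:
	drm_exec_fini(&exec);	/* releases any locks still held */
	return r;
}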