@@ -929,7 +929,7 @@ static int pagefault_data_segments(struct mlx5_ib_dev *dev,
                                    struct mlx5_pagefault *pfault,
                                    void *wqe,
                                    void *wqe_end, u32 *bytes_mapped,
-                                   u32 *total_wqe_bytes, int receive_queue)
+                                   u32 *total_wqe_bytes, bool receive_queue)
 {
         int ret = 0, npages = 0;
         u64 io_virt;
@@ -1209,105 +1209,90 @@ static inline struct mlx5_ib_srq *res_to_srq(struct mlx5_core_rsc_common *res)
 static void mlx5_ib_mr_wqe_pfault_handler(struct mlx5_ib_dev *dev,
                                           struct mlx5_pagefault *pfault)
 {
-        int ret;
-        void *wqe, *wqe_end;
+        bool sq = pfault->type & MLX5_PFAULT_REQUESTOR;
+        u16 wqe_index = pfault->wqe.wqe_index;
+        void *wqe = NULL, *wqe_end = NULL;
         u32 bytes_mapped, total_wqe_bytes;
-        char *buffer = NULL;
+        struct mlx5_core_rsc_common *res;
         int resume_with_error = 1;
-        u16 wqe_index = pfault->wqe.wqe_index;
-        int requestor = pfault->type & MLX5_PFAULT_REQUESTOR;
-        struct mlx5_core_rsc_common *res = NULL;
-        struct mlx5_ib_qp *qp = NULL;
-        struct mlx5_ib_srq *srq = NULL;
+        struct mlx5_ib_qp *qp;
         size_t bytes_copied;
+        int ret = 0;
 
         res = odp_get_rsc(dev, pfault->wqe.wq_num, pfault->type);
         if (!res) {
                 mlx5_ib_dbg(dev, "wqe page fault for missing resource %d\n", pfault->wqe.wq_num);
                 return;
         }
 
-        switch (res->res) {
-        case MLX5_RES_QP:
-                qp = res_to_qp(res);
-                break;
-        case MLX5_RES_SRQ:
-        case MLX5_RES_XSRQ:
-                srq = res_to_srq(res);
-                break;
-        default:
-                mlx5_ib_err(dev, "wqe page fault for unsupported type %d\n", pfault->type);
+        if (res->res != MLX5_RES_QP && res->res != MLX5_RES_SRQ &&
+            res->res != MLX5_RES_XSRQ) {
+                mlx5_ib_err(dev, "wqe page fault for unsupported type %d\n",
+                            pfault->type);
                 goto resolve_page_fault;
         }
 
-        buffer = (char *)__get_free_page(GFP_KERNEL);
-        if (!buffer) {
+        wqe = (void *)__get_free_page(GFP_KERNEL);
+        if (!wqe) {
                 mlx5_ib_err(dev, "Error allocating memory for IO page fault handling.\n");
                 goto resolve_page_fault;
         }
 
-        if (qp) {
-                if (requestor) {
-                        ret = mlx5_ib_read_user_wqe_sq(qp, wqe_index,
-                                                       buffer, PAGE_SIZE,
-                                                       &bytes_copied);
-                } else {
-                        ret = mlx5_ib_read_user_wqe_rq(qp, wqe_index,
-                                                       buffer, PAGE_SIZE,
-                                                       &bytes_copied);
-                }
-        } else {
-                ret = mlx5_ib_read_user_wqe_srq(srq, wqe_index,
-                                                buffer, PAGE_SIZE,
+        qp = (res->res == MLX5_RES_QP) ? res_to_qp(res) : NULL;
+        if (qp && sq) {
+                ret = mlx5_ib_read_user_wqe_sq(qp, wqe_index, wqe, PAGE_SIZE,
+                                               &bytes_copied);
+                if (ret)
+                        goto read_user;
+                ret = mlx5_ib_mr_initiator_pfault_handler(
+                        dev, pfault, qp, &wqe, &wqe_end, bytes_copied);
+        } else if (qp && !sq) {
+                ret = mlx5_ib_read_user_wqe_rq(qp, wqe_index, wqe, PAGE_SIZE,
+                                               &bytes_copied);
+                if (ret)
+                        goto read_user;
+                ret = mlx5_ib_mr_responder_pfault_handler_rq(
+                        dev, qp, wqe, &wqe_end, bytes_copied);
+        } else if (!qp) {
+                struct mlx5_ib_srq *srq = res_to_srq(res);
+
+                ret = mlx5_ib_read_user_wqe_srq(srq, wqe_index, wqe, PAGE_SIZE,
                                                 &bytes_copied);
+                if (ret)
+                        goto read_user;
+                ret = mlx5_ib_mr_responder_pfault_handler_srq(
+                        dev, srq, &wqe, &wqe_end, bytes_copied);
         }
 
-        if (ret) {
-                mlx5_ib_err(dev, "Failed reading a WQE following page fault, error=%d, wqe_index=%x, qpn=%x\n",
-                            ret, wqe_index, pfault->token);
+        if (ret < 0 || wqe >= wqe_end)
                 goto resolve_page_fault;
-        }
 
-        wqe = buffer;
-        if (requestor)
-                ret = mlx5_ib_mr_initiator_pfault_handler(dev, pfault, qp,
-                                                          &wqe, &wqe_end,
-                                                          bytes_copied);
-        else if (qp)
-                ret = mlx5_ib_mr_responder_pfault_handler_rq(dev, qp,
-                                                             wqe, &wqe_end,
-                                                             bytes_copied);
-        else
-                ret = mlx5_ib_mr_responder_pfault_handler_srq(dev, srq,
-                                                              &wqe, &wqe_end,
-                                                              bytes_copied);
+        ret = pagefault_data_segments(dev, pfault, wqe, wqe_end, &bytes_mapped,
+                                      &total_wqe_bytes, !sq);
+        if (ret == -EAGAIN)
+                goto out;
 
-        if (ret < 0)
+        if (ret < 0 || total_wqe_bytes > bytes_mapped)
                 goto resolve_page_fault;
 
-        if (wqe >= wqe_end) {
-                mlx5_ib_err(dev, "ODP fault on invalid WQE.\n");
-                goto resolve_page_fault;
-        }
+out:
+        ret = 0;
+        resume_with_error = 0;
 
-        ret = pagefault_data_segments(dev, pfault, wqe, wqe_end,
-                                      &bytes_mapped, &total_wqe_bytes,
-                                      !requestor);
-        if (ret == -EAGAIN) {
-                resume_with_error = 0;
-                goto resolve_page_fault;
-        } else if (ret < 0 || total_wqe_bytes > bytes_mapped) {
-                goto resolve_page_fault;
-        }
+read_user:
+        if (ret)
+                mlx5_ib_err(
+                        dev,
+                        "Failed reading a WQE following page fault, error %d, wqe_index %x, qpn %x\n",
+                        ret, wqe_index, pfault->token);
 
-        resume_with_error = 0;
 resolve_page_fault:
         mlx5_ib_page_fault_resume(dev, pfault, resume_with_error);
         mlx5_ib_dbg(dev, "PAGE FAULT completed. QP 0x%x resume_with_error=%d, type: 0x%x\n",
                     pfault->wqe.wq_num, resume_with_error,
                     pfault->type);
         mlx5_core_res_put(res);
-        free_page((unsigned long)buffer);
+        free_page((unsigned long)wqe);
 }
 
 static int pages_in_range(u64 address, u32 length)