@@ -317,9 +317,12 @@ int ttm_tt_populate(struct ttm_device *bdev,
 	if (ttm_tt_is_populated(ttm))
 		return 0;
 
-	atomic_long_add(ttm->num_pages, &ttm_pages_allocated);
-	if (bdev->pool.use_dma32)
-		atomic_long_add(ttm->num_pages, &ttm_dma32_pages_allocated);
+	if (!(ttm->page_flags & TTM_PAGE_FLAG_SG)) {
+		atomic_long_add(ttm->num_pages, &ttm_pages_allocated);
+		if (bdev->pool.use_dma32)
+			atomic_long_add(ttm->num_pages,
+					&ttm_dma32_pages_allocated);
+	}
 
 	while (atomic_long_read(&ttm_pages_allocated) > ttm_pages_limit ||
 	       atomic_long_read(&ttm_dma32_pages_allocated) >
@@ -350,9 +353,12 @@ int ttm_tt_populate(struct ttm_device *bdev,
 	return 0;
 
 error:
-	atomic_long_sub(ttm->num_pages, &ttm_pages_allocated);
-	if (bdev->pool.use_dma32)
-		atomic_long_sub(ttm->num_pages, &ttm_dma32_pages_allocated);
+	if (!(ttm->page_flags & TTM_PAGE_FLAG_SG)) {
+		atomic_long_sub(ttm->num_pages, &ttm_pages_allocated);
+		if (bdev->pool.use_dma32)
+			atomic_long_sub(ttm->num_pages,
+					&ttm_dma32_pages_allocated);
+	}
 	return ret;
 }
 EXPORT_SYMBOL(ttm_tt_populate);
@@ -382,9 +388,12 @@ void ttm_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *ttm)
 	else
 		ttm_pool_free(&bdev->pool, ttm);
 
-	atomic_long_sub(ttm->num_pages, &ttm_pages_allocated);
-	if (bdev->pool.use_dma32)
-		atomic_long_sub(ttm->num_pages, &ttm_dma32_pages_allocated);
+	if (!(ttm->page_flags & TTM_PAGE_FLAG_SG)) {
+		atomic_long_sub(ttm->num_pages, &ttm_pages_allocated);
+		if (bdev->pool.use_dma32)
+			atomic_long_sub(ttm->num_pages,
+					&ttm_dma32_pages_allocated);
+	}
 
 	ttm->page_flags &= ~TTM_PAGE_FLAG_PRIV_POPULATED;
 }
0 commit comments