 #include "core_priv.h"
 
+#define PFX "fmr_pool: "
+
 enum {
 	IB_FMR_MAX_REMAPS = 32,
 
@@ -150,7 +152,7 @@ static void ib_fmr_batch_release(struct ib_fmr_pool *pool)
 
 #ifdef DEBUG
 	if (fmr->ref_count != 0) {
-		printk(KERN_WARNING "Unmapping FMR 0x%08x with ref count %d",
+		printk(KERN_WARNING PFX "Unmapping FMR 0x%08x with ref count %d",
 		       fmr, fmr->ref_count);
 	}
 #endif
@@ -168,7 +170,7 @@ static void ib_fmr_batch_release(struct ib_fmr_pool *pool)
 
 	ret = ib_unmap_fmr(&fmr_list);
 	if (ret)
-		printk(KERN_WARNING "ib_unmap_fmr returned %d", ret);
+		printk(KERN_WARNING PFX "ib_unmap_fmr returned %d", ret);
 
 	spin_lock_irq(&pool->pool_lock);
 	list_splice(&unmap_list, &pool->free_list);
@@ -226,20 +228,20 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
 	device = pd->device;
 	if (!device->alloc_fmr || !device->dealloc_fmr ||
 	    !device->map_phys_fmr || !device->unmap_fmr) {
-		printk(KERN_WARNING "Device %s does not support fast memory regions",
+		printk(KERN_INFO PFX "Device %s does not support FMRs\n",
 		       device->name);
 		return ERR_PTR(-ENOSYS);
 	}
 
 	attr = kmalloc(sizeof *attr, GFP_KERNEL);
 	if (!attr) {
-		printk(KERN_WARNING "couldn't allocate device attr struct");
+		printk(KERN_WARNING PFX "couldn't allocate device attr struct");
 		return ERR_PTR(-ENOMEM);
 	}
 
 	ret = ib_query_device(device, attr);
 	if (ret) {
-		printk(KERN_WARNING "couldn't query device");
+		printk(KERN_WARNING PFX "couldn't query device: %d", ret);
 		kfree(attr);
 		return ERR_PTR(ret);
 	}
@@ -253,7 +255,7 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
 
 	pool = kmalloc(sizeof *pool, GFP_KERNEL);
 	if (!pool) {
-		printk(KERN_WARNING "couldn't allocate pool struct");
+		printk(KERN_WARNING PFX "couldn't allocate pool struct");
 		return ERR_PTR(-ENOMEM);
 	}
 
@@ -270,7 +272,7 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
 			kmalloc(IB_FMR_HASH_SIZE * sizeof *pool->cache_bucket,
 				GFP_KERNEL);
 		if (!pool->cache_bucket) {
-			printk(KERN_WARNING "Failed to allocate cache in pool");
+			printk(KERN_WARNING PFX "Failed to allocate cache in pool");
 			ret = -ENOMEM;
 			goto out_free_pool;
 		}
@@ -294,7 +296,7 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
 				   "ib_fmr(%s)",
 				   device->name);
 	if (IS_ERR(pool->thread)) {
-		printk(KERN_WARNING "couldn't start cleanup thread");
+		printk(KERN_WARNING PFX "couldn't start cleanup thread");
 		ret = PTR_ERR(pool->thread);
 		goto out_free_pool;
 	}
@@ -311,8 +313,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
 		fmr = kmalloc(sizeof *fmr + params->max_pages_per_fmr * sizeof (u64),
 			      GFP_KERNEL);
 		if (!fmr) {
-			printk(KERN_WARNING "failed to allocate fmr struct "
-			       "for FMR %d", i);
+			printk(KERN_WARNING PFX "failed to allocate fmr "
+			       "struct for FMR %d", i);
 			goto out_fail;
 		}
 
@@ -323,7 +325,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
 
 		fmr->fmr = ib_alloc_fmr(pd, params->access, &fmr_attr);
 		if (IS_ERR(fmr->fmr)) {
-			printk(KERN_WARNING "fmr_create failed for FMR %d", i);
+			printk(KERN_WARNING PFX "fmr_create failed "
+			       "for FMR %d", i);
 			kfree(fmr);
 			goto out_fail;
 		}
@@ -378,7 +381,7 @@ void ib_destroy_fmr_pool(struct ib_fmr_pool *pool)
 	}
 
 	if (i < pool->pool_size)
-		printk(KERN_WARNING "pool still has %d regions registered",
+		printk(KERN_WARNING PFX "pool still has %d regions registered",
 		       pool->pool_size - i);
 
 	kfree(pool->cache_bucket);
@@ -463,8 +466,7 @@ struct ib_pool_fmr *ib_fmr_pool_map_phys(struct ib_fmr_pool *pool_handle,
 		list_add(&fmr->list, &pool->free_list);
 		spin_unlock_irqrestore(&pool->pool_lock, flags);
 
-		printk(KERN_WARNING "fmr_map returns %d\n",
-		       result);
+		printk(KERN_WARNING PFX "fmr_map returns %d\n", result);
 
 		return ERR_PTR(result);
 	}
@@ -516,7 +518,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
 
 #ifdef DEBUG
 	if (fmr->ref_count < 0)
-		printk(KERN_WARNING "FMR %p has ref count %d < 0",
+		printk(KERN_WARNING PFX "FMR %p has ref count %d < 0",
 		       fmr, fmr->ref_count);
 #endif
 
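Note on the change (not part of the patch): PFX is an ordinary string literal, and C concatenates adjacent string literals at compile time, so printk(KERN_WARNING PFX "couldn't query device: %d", ret) ends up with a single format string carrying the "fmr_pool: " prefix. A minimal userspace sketch of the same pattern, with printf standing in for the kernel's printk and an illustrative error value:

#include <stdio.h>

#define PFX "fmr_pool: "

int main(void)
{
	/* Adjacent string literals merge at compile time, so this
	 * prints: fmr_pool: couldn't query device: -19
	 */
	printf(PFX "couldn't query device: %d\n", -19);
	return 0;
}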