@@ -270,12 +270,10 @@ static struct sk_buff *napi_skb_cache_get(void)
 	return skb;
 }
 
-/* Caller must provide SKB that is memset cleared */
-static void __build_skb_around(struct sk_buff *skb, void *data,
-			       unsigned int frag_size)
+static inline void __finalize_skb_around(struct sk_buff *skb, void *data,
+					 unsigned int size)
 {
 	struct skb_shared_info *shinfo;
-	unsigned int size = frag_size ? : ksize(data);
 
 	size -= SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 
@@ -297,15 +295,71 @@ static void __build_skb_around(struct sk_buff *skb, void *data,
 	skb_set_kcov_handle(skb, kcov_common_handle());
 }
 
+static inline void *__slab_build_skb(struct sk_buff *skb, void *data,
+				     unsigned int *size)
+{
+	void *resized;
+
+	/* Must find the allocation size (and grow it to match). */
+	*size = ksize(data);
+	/* krealloc() will immediately return "data" when
+	 * "ksize(data)" is requested: it is the existing upper
+	 * bounds. As a result, GFP_ATOMIC will be ignored. Note
+	 * that this "new" pointer needs to be passed back to the
+	 * caller for use so the __alloc_size hinting will be
+	 * tracked correctly.
+	 */
+	resized = krealloc(data, *size, GFP_ATOMIC);
+	WARN_ON_ONCE(resized != data);
+	return resized;
+}
+
+/* build_skb() variant which can operate on slab buffers.
+ * Note that this should be used sparingly as slab buffers
+ * cannot be combined efficiently by GRO!
+ */
+struct sk_buff *slab_build_skb(void *data)
+{
+	struct sk_buff *skb;
+	unsigned int size;
+
+	skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC);
+	if (unlikely(!skb))
+		return NULL;
+
+	memset(skb, 0, offsetof(struct sk_buff, tail));
+	data = __slab_build_skb(skb, data, &size);
+	__finalize_skb_around(skb, data, size);
+
+	return skb;
+}
+EXPORT_SYMBOL(slab_build_skb);
+
+/* Caller must provide SKB that is memset cleared */
+static void __build_skb_around(struct sk_buff *skb, void *data,
+			       unsigned int frag_size)
+{
+	unsigned int size = frag_size;
+
+	/* frag_size == 0 is considered deprecated now. Callers
+	 * using slab buffer should use slab_build_skb() instead.
+	 */
+	if (WARN_ONCE(size == 0, "Use slab_build_skb() instead"))
+		data = __slab_build_skb(skb, data, &size);
+
+	__finalize_skb_around(skb, data, size);
+}
+
 /**
  * __build_skb - build a network buffer
  * @data: data buffer provided by caller
- * @frag_size: size of data, or 0 if head was kmalloced
+ * @frag_size: size of data (must not be 0)
  *
  * Allocate a new &sk_buff. Caller provides space holding head and
- * skb_shared_info. @data must have been allocated by kmalloc() only if
- * @frag_size is 0, otherwise data should come from the page allocator
- * or vmalloc()
+ * skb_shared_info. @data must have been allocated from the page
+ * allocator or vmalloc(). (A @frag_size of 0 to indicate a kmalloc()
+ * allocation is deprecated, and callers should use slab_build_skb()
+ * instead.)
  * The return is the new skb buffer.
  * On a failure the return is %NULL, and @data is not freed.
  * Notes :
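
For context, here is a minimal caller sketch (not part of the patch) showing how a driver that kmallocs its receive buffer might migrate from the deprecated build_skb(buf, 0) form to the new slab_build_skb(). The function example_rx() and its payload/len parameters are hypothetical; the only behavior assumed from the diff is that the slab buffer must still reserve room for skb_shared_info (since __finalize_skb_around() subtracts SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) from the size) and that @data is not freed on failure.

	/* Hypothetical caller sketch, assuming the slab_build_skb()
	 * semantics shown in the diff above. example_rx(), payload and
	 * len are made up for illustration; they are not part of the
	 * kernel patch.
	 */
	#include <linux/skbuff.h>
	#include <linux/slab.h>
	#include <linux/string.h>

	static struct sk_buff *example_rx(const void *payload, unsigned int len)
	{
		struct sk_buff *skb;
		void *buf;

		/* The slab buffer must still leave room for skb_shared_info,
		 * just as a page-allocator buffer handed to build_skb() would.
		 */
		buf = kmalloc(len + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
			      GFP_ATOMIC);
		if (!buf)
			return NULL;
		memcpy(buf, payload, len);

		/* Old, now-deprecated form: skb = build_skb(buf, 0);
		 * New form takes no frag_size argument; the slab reports
		 * the true allocation size via ksize() internally.
		 */
		skb = slab_build_skb(buf);
		if (!skb) {
			kfree(buf);	/* on failure, @data is not freed */
			return NULL;
		}
		skb_put(skb, len);	/* expose the received bytes */

		return skb;
	}

As the comment in the patch warns, this path should be used sparingly: slab-backed skbs cannot be combined efficiently by GRO, so it fits slow paths where the buffer genuinely originates from kmalloc().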