@@ -21,25 +21,6 @@ static void enic_intr_update_pkt_size(struct vnic_rx_bytes_counter *pkt_size,
 		pkt_size->small_pkt_bytes_cnt += pkt_len;
 }
 
-static bool enic_rxcopybreak(struct net_device *netdev, struct sk_buff **skb,
-			     struct vnic_rq_buf *buf, u16 len)
-{
-	struct enic *enic = netdev_priv(netdev);
-	struct sk_buff *new_skb;
-
-	if (len > enic->rx_copybreak)
-		return false;
-	new_skb = netdev_alloc_skb_ip_align(netdev, len);
-	if (!new_skb)
-		return false;
-	dma_sync_single_for_cpu(&enic->pdev->dev, buf->dma_addr, len,
-				DMA_FROM_DEVICE);
-	memcpy(new_skb->data, (*skb)->data, len);
-	*skb = new_skb;
-
-	return true;
-}
-
 int enic_rq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc, u8 type,
 		    u16 q_number, u16 completed_index, void *opaque)
 {
@@ -142,57 +123,56 @@ int enic_rq_alloc_buf(struct vnic_rq *rq)
 {
 	struct enic *enic = vnic_dev_priv(rq->vdev);
 	struct net_device *netdev = enic->netdev;
-	struct sk_buff *skb;
+	struct enic_rq *erq = &enic->rq[rq->index];
+	struct enic_rq_stats *rqstats = &erq->stats;
+	unsigned int offset = 0;
 	unsigned int len = netdev->mtu + VLAN_ETH_HLEN;
 	unsigned int os_buf_index = 0;
 	dma_addr_t dma_addr;
 	struct vnic_rq_buf *buf = rq->to_use;
+	struct page *page;
+	unsigned int truesize = len;
 
 	if (buf->os_buf) {
 		enic_queue_rq_desc(rq, buf->os_buf, os_buf_index, buf->dma_addr,
 				   buf->len);
 
 		return 0;
 	}
-	skb = netdev_alloc_skb_ip_align(netdev, len);
-	if (!skb) {
-		enic->rq[rq->index].stats.no_skb++;
-		return -ENOMEM;
-	}
 
-	dma_addr = dma_map_single(&enic->pdev->dev, skb->data, len,
-				  DMA_FROM_DEVICE);
-	if (unlikely(enic_dma_map_check(enic, dma_addr))) {
-		dev_kfree_skb(skb);
+	page = page_pool_dev_alloc(erq->pool, &offset, &truesize);
+	if (unlikely(!page)) {
+		rqstats->pp_alloc_fail++;
 		return -ENOMEM;
 	}
-
-	enic_queue_rq_desc(rq, skb, os_buf_index, dma_addr, len);
+	buf->offset = offset;
+	buf->truesize = truesize;
+	dma_addr = page_pool_get_dma_addr(page) + offset;
+	enic_queue_rq_desc(rq, (void *)page, os_buf_index, dma_addr, len);
 
 	return 0;
 }
 
 void enic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
 {
 	struct enic *enic = vnic_dev_priv(rq->vdev);
+	struct enic_rq *erq = &enic->rq[rq->index];
 
 	if (!buf->os_buf)
 		return;
 
-	dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len,
-			 DMA_FROM_DEVICE);
-	dev_kfree_skb_any(buf->os_buf);
+	page_pool_put_full_page(erq->pool, (struct page *)buf->os_buf, true);
 	buf->os_buf = NULL;
 }
 
 void enic_rq_indicate_buf(struct vnic_rq *rq, struct cq_desc *cq_desc,
 			  struct vnic_rq_buf *buf, int skipped, void *opaque)
 {
 	struct enic *enic = vnic_dev_priv(rq->vdev);
-	struct net_device *netdev = enic->netdev;
 	struct sk_buff *skb;
 	struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
 	struct enic_rq_stats *rqstats = &enic->rq[rq->index].stats;
+	struct napi_struct *napi;
 
 	u8 type, color, eop, sop, ingress_port, vlan_stripped;
 	u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof;
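[Note: the allocation path above relies on a per-RQ page pool (erq->pool) that is created elsewhere in the driver, outside this hunk. Below is a minimal sketch of how such a pool could be set up with the Page Pool API; the helper name, sizing, and field layout are assumptions for illustration, not the exact code from this patch. Letting the pool own the DMA mapping (PP_FLAG_DMA_MAP) is what allows enic_rq_alloc_buf() to call page_pool_get_dma_addr() instead of dma_map_single().]

#include <net/page_pool/types.h>
#include <net/page_pool/helpers.h>

/* Hypothetical per-RQ pool setup (sketch only): one pool per receive queue,
 * sized to the RQ ring, with DMA mapping and device-side sync handled by
 * the page pool core.
 */
static int enic_rq_create_page_pool_sketch(struct enic *enic, struct enic_rq *erq,
					   unsigned int ring_size)
{
	struct page_pool_params pp_params = {
		.order		= 0,					/* single pages */
		.pool_size	= ring_size,				/* one buffer per descriptor */
		.nid		= dev_to_node(&enic->pdev->dev),
		.dev		= &enic->pdev->dev,
		.dma_dir	= DMA_FROM_DEVICE,
		.max_len	= enic->netdev->mtu + VLAN_ETH_HLEN,	/* RX sync length */
		.flags		= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
	};

	erq->pool = page_pool_create(&pp_params);
	if (IS_ERR(erq->pool))
		return PTR_ERR(erq->pool);

	return 0;
}

[The matching teardown would call page_pool_destroy(erq->pool) once the RQ has been drained and all buffers returned.]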
@@ -208,8 +188,6 @@ void enic_rq_indicate_buf(struct vnic_rq *rq, struct cq_desc *cq_desc,
 		return;
 	}
 
-	skb = buf->os_buf;
-
 	cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc, &type, &color,
 			    &q_number, &completed_index, &ingress_port, &fcoe,
 			    &eop, &sop, &rss_type, &csum_not_calc, &rss_hash,
@@ -219,48 +197,46 @@ void enic_rq_indicate_buf(struct vnic_rq *rq, struct cq_desc *cq_desc,
 			    &tcp, &ipv4_csum_ok, &ipv6, &ipv4, &ipv4_fragment,
 			    &fcs_ok);
 
-	if (enic_rq_pkt_error(rq, packet_error, fcs_ok, bytes_written)) {
-		dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len,
-				 DMA_FROM_DEVICE);
-		dev_kfree_skb_any(skb);
-		buf->os_buf = NULL;
-
+	if (enic_rq_pkt_error(rq, packet_error, fcs_ok, bytes_written))
 		return;
-	}
 
 	if (eop && bytes_written > 0) {
 		/* Good receive
 		 */
 		rqstats->bytes += bytes_written;
-		if (!enic_rxcopybreak(netdev, &skb, buf, bytes_written)) {
-			buf->os_buf = NULL;
-			dma_unmap_single(&enic->pdev->dev, buf->dma_addr,
-					 buf->len, DMA_FROM_DEVICE);
+		napi = &enic->napi[rq->index];
+		skb = napi_get_frags(napi);
+		if (unlikely(!skb)) {
+			net_warn_ratelimited("%s: skb alloc error rq[%d], desc[%d]\n",
+					     enic->netdev->name, rq->index,
+					     completed_index);
+			rqstats->no_skb++;
+			return;
 		}
+
 		prefetch(skb->data - NET_IP_ALIGN);
 
-		skb_put(skb, bytes_written);
-		skb->protocol = eth_type_trans(skb, netdev);
+		dma_sync_single_for_cpu(&enic->pdev->dev, buf->dma_addr,
+					bytes_written, DMA_FROM_DEVICE);
+		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+				(struct page *)buf->os_buf, buf->offset,
+				bytes_written, buf->truesize);
 		skb_record_rx_queue(skb, q_number);
 		enic_rq_set_skb_flags(rq, type, rss_hash, rss_type, fcoe,
 				      fcoe_fc_crc_ok, vlan_stripped,
 				      csum_not_calc, tcp_udp_csum_ok, ipv6,
 				      ipv4_csum_ok, vlan_tci, skb);
-		skb_mark_napi_id(skb, &enic->napi[rq->index]);
-		if (!(netdev->features & NETIF_F_GRO))
-			netif_receive_skb(skb);
-		else
-			napi_gro_receive(&enic->napi[q_number], skb);
+		skb_mark_for_recycle(skb);
+		napi_gro_frags(napi);
 		if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
 			enic_intr_update_pkt_size(&cq->pkt_size_counter,
 						  bytes_written);
+		buf->os_buf = NULL;
+		buf->dma_addr = 0;
+		buf = buf->next;
 	} else {
 		/* Buffer overflow
 		 */
 		rqstats->pkt_truncated++;
-		dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len,
-				 DMA_FROM_DEVICE);
-		dev_kfree_skb_any(skb);
-		buf->os_buf = NULL;
 	}
 }
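[Note: the receive path above also depends on fields that are not defined in this file -- buf->offset and buf->truesize on struct vnic_rq_buf, erq->pool on struct enic_rq, and the pp_alloc_fail counter in struct enic_rq_stats. Those come from companion changes elsewhere in the series; the sketch below only illustrates roughly what they might look like, inferred from how they are used in the hunks above, and is not the actual declarations.]

/* Sketch only -- inferred from usage, not copied from the real headers. */
struct vnic_rq_buf {
	/* ... existing members: os_buf, dma_addr, len, next, ... */
	unsigned int offset;	/* fragment offset within the pool page      */
	unsigned int truesize;	/* size actually handed out by the page pool */
};

struct enic_rq_stats {
	/* ... existing counters: bytes, no_skb, pkt_truncated, ... */
	u64 pp_alloc_fail;	/* page_pool_dev_alloc() failures */
};

struct enic_rq {
	struct vnic_rq vrq;
	struct enic_rq_stats stats;
	struct page_pool *pool;	/* one page pool per RQ, used by the RX path */
};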