@@ -87,7 +87,8 @@ int qede_alloc_rx_buffer(struct qede_rx_queue *rxq, bool allow_lazy)
 	rx_bd = (struct eth_rx_bd *)qed_chain_produce(&rxq->rx_bd_ring);
 	WARN_ON(!rx_bd);
 	rx_bd->addr.hi = cpu_to_le32(upper_32_bits(mapping));
-	rx_bd->addr.lo = cpu_to_le32(lower_32_bits(mapping));
+	rx_bd->addr.lo = cpu_to_le32(lower_32_bits(mapping) +
+				     rxq->rx_headroom);
 
 	rxq->sw_rx_prod++;
 	rxq->filled_buffers++;
@@ -509,7 +510,8 @@ static inline void qede_reuse_page(struct qede_rx_queue *rxq,
 	new_mapping = curr_prod->mapping + curr_prod->page_offset;
 
 	rx_bd_prod->addr.hi = cpu_to_le32(upper_32_bits(new_mapping));
-	rx_bd_prod->addr.lo = cpu_to_le32(lower_32_bits(new_mapping));
+	rx_bd_prod->addr.lo = cpu_to_le32(lower_32_bits(new_mapping) +
+					  rxq->rx_headroom);
 
 	rxq->sw_rx_prod++;
 	curr_cons->data = NULL;
@@ -991,13 +993,14 @@ static bool qede_rx_xdp(struct qede_dev *edev,
 			struct qede_rx_queue *rxq,
 			struct bpf_prog *prog,
 			struct sw_rx_data *bd,
-			struct eth_fast_path_rx_reg_cqe *cqe)
+			struct eth_fast_path_rx_reg_cqe *cqe,
+			u16 data_offset)
 {
 	u16 len = le16_to_cpu(cqe->len_on_first_bd);
 	struct xdp_buff xdp;
 	enum xdp_action act;
 
-	xdp.data = page_address(bd->data) + cqe->placement_offset;
+	xdp.data = page_address(bd->data) + data_offset;
 	xdp.data_end = xdp.data + len;
 
 	/* Queues always have a full reset currently, so for the time
@@ -1026,7 +1029,7 @@ static bool qede_rx_xdp(struct qede_dev *edev,
 		/* Now if there's a transmission problem, we'd still have to
 		 * throw current buffer, as replacement was already allocated.
 		 */
-		if (qede_xdp_xmit(edev, fp, bd, cqe->placement_offset, len)) {
+		if (qede_xdp_xmit(edev, fp, bd, data_offset, len)) {
 			dma_unmap_page(rxq->dev, bd->mapping,
 				       PAGE_SIZE, DMA_BIDIRECTIONAL);
 			__free_page(bd->data);
@@ -1053,7 +1056,7 @@ static struct sk_buff *qede_rx_allocate_skb(struct qede_dev *edev,
 					    struct sw_rx_data *bd, u16 len,
 					    u16 pad)
 {
-	unsigned int offset = bd->page_offset;
+	unsigned int offset = bd->page_offset + pad;
 	struct skb_frag_struct *frag;
 	struct page *page = bd->data;
 	unsigned int pull_len;
@@ -1070,15 +1073,15 @@ static struct sk_buff *qede_rx_allocate_skb(struct qede_dev *edev,
 	 */
 	if (len + pad <= edev->rx_copybreak) {
 		memcpy(skb_put(skb, len),
-		       page_address(page) + pad + offset, len);
+		       page_address(page) + offset, len);
 		qede_reuse_page(rxq, bd);
 		goto out;
 	}
 
 	frag = &skb_shinfo(skb)->frags[0];
 
 	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
-			page, pad + offset, len, rxq->rx_buf_seg_size);
+			page, offset, len, rxq->rx_buf_seg_size);
 
 	va = skb_frag_address(frag);
 	pull_len = eth_get_headlen(va, QEDE_RX_HDR_SIZE);
@@ -1224,11 +1227,11 @@ static int qede_rx_process_cqe(struct qede_dev *edev,
 
 	fp_cqe = &cqe->fast_path_regular;
 	len = le16_to_cpu(fp_cqe->len_on_first_bd);
-	pad = fp_cqe->placement_offset;
+	pad = fp_cqe->placement_offset + rxq->rx_headroom;
 
 	/* Run eBPF program if one is attached */
 	if (xdp_prog)
-		if (!qede_rx_xdp(edev, fp, rxq, xdp_prog, bd, fp_cqe))
+		if (!qede_rx_xdp(edev, fp, rxq, xdp_prog, bd, fp_cqe, pad))
 			return 0;
 
 	/* If this is an error packet then drop it */
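
Taken together, the hunks apply one pattern: the DMA address written to the RX descriptor is advanced by rxq->rx_headroom, and software then finds the frame through a single offset (placement_offset plus the headroom) that is threaded through both the XDP path (data_offset) and the skb path (pad). Below is a minimal standalone C sketch of that offset arithmetic only; the 256-byte headroom value, the struct, and the sample numbers are illustrative assumptions, not the driver's definitions.

#include <stdint.h>
#include <stdio.h>

#define RX_HEADROOM 256u	/* assumed headroom, e.g. the size of XDP_PACKET_HEADROOM */

/* Toy model of one RX buffer; not the driver's sw_rx_data. */
struct rx_buf {
	uint64_t dma_base;	/* DMA mapping of the page */
	uint32_t page_offset;	/* where this buffer starts within the page */
};

int main(void)
{
	struct rx_buf bd = { .dma_base = 0x100000, .page_offset = 0 };
	uint16_t placement_offset = 2;	/* hardware placement offset reported in the CQE */

	/* Address handed to the descriptor: buffer start plus headroom,
	 * so the NIC writes the frame past the reserved headroom.
	 */
	uint64_t bd_addr = bd.dma_base + bd.page_offset + RX_HEADROOM;

	/* Offset software uses to locate the frame: placement offset plus
	 * headroom, carried as one "pad"/"data_offset" value to XDP and skb setup.
	 */
	uint16_t data_offset = placement_offset + RX_HEADROOM;

	printf("descriptor addr = 0x%llx, data offset = %u\n",
	       (unsigned long long)bd_addr, data_offset);
	return 0;
}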