
Commit 93c68f1

hramamurthy12 authored and kuba-moo committed
gve: move DQO rx buffer management related code to a new file
In preparation for the upcoming page pool adoption for DQO raw addressing mode,
move RX buffer management code to a new file. In the follow-on patches, page
pool code will be added to this file.

No functional change, just movement of code.

Reviewed-by: Praveen Kaligineedi <[email protected]>
Reviewed-by: Shailend Chand <[email protected]>
Reviewed-by: Willem de Bruijn <[email protected]>
Signed-off-by: Harshitha Ramamurthy <[email protected]>
Reviewed-by: Jacob Keller <[email protected]>
Link: https://patch.msgid.link/[email protected]
Signed-off-by: Jakub Kicinski <[email protected]>
1 parent 2d859af · commit 93c68f1

4 files changed: +250 −226 lines changed

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,5 @@
11
# Makefile for the Google virtual Ethernet (gve) driver
22

33
obj-$(CONFIG_GVE) += gve.o
4-
gve-objs := gve_main.o gve_tx.o gve_tx_dqo.o gve_rx.o gve_rx_dqo.o gve_ethtool.o gve_adminq.o gve_utils.o gve_flow_rule.o
4+
gve-objs := gve_main.o gve_tx.o gve_tx_dqo.o gve_rx.o gve_rx_dqo.o gve_ethtool.o gve_adminq.o gve_utils.o gve_flow_rule.o \
5+
gve_buffer_mgmt_dqo.o

drivers/net/ethernet/google/gve/gve.h

Lines changed: 18 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1162,6 +1162,24 @@ void gve_rx_stop_ring_gqi(struct gve_priv *priv, int idx);
11621162
u16 gve_get_pkt_buf_size(const struct gve_priv *priv, bool enable_hplit);
11631163
bool gve_header_split_supported(const struct gve_priv *priv);
11641164
int gve_set_hsplit_config(struct gve_priv *priv, u8 tcp_data_split);
1165+
/* rx buffer handling */
1166+
int gve_buf_ref_cnt(struct gve_rx_buf_state_dqo *bs);
1167+
void gve_free_page_dqo(struct gve_priv *priv, struct gve_rx_buf_state_dqo *bs,
1168+
bool free_page);
1169+
struct gve_rx_buf_state_dqo *gve_alloc_buf_state(struct gve_rx_ring *rx);
1170+
bool gve_buf_state_is_allocated(struct gve_rx_ring *rx,
1171+
struct gve_rx_buf_state_dqo *buf_state);
1172+
void gve_free_buf_state(struct gve_rx_ring *rx,
1173+
struct gve_rx_buf_state_dqo *buf_state);
1174+
struct gve_rx_buf_state_dqo *gve_dequeue_buf_state(struct gve_rx_ring *rx,
1175+
struct gve_index_list *list);
1176+
void gve_enqueue_buf_state(struct gve_rx_ring *rx, struct gve_index_list *list,
1177+
struct gve_rx_buf_state_dqo *buf_state);
1178+
struct gve_rx_buf_state_dqo *gve_get_recycled_buf_state(struct gve_rx_ring *rx);
1179+
int gve_alloc_page_dqo(struct gve_rx_ring *rx,
1180+
struct gve_rx_buf_state_dqo *buf_state);
1181+
void gve_try_recycle_buf(struct gve_priv *priv, struct gve_rx_ring *rx,
1182+
struct gve_rx_buf_state_dqo *buf_state);
11651183
/* Reset */
11661184
void gve_schedule_reset(struct gve_priv *priv);
11671185
int gve_reset(struct gve_priv *priv, bool attempt_teardown);
Lines changed: 230 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,230 @@
1+
// SPDX-License-Identifier: (GPL-2.0 OR MIT)
2+
/* Google virtual Ethernet (gve) driver
3+
*
4+
* Copyright (C) 2015-2024 Google, Inc.
5+
*/
6+
7+
#include "gve.h"
8+
#include "gve_utils.h"
9+
10+
int gve_buf_ref_cnt(struct gve_rx_buf_state_dqo *bs)
11+
{
12+
return page_count(bs->page_info.page) - bs->page_info.pagecnt_bias;
13+
}
14+
15+
void gve_free_page_dqo(struct gve_priv *priv, struct gve_rx_buf_state_dqo *bs,
16+
bool free_page)
17+
{
18+
page_ref_sub(bs->page_info.page, bs->page_info.pagecnt_bias - 1);
19+
if (free_page)
20+
gve_free_page(&priv->pdev->dev, bs->page_info.page, bs->addr,
21+
DMA_FROM_DEVICE);
22+
bs->page_info.page = NULL;
23+
}
24+
25+
struct gve_rx_buf_state_dqo *gve_alloc_buf_state(struct gve_rx_ring *rx)
26+
{
27+
struct gve_rx_buf_state_dqo *buf_state;
28+
s16 buffer_id;
29+
30+
buffer_id = rx->dqo.free_buf_states;
31+
if (unlikely(buffer_id == -1))
32+
return NULL;
33+
34+
buf_state = &rx->dqo.buf_states[buffer_id];
35+
36+
/* Remove buf_state from free list */
37+
rx->dqo.free_buf_states = buf_state->next;
38+
39+
/* Point buf_state to itself to mark it as allocated */
40+
buf_state->next = buffer_id;
41+
42+
return buf_state;
43+
}
44+
45+
bool gve_buf_state_is_allocated(struct gve_rx_ring *rx,
46+
struct gve_rx_buf_state_dqo *buf_state)
47+
{
48+
s16 buffer_id = buf_state - rx->dqo.buf_states;
49+
50+
return buf_state->next == buffer_id;
51+
}
52+
53+
void gve_free_buf_state(struct gve_rx_ring *rx,
54+
struct gve_rx_buf_state_dqo *buf_state)
55+
{
56+
s16 buffer_id = buf_state - rx->dqo.buf_states;
57+
58+
buf_state->next = rx->dqo.free_buf_states;
59+
rx->dqo.free_buf_states = buffer_id;
60+
}
61+
62+
struct gve_rx_buf_state_dqo *gve_dequeue_buf_state(struct gve_rx_ring *rx,
63+
struct gve_index_list *list)
64+
{
65+
struct gve_rx_buf_state_dqo *buf_state;
66+
s16 buffer_id;
67+
68+
buffer_id = list->head;
69+
if (unlikely(buffer_id == -1))
70+
return NULL;
71+
72+
buf_state = &rx->dqo.buf_states[buffer_id];
73+
74+
/* Remove buf_state from list */
75+
list->head = buf_state->next;
76+
if (buf_state->next == -1)
77+
list->tail = -1;
78+
79+
/* Point buf_state to itself to mark it as allocated */
80+
buf_state->next = buffer_id;
81+
82+
return buf_state;
83+
}
84+
85+
void gve_enqueue_buf_state(struct gve_rx_ring *rx, struct gve_index_list *list,
86+
struct gve_rx_buf_state_dqo *buf_state)
87+
{
88+
s16 buffer_id = buf_state - rx->dqo.buf_states;
89+
90+
buf_state->next = -1;
91+
92+
if (list->head == -1) {
93+
list->head = buffer_id;
94+
list->tail = buffer_id;
95+
} else {
96+
int tail = list->tail;
97+
98+
rx->dqo.buf_states[tail].next = buffer_id;
99+
list->tail = buffer_id;
100+
}
101+
}
102+
103+
struct gve_rx_buf_state_dqo *gve_get_recycled_buf_state(struct gve_rx_ring *rx)
104+
{
105+
struct gve_rx_buf_state_dqo *buf_state;
106+
int i;
107+
108+
/* Recycled buf states are immediately usable. */
109+
buf_state = gve_dequeue_buf_state(rx, &rx->dqo.recycled_buf_states);
110+
if (likely(buf_state))
111+
return buf_state;
112+
113+
if (unlikely(rx->dqo.used_buf_states.head == -1))
114+
return NULL;
115+
116+
/* Used buf states are only usable when ref count reaches 0, which means
117+
* no SKBs refer to them.
118+
*
119+
* Search a limited number before giving up.
120+
*/
121+
for (i = 0; i < 5; i++) {
122+
buf_state = gve_dequeue_buf_state(rx, &rx->dqo.used_buf_states);
123+
if (gve_buf_ref_cnt(buf_state) == 0) {
124+
rx->dqo.used_buf_states_cnt--;
125+
return buf_state;
126+
}
127+
128+
gve_enqueue_buf_state(rx, &rx->dqo.used_buf_states, buf_state);
129+
}
130+
131+
/* For QPL, we cannot allocate any new buffers and must
132+
* wait for the existing ones to be available.
133+
*/
134+
if (rx->dqo.qpl)
135+
return NULL;
136+
137+
/* If there are no free buf states discard an entry from
138+
* `used_buf_states` so it can be used.
139+
*/
140+
if (unlikely(rx->dqo.free_buf_states == -1)) {
141+
buf_state = gve_dequeue_buf_state(rx, &rx->dqo.used_buf_states);
142+
if (gve_buf_ref_cnt(buf_state) == 0)
143+
return buf_state;
144+
145+
gve_free_page_dqo(rx->gve, buf_state, true);
146+
gve_free_buf_state(rx, buf_state);
147+
}
148+
149+
return NULL;
150+
}
151+
152+
int gve_alloc_page_dqo(struct gve_rx_ring *rx,
153+
struct gve_rx_buf_state_dqo *buf_state)
154+
{
155+
struct gve_priv *priv = rx->gve;
156+
u32 idx;
157+
158+
if (!rx->dqo.qpl) {
159+
int err;
160+
161+
err = gve_alloc_page(priv, &priv->pdev->dev,
162+
&buf_state->page_info.page,
163+
&buf_state->addr,
164+
DMA_FROM_DEVICE, GFP_ATOMIC);
165+
if (err)
166+
return err;
167+
} else {
168+
idx = rx->dqo.next_qpl_page_idx;
169+
if (idx >= gve_get_rx_pages_per_qpl_dqo(priv->rx_desc_cnt)) {
170+
net_err_ratelimited("%s: Out of QPL pages\n",
171+
priv->dev->name);
172+
return -ENOMEM;
173+
}
174+
buf_state->page_info.page = rx->dqo.qpl->pages[idx];
175+
buf_state->addr = rx->dqo.qpl->page_buses[idx];
176+
rx->dqo.next_qpl_page_idx++;
177+
}
178+
buf_state->page_info.page_offset = 0;
179+
buf_state->page_info.page_address =
180+
page_address(buf_state->page_info.page);
181+
buf_state->last_single_ref_offset = 0;
182+
183+
/* The page already has 1 ref. */
184+
page_ref_add(buf_state->page_info.page, INT_MAX - 1);
185+
buf_state->page_info.pagecnt_bias = INT_MAX;
186+
187+
return 0;
188+
}
189+
190+
void gve_try_recycle_buf(struct gve_priv *priv, struct gve_rx_ring *rx,
191+
struct gve_rx_buf_state_dqo *buf_state)
192+
{
193+
const u16 data_buffer_size = priv->data_buffer_size_dqo;
194+
int pagecount;
195+
196+
/* Can't reuse if we only fit one buffer per page */
197+
if (data_buffer_size * 2 > PAGE_SIZE)
198+
goto mark_used;
199+
200+
pagecount = gve_buf_ref_cnt(buf_state);
201+
202+
/* Record the offset when we have a single remaining reference.
203+
*
204+
* When this happens, we know all of the other offsets of the page are
205+
* usable.
206+
*/
207+
if (pagecount == 1) {
208+
buf_state->last_single_ref_offset =
209+
buf_state->page_info.page_offset;
210+
}
211+
212+
/* Use the next buffer sized chunk in the page. */
213+
buf_state->page_info.page_offset += data_buffer_size;
214+
buf_state->page_info.page_offset &= (PAGE_SIZE - 1);
215+
216+
/* If we wrap around to the same offset without ever dropping to 1
217+
* reference, then we don't know if this offset was ever freed.
218+
*/
219+
if (buf_state->page_info.page_offset ==
220+
buf_state->last_single_ref_offset) {
221+
goto mark_used;
222+
}
223+
224+
gve_enqueue_buf_state(rx, &rx->dqo.recycled_buf_states, buf_state);
225+
return;
226+
227+
mark_used:
228+
gve_enqueue_buf_state(rx, &rx->dqo.used_buf_states, buf_state);
229+
rx->dqo.used_buf_states_cnt++;
230+
}
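For readers following the buf-state helpers being moved above: the driver keeps buffer states in an array and links them by s16 index rather than by pointer, with -1 marking the end of a list and an entry whose next field points back at its own index marking it as allocated. The snippet below is a minimal, self-contained userspace model of that free-list bookkeeping only; the names buf_pool, NUM_STATES, and pool_* are illustrative and are not part of the driver.

/* Standalone sketch of the s16-index free list used by the buf states. */
#include <stdint.h>
#include <stdio.h>

#define NUM_STATES 4

struct buf_state {
        int16_t next;   /* index of next free entry, -1 for end of list */
};

static struct buf_state buf_pool[NUM_STATES];
static int16_t free_head;

static void pool_init(void)
{
        int16_t i;

        /* Chain every entry onto the free list. */
        for (i = 0; i < NUM_STATES - 1; i++)
                buf_pool[i].next = i + 1;
        buf_pool[NUM_STATES - 1].next = -1;
        free_head = 0;
}

static int16_t pool_alloc(void)
{
        int16_t id = free_head;

        if (id == -1)
                return -1;              /* pool exhausted */
        free_head = buf_pool[id].next;
        buf_pool[id].next = id;         /* self-link marks "allocated" */
        return id;
}

static void pool_free(int16_t id)
{
        buf_pool[id].next = free_head;
        free_head = id;
}

int main(void)
{
        int16_t a, b;

        pool_init();
        a = pool_alloc();
        b = pool_alloc();
        printf("allocated %d and %d\n", a, b);
        pool_free(a);
        printf("next alloc reuses %d\n", pool_alloc());
        return 0;
}

Running the sketch prints "allocated 0 and 1" followed by "next alloc reuses 0", which mirrors how gve_alloc_buf_state and gve_free_buf_state recycle indices through rx->dqo.free_buf_states.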
