
Commit ebdfae0

hramamurthy12 authored and kuba-moo committed
gve: adopt page pool for DQ RDA mode
For DQ queue format in raw DMA addressing (RDA) mode, implement page pool
recycling of buffers by leveraging a few helper functions.

DQ QPL mode will continue to use the existing recycling logic. This is
because in QPL mode, the pages come from a constant set of pages that the
driver pre-allocates and registers with the device.

Reviewed-by: Praveen Kaligineedi <[email protected]>
Reviewed-by: Shailend Chand <[email protected]>
Reviewed-by: Willem de Bruijn <[email protected]>
Signed-off-by: Harshitha Ramamurthy <[email protected]>
Reviewed-by: Jacob Keller <[email protected]>
Link: https://patch.msgid.link/[email protected]
Signed-off-by: Jakub Kicinski <[email protected]>
1 parent 93c68f1 commit ebdfae0
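
For readers who have not used the kernel page_pool API this commit adopts, below is a minimal, driver-agnostic sketch of the allocate/recycle cycle the new helpers are built around. It is not code from this patch: the function name, pool size, and 2048-byte fragment size are illustrative assumptions, while the page_pool_* calls and pool parameters mirror what gve_rx_create_page_pool() and gve_alloc_from_page_pool() in the diff below set up.

#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <net/page_pool/helpers.h>

/* Illustrative only: create a small pool, take one RX buffer from it,
 * and hand the buffer back for recycling instead of freeing it.
 */
static int page_pool_cycle_example(struct device *dev, struct napi_struct *napi)
{
	struct page_pool_params pp = {
		.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.order = 0,
		.pool_size = 1024,	/* the patch uses 4 * rx_desc_cnt */
		.dev = dev,
		.napi = napi,
		.max_len = PAGE_SIZE,
		.dma_dir = DMA_FROM_DEVICE,
	};
	unsigned int size = 2048;	/* requested fragment size */
	struct page_pool *pool;
	unsigned int offset;
	struct page *page;

	pool = page_pool_create(&pp);
	if (IS_ERR(pool))
		return PTR_ERR(pool);

	/* The pool hands back an already DMA-mapped page (or page fragment);
	 * offset/size describe the region the caller may use. The address
	 * posted to hardware would be page_pool_get_dma_addr(page) + offset.
	 */
	page = page_pool_alloc(pool, &offset, &size, GFP_ATOMIC);
	if (!page) {
		page_pool_destroy(pool);
		return -ENOMEM;
	}

	/* Recycling: return the fragment to the pool rather than freeing the
	 * page; allow_direct=true is only safe from the pool's NAPI context.
	 */
	page_pool_put_page(pool, page, size, true);

	page_pool_destroy(pool);
	return 0;
}
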

File tree: 4 files changed, +198 −94 lines changed


drivers/net/ethernet/google/Kconfig

Lines changed: 1 addition & 0 deletions
@@ -18,6 +18,7 @@ if NET_VENDOR_GOOGLE
 config GVE
 	tristate "Google Virtual NIC (gVNIC) support"
 	depends on (PCI_MSI && (X86 || CPU_LITTLE_ENDIAN))
+	select PAGE_POOL
 	help
 	  This driver supports Google Virtual NIC (gVNIC)"

drivers/net/ethernet/google/gve/gve.h

Lines changed: 20 additions & 2 deletions
@@ -13,6 +13,7 @@
 #include <linux/netdevice.h>
 #include <linux/pci.h>
 #include <linux/u64_stats_sync.h>
+#include <net/page_pool/helpers.h>
 #include <net/xdp.h>
 
 #include "gve_desc.h"
@@ -60,6 +61,8 @@
 
 #define GVE_DEFAULT_RX_BUFFER_OFFSET 2048
 
+#define GVE_PAGE_POOL_SIZE_MULTIPLIER 4
+
 #define GVE_FLOW_RULES_CACHE_SIZE \
 	(GVE_ADMINQ_BUFFER_SIZE / sizeof(struct gve_adminq_queried_flow_rule))
 #define GVE_FLOW_RULE_IDS_CACHE_SIZE \
@@ -102,6 +105,7 @@ struct gve_rx_slot_page_info {
 	struct page *page;
 	void *page_address;
 	u32 page_offset; /* offset to write to in page */
+	unsigned int buf_size;
 	int pagecnt_bias; /* expected pagecnt if only the driver has a ref */
 	u16 pad; /* adjustment for rx padding */
 	u8 can_flip; /* tracks if the networking stack is using the page */
@@ -273,6 +277,8 @@ struct gve_rx_ring {
 
 		/* Address info of the buffers for header-split */
 		struct gve_header_buf hdr_bufs;
+
+		struct page_pool *page_pool;
 	} dqo;
 };
 
@@ -1176,10 +1182,22 @@ struct gve_rx_buf_state_dqo *gve_dequeue_buf_state(struct gve_rx_ring *rx,
 void gve_enqueue_buf_state(struct gve_rx_ring *rx, struct gve_index_list *list,
 			   struct gve_rx_buf_state_dqo *buf_state);
 struct gve_rx_buf_state_dqo *gve_get_recycled_buf_state(struct gve_rx_ring *rx);
-int gve_alloc_page_dqo(struct gve_rx_ring *rx,
-		       struct gve_rx_buf_state_dqo *buf_state);
 void gve_try_recycle_buf(struct gve_priv *priv, struct gve_rx_ring *rx,
 			 struct gve_rx_buf_state_dqo *buf_state);
+void gve_free_to_page_pool(struct gve_rx_ring *rx,
+			   struct gve_rx_buf_state_dqo *buf_state,
+			   bool allow_direct);
+int gve_alloc_qpl_page_dqo(struct gve_rx_ring *rx,
+			   struct gve_rx_buf_state_dqo *buf_state);
+void gve_free_qpl_page_dqo(struct gve_rx_buf_state_dqo *buf_state);
+void gve_reuse_buffer(struct gve_rx_ring *rx,
+		      struct gve_rx_buf_state_dqo *buf_state);
+void gve_free_buffer(struct gve_rx_ring *rx,
+		     struct gve_rx_buf_state_dqo *buf_state);
+int gve_alloc_buffer(struct gve_rx_ring *rx, struct gve_rx_desc_dqo *desc);
+struct page_pool *gve_rx_create_page_pool(struct gve_priv *priv,
+					  struct gve_rx_ring *rx);
+
 /* Reset */
 void gve_schedule_reset(struct gve_priv *priv);
 int gve_reset(struct gve_priv *priv, bool attempt_teardown);

drivers/net/ethernet/google/gve/gve_buffer_mgmt_dqo.c

Lines changed: 131 additions & 49 deletions
@@ -12,16 +12,6 @@ int gve_buf_ref_cnt(struct gve_rx_buf_state_dqo *bs)
 	return page_count(bs->page_info.page) - bs->page_info.pagecnt_bias;
 }
 
-void gve_free_page_dqo(struct gve_priv *priv, struct gve_rx_buf_state_dqo *bs,
-		       bool free_page)
-{
-	page_ref_sub(bs->page_info.page, bs->page_info.pagecnt_bias - 1);
-	if (free_page)
-		gve_free_page(&priv->pdev->dev, bs->page_info.page, bs->addr,
-			      DMA_FROM_DEVICE);
-	bs->page_info.page = NULL;
-}
-
 struct gve_rx_buf_state_dqo *gve_alloc_buf_state(struct gve_rx_ring *rx)
 {
 	struct gve_rx_buf_state_dqo *buf_state;
@@ -128,56 +118,28 @@ struct gve_rx_buf_state_dqo *gve_get_recycled_buf_state(struct gve_rx_ring *rx)
 		gve_enqueue_buf_state(rx, &rx->dqo.used_buf_states, buf_state);
 	}
 
-	/* For QPL, we cannot allocate any new buffers and must
-	 * wait for the existing ones to be available.
-	 */
-	if (rx->dqo.qpl)
-		return NULL;
-
-	/* If there are no free buf states discard an entry from
-	 * `used_buf_states` so it can be used.
-	 */
-	if (unlikely(rx->dqo.free_buf_states == -1)) {
-		buf_state = gve_dequeue_buf_state(rx, &rx->dqo.used_buf_states);
-		if (gve_buf_ref_cnt(buf_state) == 0)
-			return buf_state;
-
-		gve_free_page_dqo(rx->gve, buf_state, true);
-		gve_free_buf_state(rx, buf_state);
-	}
-
 	return NULL;
 }
 
-int gve_alloc_page_dqo(struct gve_rx_ring *rx,
-		       struct gve_rx_buf_state_dqo *buf_state)
+int gve_alloc_qpl_page_dqo(struct gve_rx_ring *rx,
+			   struct gve_rx_buf_state_dqo *buf_state)
 {
 	struct gve_priv *priv = rx->gve;
 	u32 idx;
 
-	if (!rx->dqo.qpl) {
-		int err;
-
-		err = gve_alloc_page(priv, &priv->pdev->dev,
-				     &buf_state->page_info.page,
-				     &buf_state->addr,
-				     DMA_FROM_DEVICE, GFP_ATOMIC);
-		if (err)
-			return err;
-	} else {
-		idx = rx->dqo.next_qpl_page_idx;
-		if (idx >= gve_get_rx_pages_per_qpl_dqo(priv->rx_desc_cnt)) {
-			net_err_ratelimited("%s: Out of QPL pages\n",
-					    priv->dev->name);
-			return -ENOMEM;
-		}
-		buf_state->page_info.page = rx->dqo.qpl->pages[idx];
-		buf_state->addr = rx->dqo.qpl->page_buses[idx];
-		rx->dqo.next_qpl_page_idx++;
+	idx = rx->dqo.next_qpl_page_idx;
+	if (idx >= gve_get_rx_pages_per_qpl_dqo(priv->rx_desc_cnt)) {
+		net_err_ratelimited("%s: Out of QPL pages\n",
+				    priv->dev->name);
+		return -ENOMEM;
 	}
+	buf_state->page_info.page = rx->dqo.qpl->pages[idx];
+	buf_state->addr = rx->dqo.qpl->page_buses[idx];
+	rx->dqo.next_qpl_page_idx++;
 	buf_state->page_info.page_offset = 0;
 	buf_state->page_info.page_address =
 		page_address(buf_state->page_info.page);
+	buf_state->page_info.buf_size = priv->data_buffer_size_dqo;
 	buf_state->last_single_ref_offset = 0;
 
 	/* The page already has 1 ref. */
@@ -187,6 +149,16 @@ int gve_alloc_page_dqo(struct gve_rx_ring *rx,
 	return 0;
 }
 
+void gve_free_qpl_page_dqo(struct gve_rx_buf_state_dqo *buf_state)
+{
+	if (!buf_state->page_info.page)
+		return;
+
+	page_ref_sub(buf_state->page_info.page,
+		     buf_state->page_info.pagecnt_bias - 1);
+	buf_state->page_info.page = NULL;
+}
+
 void gve_try_recycle_buf(struct gve_priv *priv, struct gve_rx_ring *rx,
 			 struct gve_rx_buf_state_dqo *buf_state)
 {
@@ -228,3 +200,113 @@ void gve_try_recycle_buf(struct gve_priv *priv, struct gve_rx_ring *rx,
 	gve_enqueue_buf_state(rx, &rx->dqo.used_buf_states, buf_state);
 	rx->dqo.used_buf_states_cnt++;
 }
+
+void gve_free_to_page_pool(struct gve_rx_ring *rx,
+			   struct gve_rx_buf_state_dqo *buf_state,
+			   bool allow_direct)
+{
+	struct page *page = buf_state->page_info.page;
+
+	if (!page)
+		return;
+
+	page_pool_put_page(page->pp, page, buf_state->page_info.buf_size,
+			   allow_direct);
+	buf_state->page_info.page = NULL;
+}
+
+static int gve_alloc_from_page_pool(struct gve_rx_ring *rx,
+				    struct gve_rx_buf_state_dqo *buf_state)
+{
+	struct gve_priv *priv = rx->gve;
+	struct page *page;
+
+	buf_state->page_info.buf_size = priv->data_buffer_size_dqo;
+	page = page_pool_alloc(rx->dqo.page_pool,
+			       &buf_state->page_info.page_offset,
+			       &buf_state->page_info.buf_size, GFP_ATOMIC);
+
+	if (!page)
+		return -ENOMEM;
+
+	buf_state->page_info.page = page;
+	buf_state->page_info.page_address = page_address(page);
+	buf_state->addr = page_pool_get_dma_addr(page);
+
+	return 0;
+}
+
+struct page_pool *gve_rx_create_page_pool(struct gve_priv *priv,
+					  struct gve_rx_ring *rx)
+{
+	u32 ntfy_id = gve_rx_idx_to_ntfy(priv, rx->q_num);
+	struct page_pool_params pp = {
+		.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
+		.order = 0,
+		.pool_size = GVE_PAGE_POOL_SIZE_MULTIPLIER * priv->rx_desc_cnt,
+		.dev = &priv->pdev->dev,
+		.netdev = priv->dev,
+		.napi = &priv->ntfy_blocks[ntfy_id].napi,
+		.max_len = PAGE_SIZE,
+		.dma_dir = DMA_FROM_DEVICE,
+	};
+
+	return page_pool_create(&pp);
+}
+
+void gve_free_buffer(struct gve_rx_ring *rx,
+		     struct gve_rx_buf_state_dqo *buf_state)
+{
+	if (rx->dqo.page_pool) {
+		gve_free_to_page_pool(rx, buf_state, true);
+		gve_free_buf_state(rx, buf_state);
+	} else {
+		gve_enqueue_buf_state(rx, &rx->dqo.recycled_buf_states,
+				      buf_state);
+	}
+}
+
+void gve_reuse_buffer(struct gve_rx_ring *rx,
+		      struct gve_rx_buf_state_dqo *buf_state)
+{
+	if (rx->dqo.page_pool) {
+		buf_state->page_info.page = NULL;
+		gve_free_buf_state(rx, buf_state);
+	} else {
+		gve_dec_pagecnt_bias(&buf_state->page_info);
+		gve_try_recycle_buf(rx->gve, rx, buf_state);
+	}
+}
+
+int gve_alloc_buffer(struct gve_rx_ring *rx, struct gve_rx_desc_dqo *desc)
+{
+	struct gve_rx_buf_state_dqo *buf_state;
+
+	if (rx->dqo.page_pool) {
+		buf_state = gve_alloc_buf_state(rx);
+		if (WARN_ON_ONCE(!buf_state))
+			return -ENOMEM;
+
+		if (gve_alloc_from_page_pool(rx, buf_state))
+			goto free_buf_state;
+	} else {
+		buf_state = gve_get_recycled_buf_state(rx);
+		if (unlikely(!buf_state)) {
+			buf_state = gve_alloc_buf_state(rx);
+			if (unlikely(!buf_state))
+				return -ENOMEM;
+
+			if (unlikely(gve_alloc_qpl_page_dqo(rx, buf_state)))
+				goto free_buf_state;
+		}
+	}
+	desc->buf_id = cpu_to_le16(buf_state - rx->dqo.buf_states);
+	desc->buf_addr = cpu_to_le64(buf_state->addr +
+				     buf_state->page_info.page_offset);
+
+	return 0;
+
+free_buf_state:
+	gve_free_buf_state(rx, buf_state);
+	return -ENOMEM;
+}
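
The fourth changed file (the DQO RX datapath) is not shown on this page. As a rough, hypothetical sketch of how the helpers above might be consumed there, a buffer-posting loop could look like the following; only gve_alloc_buffer() and the gve_rx_ring/gve_rx_desc_dqo types come from the patch, while the function name and the desc_ring, fill_cnt, and mask parameters are invented for illustration.

#include "gve.h"	/* assumes the declarations added in gve.h above */

/* Hypothetical refill loop: post up to "budget" RX buffers with the new
 * gve_alloc_buffer() helper, which picks the page pool path in RDA mode
 * and the QPL recycling path otherwise.
 */
static void example_post_rx_buffers(struct gve_rx_ring *rx,
				    struct gve_rx_desc_dqo *desc_ring,
				    u32 *fill_cnt, u32 mask, int budget)
{
	while (budget--) {
		struct gve_rx_desc_dqo *desc = &desc_ring[*fill_cnt & mask];

		/* Fills desc->buf_id and desc->buf_addr on success. */
		if (gve_alloc_buffer(rx, desc))
			break;	/* out of buffers; retry on the next NAPI poll */

		(*fill_cnt)++;
	}
}

On the completion side, the datapath would presumably call gve_reuse_buffer() once a page has been handed up the stack and gve_free_buffer() on drop or error paths; that split is exactly what those two helpers encode for the page pool and QPL cases.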
