
Commit 27150bc

Geetha sowjanya authored and davem330 committed
octeontx2-af: Interface backpressure configuration
Each interface receive channel can be backpressured by resources when they are exhausted or reach configured threshold levels. The resources here are the receive buffer queues (Auras) and the packet notification descriptor queues (CQs). Resources and interface channels are mapped using backpressure IDs (BPIDs). HW supports up to 512 BPIDs; this patch divides them statically across the CGX/LBK/SDP interfaces as follows:

BPIDs 0 - 191 are mapped to LMAC channels, 16 per LMAC.
BPIDs 192 - 255 are mapped to LBK channels.
BPIDs 256 - 511 are mapped to SDP channels.

Basic BPID configuration is also done, and mbox handlers are added with which a PF device can request a BPID, which it then uses to configure its Auras and CQs.

Signed-off-by: Geetha sowjanya <[email protected]>
Signed-off-by: Sunil Goutham <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
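For context, a PF-side consumer of the new mbox messages might look roughly like the sketch below. Only struct nix_bp_cfg_req/nix_bp_cfg_rsp and the NIX_BP_ENABLE message come from this patch; the otx2_* helper names and the way the response is fetched are assumed PF-driver plumbing, shown for illustration only.

    /* Illustrative sketch only: ask the AF for a backpressure ID covering the
     * PF's first RX channel and pull the assigned BPID out of the response.
     * otx2_mbox_alloc_msg_nix_bp_enable(), otx2_sync_mbox_msg() and struct mbox
     * are assumed PF-driver plumbing, not part of this AF patch.
     */
    static int otx2_request_bpid(struct mbox *mbox, u16 *bpid)
    {
            struct nix_bp_cfg_req *req;
            struct nix_bp_cfg_rsp *rsp;
            int err;

            req = otx2_mbox_alloc_msg_nix_bp_enable(mbox);
            if (!req)
                    return -ENOMEM;

            req->chan_base = 0;     /* start at the PF's first RX channel */
            req->chan_cnt = 1;      /* cover a single channel */
            req->bpid_per_chan = 0; /* one BPID for the whole range */

            err = otx2_sync_mbox_msg(mbox);
            if (err)
                    return err;

            rsp = (struct nix_bp_cfg_rsp *)
                  otx2_mbox_get_rsp(&mbox->mbox, 0 /* PF */, &req->hdr);
            if (IS_ERR(rsp))
                    return PTR_ERR(rsp);

            *bpid = rsp->chan_bpid[0] & 0x3FF; /* low 10 bits hold the BPID */
            return 0;
    }

The PF would then write this BPID into its Aura and CQ contexts through the NPA/NIX admin queues so the hardware can assert backpressure on the mapped channel.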
1 parent 48938b1 commit 27150bc

3 files changed: 182 additions and 6 deletions

drivers/net/ethernet/marvell/octeontx2/af/mbox.h

Lines changed: 23 additions & 1 deletion
@@ -125,7 +125,7 @@ static inline struct mbox_msghdr *otx2_mbox_alloc_msg(struct otx2_mbox *mbox,
 M(READY, 0x001, ready, msg_req, ready_msg_rsp) \
 M(ATTACH_RESOURCES, 0x002, attach_resources, rsrc_attach, msg_rsp) \
 M(DETACH_RESOURCES, 0x003, detach_resources, rsrc_detach, msg_rsp) \
-M(MSIX_OFFSET, 0x004, msix_offset, msg_req, msix_offset_rsp) \
+M(MSIX_OFFSET, 0x005, msix_offset, msg_req, msix_offset_rsp) \
 M(VF_FLR, 0x006, vf_flr, msg_req, msg_rsp) \
 M(GET_HW_CAP, 0x008, get_hw_cap, msg_req, get_hw_cap_rsp) \
 /* CGX mbox IDs (range 0x200 - 0x3FF) */ \
@@ -211,6 +211,9 @@ M(NIX_LSO_FORMAT_CFG, 0x8011, nix_lso_format_cfg, \
                nix_lso_format_cfg, \
                nix_lso_format_cfg_rsp) \
 M(NIX_RXVLAN_ALLOC, 0x8012, nix_rxvlan_alloc, msg_req, msg_rsp) \
+M(NIX_BP_ENABLE, 0x8016, nix_bp_enable, nix_bp_cfg_req, \
+               nix_bp_cfg_rsp) \
+M(NIX_BP_DISABLE, 0x8017, nix_bp_disable, nix_bp_cfg_req, msg_rsp) \
 M(NIX_GET_MAC_ADDR, 0x8018, nix_get_mac_addr, msg_req, nix_get_mac_addr_rsp) \
 
 /* Messages initiated by AF (range 0xC00 - 0xDFF) */
@@ -676,6 +679,25 @@ struct nix_lso_format_cfg_rsp {
        u8 lso_format_idx;
 };
 
+struct nix_bp_cfg_req {
+       struct mbox_msghdr hdr;
+       u16 chan_base; /* Starting channel number */
+       u8 chan_cnt; /* Number of channels */
+       u8 bpid_per_chan;
+       /* bpid_per_chan = 0 assigns single bp id for range of channels */
+       /* bpid_per_chan = 1 assigns separate bp id for each channel */
+};
+
+/* PF can be mapped to either CGX or LBK interface,
+ * so maximum 64 channels are possible.
+ */
+#define NIX_MAX_BPID_CHAN 64
+struct nix_bp_cfg_rsp {
+       struct mbox_msghdr hdr;
+       u16 chan_bpid[NIX_MAX_BPID_CHAN]; /* Channel and bpid mapping */
+       u8 chan_cnt; /* Number of channels for which bpids are assigned */
+};
+
 /* NPC mbox message structs */
 
 #define NPC_MCAM_ENTRY_INVALID 0xFFFF
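Each entry in nix_bp_cfg_rsp.chan_bpid[] packs the requested channel offset together with the BPID the AF assigned; the encoding is visible in rvu_mbox_handler_nix_bp_enable() in rvu_nix.c below. The decode helpers here, with made-up macro and function names, just restate that layout:

    /* Sketch of the chan_bpid[] encoding written by the AF:
     *   bits 9:0   - assigned BPID
     *   bits 15:10 - channel offset relative to the request's chan_base
     *                (at most NIX_MAX_BPID_CHAN - 1 = 63)
     * Macro and helper names are illustrative, not from the patch.
     */
    #define NIX_BP_BPID_MASK   0x3FF
    #define NIX_BP_CHAN_SHIFT  10
    #define NIX_BP_CHAN_MASK   0x3F

    static inline u16 nix_bp_rsp_bpid(u16 chan_bpid)
    {
            return chan_bpid & NIX_BP_BPID_MASK;
    }

    static inline u16 nix_bp_rsp_chan(u16 chan_bpid)
    {
            return (chan_bpid >> NIX_BP_CHAN_SHIFT) & NIX_BP_CHAN_MASK;
    }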

drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c

Lines changed: 149 additions & 2 deletions
@@ -18,6 +18,8 @@
 #include "cgx.h"
 
 static int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add);
+static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
+                           int type, int chan_id);
 
 enum mc_tbl_sz {
        MC_TBL_SZ_256,
@@ -273,6 +275,142 @@ static void nix_interface_deinit(struct rvu *rvu, u16 pcifunc, u8 nixlf)
        rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
 }
 
+int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
+                                   struct nix_bp_cfg_req *req,
+                                   struct msg_rsp *rsp)
+{
+       u16 pcifunc = req->hdr.pcifunc;
+       struct rvu_pfvf *pfvf;
+       int blkaddr, pf, type;
+       u16 chan_base, chan;
+       u64 cfg;
+
+       pf = rvu_get_pf(pcifunc);
+       type = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
+       if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
+               return 0;
+
+       pfvf = rvu_get_pfvf(rvu, pcifunc);
+       blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+
+       chan_base = pfvf->rx_chan_base + req->chan_base;
+       for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) {
+               cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan));
+               rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan),
+                           cfg & ~BIT_ULL(16));
+       }
+       return 0;
+}
+
+static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
+                           int type, int chan_id)
+{
+       int bpid, blkaddr, lmac_chan_cnt;
+       struct rvu_hwinfo *hw = rvu->hw;
+       u16 cgx_bpid_cnt, lbk_bpid_cnt;
+       struct rvu_pfvf *pfvf;
+       u8 cgx_id, lmac_id;
+       u64 cfg;
+
+       blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc);
+       cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
+       lmac_chan_cnt = cfg & 0xFF;
+
+       cgx_bpid_cnt = hw->cgx_links * lmac_chan_cnt;
+       lbk_bpid_cnt = hw->lbk_links * ((cfg >> 16) & 0xFF);
+
+       pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
+
+       /* Backpressure IDs range division
+        * CGX channels are mapped to (0 - 191) BPIDs
+        * LBK channels are mapped to (192 - 255) BPIDs
+        * SDP channels are mapped to (256 - 511) BPIDs
+        *
+        * LMAC channels and bpids are mapped as follows
+        * cgx(0)_lmac(0)_chan(0 - 15) = bpid(0 - 15)
+        * cgx(0)_lmac(1)_chan(0 - 15) = bpid(16 - 31) ....
+        * cgx(1)_lmac(0)_chan(0 - 15) = bpid(64 - 79) ....
+        */
+       switch (type) {
+       case NIX_INTF_TYPE_CGX:
+               if ((req->chan_base + req->chan_cnt) > 15)
+                       return -EINVAL;
+               rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);
+               /* Assign bpid based on cgx, lmac and chan id */
+               bpid = (cgx_id * hw->lmac_per_cgx * lmac_chan_cnt) +
+                       (lmac_id * lmac_chan_cnt) + req->chan_base;
+
+               if (req->bpid_per_chan)
+                       bpid += chan_id;
+               if (bpid > cgx_bpid_cnt)
+                       return -EINVAL;
+               break;
+
+       case NIX_INTF_TYPE_LBK:
+               if ((req->chan_base + req->chan_cnt) > 63)
+                       return -EINVAL;
+               bpid = cgx_bpid_cnt + req->chan_base;
+               if (req->bpid_per_chan)
+                       bpid += chan_id;
+               if (bpid > (cgx_bpid_cnt + lbk_bpid_cnt))
+                       return -EINVAL;
+               break;
+       default:
+               return -EINVAL;
+       }
+       return bpid;
+}
+
+int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu,
+                                  struct nix_bp_cfg_req *req,
+                                  struct nix_bp_cfg_rsp *rsp)
+{
+       int blkaddr, pf, type, chan_id = 0;
+       u16 pcifunc = req->hdr.pcifunc;
+       struct rvu_pfvf *pfvf;
+       u16 chan_base, chan;
+       s16 bpid, bpid_base;
+       u64 cfg;
+
+       pf = rvu_get_pf(pcifunc);
+       type = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
+
+       /* Enable backpressure only for CGX mapped PFs and LBK interface */
+       if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
+               return 0;
+
+       pfvf = rvu_get_pfvf(rvu, pcifunc);
+       blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+
+       bpid_base = rvu_nix_get_bpid(rvu, req, type, chan_id);
+       chan_base = pfvf->rx_chan_base + req->chan_base;
+       bpid = bpid_base;
+
+       for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) {
+               if (bpid < 0) {
+                       dev_warn(rvu->dev, "Failed to enable backpressure\n");
+                       return -EINVAL;
+               }
+
+               cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan));
+               rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan),
+                           cfg | (bpid & 0xFF) | BIT_ULL(16));
+               chan_id++;
+               bpid = rvu_nix_get_bpid(rvu, req, type, chan_id);
+       }
+
+       for (chan = 0; chan < req->chan_cnt; chan++) {
+               /* Map channel to the bpid assigned to it */
+               rsp->chan_bpid[chan] = ((req->chan_base + chan) & 0x7F) << 10 |
+                                       (bpid_base & 0x3FF);
+               if (req->bpid_per_chan)
+                       bpid_base++;
+       }
+       rsp->chan_cnt = req->chan_cnt;
+
+       return 0;
+}
+
 static void nix_setup_lso_tso_l3(struct rvu *rvu, int blkaddr,
                                 u64 format, bool v4, u64 *fidx)
 {
@@ -565,6 +703,11 @@ static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
         */
        inst.res_addr = (u64)aq->res->iova;
 
+       /* Hardware uses same aq->res->base for updating result of
+        * previous instruction hence wait here till it is done.
+        */
+       spin_lock(&aq->lock);
+
        /* Clean result + context memory */
        memset(aq->res->base, 0, aq->res->entry_sz);
        /* Context needs to be written at RES_ADDR + 128 */
@@ -609,11 +752,10 @@ static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
                break;
        default:
                rc = NIX_AF_ERR_AQ_ENQUEUE;
+               spin_unlock(&aq->lock);
                return rc;
        }
 
-       spin_lock(&aq->lock);
-
        /* Submit the instruction to AQ */
        rc = nix_aq_enqueue_wait(rvu, block, &inst);
        if (rc) {
@@ -718,6 +860,8 @@ static int nix_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
        if (req->ctype == NIX_AQ_CTYPE_CQ) {
                aq_req.cq.ena = 0;
                aq_req.cq_mask.ena = 1;
+               aq_req.cq.bp_ena = 0;
+               aq_req.cq_mask.bp_ena = 1;
                q_cnt = pfvf->cq_ctx->qsize;
                bmap = pfvf->cq_bmap;
        }
@@ -3061,6 +3205,9 @@ int rvu_nix_init(struct rvu *rvu)
 
                /* Initialize CGX/LBK/SDP link credits, min/max pkt lengths */
                nix_link_config(rvu, blkaddr);
+
+               /* Enable Channel backpressure */
+               rvu_write64(rvu, blkaddr, NIX_AF_RX_CFG, BIT_ULL(0));
        }
        return 0;
 }
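The CGX branch of rvu_nix_get_bpid() computes BPIDs purely from the CGX, LMAC and channel indices. The standalone check below replays that arithmetic in userspace; the values of 16 channels per LMAC and 4 LMACs per CGX are assumptions implied by the mapping comment in the patch (at run time they come from NIX_AF_CONST and the RVU hw info).

    #include <stdio.h>

    /* Standalone check of the CGX BPID arithmetic from rvu_nix_get_bpid(). */
    static int cgx_bpid(int cgx_id, int lmac_id, int chan,
                        int lmac_per_cgx, int lmac_chan_cnt)
    {
            return cgx_id * lmac_per_cgx * lmac_chan_cnt +
                   lmac_id * lmac_chan_cnt + chan;
    }

    int main(void)
    {
            /* Expected from the patch comment: 0, 16 and 69 respectively */
            printf("cgx0 lmac0 chan0 -> bpid %d\n", cgx_bpid(0, 0, 0, 4, 16));
            printf("cgx0 lmac1 chan0 -> bpid %d\n", cgx_bpid(0, 1, 0, 4, 16));
            printf("cgx1 lmac0 chan5 -> bpid %d\n", cgx_bpid(1, 0, 5, 4, 16));
            return 0;
    }

The handler then writes the low 8 bits of the chosen BPID into NIX_AF_RX_CHANX_CFG together with BIT(16), which is the per-channel backpressure enable bit that nix_bp_disable clears again.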

drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c

Lines changed: 10 additions & 3 deletions
@@ -94,6 +94,11 @@ int rvu_npa_aq_enq_inst(struct rvu *rvu, struct npa_aq_enq_req *req,
         */
        inst.res_addr = (u64)aq->res->iova;
 
+       /* Hardware uses same aq->res->base for updating result of
+        * previous instruction hence wait here till it is done.
+        */
+       spin_lock(&aq->lock);
+
        /* Clean result + context memory */
        memset(aq->res->base, 0, aq->res->entry_sz);
        /* Context needs to be written at RES_ADDR + 128 */
@@ -138,10 +143,10 @@ int rvu_npa_aq_enq_inst(struct rvu *rvu, struct npa_aq_enq_req *req,
                break;
        }
 
-       if (rc)
+       if (rc) {
+               spin_unlock(&aq->lock);
                return rc;
-
-       spin_lock(&aq->lock);
+       }
 
        /* Submit the instruction to AQ */
        rc = npa_aq_enqueue_wait(rvu, block, &inst);
@@ -218,6 +223,8 @@ static int npa_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
        } else if (req->ctype == NPA_AQ_CTYPE_AURA) {
                aq_req.aura.ena = 0;
                aq_req.aura_mask.ena = 1;
+               aq_req.aura.bp_ena = 0;
+               aq_req.aura_mask.bp_ena = 1;
                cnt = pfvf->aura_ctx->qsize;
                bmap = pfvf->aura_bmap;
        }
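The spin_lock movement in both AQ paths follows from the new comment: the admin queue reuses a single result buffer (aq->res->base), so the lock must be held from the moment that buffer is cleared, and every early return after that point has to drop it. The fragment below is a generic sketch of that pattern with hypothetical admin_queue types and an aq_enqueue_wait() stand-in; it is not the driver code.

    #include <linux/spinlock.h>
    #include <linux/string.h>
    #include <linux/types.h>

    /* Hypothetical types for illustration of the locking pattern only. */
    struct aq_result {
            void *base;        /* shared DMA result/context buffer */
            size_t entry_sz;
    };

    struct admin_queue {
            spinlock_t lock;
            struct aq_result *res;
    };

    /* Stand-in for nix/npa_aq_enqueue_wait(): submits one instruction and
     * polls until the hardware has written its status into aq->res->base.
     */
    int aq_enqueue_wait(struct admin_queue *aq, const void *inst);

    static int aq_submit(struct admin_queue *aq, const void *inst,
                         void *result, size_t res_sz)
    {
            int rc;

            spin_lock(&aq->lock);                   /* serialize use of res->base */
            memset(aq->res->base, 0, aq->res->entry_sz);

            rc = aq_enqueue_wait(aq, inst);
            if (rc) {
                    spin_unlock(&aq->lock);         /* error path drops the lock */
                    return rc;
            }

            if (result)
                    memcpy(result, aq->res->base, res_sz);
            spin_unlock(&aq->lock);
            return 0;
    }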
