Skip to content

Commit 019aba0

Browse files
Naveen Mamindlapalli authored and Paolo Abeni committed
octeontx2-af: Modify SMQ flush sequence to drop packets
The current implementation of the SMQ flush sequence waits for the packets in the TM pipeline to be transmitted out of the link. This sequence doesn't succeed in HW when there is any issue with the link, such as a lack of link credits, link down, or other traffic that is fully occupying the link bandwidth (QoS). This patch modifies the SMQ flush sequence to drop the packets after the TL1 level (SQM) instead of polling for the packets to be sent out of the RPM/CGX link.

Fixes: 5d9b976 ("octeontx2-af: Support fixed transmit scheduler topology")
Signed-off-by: Naveen Mamindlapalli <[email protected]>
Reviewed-by: Sunil Kovvuri Goutham <[email protected]>
Link: https://patch.msgid.link/[email protected]
Signed-off-by: Paolo Abeni <[email protected]>
1 parent 4c80022 commit 019aba0

File tree

2 files changed

+48
-14
lines changed

2 files changed

+48
-14
lines changed

drivers/net/ethernet/marvell/octeontx2/af/rvu.h

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -319,6 +319,7 @@ struct nix_mark_format {
319319

320320
/* smq(flush) to tl1 cir/pir info */
321321
struct nix_smq_tree_ctx {
322+
u16 schq;
322323
u64 cir_off;
323324
u64 cir_val;
324325
u64 pir_off;
@@ -328,8 +329,6 @@ struct nix_smq_tree_ctx {
328329
/* smq flush context */
329330
struct nix_smq_flush_ctx {
330331
int smq;
331-
u16 tl1_schq;
332-
u16 tl2_schq;
333332
struct nix_smq_tree_ctx smq_tree_ctx[NIX_TXSCH_LVL_CNT];
334333
};
335334

drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c

Lines changed: 47 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -2259,14 +2259,13 @@ static void nix_smq_flush_fill_ctx(struct rvu *rvu, int blkaddr, int smq,
22592259
schq = smq;
22602260
for (lvl = NIX_TXSCH_LVL_SMQ; lvl <= NIX_TXSCH_LVL_TL1; lvl++) {
22612261
smq_tree_ctx = &smq_flush_ctx->smq_tree_ctx[lvl];
2262+
smq_tree_ctx->schq = schq;
22622263
if (lvl == NIX_TXSCH_LVL_TL1) {
2263-
smq_flush_ctx->tl1_schq = schq;
22642264
smq_tree_ctx->cir_off = NIX_AF_TL1X_CIR(schq);
22652265
smq_tree_ctx->pir_off = 0;
22662266
smq_tree_ctx->pir_val = 0;
22672267
parent_off = 0;
22682268
} else if (lvl == NIX_TXSCH_LVL_TL2) {
2269-
smq_flush_ctx->tl2_schq = schq;
22702269
smq_tree_ctx->cir_off = NIX_AF_TL2X_CIR(schq);
22712270
smq_tree_ctx->pir_off = NIX_AF_TL2X_PIR(schq);
22722271
parent_off = NIX_AF_TL2X_PARENT(schq);
@@ -2301,25 +2300,26 @@ static void nix_smq_flush_enadis_xoff(struct rvu *rvu, int blkaddr,
23012300
{
23022301
struct nix_txsch *txsch;
23032302
struct nix_hw *nix_hw;
2303+
int tl2, tl2_schq;
23042304
u64 regoff;
2305-
int tl2;
23062305

23072306
nix_hw = get_nix_hw(rvu->hw, blkaddr);
23082307
if (!nix_hw)
23092308
return;
23102309

23112310
/* loop through all TL2s with matching PF_FUNC */
23122311
txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL2];
2312+
tl2_schq = smq_flush_ctx->smq_tree_ctx[NIX_TXSCH_LVL_TL2].schq;
23132313
for (tl2 = 0; tl2 < txsch->schq.max; tl2++) {
23142314
/* skip the smq(flush) TL2 */
2315-
if (tl2 == smq_flush_ctx->tl2_schq)
2315+
if (tl2 == tl2_schq)
23162316
continue;
23172317
/* skip unused TL2s */
23182318
if (TXSCH_MAP_FLAGS(txsch->pfvf_map[tl2]) & NIX_TXSCHQ_FREE)
23192319
continue;
23202320
/* skip if PF_FUNC doesn't match */
23212321
if ((TXSCH_MAP_FUNC(txsch->pfvf_map[tl2]) & ~RVU_PFVF_FUNC_MASK) !=
2322-
(TXSCH_MAP_FUNC(txsch->pfvf_map[smq_flush_ctx->tl2_schq] &
2322+
(TXSCH_MAP_FUNC(txsch->pfvf_map[tl2_schq] &
23232323
~RVU_PFVF_FUNC_MASK)))
23242324
continue;
23252325
/* enable/disable XOFF */
@@ -2361,10 +2361,12 @@ static int nix_smq_flush(struct rvu *rvu, int blkaddr,
23612361
int smq, u16 pcifunc, int nixlf)
23622362
{
23632363
struct nix_smq_flush_ctx *smq_flush_ctx;
2364+
int err, restore_tx_en = 0, i;
23642365
int pf = rvu_get_pf(pcifunc);
23652366
u8 cgx_id = 0, lmac_id = 0;
2366-
int err, restore_tx_en = 0;
2367-
u64 cfg;
2367+
u16 tl2_tl3_link_schq;
2368+
u8 link, link_level;
2369+
u64 cfg, bmap = 0;
23682370

23692371
if (!is_rvu_otx2(rvu)) {
23702372
/* Skip SMQ flush if pkt count is zero */
@@ -2388,16 +2390,38 @@ static int nix_smq_flush(struct rvu *rvu, int blkaddr,
23882390
nix_smq_flush_enadis_xoff(rvu, blkaddr, smq_flush_ctx, true);
23892391
nix_smq_flush_enadis_rate(rvu, blkaddr, smq_flush_ctx, false);
23902392

2391-
cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq));
2392-
/* Do SMQ flush and set enqueue xoff */
2393-
cfg |= BIT_ULL(50) | BIT_ULL(49);
2394-
rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq), cfg);
2395-
23962393
/* Disable backpressure from physical link,
23972394
* otherwise SMQ flush may stall.
23982395
*/
23992396
rvu_cgx_enadis_rx_bp(rvu, pf, false);
24002397

2398+
link_level = rvu_read64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ?
2399+
NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2;
2400+
tl2_tl3_link_schq = smq_flush_ctx->smq_tree_ctx[link_level].schq;
2401+
link = smq_flush_ctx->smq_tree_ctx[NIX_TXSCH_LVL_TL1].schq;
2402+
2403+
/* SMQ set enqueue xoff */
2404+
cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq));
2405+
cfg |= BIT_ULL(50);
2406+
rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq), cfg);
2407+
2408+
/* Clear all NIX_AF_TL3_TL2_LINK_CFG[ENA] for the TL3/TL2 queue */
2409+
for (i = 0; i < (rvu->hw->cgx_links + rvu->hw->lbk_links); i++) {
2410+
cfg = rvu_read64(rvu, blkaddr,
2411+
NIX_AF_TL3_TL2X_LINKX_CFG(tl2_tl3_link_schq, link));
2412+
if (!(cfg & BIT_ULL(12)))
2413+
continue;
2414+
bmap |= (1 << i);
2415+
cfg &= ~BIT_ULL(12);
2416+
rvu_write64(rvu, blkaddr,
2417+
NIX_AF_TL3_TL2X_LINKX_CFG(tl2_tl3_link_schq, link), cfg);
2418+
}
2419+
2420+
/* Do SMQ flush and set enqueue xoff */
2421+
cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq));
2422+
cfg |= BIT_ULL(50) | BIT_ULL(49);
2423+
rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq), cfg);
2424+
24012425
/* Wait for flush to complete */
24022426
err = rvu_poll_reg(rvu, blkaddr,
24032427
NIX_AF_SMQX_CFG(smq), BIT_ULL(49), true);
@@ -2406,6 +2430,17 @@ static int nix_smq_flush(struct rvu *rvu, int blkaddr,
24062430
"NIXLF%d: SMQ%d flush failed, txlink might be busy\n",
24072431
nixlf, smq);
24082432

2433+
/* Set NIX_AF_TL3_TL2_LINKX_CFG[ENA] for the TL3/TL2 queue */
2434+
for (i = 0; i < (rvu->hw->cgx_links + rvu->hw->lbk_links); i++) {
2435+
if (!(bmap & (1 << i)))
2436+
continue;
2437+
cfg = rvu_read64(rvu, blkaddr,
2438+
NIX_AF_TL3_TL2X_LINKX_CFG(tl2_tl3_link_schq, link));
2439+
cfg |= BIT_ULL(12);
2440+
rvu_write64(rvu, blkaddr,
2441+
NIX_AF_TL3_TL2X_LINKX_CFG(tl2_tl3_link_schq, link), cfg);
2442+
}
2443+
24092444
/* clear XOFF on TL2s */
24102445
nix_smq_flush_enadis_rate(rvu, blkaddr, smq_flush_ctx, true);
24112446
nix_smq_flush_enadis_xoff(rvu, blkaddr, smq_flush_ctx, false);

0 commit comments

Comments
 (0)