|
18 | 18 | #include "cgx.h"
|
19 | 19 |
|
20 | 20 | static int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add);
|
| 21 | +static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req, |
| 22 | + int type, int chan_id); |
21 | 23 |
|
22 | 24 | enum mc_tbl_sz {
|
23 | 25 | MC_TBL_SZ_256,
|
@@ -273,6 +275,142 @@ static void nix_interface_deinit(struct rvu *rvu, u16 pcifunc, u8 nixlf)
|
273 | 275 | rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
|
274 | 276 | }
|
275 | 277 |
|
| 278 | +int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu, |
| 279 | + struct nix_bp_cfg_req *req, |
| 280 | + struct msg_rsp *rsp) |
| 281 | +{ |
| 282 | + u16 pcifunc = req->hdr.pcifunc; |
| 283 | + struct rvu_pfvf *pfvf; |
| 284 | + int blkaddr, pf, type; |
| 285 | + u16 chan_base, chan; |
| 286 | + u64 cfg; |
| 287 | + |
| 288 | + pf = rvu_get_pf(pcifunc); |
| 289 | + type = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX; |
| 290 | + if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK) |
| 291 | + return 0; |
| 292 | + |
| 293 | + pfvf = rvu_get_pfvf(rvu, pcifunc); |
| 294 | + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); |
| 295 | + |
| 296 | + chan_base = pfvf->rx_chan_base + req->chan_base; |
| 297 | + for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) { |
| 298 | + cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan)); |
| 299 | + rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan), |
| 300 | + cfg & ~BIT_ULL(16)); |
| 301 | + } |
| 302 | + return 0; |
| 303 | +} |
| 304 | + |
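Note on the disable handler above: it only clears bit 16 of NIX_AF_RX_CHANX_CFG for each requested channel (the bit the enable path further down sets to turn backpressure on), leaving whatever BPID was written into the low bits of the register untouched. A minimal standalone sketch of that read-modify-write, with a made-up starting value rather than a real register read (not driver code):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t cfg = (1ULL << 16) | 0x42;     /* BP enabled, example BPID 0x42 */

            cfg &= ~(1ULL << 16);                   /* clear only the enable bit */
            printf("cfg = 0x%llx\n", (unsigned long long)cfg);  /* prints 0x42 */
            return 0;
    }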
| 305 | +static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req, |
| 306 | + int type, int chan_id) |
| 307 | +{ |
| 308 | + int bpid, blkaddr, lmac_chan_cnt; |
| 309 | + struct rvu_hwinfo *hw = rvu->hw; |
| 310 | + u16 cgx_bpid_cnt, lbk_bpid_cnt; |
| 311 | + struct rvu_pfvf *pfvf; |
| 312 | + u8 cgx_id, lmac_id; |
| 313 | + u64 cfg; |
| 314 | + |
| 315 | + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc); |
| 316 | + cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST); |
| 317 | + lmac_chan_cnt = cfg & 0xFF; |
| 318 | + |
| 319 | + cgx_bpid_cnt = hw->cgx_links * lmac_chan_cnt; |
| 320 | + lbk_bpid_cnt = hw->lbk_links * ((cfg >> 16) & 0xFF); |
| 321 | + |
| 322 | + pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc); |
| 323 | + |
| 324 | + /* Backpressure ID range division |
| 325 | + * CGX channels are mapped to (0 - 191) BPIDs |
| 326 | + * LBK channels are mapped to (192 - 255) BPIDs |
| 327 | + * SDP channels are mapped to (256 - 511) BPIDs |
| 328 | + * |
| 329 | + * LMAC channels and bpids are mapped as follows |
| 330 | + * cgx(0)_lmac(0)_chan(0 - 15) = bpid(0 - 15) |
| 331 | + * cgx(0)_lmac(1)_chan(0 - 15) = bpid(16 - 31) .... |
| 332 | + * cgx(1)_lmac(0)_chan(0 - 15) = bpid(64 - 79) .... |
| 333 | + */ |
| 334 | + switch (type) { |
| 335 | + case NIX_INTF_TYPE_CGX: |
| 336 | + if ((req->chan_base + req->chan_cnt) > 15) |
| 337 | + return -EINVAL; |
| 338 | + rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id); |
| 339 | + /* Assign bpid based on cgx, lmac and chan id */ |
| 340 | + bpid = (cgx_id * hw->lmac_per_cgx * lmac_chan_cnt) + |
| 341 | + (lmac_id * lmac_chan_cnt) + req->chan_base; |
| 342 | + |
| 343 | + if (req->bpid_per_chan) |
| 344 | + bpid += chan_id; |
| 345 | + if (bpid > cgx_bpid_cnt) |
| 346 | + return -EINVAL; |
| 347 | + break; |
| 348 | + |
| 349 | + case NIX_INTF_TYPE_LBK: |
| 350 | + if ((req->chan_base + req->chan_cnt) > 63) |
| 351 | + return -EINVAL; |
| 352 | + bpid = cgx_bpid_cnt + req->chan_base; |
| 353 | + if (req->bpid_per_chan) |
| 354 | + bpid += chan_id; |
| 355 | + if (bpid > (cgx_bpid_cnt + lbk_bpid_cnt)) |
| 356 | + return -EINVAL; |
| 357 | + break; |
| 358 | + default: |
| 359 | + return -EINVAL; |
| 360 | + } |
| 361 | + return bpid; |
| 362 | +} |
| 363 | + |
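The CGX branch of rvu_nix_get_bpid() above derives the BPID purely from the cgx/lmac/channel indices. A standalone sketch of that arithmetic, assuming 16 channels per LMAC (NIX_AF_CONST[7:0]) and 4 LMACs per CGX as implied by the mapping comment in the function; the constants and helper name here are illustrative, not from the driver:

    #include <stdio.h>

    #define LMAC_CHAN_CNT 16        /* assumed NIX_AF_CONST[7:0] */
    #define LMAC_PER_CGX   4        /* assumed hw->lmac_per_cgx */

    static int example_cgx_bpid(int cgx_id, int lmac_id, int chan)
    {
            return cgx_id * LMAC_PER_CGX * LMAC_CHAN_CNT +
                   lmac_id * LMAC_CHAN_CNT + chan;
    }

    int main(void)
    {
            /* cgx(1)_lmac(0)_chan(3) lands at BPID 67, inside the 64 - 79
             * range quoted in the mapping comment.
             */
            printf("bpid = %d\n", example_cgx_bpid(1, 0, 3));
            return 0;
    }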
| 364 | +int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu, |
| 365 | + struct nix_bp_cfg_req *req, |
| 366 | + struct nix_bp_cfg_rsp *rsp) |
| 367 | +{ |
| 368 | + int blkaddr, pf, type, chan_id = 0; |
| 369 | + u16 pcifunc = req->hdr.pcifunc; |
| 370 | + struct rvu_pfvf *pfvf; |
| 371 | + u16 chan_base, chan; |
| 372 | + s16 bpid, bpid_base; |
| 373 | + u64 cfg; |
| 374 | + |
| 375 | + pf = rvu_get_pf(pcifunc); |
| 376 | + type = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX; |
| 377 | + |
| 378 | + /* Enable backpressure only for CGX mapped PFs and LBK interface */ |
| 379 | + if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK) |
| 380 | + return 0; |
| 381 | + |
| 382 | + pfvf = rvu_get_pfvf(rvu, pcifunc); |
| 383 | + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); |
| 384 | + |
| 385 | + bpid_base = rvu_nix_get_bpid(rvu, req, type, chan_id); |
| 386 | + chan_base = pfvf->rx_chan_base + req->chan_base; |
| 387 | + bpid = bpid_base; |
| 388 | + |
| 389 | + for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) { |
| 390 | + if (bpid < 0) { |
| 391 | + dev_warn(rvu->dev, "Failed to enable backpressure\n"); |
| 392 | + return -EINVAL; |
| 393 | + } |
| 394 | + |
| 395 | + cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan)); |
| 396 | + rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan), |
| 397 | + cfg | (bpid & 0xFF) | BIT_ULL(16)); |
| 398 | + chan_id++; |
| 399 | + bpid = rvu_nix_get_bpid(rvu, req, type, chan_id); |
| 400 | + } |
| 401 | + |
| 402 | + for (chan = 0; chan < req->chan_cnt; chan++) { |
| 403 | + /* Map channel and bpid assigned to it */ |
| 404 | + rsp->chan_bpid[chan] = ((req->chan_base + chan) & 0x7F) << 10 | |
| 405 | + (bpid_base & 0x3FF); |
| 406 | + if (req->bpid_per_chan) |
| 407 | + bpid_base++; |
| 408 | + } |
| 409 | + rsp->chan_cnt = req->chan_cnt; |
| 410 | + |
| 411 | + return 0; |
| 412 | +} |
| 413 | + |
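Each rsp->chan_bpid[] entry written by the enable handler packs the channel number into bits 16:10 and the assigned BPID into bits 9:0, per the masks used in the loop above. A small sketch of unpacking such an entry on the consumer side; the values and variable names are illustrative only:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* Example entry: channel 3, BPID 67, packed as in the handler */
            uint32_t chan_bpid = ((3u & 0x7F) << 10) | (67u & 0x3FF);
            uint32_t chan = (chan_bpid >> 10) & 0x7F;
            uint32_t bpid = chan_bpid & 0x3FF;

            printf("chan %u -> bpid %u\n", chan, bpid);
            return 0;
    }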
276 | 414 | static void nix_setup_lso_tso_l3(struct rvu *rvu, int blkaddr,
|
277 | 415 | u64 format, bool v4, u64 *fidx)
|
278 | 416 | {
|
@@ -565,6 +703,11 @@ static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
|
565 | 703 | */
|
566 | 704 | inst.res_addr = (u64)aq->res->iova;
|
567 | 705 |
|
| 706 | + /* Hardware uses the same aq->res->base for updating the result of |
| 707 | + * the previous instruction, hence wait here till it is done. |
| 708 | + */ |
| 709 | + spin_lock(&aq->lock); |
| 710 | + |
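Taking the spin_lock() here, rather than just before the enqueue as before (see the removed lines further down), widens the critical section so that clearing aq->res->base, writing the new context into it, and the submit-and-wait step are all serialized under one lock. A minimal standalone sketch of that pattern, using a pthread mutex in place of the driver's spinlock; names are illustrative, not from the driver:

    #include <pthread.h>
    #include <stdio.h>
    #include <string.h>

    static pthread_mutex_t aq_lock = PTHREAD_MUTEX_INITIALIZER;
    static char res_base[128];                      /* stands in for aq->res->base */

    static void submit_one(const char *ctx)
    {
            pthread_mutex_lock(&aq_lock);           /* taken before touching the shared buffer */
            memset(res_base, 0, sizeof(res_base));  /* clean result + context memory */
            strncpy(res_base, ctx, sizeof(res_base) - 1);
            /* ... enqueue, wait for completion, copy the result out ... */
            pthread_mutex_unlock(&aq_lock);
    }

    int main(void)
    {
            submit_one("rq-init");
            printf("done\n");
            return 0;
    }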
568 | 711 | /* Clean result + context memory */
|
569 | 712 | memset(aq->res->base, 0, aq->res->entry_sz);
|
570 | 713 | /* Context needs to be written at RES_ADDR + 128 */
|
@@ -609,11 +752,10 @@ static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
|
609 | 752 | break;
|
610 | 753 | default:
|
611 | 754 | rc = NIX_AF_ERR_AQ_ENQUEUE;
|
| 755 | + spin_unlock(&aq->lock); |
612 | 756 | return rc;
|
613 | 757 | }
|
614 | 758 |
|
615 | | - spin_lock(&aq->lock); |
616 | | - |
617 | 759 | /* Submit the instruction to AQ */
|
618 | 760 | rc = nix_aq_enqueue_wait(rvu, block, &inst);
|
619 | 761 | if (rc) {
|
@@ -718,6 +860,8 @@ static int nix_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
|
718 | 860 | if (req->ctype == NIX_AQ_CTYPE_CQ) {
|
719 | 861 | aq_req.cq.ena = 0;
|
720 | 862 | aq_req.cq_mask.ena = 1;
|
| 863 | + aq_req.cq.bp_ena = 0; |
| 864 | + aq_req.cq_mask.bp_ena = 1; |
721 | 865 | q_cnt = pfvf->cq_ctx->qsize;
|
722 | 866 | bmap = pfvf->cq_bmap;
|
723 | 867 | }
|
@@ -3061,6 +3205,9 @@ int rvu_nix_init(struct rvu *rvu)
|
3061 | 3205 |
|
3062 | 3206 | /* Initialize CGX/LBK/SDP link credits, min/max pkt lengths */
|
3063 | 3207 | nix_link_config(rvu, blkaddr);
|
| 3208 | + |
| 3209 | + /* Enable Channel backpressure */ |
| 3210 | + rvu_write64(rvu, blkaddr, NIX_AF_RX_CFG, BIT_ULL(0)); |
3064 | 3211 | }
|
3065 | 3212 | return 0;
|
3066 | 3213 | }
|