|
40 | 40 | #include "hinic_hw_io.h"
|
41 | 41 | #include "hinic_hw_dev.h"
|
42 | 42 |
|
/* field layout of cmdq completion event queue element (CEQE) data */
#define CMDQ_CEQE_TYPE_SHIFT			0

#define CMDQ_CEQE_TYPE_MASK			0x7

/* extract a named field (e.g. TYPE) from CEQE data */
#define CMDQ_CEQE_GET(val, member)		\
			(((val) >> CMDQ_CEQE_##member##_SHIFT) \
			 & CMDQ_CEQE_##member##_MASK)

/* field layout of the error code inside a cmdq WQE status word */
#define CMDQ_WQE_ERRCODE_VAL_SHIFT		20

#define CMDQ_WQE_ERRCODE_VAL_MASK		0xF

/* extract a named field (e.g. VAL) from a cmdq WQE status word */
#define CMDQ_WQE_ERRCODE_GET(val, member)	\
			(((val) >> CMDQ_WQE_ERRCODE_##member##_SHIFT) \
			 & CMDQ_WQE_ERRCODE_##member##_MASK)
43 | 59 | #define CMDQ_DB_PI_OFF(pi) (((u16)LOWER_8_BITS(pi)) << 3)
|
44 | 60 |
|
45 | 61 | #define CMDQ_DB_ADDR(db_base, pi) ((db_base) + CMDQ_DB_PI_OFF(pi))
|
46 | 62 |
|
47 | 63 | #define CMDQ_WQE_HEADER(wqe) ((struct hinic_cmdq_header *)(wqe))
|
48 | 64 |
|
/* nonzero when HW has toggled the busy bit, i.e. the wqe is completed */
#define CMDQ_WQE_COMPLETED(ctrl_info)	\
			HINIC_CMDQ_CTRL_GET(ctrl_info, HW_BUSY_BIT)
| 67 | + |
49 | 68 | #define FIRST_DATA_TO_WRITE_LAST sizeof(u64)
|
50 | 69 |
|
51 | 70 | #define CMDQ_DB_OFF SZ_2K
|
@@ -145,6 +164,22 @@ void hinic_free_cmdq_buf(struct hinic_cmdqs *cmdqs,
|
145 | 164 | pci_pool_free(cmdqs->cmdq_buf_pool, cmdq_buf->buf, cmdq_buf->dma_addr);
|
146 | 165 | }
|
147 | 166 |
|
| 167 | +static unsigned int cmdq_wqe_size_from_bdlen(enum bufdesc_len len) |
| 168 | +{ |
| 169 | + unsigned int wqe_size = 0; |
| 170 | + |
| 171 | + switch (len) { |
| 172 | + case BUFDESC_LCMD_LEN: |
| 173 | + wqe_size = WQE_LCMD_SIZE; |
| 174 | + break; |
| 175 | + case BUFDESC_SCMD_LEN: |
| 176 | + wqe_size = WQE_SCMD_SIZE; |
| 177 | + break; |
| 178 | + } |
| 179 | + |
| 180 | + return wqe_size; |
| 181 | +} |
| 182 | + |
148 | 183 | static void cmdq_set_sge_completion(struct hinic_cmdq_completion *completion,
|
149 | 184 | struct hinic_cmdq_buf *buf_out)
|
150 | 185 | {
|
@@ -210,6 +245,15 @@ static void cmdq_set_lcmd_bufdesc(struct hinic_cmdq_wqe_lcmd *wqe_lcmd,
|
210 | 245 | hinic_set_sge(&wqe_lcmd->buf_desc.sge, buf_in->dma_addr, buf_in->size);
|
211 | 246 | }
|
212 | 247 |
|
| 248 | +static void cmdq_set_direct_wqe_data(struct hinic_cmdq_direct_wqe *wqe, |
| 249 | + void *buf_in, u32 in_size) |
| 250 | +{ |
| 251 | + struct hinic_cmdq_wqe_scmd *wqe_scmd = &wqe->wqe_scmd; |
| 252 | + |
| 253 | + wqe_scmd->buf_desc.buf_len = in_size; |
| 254 | + memcpy(wqe_scmd->buf_desc.data, buf_in, in_size); |
| 255 | +} |
| 256 | + |
213 | 257 | static void cmdq_set_lcmd_wqe(struct hinic_cmdq_wqe *wqe,
|
214 | 258 | enum cmdq_cmd_type cmd_type,
|
215 | 259 | struct hinic_cmdq_buf *buf_in,
|
@@ -238,6 +282,36 @@ static void cmdq_set_lcmd_wqe(struct hinic_cmdq_wqe *wqe,
|
238 | 282 | cmdq_set_lcmd_bufdesc(wqe_lcmd, buf_in);
|
239 | 283 | }
|
240 | 284 |
|
| 285 | +static void cmdq_set_direct_wqe(struct hinic_cmdq_wqe *wqe, |
| 286 | + enum cmdq_cmd_type cmd_type, |
| 287 | + void *buf_in, u16 in_size, |
| 288 | + struct hinic_cmdq_buf *buf_out, int wrapped, |
| 289 | + enum hinic_cmd_ack_type ack_type, |
| 290 | + enum hinic_mod_type mod, u8 cmd, u16 prod_idx) |
| 291 | +{ |
| 292 | + struct hinic_cmdq_direct_wqe *direct_wqe = &wqe->direct_wqe; |
| 293 | + enum completion_format complete_format; |
| 294 | + struct hinic_cmdq_wqe_scmd *wqe_scmd; |
| 295 | + |
| 296 | + wqe_scmd = &direct_wqe->wqe_scmd; |
| 297 | + |
| 298 | + switch (cmd_type) { |
| 299 | + case CMDQ_CMD_SYNC_SGE_RESP: |
| 300 | + complete_format = COMPLETE_SGE; |
| 301 | + cmdq_set_sge_completion(&wqe_scmd->completion, buf_out); |
| 302 | + break; |
| 303 | + case CMDQ_CMD_SYNC_DIRECT_RESP: |
| 304 | + complete_format = COMPLETE_DIRECT; |
| 305 | + wqe_scmd->completion.direct_resp = 0; |
| 306 | + break; |
| 307 | + } |
| 308 | + |
| 309 | + cmdq_prepare_wqe_ctrl(wqe, wrapped, ack_type, mod, cmd, prod_idx, |
| 310 | + complete_format, DATA_DIRECT, BUFDESC_SCMD_LEN); |
| 311 | + |
| 312 | + cmdq_set_direct_wqe_data(direct_wqe, buf_in, in_size); |
| 313 | +} |
| 314 | + |
241 | 315 | static void cmdq_wqe_fill(void *dst, void *src)
|
242 | 316 | {
|
243 | 317 | memcpy(dst + FIRST_DATA_TO_WRITE_LAST, src + FIRST_DATA_TO_WRITE_LAST,
|
@@ -352,6 +426,52 @@ static int cmdq_sync_cmd_direct_resp(struct hinic_cmdq *cmdq,
|
352 | 426 | return 0;
|
353 | 427 | }
|
354 | 428 |
|
/**
 * cmdq_set_arm_bit - send a set-arm command on the cmdq
 * @cmdq: the cmdq to send the command on
 * @buf_in: the arm command data
 * @in_size: size of the command data
 *
 * Return 0 - Success, negative - Failure
 **/
static int cmdq_set_arm_bit(struct hinic_cmdq *cmdq, void *buf_in,
			    u16 in_size)
{
	struct hinic_cmdq_wqe *curr_cmdq_wqe, cmdq_wqe;
	u16 curr_prod_idx, next_prod_idx;
	struct hinic_wq *wq = cmdq->wq;
	struct hinic_hw_wqe *hw_wqe;
	int wrapped, num_wqebbs;

	/* Keep doorbell index correct */
	spin_lock(&cmdq->cmdq_lock);

	/* WQE_SIZE = WQEBB_SIZE, we will get the wq element and not shadow*/
	hw_wqe = hinic_get_wqe(wq, WQE_SCMD_SIZE, &curr_prod_idx);
	if (IS_ERR(hw_wqe)) {
		spin_unlock(&cmdq->cmdq_lock);
		return -EBUSY;
	}

	curr_cmdq_wqe = &hw_wqe->cmdq_wqe;

	/* snapshot the wrap bit for this wqe before it may be toggled below */
	wrapped = cmdq->wrapped;

	num_wqebbs = ALIGN(WQE_SCMD_SIZE, wq->wqebb_size) / wq->wqebb_size;
	next_prod_idx = curr_prod_idx + num_wqebbs;
	if (next_prod_idx >= wq->q_depth) {
		/* producer index wrapped past the end of the queue */
		cmdq->wrapped = !cmdq->wrapped;
		next_prod_idx -= wq->q_depth;
	}

	/* build the wqe in a local copy first, then write it out in one go */
	cmdq_set_direct_wqe(&cmdq_wqe, CMDQ_CMD_SYNC_DIRECT_RESP, buf_in,
			    in_size, NULL, wrapped, HINIC_CMD_ACK_TYPE_CMDQ,
			    HINIC_MOD_COMM, CMDQ_SET_ARM_CMD, curr_prod_idx);

	/* The data that is written to HW should be in Big Endian Format */
	hinic_cpu_to_be32(&cmdq_wqe, WQE_SCMD_SIZE);

	/* cmdq wqe is not shadow, therefore wqe will be written to wq */
	cmdq_wqe_fill(curr_cmdq_wqe, &cmdq_wqe);

	/* ring the doorbell to notify HW of the new producer index */
	cmdq_set_db(cmdq, HINIC_CMDQ_SYNC, next_prod_idx);

	spin_unlock(&cmdq->cmdq_lock);
	return 0;
}
| 474 | + |
355 | 475 | static int cmdq_params_valid(struct hinic_cmdq_buf *buf_in)
|
356 | 476 | {
|
357 | 477 | if (buf_in->size > HINIC_CMDQ_MAX_DATA_SIZE)
|
@@ -388,14 +508,189 @@ int hinic_cmdq_direct_resp(struct hinic_cmdqs *cmdqs,
|
388 | 508 | mod, cmd, buf_in, resp);
|
389 | 509 | }
|
390 | 510 |
|
| 511 | +/** |
| 512 | + * hinic_set_arm_bit - set arm bit for enable interrupt again |
| 513 | + * @cmdqs: the cmdqs |
| 514 | + * @q_type: type of queue to set the arm bit for |
| 515 | + * @q_id: the queue number |
| 516 | + * |
| 517 | + * Return 0 - Success, negative - Failure |
| 518 | + **/ |
| 519 | +int hinic_set_arm_bit(struct hinic_cmdqs *cmdqs, |
| 520 | + enum hinic_set_arm_qtype q_type, u32 q_id) |
| 521 | +{ |
| 522 | + struct hinic_cmdq *cmdq = &cmdqs->cmdq[HINIC_CMDQ_SYNC]; |
| 523 | + struct hinic_hwif *hwif = cmdqs->hwif; |
| 524 | + struct pci_dev *pdev = hwif->pdev; |
| 525 | + struct hinic_cmdq_arm_bit arm_bit; |
| 526 | + int err; |
| 527 | + |
| 528 | + arm_bit.q_type = q_type; |
| 529 | + arm_bit.q_id = q_id; |
| 530 | + |
| 531 | + err = cmdq_set_arm_bit(cmdq, &arm_bit, sizeof(arm_bit)); |
| 532 | + if (err) { |
| 533 | + dev_err(&pdev->dev, "Failed to set arm for qid %d\n", q_id); |
| 534 | + return err; |
| 535 | + } |
| 536 | + |
| 537 | + return 0; |
| 538 | +} |
| 539 | + |
| 540 | +static void clear_wqe_complete_bit(struct hinic_cmdq *cmdq, |
| 541 | + struct hinic_cmdq_wqe *wqe) |
| 542 | +{ |
| 543 | + u32 header_info = be32_to_cpu(CMDQ_WQE_HEADER(wqe)->header_info); |
| 544 | + unsigned int bufdesc_len, wqe_size; |
| 545 | + struct hinic_ctrl *ctrl; |
| 546 | + |
| 547 | + bufdesc_len = HINIC_CMDQ_WQE_HEADER_GET(header_info, BUFDESC_LEN); |
| 548 | + wqe_size = cmdq_wqe_size_from_bdlen(bufdesc_len); |
| 549 | + if (wqe_size == WQE_LCMD_SIZE) { |
| 550 | + struct hinic_cmdq_wqe_lcmd *wqe_lcmd = &wqe->wqe_lcmd; |
| 551 | + |
| 552 | + ctrl = &wqe_lcmd->ctrl; |
| 553 | + } else { |
| 554 | + struct hinic_cmdq_direct_wqe *direct_wqe = &wqe->direct_wqe; |
| 555 | + struct hinic_cmdq_wqe_scmd *wqe_scmd; |
| 556 | + |
| 557 | + wqe_scmd = &direct_wqe->wqe_scmd; |
| 558 | + ctrl = &wqe_scmd->ctrl; |
| 559 | + } |
| 560 | + |
| 561 | + /* clear HW busy bit */ |
| 562 | + ctrl->ctrl_info = 0; |
| 563 | + |
| 564 | + wmb(); /* verify wqe is clear */ |
| 565 | +} |
| 566 | + |
| 567 | +/** |
| 568 | + * cmdq_arm_ceq_handler - cmdq completion event handler for arm command |
| 569 | + * @cmdq: the cmdq of the arm command |
| 570 | + * @wqe: the wqe of the arm command |
| 571 | + * |
| 572 | + * Return 0 - Success, negative - Failure |
| 573 | + **/ |
| 574 | +static int cmdq_arm_ceq_handler(struct hinic_cmdq *cmdq, |
| 575 | + struct hinic_cmdq_wqe *wqe) |
| 576 | +{ |
| 577 | + struct hinic_cmdq_direct_wqe *direct_wqe = &wqe->direct_wqe; |
| 578 | + struct hinic_cmdq_wqe_scmd *wqe_scmd; |
| 579 | + struct hinic_ctrl *ctrl; |
| 580 | + u32 ctrl_info; |
| 581 | + |
| 582 | + wqe_scmd = &direct_wqe->wqe_scmd; |
| 583 | + ctrl = &wqe_scmd->ctrl; |
| 584 | + ctrl_info = be32_to_cpu(ctrl->ctrl_info); |
| 585 | + |
| 586 | + /* HW should toggle the HW BUSY BIT */ |
| 587 | + if (!CMDQ_WQE_COMPLETED(ctrl_info)) |
| 588 | + return -EBUSY; |
| 589 | + |
| 590 | + clear_wqe_complete_bit(cmdq, wqe); |
| 591 | + |
| 592 | + hinic_put_wqe(cmdq->wq, WQE_SCMD_SIZE); |
| 593 | + return 0; |
| 594 | +} |
| 595 | + |
| 596 | +static void cmdq_update_errcode(struct hinic_cmdq *cmdq, u16 prod_idx, |
| 597 | + int errcode) |
| 598 | +{ |
| 599 | + if (cmdq->errcode[prod_idx]) |
| 600 | + *cmdq->errcode[prod_idx] = errcode; |
| 601 | +} |
| 602 | + |
/**
 * cmdq_sync_cmd_handler - cmdq completion event handler for sync command
 * @cmdq: the cmdq of the command
 * @cons_idx: the consumer index to update the error code for
 * @errcode: the error code
 **/
static void cmdq_sync_cmd_handler(struct hinic_cmdq *cmdq, u16 cons_idx,
				  int errcode)
{
	/* completed wqe's consumer index equals its producer index */
	u16 prod_idx = cons_idx;

	spin_lock(&cmdq->cmdq_lock);
	cmdq_update_errcode(cmdq, prod_idx, errcode);

	wmb(); /* write all before update for the command request */

	/* wake the synchronous submitter waiting on this wqe, if any */
	if (cmdq->done[prod_idx])
		complete(cmdq->done[prod_idx]);
	spin_unlock(&cmdq->cmdq_lock);
}
| 623 | + |
| 624 | +static int cmdq_cmd_ceq_handler(struct hinic_cmdq *cmdq, u16 ci, |
| 625 | + struct hinic_cmdq_wqe *cmdq_wqe) |
| 626 | +{ |
| 627 | + struct hinic_cmdq_wqe_lcmd *wqe_lcmd = &cmdq_wqe->wqe_lcmd; |
| 628 | + struct hinic_status *status = &wqe_lcmd->status; |
| 629 | + struct hinic_ctrl *ctrl = &wqe_lcmd->ctrl; |
| 630 | + int errcode; |
| 631 | + |
| 632 | + if (!CMDQ_WQE_COMPLETED(be32_to_cpu(ctrl->ctrl_info))) |
| 633 | + return -EBUSY; |
| 634 | + |
| 635 | + errcode = CMDQ_WQE_ERRCODE_GET(be32_to_cpu(status->status_info), VAL); |
| 636 | + |
| 637 | + cmdq_sync_cmd_handler(cmdq, ci, errcode); |
| 638 | + |
| 639 | + clear_wqe_complete_bit(cmdq, cmdq_wqe); |
| 640 | + hinic_put_wqe(cmdq->wq, WQE_LCMD_SIZE); |
| 641 | + return 0; |
| 642 | +} |
| 643 | + |
/**
 * cmdq_ceq_handler - cmdq completion event handler
 * @handle: private data for the handler(cmdqs)
 * @ceqe_data: ceq element data
 **/
static void cmdq_ceq_handler(void *handle, u32 ceqe_data)
{
	enum hinic_cmdq_type cmdq_type = CMDQ_CEQE_GET(ceqe_data, TYPE);
	struct hinic_cmdqs *cmdqs = (struct hinic_cmdqs *)handle;
	struct hinic_cmdq *cmdq = &cmdqs->cmdq[cmdq_type];
	struct hinic_cmdq_header *header;
	struct hinic_hw_wqe *hw_wqe;
	int err, set_arm = 0;
	u32 saved_data;
	u16 ci;

	/* Read the smallest wqe size for getting wqe size */
	while ((hw_wqe = hinic_read_wqe(cmdq->wq, WQE_SCMD_SIZE, &ci))) {
		if (IS_ERR(hw_wqe))
			break;

		/* the saved_data ARM flag in the header distinguishes an
		 * arm-bit wqe from a regular (long) command wqe
		 */
		header = CMDQ_WQE_HEADER(&hw_wqe->cmdq_wqe);
		saved_data = be32_to_cpu(header->saved_data);

		if (HINIC_SAVED_DATA_GET(saved_data, ARM)) {
			/* arm_bit was set until here */
			set_arm = 0;

			if (cmdq_arm_ceq_handler(cmdq, &hw_wqe->cmdq_wqe))
				break;
		} else {
			set_arm = 1;

			/* re-read at the full long-command size now that the
			 * header identified this as a regular command wqe
			 */
			hw_wqe = hinic_read_wqe(cmdq->wq, WQE_LCMD_SIZE, &ci);
			if (IS_ERR(hw_wqe))
				break;

			if (cmdq_cmd_ceq_handler(cmdq, ci, &hw_wqe->cmdq_wqe))
				break;
		}
	}

	/* re-arm the queue so the next completion raises an event; only
	 * needed if the last processed wqe was a regular command
	 */
	if (set_arm) {
		struct hinic_hwif *hwif = cmdqs->hwif;
		struct pci_dev *pdev = hwif->pdev;

		err = hinic_set_arm_bit(cmdqs, HINIC_SET_ARM_CMDQ, cmdq_type);
		if (err)
			dev_err(&pdev->dev, "Failed to set arm for CMDQ\n");
	}
}
|
400 | 695 |
|
401 | 696 | /**
|
|
0 commit comments