#define SL_RX_BCAST_CHK_MSK (PORT_BASE + 0x2c0)
#define PHYCTRL_OOB_RESTART_MSK (PORT_BASE + 0x2c4)

+/* HW dma structures */
+/* Delivery queue header */
+/* dw0 */
+#define CMD_HDR_RESP_REPORT_OFF 5
+#define CMD_HDR_RESP_REPORT_MSK (0x1 << CMD_HDR_RESP_REPORT_OFF)
+#define CMD_HDR_TLR_CTRL_OFF 6
+#define CMD_HDR_TLR_CTRL_MSK (0x3 << CMD_HDR_TLR_CTRL_OFF)
+#define CMD_HDR_PORT_OFF 18
+#define CMD_HDR_PORT_MSK (0xf << CMD_HDR_PORT_OFF)
+#define CMD_HDR_PRIORITY_OFF 27
+#define CMD_HDR_PRIORITY_MSK (0x1 << CMD_HDR_PRIORITY_OFF)
+#define CMD_HDR_CMD_OFF 29
+#define CMD_HDR_CMD_MSK (0x7 << CMD_HDR_CMD_OFF)
+/* dw1 */
+#define CMD_HDR_DIR_OFF 5
+#define CMD_HDR_DIR_MSK (0x3 << CMD_HDR_DIR_OFF)
+#define CMD_HDR_VDTL_OFF 10
+#define CMD_HDR_VDTL_MSK (0x1 << CMD_HDR_VDTL_OFF)
+#define CMD_HDR_FRAME_TYPE_OFF 11
+#define CMD_HDR_FRAME_TYPE_MSK (0x1f << CMD_HDR_FRAME_TYPE_OFF)
+#define CMD_HDR_DEV_ID_OFF 16
+#define CMD_HDR_DEV_ID_MSK (0xffff << CMD_HDR_DEV_ID_OFF)
+/* dw2 */
+#define CMD_HDR_CFL_OFF 0
+#define CMD_HDR_CFL_MSK (0x1ff << CMD_HDR_CFL_OFF)
+#define CMD_HDR_MRFL_OFF 15
+#define CMD_HDR_MRFL_MSK (0x1ff << CMD_HDR_MRFL_OFF)
+#define CMD_HDR_SG_MOD_OFF 24
+#define CMD_HDR_SG_MOD_MSK (0x3 << CMD_HDR_SG_MOD_OFF)
+/* dw6 */
+#define CMD_HDR_DIF_SGL_LEN_OFF 0
+#define CMD_HDR_DIF_SGL_LEN_MSK (0xffff << CMD_HDR_DIF_SGL_LEN_OFF)
+#define CMD_HDR_DATA_SGL_LEN_OFF 16
+#define CMD_HDR_DATA_SGL_LEN_MSK (0xffff << CMD_HDR_DATA_SGL_LEN_OFF)
+
/* Completion header */
/* dw0 */
#define CMPLT_HDR_CMPLT_OFF 0
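The CMD_HDR_* field macros added above follow the usual offset-plus-mask pattern for packing fields into the 32-bit command header dwords. As a rough illustration of how such a pair is used (the helpers below are hypothetical and not part of this patch; prep_ssp_v3_hw further down builds the dwords inline):

/* Hypothetical helpers, for illustration only: set and read the PORT
 * field of dw0 using CMD_HDR_PORT_OFF/CMD_HDR_PORT_MSK defined above.
 */
static inline u32 cmd_hdr_set_port(u32 dw0, u32 port)
{
	dw0 &= ~CMD_HDR_PORT_MSK;
	dw0 |= (port << CMD_HDR_PORT_OFF) & CMD_HDR_PORT_MSK;
	return dw0;
}

static inline u32 cmd_hdr_get_port(u32 dw0)
{
	return (dw0 & CMD_HDR_PORT_MSK) >> CMD_HDR_PORT_OFF;
}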
@@ -217,13 +252,25 @@ enum {
	HISI_SAS_PHY_INT_NR
};

+#define DIR_NO_DATA 0
+#define DIR_TO_INI 1
+#define DIR_TO_DEVICE 2
+#define DIR_RESERVED 3
+
static u32 hisi_sas_read32(struct hisi_hba *hisi_hba, u32 off)
{
	void __iomem *regs = hisi_hba->regs + off;

	return readl(regs);
}

+static u32 hisi_sas_read32_relaxed(struct hisi_hba *hisi_hba, u32 off)
+{
+	void __iomem *regs = hisi_hba->regs + off;
+
+	return readl_relaxed(regs);
+}
+
static void hisi_sas_write32(struct hisi_hba *hisi_hba, u32 off, u32 val)
{
	void __iomem *regs = hisi_hba->regs + off;
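hisi_sas_read32_relaxed() mirrors hisi_sas_read32() but uses readl_relaxed(), which performs the same MMIO read without readl()'s ordering against normal memory accesses; it is used below to poll the delivery queue read pointer, where no such ordering is needed. A minimal sketch of the difference, illustrative only and not part of the patch:

/* Illustrative only: both calls read the same 32-bit register; readl()
 * additionally orders the read against later accesses to normal memory
 * (such as DMA buffers), while readl_relaxed() does not.
 */
static u32 read_example(struct hisi_hba *hisi_hba, u32 off)
{
	void __iomem *regs = hisi_hba->regs + off;
	u32 ordered = readl(regs);		/* ordered MMIO read */
	u32 relaxed = readl_relaxed(regs);	/* relaxed MMIO read */

	return ordered | relaxed;
}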
@@ -448,6 +495,163 @@ static void sl_notify_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
	hisi_sas_phy_write32(hisi_hba, phy_no, SL_CONTROL, sl_control);
}

+/*
+ * The call path to this function, up to and including the write of the
+ * delivery queue write pointer, must be safe from interruption.
+ */
+static int
+get_free_slot_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_dq *dq)
+{
+	struct device *dev = hisi_hba->dev;
+	int queue = dq->id;
+	u32 r, w;
+
+	w = dq->wr_point;
+	r = hisi_sas_read32_relaxed(hisi_hba,
+				    DLVRY_Q_0_RD_PTR + (queue * 0x14));
+	if (r == (w + 1) % HISI_SAS_QUEUE_SLOTS) {
+		dev_warn(dev, "full queue=%d r=%d w=%d\n",
+			 queue, r, w);
+		return -EAGAIN;
+	}
+
+	return 0;
+}
+
+static void start_delivery_v3_hw(struct hisi_sas_dq *dq)
+{
+	struct hisi_hba *hisi_hba = dq->hisi_hba;
+	int dlvry_queue = dq->slot_prep->dlvry_queue;
+	int dlvry_queue_slot = dq->slot_prep->dlvry_queue_slot;
+
+	dq->wr_point = ++dlvry_queue_slot % HISI_SAS_QUEUE_SLOTS;
+	hisi_sas_write32(hisi_hba, DLVRY_Q_0_WR_PTR + (dlvry_queue * 0x14),
+			 dq->wr_point);
+}
+
+static int prep_prd_sge_v3_hw(struct hisi_hba *hisi_hba,
+			      struct hisi_sas_slot *slot,
+			      struct hisi_sas_cmd_hdr *hdr,
+			      struct scatterlist *scatter,
+			      int n_elem)
+{
+	struct device *dev = hisi_hba->dev;
+	struct scatterlist *sg;
+	int i;
+
+	if (n_elem > HISI_SAS_SGE_PAGE_CNT) {
+		dev_err(dev, "prd err: n_elem(%d) > HISI_SAS_SGE_PAGE_CNT\n",
+			n_elem);
+		return -EINVAL;
+	}
+
+	slot->sge_page = dma_pool_alloc(hisi_hba->sge_page_pool, GFP_ATOMIC,
+					&slot->sge_page_dma);
+	if (!slot->sge_page)
+		return -ENOMEM;
+
+	for_each_sg(scatter, sg, n_elem, i) {
+		struct hisi_sas_sge *entry = &slot->sge_page->sge[i];
+
+		entry->addr = cpu_to_le64(sg_dma_address(sg));
+		entry->page_ctrl_0 = entry->page_ctrl_1 = 0;
+		entry->data_len = cpu_to_le32(sg_dma_len(sg));
+		entry->data_off = 0;
+	}
+
+	hdr->prd_table_addr = cpu_to_le64(slot->sge_page_dma);
+	hdr->sg_len = cpu_to_le32(n_elem << CMD_HDR_DATA_SGL_LEN_OFF);
+
+	return 0;
+}
+
+static int prep_ssp_v3_hw(struct hisi_hba *hisi_hba,
+			  struct hisi_sas_slot *slot, int is_tmf,
+			  struct hisi_sas_tmf_task *tmf)
+{
+	struct sas_task *task = slot->task;
+	struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
+	struct domain_device *device = task->dev;
+	struct hisi_sas_device *sas_dev = device->lldd_dev;
+	struct hisi_sas_port *port = slot->port;
+	struct sas_ssp_task *ssp_task = &task->ssp_task;
+	struct scsi_cmnd *scsi_cmnd = ssp_task->cmd;
+	int has_data = 0, rc, priority = is_tmf;
+	u8 *buf_cmd;
+	u32 dw1 = 0, dw2 = 0;
+
+	hdr->dw0 = cpu_to_le32((1 << CMD_HDR_RESP_REPORT_OFF) |
+			       (2 << CMD_HDR_TLR_CTRL_OFF) |
+			       (port->id << CMD_HDR_PORT_OFF) |
+			       (priority << CMD_HDR_PRIORITY_OFF) |
+			       (1 << CMD_HDR_CMD_OFF)); /* ssp */
+
+	dw1 = 1 << CMD_HDR_VDTL_OFF;
+	if (is_tmf) {
+		dw1 |= 2 << CMD_HDR_FRAME_TYPE_OFF;
+		dw1 |= DIR_NO_DATA << CMD_HDR_DIR_OFF;
+	} else {
+		dw1 |= 1 << CMD_HDR_FRAME_TYPE_OFF;
+		switch (scsi_cmnd->sc_data_direction) {
+		case DMA_TO_DEVICE:
+			has_data = 1;
+			dw1 |= DIR_TO_DEVICE << CMD_HDR_DIR_OFF;
+			break;
+		case DMA_FROM_DEVICE:
+			has_data = 1;
+			dw1 |= DIR_TO_INI << CMD_HDR_DIR_OFF;
+			break;
+		default:
+			dw1 &= ~CMD_HDR_DIR_MSK;
+		}
+	}
+
+	/* map itct entry */
+	dw1 |= sas_dev->device_id << CMD_HDR_DEV_ID_OFF;
+	hdr->dw1 = cpu_to_le32(dw1);
+
+	dw2 = (((sizeof(struct ssp_command_iu) + sizeof(struct ssp_frame_hdr)
+	      + 3) / 4) << CMD_HDR_CFL_OFF) |
+	      ((HISI_SAS_MAX_SSP_RESP_SZ / 4) << CMD_HDR_MRFL_OFF) |
+	      (2 << CMD_HDR_SG_MOD_OFF);
+	hdr->dw2 = cpu_to_le32(dw2);
+	hdr->transfer_tags = cpu_to_le32(slot->idx);
+
+	if (has_data) {
+		rc = prep_prd_sge_v3_hw(hisi_hba, slot, hdr, task->scatter,
+					slot->n_elem);
+		if (rc)
+			return rc;
+	}
+
+	hdr->data_transfer_len = cpu_to_le32(task->total_xfer_len);
+	hdr->cmd_table_addr = cpu_to_le64(slot->command_table_dma);
+	hdr->sts_buffer_addr = cpu_to_le64(slot->status_buffer_dma);
+
+	buf_cmd = slot->command_table + sizeof(struct ssp_frame_hdr);
+	memcpy(buf_cmd, ssp_task->LUN, 8);
+
+	if (!is_tmf) {
+		buf_cmd[9] = ssp_task->task_attr | (ssp_task->task_prio << 3);
+		memcpy(buf_cmd + 12, scsi_cmnd->cmnd, scsi_cmnd->cmd_len);
+	} else {
+		buf_cmd[10] = tmf->tmf;
+		switch (tmf->tmf) {
+		case TMF_ABORT_TASK:
+		case TMF_QUERY_TASK:
+			buf_cmd[12] =
+				(tmf->tag_of_task_to_be_managed >> 8) & 0xff;
+			buf_cmd[13] =
+				tmf->tag_of_task_to_be_managed & 0xff;
+			break;
+		default:
+			break;
+		}
+	}
+
+	return 0;
+}
+
static int phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
{
	int i, res = 0;
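The new prep and queue functions are wired into the v3 method table below and are driven by the common hisi_sas code rather than called directly. A simplified sketch of the queuing sequence they implement (hypothetical glue code, with delivery queue locking and slot bookkeeping omitted):

/* Simplified, hypothetical sketch (not code from this patch): the order
 * in which the hooks registered below are exercised when queuing one SSP
 * command on a delivery queue. It assumes the caller has already set up
 * the slot (slot->idx, DMA buffers) and dq->slot_prep.
 */
static int queue_one_ssp_sketch(struct hisi_hba *hisi_hba,
				struct hisi_sas_dq *dq,
				struct hisi_sas_slot *slot)
{
	int rc;

	/* Bail out with -EAGAIN while the delivery queue ring is full. */
	rc = get_free_slot_v3_hw(hisi_hba, dq);
	if (rc)
		return rc;

	/* Fill in the command header and, for data commands, the PRD table. */
	rc = prep_ssp_v3_hw(hisi_hba, slot, 0, NULL);
	if (rc)
		return rc;

	/* Advance the write pointer so the HW starts fetching the command. */
	start_delivery_v3_hw(dq);

	return 0;
}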
@@ -1020,6 +1224,10 @@ static const struct hisi_sas_hw hisi_sas_v3_hw = {
	.max_command_entries = HISI_SAS_COMMAND_ENTRIES_V3_HW,
	.complete_hdr_size = sizeof(struct hisi_sas_complete_v3_hdr),
	.sl_notify = sl_notify_v3_hw,
+	.prep_ssp = prep_ssp_v3_hw,
+	.get_free_slot = get_free_slot_v3_hw,
+	.start_delivery = start_delivery_v3_hw,
+	.slot_complete = slot_complete_v3_hw,
	.phys_init = phys_init_v3_hw,
};
