@@ -38,21 +38,31 @@
 #include <linux/scatterlist.h>
 
 #include "iscsi_iser.h"
+static
+int iser_fast_reg_fmr(struct iscsi_iser_task *iser_task,
+		      struct iser_data_buf *mem,
+		      struct iser_reg_resources *rsc,
+		      struct iser_mem_reg *mem_reg);
+static
+int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
+		     struct iser_data_buf *mem,
+		     struct iser_reg_resources *rsc,
+		     struct iser_mem_reg *mem_reg);
 
 static struct iser_reg_ops fastreg_ops = {
 	.alloc_reg_res = iser_alloc_fastreg_pool,
 	.free_reg_res = iser_free_fastreg_pool,
-	.reg_rdma_mem = iser_reg_rdma_mem_fastreg,
-	.unreg_rdma_mem = iser_unreg_mem_fastreg,
+	.reg_mem = iser_fast_reg_mr,
+	.unreg_mem = iser_unreg_mem_fastreg,
 	.reg_desc_get = iser_reg_desc_get_fr,
 	.reg_desc_put = iser_reg_desc_put_fr,
 };
 
 static struct iser_reg_ops fmr_ops = {
 	.alloc_reg_res = iser_alloc_fmr_pool,
 	.free_reg_res = iser_free_fmr_pool,
-	.reg_rdma_mem = iser_reg_rdma_mem_fmr,
-	.unreg_rdma_mem = iser_unreg_mem_fmr,
+	.reg_mem = iser_fast_reg_fmr,
+	.unreg_mem = iser_unreg_mem_fmr,
 	.reg_desc_get = iser_reg_desc_get_fmr,
 	.reg_desc_put = iser_reg_desc_put_fmr,
 };
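The two ops tables above now route through generic reg_mem/unreg_mem callbacks instead of the transport-specific reg_rdma_mem/unreg_rdma_mem entries. A minimal sketch of the struct iser_reg_ops shape this implies — the real declaration lives in iscsi_iser.h (not part of this hunk), and the alloc_reg_res/free_reg_res argument lists below are assumptions:

/* Sketch (assumption): op table shape implied by the initializers above.
 * The reg_mem/unreg_mem/reg_desc_get/reg_desc_put signatures follow the
 * prototypes and call sites visible in this patch; the alloc/free
 * argument lists are guesses and may differ in iscsi_iser.h.
 */
struct iser_reg_ops {
	int	(*alloc_reg_res)(struct ib_conn *ib_conn, unsigned cmds_max);
	void	(*free_reg_res)(struct ib_conn *ib_conn);
	int	(*reg_mem)(struct iscsi_iser_task *iser_task,
			   struct iser_data_buf *mem,
			   struct iser_reg_resources *rsc,
			   struct iser_mem_reg *mem_reg);
	void	(*unreg_mem)(struct iscsi_iser_task *iser_task,
			     enum iser_data_dir cmd_dir);
	struct iser_fr_desc *(*reg_desc_get)(struct ib_conn *ib_conn);
	void	(*reg_desc_put)(struct ib_conn *ib_conn,
				struct iser_fr_desc *desc);
};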
@@ -574,62 +584,6 @@ void iser_unreg_mem_fastreg(struct iscsi_iser_task *iser_task,
 	reg->mem_h = NULL;
 }
 
-/**
- * iser_reg_rdma_mem_fmr - Registers memory intended for RDMA,
- * using FMR (if possible) obtaining rkey and va
- *
- * returns 0 on success, errno code on failure
- */
-int iser_reg_rdma_mem_fmr(struct iscsi_iser_task *iser_task,
-			  enum iser_data_dir cmd_dir)
-{
-	struct ib_conn *ib_conn = &iser_task->iser_conn->ib_conn;
-	struct iser_device *device = ib_conn->device;
-	struct ib_device *ibdev = device->ib_device;
-	struct iser_data_buf *mem = &iser_task->data[cmd_dir];
-	struct iser_mem_reg *mem_reg;
-	int aligned_len;
-	int err;
-	int i;
-
-	mem_reg = &iser_task->rdma_reg[cmd_dir];
-
-	aligned_len = iser_data_buf_aligned_len(mem, ibdev);
-	if (aligned_len != mem->dma_nents) {
-		err = fall_to_bounce_buf(iser_task, mem, cmd_dir);
-		if (err) {
-			iser_err("failed to allocate bounce buffer\n");
-			return err;
-		}
-	}
-
-	/* if there a single dma entry, FMR is not needed */
-	if (mem->dma_nents == 1) {
-		return iser_reg_dma(device, mem, mem_reg);
-	} else { /* use FMR for multiple dma entries */
-		struct iser_fr_desc *desc;
-
-		desc = device->reg_ops->reg_desc_get(ib_conn);
-		err = iser_fast_reg_fmr(iser_task, mem, &desc->rsc, mem_reg);
-		if (err && err != -EAGAIN) {
-			iser_data_buf_dump(mem, ibdev);
-			iser_err("mem->dma_nents = %d (dlength = 0x%x)\n",
-				 mem->dma_nents,
-				 ntoh24(iser_task->desc.iscsi_header.dlength));
-			iser_err("page_vec: data_size = 0x%x, length = %d, offset = 0x%x\n",
-				 desc->rsc.page_vec->data_size,
-				 desc->rsc.page_vec->length,
-				 desc->rsc.page_vec->offset);
-			for (i = 0; i < desc->rsc.page_vec->length; i++)
-				iser_err("page_vec[%d] = 0x%llx\n", i,
-					 (unsigned long long)desc->rsc.page_vec->pages[i]);
-		}
-		if (err)
-			return err;
-	}
-	return 0;
-}
-
 static void
 iser_set_dif_domain(struct scsi_cmnd *sc, struct ib_sig_attrs *sig_attrs,
 		    struct ib_sig_domain *domain)
@@ -775,19 +729,12 @@ static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
 {
 	struct ib_conn *ib_conn = &iser_task->iser_conn->ib_conn;
 	struct iser_device *device = ib_conn->device;
-	struct ib_mr *mr;
-	struct ib_fast_reg_page_list *frpl;
+	struct ib_mr *mr = rsc->mr;
+	struct ib_fast_reg_page_list *frpl = rsc->frpl;
 	struct ib_send_wr fastreg_wr, inv_wr;
 	struct ib_send_wr *bad_wr, *wr = NULL;
 	int ret, offset, size, plen;
 
-	/* if there a single dma entry, dma mr suffices */
-	if (mem->dma_nents == 1)
-		return iser_reg_dma(device, mem, reg);
-
-	mr = rsc->mr;
-	frpl = rsc->frpl;
-
 	plen = iser_sg_to_page_vec(mem, device->ib_device, frpl->page_list,
 				   &offset, &size);
 	if (plen * SIZE_4K < size) {
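With the single-entry shortcut hoisted out of iser_fast_reg_mr, the function now assumes mem->dma_nents > 1 and is reached only through the reg_mem op. A hedged sketch of a conforming caller — the helper name is illustrative, and the patch's own iser_reg_data_sg in the next hunk formalizes exactly this pattern:

/* Sketch: example_reg_sg() is hypothetical. Callers filter single-entry
 * buffers first, so the reg_mem op (iser_fast_reg_mr on fast-reg devices)
 * only ever builds a fast-reg WR for multi-entry buffers.
 */
static int example_reg_sg(struct iscsi_iser_task *task,
			  struct iser_data_buf *mem,
			  struct iser_fr_desc *desc,
			  struct iser_mem_reg *reg)
{
	struct iser_device *device = task->iser_conn->ib_conn.device;

	if (mem->dma_nents == 1)	/* dma mr suffices, no fast-reg WR */
		return iser_reg_dma(device, mem, reg);

	return device->reg_ops->reg_mem(task, mem, &desc->rsc, reg);
}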
@@ -834,78 +781,113 @@ static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
 	return ret;
 }
 
-/**
- * iser_reg_rdma_mem_fastreg - Registers memory intended for RDMA,
- * using Fast Registration WR (if possible) obtaining rkey and va
- *
- * returns 0 on success, errno code on failure
- */
-int iser_reg_rdma_mem_fastreg(struct iscsi_iser_task *iser_task,
-			      enum iser_data_dir cmd_dir)
+static int
+iser_handle_unaligned_buf(struct iscsi_iser_task *task,
+			  struct iser_data_buf *mem,
+			  enum iser_data_dir dir)
 {
-	struct ib_conn *ib_conn = &iser_task->iser_conn->ib_conn;
-	struct iser_device *device = ib_conn->device;
-	struct ib_device *ibdev = device->ib_device;
-	struct iser_data_buf *mem = &iser_task->data[cmd_dir];
-	struct iser_mem_reg *mem_reg = &iser_task->rdma_reg[cmd_dir];
-	struct iser_fr_desc *desc = NULL;
+	struct iser_conn *iser_conn = task->iser_conn;
+	struct iser_device *device = iser_conn->ib_conn.device;
 	int err, aligned_len;
 
-	aligned_len = iser_data_buf_aligned_len(mem, ibdev);
+	aligned_len = iser_data_buf_aligned_len(mem, device->ib_device);
 	if (aligned_len != mem->dma_nents) {
-		err = fall_to_bounce_buf(iser_task, mem, cmd_dir);
-		if (err) {
-			iser_err("failed to allocate bounce buffer\n");
+		err = fall_to_bounce_buf(task, mem, dir);
+		if (err)
 			return err;
-		}
 	}
 
+	return 0;
+}
+
+static int
+iser_reg_prot_sg(struct iscsi_iser_task *task,
+		 struct iser_data_buf *mem,
+		 struct iser_fr_desc *desc,
+		 struct iser_mem_reg *reg)
+{
+	struct iser_device *device = task->iser_conn->ib_conn.device;
+
+	if (mem->dma_nents == 1)
+		return iser_reg_dma(device, mem, reg);
+
+	return device->reg_ops->reg_mem(task, mem, &desc->pi_ctx->rsc, reg);
+}
+
+static int
+iser_reg_data_sg(struct iscsi_iser_task *task,
+		 struct iser_data_buf *mem,
+		 struct iser_fr_desc *desc,
+		 struct iser_mem_reg *reg)
+{
+	struct iser_device *device = task->iser_conn->ib_conn.device;
+
+	if (mem->dma_nents == 1)
+		return iser_reg_dma(device, mem, reg);
+
+	return device->reg_ops->reg_mem(task, mem, &desc->rsc, reg);
+}
+
+int iser_reg_rdma_mem(struct iscsi_iser_task *task,
+		      enum iser_data_dir dir)
+{
+	struct ib_conn *ib_conn = &task->iser_conn->ib_conn;
+	struct iser_device *device = ib_conn->device;
+	struct iser_data_buf *mem = &task->data[dir];
+	struct iser_mem_reg *reg = &task->rdma_reg[dir];
+	struct iser_fr_desc *desc = NULL;
+	int err;
+
+	err = iser_handle_unaligned_buf(task, mem, dir);
+	if (unlikely(err))
+		return err;
+
 	if (mem->dma_nents != 1 ||
-	    scsi_get_prot_op(iser_task->sc) != SCSI_PROT_NORMAL) {
+	    scsi_get_prot_op(task->sc) != SCSI_PROT_NORMAL) {
 		desc = device->reg_ops->reg_desc_get(ib_conn);
-		mem_reg->mem_h = desc;
+		reg->mem_h = desc;
 	}
 
-	err = iser_fast_reg_mr(iser_task, mem,
-			       desc ? &desc->rsc : NULL, mem_reg);
-	if (err)
+	err = iser_reg_data_sg(task, mem, desc, reg);
+	if (unlikely(err))
 		goto err_reg;
 
-	if (scsi_get_prot_op(iser_task->sc) != SCSI_PROT_NORMAL) {
+	if (scsi_get_prot_op(task->sc) != SCSI_PROT_NORMAL) {
 		struct iser_mem_reg prot_reg;
 
 		memset(&prot_reg, 0, sizeof(prot_reg));
-		if (scsi_prot_sg_count(iser_task->sc)) {
-			mem = &iser_task->prot[cmd_dir];
-			aligned_len = iser_data_buf_aligned_len(mem, ibdev);
-			if (aligned_len != mem->dma_nents) {
-				err = fall_to_bounce_buf(iser_task, mem,
-							 cmd_dir);
-				if (err) {
-					iser_err("failed to allocate bounce buffer\n");
-					return err;
-				}
-			}
+		if (scsi_prot_sg_count(task->sc)) {
+			mem = &task->prot[dir];
+			err = iser_handle_unaligned_buf(task, mem, dir);
+			if (unlikely(err))
+				goto err_reg;
 
-			err = iser_fast_reg_mr(iser_task, mem,
-					       &desc->pi_ctx->rsc, &prot_reg);
-			if (err)
+			err = iser_reg_prot_sg(task, mem, desc, &prot_reg);
+			if (unlikely(err))
 				goto err_reg;
 		}
 
-		err = iser_reg_sig_mr(iser_task, desc->pi_ctx, mem_reg,
-				      &prot_reg, mem_reg);
-		if (err) {
-			iser_err("Failed to register signature mr\n");
-			return err;
-		}
+		err = iser_reg_sig_mr(task, desc->pi_ctx, reg,
+				      &prot_reg, reg);
+		if (unlikely(err))
+			goto err_reg;
+
 		desc->pi_ctx->sig_protected = 1;
 	}
 
 	return 0;
+
 err_reg:
 	if (desc)
 		device->reg_ops->reg_desc_put(ib_conn, desc);
 
 	return err;
 }
+
+void iser_unreg_rdma_mem(struct iscsi_iser_task *task,
+			 enum iser_data_dir dir)
+{
+	struct iser_device *device = task->iser_conn->ib_conn.device;
+
+	device->reg_ops->unreg_mem(task, dir);
+}
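iser_reg_rdma_mem() and iser_unreg_rdma_mem() become the single registration entry points for both FMR and fast-reg devices, with the transport choice hidden behind device->reg_ops. A hedged usage sketch — the caller below is hypothetical (the real callers sit in the iser_initiator.c data path), and ISER_DIR_IN is assumed here to be the existing direction enum value for RDMA reads:

/* Sketch: illustrative data-path caller of the unified API;
 * example_prepare_rdma_read() is hypothetical.
 */
static int example_prepare_rdma_read(struct iscsi_iser_task *task)
{
	int err;

	err = iser_reg_rdma_mem(task, ISER_DIR_IN);
	if (unlikely(err)) {
		iser_err("rdma mem registration failed, err=%d\n", err);
		return err;
	}

	/* ... build and post work requests using task->rdma_reg[ISER_DIR_IN],
	 * then on task teardown release the registration with:
	 *	iser_unreg_rdma_mem(task, ISER_DIR_IN);
	 */
	return 0;
}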