Skip to content

Commit dd0107a

Browse files
Sagi Grimberg authored and dledford committed
IB/iser: set block queue_virt_boundary
The block layer can reliably guarantee that SG lists won't contain gaps (page unaligned) if a driver sets the queue virt_boundary. With this setting the block layer will:
- refuse merges if bios are not aligned to the virtual boundary
- split bios/requests that are not aligned to the virtual boundary
- or, bounce buffer SG_IOs that are not aligned to the virtual boundary

Since iser is working in 4K page size, set the virt_boundary to 4K pages. With this setting, we can now safely remove the bounce buffering logic in iser.

Signed-off-by: Sagi Grimberg <[email protected]>
Reviewed-by: Christoph Hellwig <[email protected]>
Signed-off-by: Doug Ledford <[email protected]>
1 parent 6c760b3 commit dd0107a

File tree

4 files changed

+18
-326
lines changed

4 files changed

+18
-326
lines changed

drivers/infiniband/ulp/iser/iscsi_iser.c

Lines changed: 9 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -762,9 +762,7 @@ iscsi_iser_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *s
762762
stats->r2t_pdus = conn->r2t_pdus_cnt; /* always 0 */
763763
stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt;
764764
stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt;
765-
stats->custom_length = 1;
766-
strcpy(stats->custom[0].desc, "fmr_unalign_cnt");
767-
stats->custom[0].value = conn->fmr_unalign_cnt;
765+
stats->custom_length = 0;
768766
}
769767

770768
static int iscsi_iser_get_ep_param(struct iscsi_endpoint *ep,
@@ -969,6 +967,13 @@ static umode_t iser_attr_is_visible(int param_type, int param)
969967
return 0;
970968
}
971969

970+
static int iscsi_iser_slave_alloc(struct scsi_device *sdev)
971+
{
972+
blk_queue_virt_boundary(sdev->request_queue, ~MASK_4K);
973+
974+
return 0;
975+
}
976+
972977
static struct scsi_host_template iscsi_iser_sht = {
973978
.module = THIS_MODULE,
974979
.name = "iSCSI Initiator over iSER",
@@ -982,6 +987,7 @@ static struct scsi_host_template iscsi_iser_sht = {
982987
.eh_target_reset_handler = iscsi_eh_recover_target,
983988
.target_alloc = iscsi_target_alloc,
984989
.use_clustering = DISABLE_CLUSTERING,
990+
.slave_alloc = iscsi_iser_slave_alloc,
985991
.proc_name = "iscsi_iser",
986992
.this_id = -1,
987993
.track_queue_depth = 1,

drivers/infiniband/ulp/iser/iscsi_iser.h

Lines changed: 1 addition & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -227,18 +227,13 @@ enum iser_data_dir {
227227
* @size: num entries of this sg
228228
* @data_len: total beffer byte len
229229
* @dma_nents: returned by dma_map_sg
230-
* @orig_sg: pointer to the original sg list (in case
231-
* we used a copy)
232-
* @orig_size: num entris of orig sg list
233230
*/
234231
struct iser_data_buf {
235232
struct scatterlist *sg;
236233
unsigned int size;
237234
unsigned long data_len;
238235
unsigned int dma_nents;
239-
struct scatterlist *orig_sg;
240-
unsigned int orig_size;
241-
};
236+
};
242237

243238
/* fwd declarations */
244239
struct iser_device;

drivers/infiniband/ulp/iser/iser_initiator.c

Lines changed: 8 additions & 43 deletions
Original file line numberDiff line numberDiff line change
@@ -661,60 +661,25 @@ void iser_task_rdma_init(struct iscsi_iser_task *iser_task)
661661

662662
void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
663663
{
664-
int is_rdma_data_aligned = 1;
665-
int is_rdma_prot_aligned = 1;
666664
int prot_count = scsi_prot_sg_count(iser_task->sc);
667665

668-
/* if we were reading, copy back to unaligned sglist,
669-
* anyway dma_unmap and free the copy
670-
*/
671-
if (iser_task->data[ISER_DIR_IN].orig_sg) {
672-
is_rdma_data_aligned = 0;
673-
iser_finalize_rdma_unaligned_sg(iser_task,
674-
&iser_task->data[ISER_DIR_IN],
675-
ISER_DIR_IN);
676-
}
677-
678-
if (iser_task->data[ISER_DIR_OUT].orig_sg) {
679-
is_rdma_data_aligned = 0;
680-
iser_finalize_rdma_unaligned_sg(iser_task,
681-
&iser_task->data[ISER_DIR_OUT],
682-
ISER_DIR_OUT);
683-
}
684-
685-
if (iser_task->prot[ISER_DIR_IN].orig_sg) {
686-
is_rdma_prot_aligned = 0;
687-
iser_finalize_rdma_unaligned_sg(iser_task,
688-
&iser_task->prot[ISER_DIR_IN],
689-
ISER_DIR_IN);
690-
}
691-
692-
if (iser_task->prot[ISER_DIR_OUT].orig_sg) {
693-
is_rdma_prot_aligned = 0;
694-
iser_finalize_rdma_unaligned_sg(iser_task,
695-
&iser_task->prot[ISER_DIR_OUT],
696-
ISER_DIR_OUT);
697-
}
698-
699666
if (iser_task->dir[ISER_DIR_IN]) {
700667
iser_unreg_rdma_mem(iser_task, ISER_DIR_IN);
701-
if (is_rdma_data_aligned)
702-
iser_dma_unmap_task_data(iser_task,
703-
&iser_task->data[ISER_DIR_IN],
704-
DMA_FROM_DEVICE);
705-
if (prot_count && is_rdma_prot_aligned)
668+
iser_dma_unmap_task_data(iser_task,
669+
&iser_task->data[ISER_DIR_IN],
670+
DMA_FROM_DEVICE);
671+
if (prot_count)
706672
iser_dma_unmap_task_data(iser_task,
707673
&iser_task->prot[ISER_DIR_IN],
708674
DMA_FROM_DEVICE);
709675
}
710676

711677
if (iser_task->dir[ISER_DIR_OUT]) {
712678
iser_unreg_rdma_mem(iser_task, ISER_DIR_OUT);
713-
if (is_rdma_data_aligned)
714-
iser_dma_unmap_task_data(iser_task,
715-
&iser_task->data[ISER_DIR_OUT],
716-
DMA_TO_DEVICE);
717-
if (prot_count && is_rdma_prot_aligned)
679+
iser_dma_unmap_task_data(iser_task,
680+
&iser_task->data[ISER_DIR_OUT],
681+
DMA_TO_DEVICE);
682+
if (prot_count)
718683
iser_dma_unmap_task_data(iser_task,
719684
&iser_task->prot[ISER_DIR_OUT],
720685
DMA_TO_DEVICE);

0 commit comments

Comments
 (0)