Skip to content

Commit e3d7e4c

Browse files
Sagi Grimberg authored and Nicholas Bellinger committed
IB/isert: Introduce isert_map/unmap_data_buf
Export map/unmap data buffer to a routine that may be used in various places in the code, and keep the mapping data in a designated descriptor. Also, let isert_fast_reg_mr decide whether to use a global MR or do fast registration. This commit does not change any functionality. (Fix context change for v3.14-rc6 code - nab) Signed-off-by: Sagi Grimberg <[email protected]> Signed-off-by: Nicholas Bellinger <[email protected]>
1 parent 03abad9 commit e3d7e4c

File tree

2 files changed

+127
-125
lines changed

2 files changed

+127
-125
lines changed

drivers/infiniband/ulp/isert/ib_isert.c

Lines changed: 116 additions & 122 deletions
Original file line numberDiff line numberDiff line change
@@ -1392,19 +1392,60 @@ isert_rx_completion(struct iser_rx_desc *desc, struct isert_conn *isert_conn,
13921392
}
13931393
}
13941394

1395+
static int
1396+
isert_map_data_buf(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
1397+
struct scatterlist *sg, u32 nents, u32 length, u32 offset,
1398+
enum iser_ib_op_code op, struct isert_data_buf *data)
1399+
{
1400+
struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1401+
1402+
data->dma_dir = op == ISER_IB_RDMA_WRITE ?
1403+
DMA_TO_DEVICE : DMA_FROM_DEVICE;
1404+
1405+
data->len = length - offset;
1406+
data->offset = offset;
1407+
data->sg_off = data->offset / PAGE_SIZE;
1408+
1409+
data->sg = &sg[data->sg_off];
1410+
data->nents = min_t(unsigned int, nents - data->sg_off,
1411+
ISCSI_ISER_SG_TABLESIZE);
1412+
data->len = min_t(unsigned int, data->len, ISCSI_ISER_SG_TABLESIZE *
1413+
PAGE_SIZE);
1414+
1415+
data->dma_nents = ib_dma_map_sg(ib_dev, data->sg, data->nents,
1416+
data->dma_dir);
1417+
if (unlikely(!data->dma_nents)) {
1418+
pr_err("Cmd: unable to dma map SGs %p\n", sg);
1419+
return -EINVAL;
1420+
}
1421+
1422+
pr_debug("Mapped cmd: %p count: %u sg: %p sg_nents: %u rdma_len %d\n",
1423+
isert_cmd, data->dma_nents, data->sg, data->nents, data->len);
1424+
1425+
return 0;
1426+
}
1427+
1428+
static void
1429+
isert_unmap_data_buf(struct isert_conn *isert_conn, struct isert_data_buf *data)
1430+
{
1431+
struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1432+
1433+
ib_dma_unmap_sg(ib_dev, data->sg, data->nents, data->dma_dir);
1434+
memset(data, 0, sizeof(*data));
1435+
}
1436+
1437+
1438+
13951439
static void
13961440
isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
13971441
{
13981442
struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
1399-
struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
14001443

14011444
pr_debug("isert_unmap_cmd: %p\n", isert_cmd);
1402-
if (wr->sge) {
1445+
1446+
if (wr->data.sg) {
14031447
pr_debug("isert_unmap_cmd: %p unmap_sg op\n", isert_cmd);
1404-
ib_dma_unmap_sg(ib_dev, wr->sge, wr->num_sge,
1405-
(wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
1406-
DMA_TO_DEVICE : DMA_FROM_DEVICE);
1407-
wr->sge = NULL;
1448+
isert_unmap_data_buf(isert_conn, &wr->data);
14081449
}
14091450

14101451
if (wr->send_wr) {
@@ -1424,7 +1465,6 @@ static void
14241465
isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
14251466
{
14261467
struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
1427-
struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
14281468
LIST_HEAD(unmap_list);
14291469

14301470
pr_debug("unreg_fastreg_cmd: %p\n", isert_cmd);
@@ -1438,12 +1478,9 @@ isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
14381478
wr->fr_desc = NULL;
14391479
}
14401480

1441-
if (wr->sge) {
1481+
if (wr->data.sg) {
14421482
pr_debug("unreg_fastreg_cmd: %p unmap_sg op\n", isert_cmd);
1443-
ib_dma_unmap_sg(ib_dev, wr->sge, wr->num_sge,
1444-
(wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
1445-
DMA_TO_DEVICE : DMA_FROM_DEVICE);
1446-
wr->sge = NULL;
1483+
isert_unmap_data_buf(isert_conn, &wr->data);
14471484
}
14481485

14491486
wr->ib_sge = NULL;
@@ -1548,7 +1585,7 @@ isert_completion_rdma_read(struct iser_tx_desc *tx_desc,
15481585

15491586
iscsit_stop_dataout_timer(cmd);
15501587
device->unreg_rdma_mem(isert_cmd, isert_conn);
1551-
cmd->write_data_done = wr->cur_rdma_length;
1588+
cmd->write_data_done = wr->data.len;
15521589
wr->send_wr_num = 0;
15531590

15541591
pr_debug("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd);
@@ -2099,54 +2136,39 @@ isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
20992136
struct se_cmd *se_cmd = &cmd->se_cmd;
21002137
struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
21012138
struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
2102-
struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
2139+
struct isert_data_buf *data = &wr->data;
21032140
struct ib_send_wr *send_wr;
21042141
struct ib_sge *ib_sge;
2105-
struct scatterlist *sg_start;
2106-
u32 sg_off = 0, sg_nents;
2107-
u32 offset = 0, data_len, data_left, rdma_write_max, va_offset = 0;
2108-
int ret = 0, count, i, ib_sge_cnt;
2142+
u32 offset, data_len, data_left, rdma_write_max, va_offset = 0;
2143+
int ret = 0, i, ib_sge_cnt;
21092144

2110-
if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
2111-
data_left = se_cmd->data_length;
2112-
} else {
2113-
sg_off = cmd->write_data_done / PAGE_SIZE;
2114-
data_left = se_cmd->data_length - cmd->write_data_done;
2115-
offset = cmd->write_data_done;
2116-
isert_cmd->tx_desc.isert_cmd = isert_cmd;
2117-
}
2145+
isert_cmd->tx_desc.isert_cmd = isert_cmd;
21182146

2119-
sg_start = &cmd->se_cmd.t_data_sg[sg_off];
2120-
sg_nents = se_cmd->t_data_nents - sg_off;
2147+
offset = wr->iser_ib_op == ISER_IB_RDMA_READ ? cmd->write_data_done : 0;
2148+
ret = isert_map_data_buf(isert_conn, isert_cmd, se_cmd->t_data_sg,
2149+
se_cmd->t_data_nents, se_cmd->data_length,
2150+
offset, wr->iser_ib_op, &wr->data);
2151+
if (ret)
2152+
return ret;
21212153

2122-
count = ib_dma_map_sg(ib_dev, sg_start, sg_nents,
2123-
(wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
2124-
DMA_TO_DEVICE : DMA_FROM_DEVICE);
2125-
if (unlikely(!count)) {
2126-
pr_err("Cmd: %p unrable to map SGs\n", isert_cmd);
2127-
return -EINVAL;
2128-
}
2129-
wr->sge = sg_start;
2130-
wr->num_sge = sg_nents;
2131-
wr->cur_rdma_length = data_left;
2132-
pr_debug("Mapped cmd: %p count: %u sg: %p sg_nents: %u rdma_len %d\n",
2133-
isert_cmd, count, sg_start, sg_nents, data_left);
2154+
data_left = data->len;
2155+
offset = data->offset;
21342156

2135-
ib_sge = kzalloc(sizeof(struct ib_sge) * sg_nents, GFP_KERNEL);
2157+
ib_sge = kzalloc(sizeof(struct ib_sge) * data->nents, GFP_KERNEL);
21362158
if (!ib_sge) {
21372159
pr_warn("Unable to allocate ib_sge\n");
21382160
ret = -ENOMEM;
2139-
goto unmap_sg;
2161+
goto unmap_cmd;
21402162
}
21412163
wr->ib_sge = ib_sge;
21422164

2143-
wr->send_wr_num = DIV_ROUND_UP(sg_nents, isert_conn->max_sge);
2165+
wr->send_wr_num = DIV_ROUND_UP(data->nents, isert_conn->max_sge);
21442166
wr->send_wr = kzalloc(sizeof(struct ib_send_wr) * wr->send_wr_num,
21452167
GFP_KERNEL);
21462168
if (!wr->send_wr) {
21472169
pr_debug("Unable to allocate wr->send_wr\n");
21482170
ret = -ENOMEM;
2149-
goto unmap_sg;
2171+
goto unmap_cmd;
21502172
}
21512173

21522174
wr->isert_cmd = isert_cmd;
@@ -2185,10 +2207,9 @@ isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
21852207
}
21862208

21872209
return 0;
2188-
unmap_sg:
2189-
ib_dma_unmap_sg(ib_dev, sg_start, sg_nents,
2190-
(wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
2191-
DMA_TO_DEVICE : DMA_FROM_DEVICE);
2210+
unmap_cmd:
2211+
isert_unmap_data_buf(isert_conn, data);
2212+
21922213
return ret;
21932214
}
21942215

@@ -2232,10 +2253,10 @@ isert_map_fr_pagelist(struct ib_device *ib_dev,
22322253
}
22332254

22342255
static int
2235-
isert_fast_reg_mr(struct fast_reg_descriptor *fr_desc,
2236-
struct isert_conn *isert_conn, struct scatterlist *sg_start,
2237-
struct ib_sge *ib_sge, u32 sg_nents, u32 offset,
2238-
unsigned int data_len)
2256+
isert_fast_reg_mr(struct isert_conn *isert_conn,
2257+
struct fast_reg_descriptor *fr_desc,
2258+
struct isert_data_buf *mem,
2259+
struct ib_sge *sge)
22392260
{
22402261
struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
22412262
struct ib_send_wr fr_wr, inv_wr;
@@ -2244,13 +2265,19 @@ isert_fast_reg_mr(struct fast_reg_descriptor *fr_desc,
22442265
u32 page_off;
22452266
u8 key;
22462267

2247-
sg_nents = min_t(unsigned int, sg_nents, ISCSI_ISER_SG_TABLESIZE);
2248-
page_off = offset % PAGE_SIZE;
2268+
if (mem->dma_nents == 1) {
2269+
sge->lkey = isert_conn->conn_mr->lkey;
2270+
sge->addr = ib_sg_dma_address(ib_dev, &mem->sg[0]);
2271+
sge->length = ib_sg_dma_len(ib_dev, &mem->sg[0]);
2272+
return 0;
2273+
}
2274+
2275+
page_off = mem->offset % PAGE_SIZE;
22492276

22502277
pr_debug("Use fr_desc %p sg_nents %d offset %u\n",
2251-
fr_desc, sg_nents, offset);
2278+
fr_desc, mem->nents, mem->offset);
22522279

2253-
pagelist_len = isert_map_fr_pagelist(ib_dev, sg_start, sg_nents,
2280+
pagelist_len = isert_map_fr_pagelist(ib_dev, mem->sg, mem->nents,
22542281
&fr_desc->data_frpl->page_list[0]);
22552282

22562283
if (!fr_desc->valid) {
@@ -2273,7 +2300,7 @@ isert_fast_reg_mr(struct fast_reg_descriptor *fr_desc,
22732300
fr_wr.wr.fast_reg.page_list = fr_desc->data_frpl;
22742301
fr_wr.wr.fast_reg.page_list_len = pagelist_len;
22752302
fr_wr.wr.fast_reg.page_shift = PAGE_SHIFT;
2276-
fr_wr.wr.fast_reg.length = data_len;
2303+
fr_wr.wr.fast_reg.length = mem->len;
22772304
fr_wr.wr.fast_reg.rkey = fr_desc->data_mr->rkey;
22782305
fr_wr.wr.fast_reg.access_flags = IB_ACCESS_LOCAL_WRITE;
22792306

@@ -2289,12 +2316,12 @@ isert_fast_reg_mr(struct fast_reg_descriptor *fr_desc,
22892316
}
22902317
fr_desc->valid = false;
22912318

2292-
ib_sge->lkey = fr_desc->data_mr->lkey;
2293-
ib_sge->addr = fr_desc->data_frpl->page_list[0] + page_off;
2294-
ib_sge->length = data_len;
2319+
sge->lkey = fr_desc->data_mr->lkey;
2320+
sge->addr = fr_desc->data_frpl->page_list[0] + page_off;
2321+
sge->length = mem->len;
22952322

22962323
pr_debug("RDMA ib_sge: addr: 0x%16llx length: %u lkey: %08x\n",
2297-
ib_sge->addr, ib_sge->length, ib_sge->lkey);
2324+
sge->addr, sge->length, sge->lkey);
22982325

22992326
return ret;
23002327
}
@@ -2305,54 +2332,43 @@ isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
23052332
{
23062333
struct se_cmd *se_cmd = &cmd->se_cmd;
23072334
struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
2308-
struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
2309-
struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
2335+
struct isert_conn *isert_conn = conn->context;
23102336
struct ib_send_wr *send_wr;
2311-
struct ib_sge *ib_sge;
2312-
struct scatterlist *sg_start;
2313-
struct fast_reg_descriptor *fr_desc;
2314-
u32 sg_off = 0, sg_nents;
2315-
u32 offset = 0, data_len, data_left, rdma_write_max;
2316-
int ret = 0, count;
2337+
struct fast_reg_descriptor *fr_desc = NULL;
2338+
u32 offset;
2339+
int ret = 0;
23172340
unsigned long flags;
23182341

2319-
if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
2320-
data_left = se_cmd->data_length;
2321-
} else {
2322-
offset = cmd->write_data_done;
2323-
sg_off = offset / PAGE_SIZE;
2324-
data_left = se_cmd->data_length - cmd->write_data_done;
2325-
isert_cmd->tx_desc.isert_cmd = isert_cmd;
2326-
}
2342+
isert_cmd->tx_desc.isert_cmd = isert_cmd;
23272343

2328-
sg_start = &cmd->se_cmd.t_data_sg[sg_off];
2329-
sg_nents = se_cmd->t_data_nents - sg_off;
2344+
offset = wr->iser_ib_op == ISER_IB_RDMA_READ ? cmd->write_data_done : 0;
2345+
ret = isert_map_data_buf(isert_conn, isert_cmd, se_cmd->t_data_sg,
2346+
se_cmd->t_data_nents, se_cmd->data_length,
2347+
offset, wr->iser_ib_op, &wr->data);
2348+
if (ret)
2349+
return ret;
23302350

2331-
count = ib_dma_map_sg(ib_dev, sg_start, sg_nents,
2332-
(wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
2333-
DMA_TO_DEVICE : DMA_FROM_DEVICE);
2334-
if (unlikely(!count)) {
2335-
pr_err("Cmd: %p unrable to map SGs\n", isert_cmd);
2336-
return -EINVAL;
2351+
if (wr->data.dma_nents != 1) {
2352+
spin_lock_irqsave(&isert_conn->conn_lock, flags);
2353+
fr_desc = list_first_entry(&isert_conn->conn_fr_pool,
2354+
struct fast_reg_descriptor, list);
2355+
list_del(&fr_desc->list);
2356+
spin_unlock_irqrestore(&isert_conn->conn_lock, flags);
2357+
wr->fr_desc = fr_desc;
23372358
}
2338-
wr->sge = sg_start;
2339-
wr->num_sge = sg_nents;
2340-
pr_debug("Mapped cmd: %p count: %u sg: %p sg_nents: %u rdma_len %d\n",
2341-
isert_cmd, count, sg_start, sg_nents, data_left);
23422359

2343-
memset(&wr->s_ib_sge, 0, sizeof(*ib_sge));
2344-
ib_sge = &wr->s_ib_sge;
2345-
wr->ib_sge = ib_sge;
2360+
ret = isert_fast_reg_mr(isert_conn, fr_desc, &wr->data, &wr->s_ib_sge);
2361+
if (ret)
2362+
goto unmap_cmd;
23462363

2364+
wr->ib_sge = &wr->s_ib_sge;
23472365
wr->send_wr_num = 1;
23482366
memset(&wr->s_send_wr, 0, sizeof(*send_wr));
23492367
wr->send_wr = &wr->s_send_wr;
2350-
23512368
wr->isert_cmd = isert_cmd;
2352-
rdma_write_max = ISCSI_ISER_SG_TABLESIZE * PAGE_SIZE;
23532369

23542370
send_wr = &isert_cmd->rdma_wr.s_send_wr;
2355-
send_wr->sg_list = ib_sge;
2371+
send_wr->sg_list = &wr->s_ib_sge;
23562372
send_wr->num_sge = 1;
23572373
send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc;
23582374
if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
@@ -2368,37 +2384,15 @@ isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
23682384
send_wr->send_flags = IB_SEND_SIGNALED;
23692385
}
23702386

2371-
data_len = min(data_left, rdma_write_max);
2372-
wr->cur_rdma_length = data_len;
2373-
2374-
/* if there is a single dma entry, dma mr is sufficient */
2375-
if (count == 1) {
2376-
ib_sge->addr = ib_sg_dma_address(ib_dev, &sg_start[0]);
2377-
ib_sge->length = ib_sg_dma_len(ib_dev, &sg_start[0]);
2378-
ib_sge->lkey = isert_conn->conn_mr->lkey;
2379-
wr->fr_desc = NULL;
2380-
} else {
2387+
return 0;
2388+
unmap_cmd:
2389+
if (fr_desc) {
23812390
spin_lock_irqsave(&isert_conn->conn_lock, flags);
2382-
fr_desc = list_first_entry(&isert_conn->conn_fr_pool,
2383-
struct fast_reg_descriptor, list);
2384-
list_del(&fr_desc->list);
2391+
list_add_tail(&fr_desc->list, &isert_conn->conn_fr_pool);
23852392
spin_unlock_irqrestore(&isert_conn->conn_lock, flags);
2386-
wr->fr_desc = fr_desc;
2387-
2388-
ret = isert_fast_reg_mr(fr_desc, isert_conn, sg_start,
2389-
ib_sge, sg_nents, offset, data_len);
2390-
if (ret) {
2391-
list_add_tail(&fr_desc->list, &isert_conn->conn_fr_pool);
2392-
goto unmap_sg;
2393-
}
23942393
}
2394+
isert_unmap_data_buf(isert_conn, &wr->data);
23952395

2396-
return 0;
2397-
2398-
unmap_sg:
2399-
ib_dma_unmap_sg(ib_dev, sg_start, sg_nents,
2400-
(wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
2401-
DMA_TO_DEVICE : DMA_FROM_DEVICE);
24022396
return ret;
24032397
}
24042398

drivers/infiniband/ulp/isert/ib_isert.h

Lines changed: 11 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -57,18 +57,26 @@ struct fast_reg_descriptor {
5757
bool valid;
5858
};
5959

60+
struct isert_data_buf {
61+
struct scatterlist *sg;
62+
int nents;
63+
u32 sg_off;
64+
u32 len; /* cur_rdma_length */
65+
u32 offset;
66+
unsigned int dma_nents;
67+
enum dma_data_direction dma_dir;
68+
};
69+
6070
struct isert_rdma_wr {
6171
struct list_head wr_list;
6272
struct isert_cmd *isert_cmd;
6373
enum iser_ib_op_code iser_ib_op;
6474
struct ib_sge *ib_sge;
6575
struct ib_sge s_ib_sge;
66-
int num_sge;
67-
struct scatterlist *sge;
6876
int send_wr_num;
6977
struct ib_send_wr *send_wr;
7078
struct ib_send_wr s_send_wr;
71-
u32 cur_rdma_length;
79+
struct isert_data_buf data;
7280
struct fast_reg_descriptor *fr_desc;
7381
};
7482

0 commit comments

Comments
 (0)