
Commit 628e5f6

Sean Hefty authored and Roland Dreier committed
RDMA/cma: Add support for RDMA_PS_UDP
Allow the use of UD QPs through the rdma_cm, in order to provide address
translation services for resolving IB addresses for datagram messages
using SIDR.

Signed-off-by: Sean Hefty <[email protected]>
Signed-off-by: Roland Dreier <[email protected]>
1 parent 0fe313b commit 628e5f6
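
For orientation only (this note and sketch are not part of the commit): with this change a kernel consumer can open an rdma_cm_id in the RDMA_PS_UDP port space, resolve the destination, and call rdma_connect() to trigger a SIDR exchange instead of a CM REQ; the result arrives as RDMA_CM_EVENT_ESTABLISHED with the remote UD QP details in event->param.ud. A minimal, hypothetical active-side handler might look as follows (my_ud_handler, my_ctx and the 2000 ms timeouts are illustrative, not from the patch):

/* Hypothetical consumer sketch -- not part of this commit. */
#include <linux/kernel.h>
#include <linux/string.h>
#include <rdma/rdma_cm.h>
#include <rdma/rdma_cm_ib.h>

static int my_ud_handler(struct rdma_cm_id *id, struct rdma_cm_event *event)
{
        struct rdma_conn_param conn_param;

        switch (event->event) {
        case RDMA_CM_EVENT_ADDR_RESOLVED:
                return rdma_resolve_route(id, 2000);
        case RDMA_CM_EVENT_ROUTE_RESOLVED:
                /* For RDMA_PS_UDP this sends a SIDR REQ rather than a CM REQ. */
                memset(&conn_param, 0, sizeof conn_param);
                return rdma_connect(id, &conn_param);
        case RDMA_CM_EVENT_ESTABLISHED:
                /* SIDR REP accepted: param.ud holds qp_num, qkey and ah_attr. */
                printk(KERN_INFO "resolved remote qpn 0x%x qkey 0x%x\n",
                       event->param.ud.qp_num, event->param.ud.qkey);
                return 0;
        case RDMA_CM_EVENT_UNREACHABLE:
                printk(KERN_ERR "SIDR resolution failed: %d\n", event->status);
                return 0;
        default:
                return 0;
        }
}

/*
 * Usage (error handling omitted):
 *      id = rdma_create_id(my_ud_handler, my_ctx, RDMA_PS_UDP);
 *      rdma_resolve_addr(id, NULL, (struct sockaddr *) &dst_addr, 2000);
 */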

File tree

3 files changed: 222 additions, 16 deletions

  drivers/infiniband/core/cma.c
  include/rdma/rdma_cm.h
  include/rdma/rdma_cm_ib.h

drivers/infiniband/core/cma.c

Lines changed: 204 additions & 16 deletions
@@ -70,6 +70,7 @@ static DEFINE_MUTEX(lock);
 static struct workqueue_struct *cma_wq;
 static DEFINE_IDR(sdp_ps);
 static DEFINE_IDR(tcp_ps);
+static DEFINE_IDR(udp_ps);

 struct cma_device {
        struct list_head list;
@@ -508,9 +509,17 @@ static inline int cma_any_addr(struct sockaddr *addr)
        return cma_zero_addr(addr) || cma_loopback_addr(addr);
 }

+static inline __be16 cma_port(struct sockaddr *addr)
+{
+       if (addr->sa_family == AF_INET)
+               return ((struct sockaddr_in *) addr)->sin_port;
+       else
+               return ((struct sockaddr_in6 *) addr)->sin6_port;
+}
+
 static inline int cma_any_port(struct sockaddr *addr)
 {
-       return !((struct sockaddr_in *) addr)->sin_port;
+       return !cma_port(addr);
 }

 static int cma_get_net_info(void *hdr, enum rdma_port_space ps,
@@ -847,8 +856,8 @@ static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
        return ret;
 }

-static struct rdma_id_private *cma_new_id(struct rdma_cm_id *listen_id,
-                                         struct ib_cm_event *ib_event)
+static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
+                                              struct ib_cm_event *ib_event)
 {
        struct rdma_id_private *id_priv;
        struct rdma_cm_id *id;
@@ -895,6 +904,42 @@ static struct rdma_id_private *cma_new_id(struct rdma_cm_id *listen_id,
        return NULL;
 }

+static struct rdma_id_private *cma_new_udp_id(struct rdma_cm_id *listen_id,
+                                             struct ib_cm_event *ib_event)
+{
+       struct rdma_id_private *id_priv;
+       struct rdma_cm_id *id;
+       union cma_ip_addr *src, *dst;
+       __u16 port;
+       u8 ip_ver;
+       int ret;
+
+       id = rdma_create_id(listen_id->event_handler, listen_id->context,
+                           listen_id->ps);
+       if (IS_ERR(id))
+               return NULL;
+
+
+       if (cma_get_net_info(ib_event->private_data, listen_id->ps,
+                            &ip_ver, &port, &src, &dst))
+               goto err;
+
+       cma_save_net_info(&id->route.addr, &listen_id->route.addr,
+                         ip_ver, port, src, dst);
+
+       ret = rdma_translate_ip(&id->route.addr.src_addr,
+                               &id->route.addr.dev_addr);
+       if (ret)
+               goto err;
+
+       id_priv = container_of(id, struct rdma_id_private, id);
+       id_priv->state = CMA_CONNECT;
+       return id_priv;
+err:
+       rdma_destroy_id(id);
+       return NULL;
+}
+
 static void cma_set_req_event_data(struct rdma_cm_event *event,
                                   struct ib_cm_req_event_param *req_data,
                                   void *private_data, int offset)
@@ -923,7 +968,19 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
                goto out;
        }

-       conn_id = cma_new_id(&listen_id->id, ib_event);
+       memset(&event, 0, sizeof event);
+       offset = cma_user_data_offset(listen_id->id.ps);
+       event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
+       if (listen_id->id.ps == RDMA_PS_UDP) {
+               conn_id = cma_new_udp_id(&listen_id->id, ib_event);
+               event.param.ud.private_data = ib_event->private_data + offset;
+               event.param.ud.private_data_len =
+                               IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE - offset;
+       } else {
+               conn_id = cma_new_conn_id(&listen_id->id, ib_event);
+               cma_set_req_event_data(&event, &ib_event->param.req_rcvd,
+                                      ib_event->private_data, offset);
+       }
        if (!conn_id) {
                ret = -ENOMEM;
                goto out;
@@ -940,11 +997,6 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
        cm_id->context = conn_id;
        cm_id->cm_handler = cma_ib_handler;

-       offset = cma_user_data_offset(listen_id->id.ps);
-       memset(&event, 0, sizeof event);
-       event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
-       cma_set_req_event_data(&event, &ib_event->param.req_rcvd,
-                              ib_event->private_data, offset);
        ret = conn_id->id.event_handler(&conn_id->id, &event);
        if (!ret)
                goto out;
@@ -964,8 +1016,7 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)

 static __be64 cma_get_service_id(enum rdma_port_space ps, struct sockaddr *addr)
 {
-       return cpu_to_be64(((u64)ps << 16) +
-                          be16_to_cpu(((struct sockaddr_in *) addr)->sin_port));
+       return cpu_to_be64(((u64)ps << 16) + be16_to_cpu(cma_port(addr)));
 }

 static void cma_set_compare_data(enum rdma_port_space ps, struct sockaddr *addr,
@@ -1740,6 +1791,9 @@ static int cma_get_port(struct rdma_id_private *id_priv)
        case RDMA_PS_TCP:
                ps = &tcp_ps;
                break;
+       case RDMA_PS_UDP:
+               ps = &udp_ps;
+               break;
        default:
                return -EPROTONOSUPPORT;
        }
@@ -1828,6 +1882,110 @@ static int cma_format_hdr(void *hdr, enum rdma_port_space ps,
        return 0;
 }

+static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
+                               struct ib_cm_event *ib_event)
+{
+       struct rdma_id_private *id_priv = cm_id->context;
+       struct rdma_cm_event event;
+       struct ib_cm_sidr_rep_event_param *rep = &ib_event->param.sidr_rep_rcvd;
+       int ret = 0;
+
+       memset(&event, 0, sizeof event);
+       atomic_inc(&id_priv->dev_remove);
+       if (!cma_comp(id_priv, CMA_CONNECT))
+               goto out;
+
+       switch (ib_event->event) {
+       case IB_CM_SIDR_REQ_ERROR:
+               event.event = RDMA_CM_EVENT_UNREACHABLE;
+               event.status = -ETIMEDOUT;
+               break;
+       case IB_CM_SIDR_REP_RECEIVED:
+               event.param.ud.private_data = ib_event->private_data;
+               event.param.ud.private_data_len = IB_CM_SIDR_REP_PRIVATE_DATA_SIZE;
+               if (rep->status != IB_SIDR_SUCCESS) {
+                       event.event = RDMA_CM_EVENT_UNREACHABLE;
+                       event.status = ib_event->param.sidr_rep_rcvd.status;
+                       break;
+               }
+               if (rep->qkey != RDMA_UD_QKEY) {
+                       event.event = RDMA_CM_EVENT_UNREACHABLE;
+                       event.status = -EINVAL;
+                       break;
+               }
+               ib_init_ah_from_path(id_priv->id.device, id_priv->id.port_num,
+                                    id_priv->id.route.path_rec,
+                                    &event.param.ud.ah_attr);
+               event.param.ud.qp_num = rep->qpn;
+               event.param.ud.qkey = rep->qkey;
+               event.event = RDMA_CM_EVENT_ESTABLISHED;
+               event.status = 0;
+               break;
+       default:
+               printk(KERN_ERR "RDMA CMA: unexpected IB CM event: %d",
+                      ib_event->event);
+               goto out;
+       }
+
+       ret = id_priv->id.event_handler(&id_priv->id, &event);
+       if (ret) {
+               /* Destroy the CM ID by returning a non-zero value. */
+               id_priv->cm_id.ib = NULL;
+               cma_exch(id_priv, CMA_DESTROYING);
+               cma_release_remove(id_priv);
+               rdma_destroy_id(&id_priv->id);
+               return ret;
+       }
+out:
+       cma_release_remove(id_priv);
+       return ret;
+}
+
+static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
+                             struct rdma_conn_param *conn_param)
+{
+       struct ib_cm_sidr_req_param req;
+       struct rdma_route *route;
+       int ret;
+
+       req.private_data_len = sizeof(struct cma_hdr) +
+                              conn_param->private_data_len;
+       req.private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
+       if (!req.private_data)
+               return -ENOMEM;
+
+       if (conn_param->private_data && conn_param->private_data_len)
+               memcpy((void *) req.private_data + sizeof(struct cma_hdr),
+                      conn_param->private_data, conn_param->private_data_len);
+
+       route = &id_priv->id.route;
+       ret = cma_format_hdr((void *) req.private_data, id_priv->id.ps, route);
+       if (ret)
+               goto out;
+
+       id_priv->cm_id.ib = ib_create_cm_id(id_priv->id.device,
+                                           cma_sidr_rep_handler, id_priv);
+       if (IS_ERR(id_priv->cm_id.ib)) {
+               ret = PTR_ERR(id_priv->cm_id.ib);
+               goto out;
+       }
+
+       req.path = route->path_rec;
+       req.service_id = cma_get_service_id(id_priv->id.ps,
+                                           &route->addr.dst_addr);
+       req.timeout_ms = 1 << (CMA_CM_RESPONSE_TIMEOUT - 8);
+       req.max_cm_retries = CMA_MAX_CM_RETRIES;
+
+       ret = ib_send_cm_sidr_req(id_priv->cm_id.ib, &req);
+       if (ret) {
+               ib_destroy_cm_id(id_priv->cm_id.ib);
+               id_priv->cm_id.ib = NULL;
+       }
+out:
+       kfree(req.private_data);
+       return ret;
+}
+
 static int cma_connect_ib(struct rdma_id_private *id_priv,
                          struct rdma_conn_param *conn_param)
 {
@@ -1949,7 +2107,10 @@ int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)

        switch (rdma_node_get_transport(id->device->node_type)) {
        case RDMA_TRANSPORT_IB:
-               ret = cma_connect_ib(id_priv, conn_param);
+               if (id->ps == RDMA_PS_UDP)
+                       ret = cma_resolve_ib_udp(id_priv, conn_param);
+               else
+                       ret = cma_connect_ib(id_priv, conn_param);
                break;
        case RDMA_TRANSPORT_IWARP:
                ret = cma_connect_iw(id_priv, conn_param);
@@ -2032,6 +2193,24 @@ static int cma_accept_iw(struct rdma_id_private *id_priv,
        return iw_cm_accept(id_priv->cm_id.iw, &iw_param);
 }

+static int cma_send_sidr_rep(struct rdma_id_private *id_priv,
+                            enum ib_cm_sidr_status status,
+                            const void *private_data, int private_data_len)
+{
+       struct ib_cm_sidr_rep_param rep;
+
+       memset(&rep, 0, sizeof rep);
+       rep.status = status;
+       if (status == IB_SIDR_SUCCESS) {
+               rep.qp_num = id_priv->qp_num;
+               rep.qkey = RDMA_UD_QKEY;
+       }
+       rep.private_data = private_data;
+       rep.private_data_len = private_data_len;
+
+       return ib_send_cm_sidr_rep(id_priv->cm_id.ib, &rep);
+}
+
 int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
 {
        struct rdma_id_private *id_priv;
@@ -2048,7 +2227,11 @@ int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)

        switch (rdma_node_get_transport(id->device->node_type)) {
        case RDMA_TRANSPORT_IB:
-               if (conn_param)
+               if (id->ps == RDMA_PS_UDP)
+                       ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
+                                               conn_param->private_data,
+                                               conn_param->private_data_len);
+               else if (conn_param)
                        ret = cma_accept_ib(id_priv, conn_param);
                else
                        ret = cma_rep_recv(id_priv);
@@ -2105,9 +2288,13 @@ int rdma_reject(struct rdma_cm_id *id, const void *private_data,

        switch (rdma_node_get_transport(id->device->node_type)) {
        case RDMA_TRANSPORT_IB:
-               ret = ib_send_cm_rej(id_priv->cm_id.ib,
-                                    IB_CM_REJ_CONSUMER_DEFINED, NULL, 0,
-                                    private_data, private_data_len);
+               if (id->ps == RDMA_PS_UDP)
+                       ret = cma_send_sidr_rep(id_priv, IB_SIDR_REJECT,
+                                               private_data, private_data_len);
+               else
+                       ret = ib_send_cm_rej(id_priv->cm_id.ib,
+                                            IB_CM_REJ_CONSUMER_DEFINED, NULL,
+                                            0, private_data, private_data_len);
                break;
        case RDMA_TRANSPORT_IWARP:
                ret = iw_cm_reject(id_priv->cm_id.iw,
@@ -2277,6 +2464,7 @@ static void cma_cleanup(void)
        destroy_workqueue(cma_wq);
        idr_destroy(&sdp_ps);
        idr_destroy(&tcp_ps);
+       idr_destroy(&udp_ps);
 }

 module_init(cma_init);
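
For context (again, not from the commit itself): on the passive side a listener bound to RDMA_PS_UDP sees RDMA_CM_EVENT_CONNECT_REQUEST for the incoming SIDR REQ, and rdma_accept() on that id replies through cma_send_sidr_rep() above with the id's QP number and RDMA_UD_QKEY. A hedged sketch of that shape (my_listen_handler and the usage comment are placeholders):

/* Hypothetical listener sketch -- not part of this commit. */
#include <linux/string.h>
#include <rdma/rdma_cm.h>

static int my_listen_handler(struct rdma_cm_id *id, struct rdma_cm_event *event)
{
        struct rdma_conn_param conn_param;

        if (event->event != RDMA_CM_EVENT_CONNECT_REQUEST)
                return 0;

        /*
         * event->param.ud.private_data holds the requester's data.  A UD QP
         * would normally be created on this new id here (rdma_create_qp())
         * before accepting, so that the SIDR REP can advertise its QP number.
         */
        memset(&conn_param, 0, sizeof conn_param);
        return rdma_accept(id, &conn_param);    /* SIDR REP with RDMA_UD_QKEY */
}

/*
 * Usage (error handling omitted):
 *      listen_id = rdma_create_id(my_listen_handler, NULL, RDMA_PS_UDP);
 *      rdma_bind_addr(listen_id, (struct sockaddr *) &local_addr);
 *      rdma_listen(listen_id, 0);
 */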

include/rdma/rdma_cm.h

Lines changed: 15 additions & 0 deletions
@@ -90,11 +90,20 @@ struct rdma_conn_param {
        u32 qp_num;
 };

+struct rdma_ud_param {
+       const void *private_data;
+       u8 private_data_len;
+       struct ib_ah_attr ah_attr;
+       u32 qp_num;
+       u32 qkey;
+};
+
 struct rdma_cm_event {
        enum rdma_cm_event_type  event;
        int                      status;
        union {
                struct rdma_conn_param  conn;
+               struct rdma_ud_param    ud;
        } param;
 };

@@ -220,9 +229,15 @@ int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,

 /**
  * rdma_connect - Initiate an active connection request.
+ * @id: Connection identifier to connect.
+ * @conn_param: Connection information used for connected QPs.
  *
  * Users must have resolved a route for the rdma_cm_id to connect with
  * by having called rdma_resolve_route before calling this routine.
+ *
+ * This call will either connect to a remote QP or obtain remote QP
+ * information for unconnected rdma_cm_id's.  The actual operation is
+ * based on the rdma_cm_id's port space.
  */
 int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param);

include/rdma/rdma_cm_ib.h

Lines changed: 3 additions & 0 deletions
@@ -44,4 +44,7 @@
 int rdma_set_ib_paths(struct rdma_cm_id *id,
                      struct ib_sa_path_rec *path_rec, int num_paths);

+/* Global qkey for UD QPs and multicast groups. */
+#define RDMA_UD_QKEY 0x01234567
+
 #endif /* RDMA_CM_IB_H */
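
Once resolution succeeds, the fields in event->param.ud are intended to feed an ordinary UD post-send: build an address handle from ah_attr and target the reported QP number with RDMA_UD_QKEY. A hedged sketch under that assumption (the helper name and parameters are hypothetical; the wr.wr.ud field layout is the struct ib_send_wr layout of kernels from this era):

/* Hypothetical helper -- not part of this commit. */
#include <linux/err.h>
#include <linux/string.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <rdma/rdma_cm_ib.h>

static int my_post_ud_send(struct ib_pd *pd, struct ib_qp *qp,
                           struct ib_sge *sge, struct rdma_ud_param *ud)
{
        struct ib_ah *ah;
        struct ib_send_wr wr, *bad_wr;
        int ret;

        /* Address handle built from the path returned by the SIDR exchange. */
        ah = ib_create_ah(pd, &ud->ah_attr);
        if (IS_ERR(ah))
                return PTR_ERR(ah);

        memset(&wr, 0, sizeof wr);
        wr.opcode            = IB_WR_SEND;
        wr.send_flags        = IB_SEND_SIGNALED;
        wr.sg_list           = sge;
        wr.num_sge           = 1;
        wr.wr.ud.ah          = ah;
        wr.wr.ud.remote_qpn  = ud->qp_num;
        wr.wr.ud.remote_qkey = RDMA_UD_QKEY;    /* equals ud->qkey on success */

        ret = ib_post_send(qp, &wr, &bad_wr);
        if (ret)
                ib_destroy_ah(ah);      /* otherwise destroy after completion */
        return ret;
}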
