@@ -592,20 +592,6 @@ static inline int cma_user_data_offset(enum rdma_port_space ps)
         }
 }
 
-static int cma_notify_user(struct rdma_id_private *id_priv,
-                           enum rdma_cm_event_type type, int status,
-                           void *data, u8 data_len)
-{
-        struct rdma_cm_event event;
-
-        event.event = type;
-        event.status = status;
-        event.private_data = data;
-        event.private_data_len = data_len;
-
-        return id_priv->id.event_handler(&id_priv->id, &event);
-}
-
 static void cma_cancel_route(struct rdma_id_private *id_priv)
 {
         switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
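With cma_notify_user() removed, each call site below builds a struct rdma_cm_event itself and invokes the rdma_cm_id's event_handler directly. For orientation, here is a minimal sketch of the consumer side of that callback under the rdma_cm API; it is illustrative only and not part of this patch (the handler name and log messages are hypothetical).

#include <linux/kernel.h>
#include <rdma/rdma_cm.h>

/* Illustrative sketch, not part of this patch: a ULP event handler that
 * consumes the rdma_cm_event the CMA now fills in at each call site. */
static int my_cma_event_handler(struct rdma_cm_id *id,
                                struct rdma_cm_event *event)
{
        switch (event->event) {
        case RDMA_CM_EVENT_UNREACHABLE:
        case RDMA_CM_EVENT_REJECTED:
                printk(KERN_INFO "connect failed: event %d, status %d\n",
                       event->event, event->status);
                break;
        default:
                break;
        }
        /* Returning a non-zero value asks the CMA to destroy the CM ID. */
        return 0;
}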
@@ -790,66 +776,81 @@ static int cma_rtu_recv(struct rdma_id_private *id_priv)
         return ret;
 }
 
+static void cma_set_rep_event_data(struct rdma_cm_event *event,
+                                   struct ib_cm_rep_event_param *rep_data,
+                                   void *private_data)
+{
+        event->param.conn.private_data = private_data;
+        event->param.conn.private_data_len = IB_CM_REP_PRIVATE_DATA_SIZE;
+        event->param.conn.responder_resources = rep_data->responder_resources;
+        event->param.conn.initiator_depth = rep_data->initiator_depth;
+        event->param.conn.flow_control = rep_data->flow_control;
+        event->param.conn.rnr_retry_count = rep_data->rnr_retry_count;
+        event->param.conn.srq = rep_data->srq;
+        event->param.conn.qp_num = rep_data->remote_qpn;
+}
+
 static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 {
         struct rdma_id_private *id_priv = cm_id->context;
-        enum rdma_cm_event_type event;
-        u8 private_data_len = 0;
-        int ret = 0, status = 0;
+        struct rdma_cm_event event;
+        int ret = 0;
 
         atomic_inc(&id_priv->dev_remove);
         if (!cma_comp(id_priv, CMA_CONNECT))
                 goto out;
 
+        memset(&event, 0, sizeof event);
         switch (ib_event->event) {
         case IB_CM_REQ_ERROR:
         case IB_CM_REP_ERROR:
-                event = RDMA_CM_EVENT_UNREACHABLE;
-                status = -ETIMEDOUT;
+                event.event = RDMA_CM_EVENT_UNREACHABLE;
+                event.status = -ETIMEDOUT;
                 break;
         case IB_CM_REP_RECEIVED:
-                status = cma_verify_rep(id_priv, ib_event->private_data);
-                if (status)
-                        event = RDMA_CM_EVENT_CONNECT_ERROR;
+                event.status = cma_verify_rep(id_priv, ib_event->private_data);
+                if (event.status)
+                        event.event = RDMA_CM_EVENT_CONNECT_ERROR;
                 else if (id_priv->id.qp && id_priv->id.ps != RDMA_PS_SDP) {
-                        status = cma_rep_recv(id_priv);
-                        event = status ? RDMA_CM_EVENT_CONNECT_ERROR :
-                                         RDMA_CM_EVENT_ESTABLISHED;
+                        event.status = cma_rep_recv(id_priv);
+                        event.event = event.status ? RDMA_CM_EVENT_CONNECT_ERROR :
+                                                     RDMA_CM_EVENT_ESTABLISHED;
                 } else
-                        event = RDMA_CM_EVENT_CONNECT_RESPONSE;
-                private_data_len = IB_CM_REP_PRIVATE_DATA_SIZE;
+                        event.event = RDMA_CM_EVENT_CONNECT_RESPONSE;
+                cma_set_rep_event_data(&event, &ib_event->param.rep_rcvd,
+                                       ib_event->private_data);
                 break;
         case IB_CM_RTU_RECEIVED:
-                status = cma_rtu_recv(id_priv);
-                event = status ? RDMA_CM_EVENT_CONNECT_ERROR :
-                                 RDMA_CM_EVENT_ESTABLISHED;
+                event.status = cma_rtu_recv(id_priv);
+                event.event = event.status ? RDMA_CM_EVENT_CONNECT_ERROR :
+                                             RDMA_CM_EVENT_ESTABLISHED;
                 break;
         case IB_CM_DREQ_ERROR:
-                status = -ETIMEDOUT; /* fall through */
+                event.status = -ETIMEDOUT; /* fall through */
         case IB_CM_DREQ_RECEIVED:
         case IB_CM_DREP_RECEIVED:
                 if (!cma_comp_exch(id_priv, CMA_CONNECT, CMA_DISCONNECT))
                         goto out;
-                event = RDMA_CM_EVENT_DISCONNECTED;
+                event.event = RDMA_CM_EVENT_DISCONNECTED;
                 break;
         case IB_CM_TIMEWAIT_EXIT:
         case IB_CM_MRA_RECEIVED:
                 /* ignore event */
                 goto out;
         case IB_CM_REJ_RECEIVED:
                 cma_modify_qp_err(&id_priv->id);
-                status = ib_event->param.rej_rcvd.reason;
-                event = RDMA_CM_EVENT_REJECTED;
-                private_data_len = IB_CM_REJ_PRIVATE_DATA_SIZE;
+                event.status = ib_event->param.rej_rcvd.reason;
+                event.event = RDMA_CM_EVENT_REJECTED;
+                event.param.conn.private_data = ib_event->private_data;
+                event.param.conn.private_data_len = IB_CM_REJ_PRIVATE_DATA_SIZE;
                 break;
         default:
                 printk(KERN_ERR "RDMA CMA: unexpected IB CM event: %d",
                        ib_event->event);
                 goto out;
         }
 
-        ret = cma_notify_user(id_priv, event, status, ib_event->private_data,
-                              private_data_len);
+        ret = id_priv->id.event_handler(&id_priv->id, &event);
         if (ret) {
                 /* Destroy the CM ID by returning a non-zero value. */
                 id_priv->cm_id.ib = NULL;
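The new cma_set_rep_event_data() helper copies the IB REP parameters into event.param.conn, so connection attributes now reach the consumer together with the event instead of only a private-data blob. A rough sketch of an active-side handler reading those fields on RDMA_CM_EVENT_ESTABLISHED follows (same headers as the sketch above); the handler name is hypothetical and the snippet is not part of this patch.

/* Illustrative sketch, not part of this patch: reading the REP data that
 * cma_set_rep_event_data() exposes through event->param.conn. */
static int my_active_handler(struct rdma_cm_id *id, struct rdma_cm_event *event)
{
        if (event->event == RDMA_CM_EVENT_ESTABLISHED)
                printk(KERN_INFO "established: initiator_depth %d, "
                       "flow_control %d, srq %d\n",
                       event->param.conn.initiator_depth,
                       event->param.conn.flow_control,
                       event->param.conn.srq);
        return 0;
}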
@@ -911,9 +912,25 @@ static struct rdma_id_private *cma_new_id(struct rdma_cm_id *listen_id,
         return NULL;
 }
 
+static void cma_set_req_event_data(struct rdma_cm_event *event,
+                                   struct ib_cm_req_event_param *req_data,
+                                   void *private_data, int offset)
+{
+        event->param.conn.private_data = private_data + offset;
+        event->param.conn.private_data_len = IB_CM_REQ_PRIVATE_DATA_SIZE - offset;
+        event->param.conn.responder_resources = req_data->responder_resources;
+        event->param.conn.initiator_depth = req_data->initiator_depth;
+        event->param.conn.flow_control = req_data->flow_control;
+        event->param.conn.retry_count = req_data->retry_count;
+        event->param.conn.rnr_retry_count = req_data->rnr_retry_count;
+        event->param.conn.srq = req_data->srq;
+        event->param.conn.qp_num = req_data->remote_qpn;
+}
+
 static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 {
         struct rdma_id_private *listen_id, *conn_id;
+        struct rdma_cm_event event;
         int offset, ret;
 
         listen_id = cm_id->context;
@@ -941,9 +958,11 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
         cm_id->cm_handler = cma_ib_handler;
 
         offset = cma_user_data_offset(listen_id->id.ps);
-        ret = cma_notify_user(conn_id, RDMA_CM_EVENT_CONNECT_REQUEST, 0,
-                              ib_event->private_data + offset,
-                              IB_CM_REQ_PRIVATE_DATA_SIZE - offset);
+        memset(&event, 0, sizeof event);
+        event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
+        cma_set_req_event_data(&event, &ib_event->param.req_rcvd,
+                               ib_event->private_data, offset);
+        ret = conn_id->id.event_handler(&conn_id->id, &event);
         if (!ret)
                 goto out;
 
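Because cma_set_req_event_data() now hands the REQ parameters to the listener's handler, a passive-side consumer can size its rdma_conn_param from the incoming request before accepting. A hedged sketch under the usual rdma_accept() interface follows; the handler and its echo-back policy are illustrative, not taken from this patch.

#include <linux/string.h>
#include <rdma/rdma_cm.h>

/* Illustrative sketch, not part of this patch: echoing the requested
 * resources back when accepting a connection request. */
static int my_listen_handler(struct rdma_cm_id *id, struct rdma_cm_event *event)
{
        struct rdma_conn_param conn_param;

        if (event->event != RDMA_CM_EVENT_CONNECT_REQUEST)
                return 0;

        memset(&conn_param, 0, sizeof conn_param);
        conn_param.responder_resources = event->param.conn.responder_resources;
        conn_param.initiator_depth = event->param.conn.initiator_depth;
        /* A non-zero return (e.g. a failed accept) destroys the new CM ID. */
        return rdma_accept(id, &conn_param);
}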
@@ -1019,36 +1038,38 @@ static void cma_set_compare_data(enum rdma_port_space ps, struct sockaddr *addr,
 static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
 {
         struct rdma_id_private *id_priv = iw_id->context;
-        enum rdma_cm_event_type event = 0;
+        struct rdma_cm_event event;
         struct sockaddr_in *sin;
         int ret = 0;
 
+        memset(&event, 0, sizeof event);
         atomic_inc(&id_priv->dev_remove);
 
         switch (iw_event->event) {
         case IW_CM_EVENT_CLOSE:
-                event = RDMA_CM_EVENT_DISCONNECTED;
+                event.event = RDMA_CM_EVENT_DISCONNECTED;
                 break;
         case IW_CM_EVENT_CONNECT_REPLY:
                 sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
                 *sin = iw_event->local_addr;
                 sin = (struct sockaddr_in *) &id_priv->id.route.addr.dst_addr;
                 *sin = iw_event->remote_addr;
                 if (iw_event->status)
-                        event = RDMA_CM_EVENT_REJECTED;
+                        event.event = RDMA_CM_EVENT_REJECTED;
                 else
-                        event = RDMA_CM_EVENT_ESTABLISHED;
+                        event.event = RDMA_CM_EVENT_ESTABLISHED;
                 break;
         case IW_CM_EVENT_ESTABLISHED:
-                event = RDMA_CM_EVENT_ESTABLISHED;
+                event.event = RDMA_CM_EVENT_ESTABLISHED;
                 break;
         default:
                 BUG_ON(1);
         }
 
-        ret = cma_notify_user(id_priv, event, iw_event->status,
-                              iw_event->private_data,
-                              iw_event->private_data_len);
+        event.status = iw_event->status;
+        event.param.conn.private_data = iw_event->private_data;
+        event.param.conn.private_data_len = iw_event->private_data_len;
+        ret = id_priv->id.event_handler(&id_priv->id, &event);
         if (ret) {
                 /* Destroy the CM ID by returning a non-zero value. */
                 id_priv->cm_id.iw = NULL;
@@ -1069,6 +1090,7 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
         struct rdma_id_private *listen_id, *conn_id;
         struct sockaddr_in *sin;
         struct net_device *dev = NULL;
+        struct rdma_cm_event event;
         int ret;
 
         listen_id = cm_id->context;
@@ -1122,9 +1144,11 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
         sin = (struct sockaddr_in *) &new_cm_id->route.addr.dst_addr;
         *sin = iw_event->remote_addr;
 
-        ret = cma_notify_user(conn_id, RDMA_CM_EVENT_CONNECT_REQUEST, 0,
-                              iw_event->private_data,
-                              iw_event->private_data_len);
+        memset(&event, 0, sizeof event);
+        event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
+        event.param.conn.private_data = iw_event->private_data;
+        event.param.conn.private_data_len = iw_event->private_data_len;
+        ret = conn_id->id.event_handler(&conn_id->id, &event);
         if (ret) {
                 /* User wants to destroy the CM ID */
                 conn_id->cm_id.iw = NULL;
@@ -1513,8 +1537,9 @@ static void addr_handler(int status, struct sockaddr *src_addr,
                          struct rdma_dev_addr *dev_addr, void *context)
 {
         struct rdma_id_private *id_priv = context;
-        enum rdma_cm_event_type event;
+        struct rdma_cm_event event;
 
+        memset(&event, 0, sizeof event);
         atomic_inc(&id_priv->dev_remove);
 
         /*
@@ -1534,14 +1559,15 @@ static void addr_handler(int status, struct sockaddr *src_addr,
         if (status) {
                 if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ADDR_BOUND))
                         goto out;
-                event = RDMA_CM_EVENT_ADDR_ERROR;
+                event.event = RDMA_CM_EVENT_ADDR_ERROR;
+                event.status = status;
         } else {
                 memcpy(&id_priv->id.route.addr.src_addr, src_addr,
                        ip_addr_size(src_addr));
-                event = RDMA_CM_EVENT_ADDR_RESOLVED;
+                event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
         }
 
-        if (cma_notify_user(id_priv, event, status, NULL, 0)) {
+        if (id_priv->id.event_handler(&id_priv->id, &event)) {
                 cma_exch(id_priv, CMA_DESTROYING);
                 cma_release_remove(id_priv);
                 cma_deref_id(id_priv);
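With the address-resolution path switched over as well, RDMA_CM_EVENT_ADDR_ERROR now carries the resolution status in event->status instead of a separate handler argument. A brief, purely illustrative sketch of how a consumer might report it (same headers as the earlier sketches; the handler name is hypothetical):

/* Illustrative sketch, not part of this patch: the resolution error code
 * now arrives in event->status for RDMA_CM_EVENT_ADDR_ERROR. */
static int my_addr_handler(struct rdma_cm_id *id, struct rdma_cm_event *event)
{
        if (event->event == RDMA_CM_EVENT_ADDR_ERROR)
                printk(KERN_WARNING "address resolution failed: %d\n",
                       event->status);
        return 0;
}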
@@ -2132,6 +2158,7 @@ static void cma_add_one(struct ib_device *device)
 
 static int cma_remove_id_dev(struct rdma_id_private *id_priv)
 {
+        struct rdma_cm_event event;
         enum cma_state state;
 
         /* Record that we want to remove the device */
@@ -2146,8 +2173,9 @@ static int cma_remove_id_dev(struct rdma_id_private *id_priv)
         if (!cma_comp(id_priv, CMA_DEVICE_REMOVAL))
                 return 0;
 
-        return cma_notify_user(id_priv, RDMA_CM_EVENT_DEVICE_REMOVAL,
-                               0, NULL, 0);
+        memset(&event, 0, sizeof event);
+        event.event = RDMA_CM_EVENT_DEVICE_REMOVAL;
+        return id_priv->id.event_handler(&id_priv->id, &event);
 }
 
 static void cma_process_remove(struct cma_device *cma_dev)