/*
 * Forward declarations for the "locked" send helpers.  Each of these
 * expects the caller to already hold cm_id_priv->lock; the corresponding
 * public ib_send_cm_* entry points take the lock and delegate here.
 */
static int cm_send_dreq_locked(struct cm_id_private *cm_id_priv,
			       const void *private_data, u8 private_data_len);
static int cm_send_drep_locked(struct cm_id_private *cm_id_priv,
			       void *private_data, u8 private_data_len);
static int cm_send_rej_locked(struct cm_id_private *cm_id_priv,
			      enum ib_cm_rej_reason reason, void *ari,
			      u8 ari_length, const void *private_data,
			      u8 private_data_len);
static struct ib_client cm_client = {
92
96
.name = "cm" ,
@@ -1060,21 +1064,22 @@ static void cm_destroy_id(struct ib_cm_id *cm_id, int err)
1060
1064
case IB_CM_REQ_SENT :
1061
1065
case IB_CM_MRA_REQ_RCVD :
1062
1066
ib_cancel_mad (cm_id_priv -> av .port -> mad_agent , cm_id_priv -> msg );
1067
+ cm_send_rej_locked (cm_id_priv , IB_CM_REJ_TIMEOUT ,
1068
+ & cm_id_priv -> id .device -> node_guid ,
1069
+ sizeof (cm_id_priv -> id .device -> node_guid ),
1070
+ NULL , 0 );
1063
1071
spin_unlock_irq (& cm_id_priv -> lock );
1064
- ib_send_cm_rej (cm_id , IB_CM_REJ_TIMEOUT ,
1065
- & cm_id_priv -> id .device -> node_guid ,
1066
- sizeof cm_id_priv -> id .device -> node_guid ,
1067
- NULL , 0 );
1068
1072
break ;
1069
1073
case IB_CM_REQ_RCVD :
1070
1074
if (err == - ENOMEM ) {
1071
1075
/* Do not reject to allow future retries. */
1072
1076
cm_reset_to_idle (cm_id_priv );
1073
1077
spin_unlock_irq (& cm_id_priv -> lock );
1074
1078
} else {
1079
+ cm_send_rej_locked (cm_id_priv ,
1080
+ IB_CM_REJ_CONSUMER_DEFINED , NULL , 0 ,
1081
+ NULL , 0 );
1075
1082
spin_unlock_irq (& cm_id_priv -> lock );
1076
- ib_send_cm_rej (cm_id , IB_CM_REJ_CONSUMER_DEFINED ,
1077
- NULL , 0 , NULL , 0 );
1078
1083
}
1079
1084
break ;
1080
1085
case IB_CM_REP_SENT :
@@ -1084,9 +1089,9 @@ static void cm_destroy_id(struct ib_cm_id *cm_id, int err)
1084
1089
case IB_CM_MRA_REQ_SENT :
1085
1090
case IB_CM_REP_RCVD :
1086
1091
case IB_CM_MRA_REP_SENT :
1092
+ cm_send_rej_locked (cm_id_priv , IB_CM_REJ_CONSUMER_DEFINED , NULL ,
1093
+ 0 , NULL , 0 );
1087
1094
spin_unlock_irq (& cm_id_priv -> lock );
1088
- ib_send_cm_rej (cm_id , IB_CM_REJ_CONSUMER_DEFINED ,
1089
- NULL , 0 , NULL , 0 );
1090
1095
break ;
1091
1096
case IB_CM_ESTABLISHED :
1092
1097
if (cm_id_priv -> qp_type == IB_QPT_XRC_TGT ) {
@@ -2899,65 +2904,72 @@ static int cm_drep_handler(struct cm_work *work)
2899
2904
return - EINVAL ;
2900
2905
}
2901
2906
2902
/*
 * cm_send_rej_locked() - format and post a REJ MAD for @cm_id_priv.
 * @cm_id_priv:       connection to reject; its ->lock must be held
 * @reason:           REJ reason code placed in the message
 * @ari:              optional additional rejection info (may be NULL)
 * @ari_length:       length of @ari, at most IB_CM_REJ_ARI_LENGTH
 * @private_data:     optional private data (may be NULL)
 * @private_data_len: length of @private_data, at most
 *                    IB_CM_REJ_PRIVATE_DATA_SIZE
 *
 * Valid only in the REQ/REP exchange states; any other state returns
 * -EINVAL.  The connection state is transitioned (to idle or timewait)
 * *before* the MAD is allocated, so a failed allocation still leaves the
 * connection in its new state.
 *
 * Return: 0 on success or a negative errno.
 */
static int cm_send_rej_locked(struct cm_id_private *cm_id_priv,
			      enum ib_cm_rej_reason reason, void *ari,
			      u8 ari_length, const void *private_data,
			      u8 private_data_len)
{
	struct ib_mad_send_buf *msg;
	int ret;

	lockdep_assert_held(&cm_id_priv->lock);

	/* Reject oversized payloads up front, before touching any state. */
	if ((private_data && private_data_len > IB_CM_REJ_PRIVATE_DATA_SIZE) ||
	    (ari && ari_length > IB_CM_REJ_ARI_LENGTH))
		return -EINVAL;

	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
		/* Pre-connection states: drop straight back to idle. */
		cm_reset_to_idle(cm_id_priv);
		ret = cm_alloc_msg(cm_id_priv, &msg);
		if (ret)
			return ret;
		cm_format_rej((struct cm_rej_msg *)msg->mad, cm_id_priv, reason,
			      ari, ari_length, private_data, private_data_len);
		break;
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		/* A REP is in flight: go through timewait instead of idle. */
		cm_enter_timewait(cm_id_priv);
		ret = cm_alloc_msg(cm_id_priv, &msg);
		if (ret)
			return ret;
		cm_format_rej((struct cm_rej_msg *)msg->mad, cm_id_priv, reason,
			      ari, ari_length, private_data, private_data_len);
		break;
	default:
		pr_debug("%s: local_id %d, cm_id->state: %d\n", __func__,
			 be32_to_cpu(cm_id_priv->id.local_id),
			 cm_id_priv->id.state);
		return -EINVAL;
	}

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		cm_free_msg(msg);
		return ret;
	}

	return 0;
}
2960
+ int ib_send_cm_rej (struct ib_cm_id * cm_id , enum ib_cm_rej_reason reason ,
2961
+ void * ari , u8 ari_length , const void * private_data ,
2962
+ u8 private_data_len )
2963
+ {
2964
+ struct cm_id_private * cm_id_priv =
2965
+ container_of (cm_id , struct cm_id_private , id );
2966
+ unsigned long flags ;
2967
+ int ret ;
2968
+
2969
+ spin_lock_irqsave (& cm_id_priv -> lock , flags );
2970
+ ret = cm_send_rej_locked (cm_id_priv , reason , ari , ari_length ,
2971
+ private_data , private_data_len );
2972
+ spin_unlock_irqrestore (& cm_id_priv -> lock , flags );
2961
2973
return ret ;
2962
2974
}
2963
2975
EXPORT_SYMBOL (ib_send_cm_rej );
0 commit comments