@@ -461,7 +461,7 @@ static struct ehca_qp *internal_create_qp(
 					      ib_device);
 	struct ib_ucontext *context = NULL;
 	u64 h_ret;
-	int is_llqp = 0, has_srq = 0;
+	int is_llqp = 0, has_srq = 0, is_user = 0;
 	int qp_type, max_send_sge, max_recv_sge, ret;
 
 	/* h_call's out parameters */
@@ -609,16 +609,18 @@ static struct ehca_qp *internal_create_qp(
 		}
 	}
 
-	if (pd->uobject && udata)
-		context = pd->uobject->context;
-
 	my_qp = kmem_cache_zalloc(qp_cache, GFP_KERNEL);
 	if (!my_qp) {
 		ehca_err(pd->device, "pd=%p not enough memory to alloc qp", pd);
 		atomic_dec(&shca->num_qps);
 		return ERR_PTR(-ENOMEM);
 	}
 
+	if (pd->uobject && udata) {
+		is_user = 1;
+		context = pd->uobject->context;
+	}
+
 	atomic_set(&my_qp->nr_events, 0);
 	init_waitqueue_head(&my_qp->wait_completion);
 	spin_lock_init(&my_qp->spinlock_s);
@@ -707,7 +709,7 @@ static struct ehca_qp *internal_create_qp(
 			(parms.squeue.is_small || parms.rqueue.is_small);
 	}
 
-	h_ret = hipz_h_alloc_resource_qp(shca->ipz_hca_handle, &parms);
+	h_ret = hipz_h_alloc_resource_qp(shca->ipz_hca_handle, &parms, is_user);
 	if (h_ret != H_SUCCESS) {
 		ehca_err(pd->device, "h_alloc_resource_qp() failed h_ret=%lli",
 			 h_ret);
@@ -769,18 +771,20 @@ static struct ehca_qp *internal_create_qp(
 			goto create_qp_exit2;
 		}
 
-		my_qp->sq_map.entries = my_qp->ipz_squeue.queue_length /
-			my_qp->ipz_squeue.qe_size;
-		my_qp->sq_map.map = vmalloc(my_qp->sq_map.entries *
-			sizeof(struct ehca_qmap_entry));
-		if (!my_qp->sq_map.map) {
-			ehca_err(pd->device, "Couldn't allocate squeue "
-				 "map ret=%i", ret);
-			goto create_qp_exit3;
+		if (!is_user) {
+			my_qp->sq_map.entries = my_qp->ipz_squeue.queue_length /
+				my_qp->ipz_squeue.qe_size;
+			my_qp->sq_map.map = vmalloc(my_qp->sq_map.entries *
+				sizeof(struct ehca_qmap_entry));
+			if (!my_qp->sq_map.map) {
+				ehca_err(pd->device, "Couldn't allocate squeue "
+					 "map ret=%i", ret);
+				goto create_qp_exit3;
+			}
+			INIT_LIST_HEAD(&my_qp->sq_err_node);
+			/* to avoid the generation of bogus flush CQEs */
+			reset_queue_map(&my_qp->sq_map);
 		}
-		INIT_LIST_HEAD(&my_qp->sq_err_node);
-		/* to avoid the generation of bogus flush CQEs */
-		reset_queue_map(&my_qp->sq_map);
 	}
 
 	if (HAS_RQ(my_qp)) {
@@ -792,20 +796,21 @@ static struct ehca_qp *internal_create_qp(
 				 "and pages ret=%i", ret);
 			goto create_qp_exit4;
 		}
-
-		my_qp->rq_map.entries = my_qp->ipz_rqueue.queue_length /
-			my_qp->ipz_rqueue.qe_size;
-		my_qp->rq_map.map = vmalloc(my_qp->rq_map.entries *
-			sizeof(struct ehca_qmap_entry));
-		if (!my_qp->rq_map.map) {
-			ehca_err(pd->device, "Couldn't allocate squeue "
-				 "map ret=%i", ret);
-			goto create_qp_exit5;
+		if (!is_user) {
+			my_qp->rq_map.entries = my_qp->ipz_rqueue.queue_length /
+				my_qp->ipz_rqueue.qe_size;
+			my_qp->rq_map.map = vmalloc(my_qp->rq_map.entries *
+				sizeof(struct ehca_qmap_entry));
+			if (!my_qp->rq_map.map) {
+				ehca_err(pd->device, "Couldn't allocate squeue "
+					 "map ret=%i", ret);
+				goto create_qp_exit5;
+			}
+			INIT_LIST_HEAD(&my_qp->rq_err_node);
+			/* to avoid the generation of bogus flush CQEs */
+			reset_queue_map(&my_qp->rq_map);
 		}
-		INIT_LIST_HEAD(&my_qp->rq_err_node);
-		/* to avoid the generation of bogus flush CQEs */
-		reset_queue_map(&my_qp->rq_map);
-	} else if (init_attr->srq) {
+	} else if (init_attr->srq && !is_user) {
 		/* this is a base QP, use the queue map of the SRQ */
 		my_qp->rq_map = my_srq->rq_map;
 		INIT_LIST_HEAD(&my_qp->rq_err_node);
@@ -918,15 +923,15 @@ static struct ehca_qp *internal_create_qp(
 		kfree(my_qp->mod_qp_parm);
 
 create_qp_exit6:
-	if (HAS_RQ(my_qp))
+	if (HAS_RQ(my_qp) && !is_user)
 		vfree(my_qp->rq_map.map);
 
 create_qp_exit5:
 	if (HAS_RQ(my_qp))
 		ipz_queue_dtor(my_pd, &my_qp->ipz_rqueue);
 
 create_qp_exit4:
-	if (HAS_SQ(my_qp))
+	if (HAS_SQ(my_qp) && !is_user)
 		vfree(my_qp->sq_map.map);
 
 create_qp_exit3:
@@ -1244,6 +1249,7 @@ static int internal_modify_qp(struct ib_qp *ibqp,
 	u64 update_mask;
 	u64 h_ret;
 	int bad_wqe_cnt = 0;
+	int is_user = 0;
 	int squeue_locked = 0;
 	unsigned long flags = 0;
 
@@ -1266,6 +1272,8 @@ static int internal_modify_qp(struct ib_qp *ibqp,
 		ret = ehca2ib_return_code(h_ret);
 		goto modify_qp_exit1;
 	}
+	if (ibqp->uobject)
+		is_user = 1;
 
 	qp_cur_state = ehca2ib_qp_state(mqpcb->qp_state);
 
@@ -1728,7 +1736,8 @@ static int internal_modify_qp(struct ib_qp *ibqp,
 			goto modify_qp_exit2;
 		}
 	}
-	if ((qp_new_state == IB_QPS_ERR) && (qp_cur_state != IB_QPS_ERR)) {
+	if ((qp_new_state == IB_QPS_ERR) && (qp_cur_state != IB_QPS_ERR)
+	    && !is_user) {
 		ret = check_for_left_cqes(my_qp, shca);
 		if (ret)
 			goto modify_qp_exit2;
@@ -1738,16 +1747,17 @@ static int internal_modify_qp(struct ib_qp *ibqp,
 		ipz_qeit_reset(&my_qp->ipz_rqueue);
 		ipz_qeit_reset(&my_qp->ipz_squeue);
 
-		if (qp_cur_state == IB_QPS_ERR) {
+		if (qp_cur_state == IB_QPS_ERR && !is_user) {
 			del_from_err_list(my_qp->send_cq, &my_qp->sq_err_node);
 
 			if (HAS_RQ(my_qp))
 				del_from_err_list(my_qp->recv_cq,
 						  &my_qp->rq_err_node);
 		}
-		reset_queue_map(&my_qp->sq_map);
+		if (!is_user)
+			reset_queue_map(&my_qp->sq_map);
 
-		if (HAS_RQ(my_qp))
+		if (HAS_RQ(my_qp) && !is_user)
 			reset_queue_map(&my_qp->rq_map);
 	}
 
@@ -1952,19 +1962,13 @@ int ehca_query_qp(struct ib_qp *qp,
 	qp_attr->cap.max_inline_data = my_qp->sq_max_inline_data_size;
 	qp_attr->dest_qp_num = qpcb->dest_qp_nr;
 
-	qp_attr->pkey_index =
-		EHCA_BMASK_GET(MQPCB_PRIM_P_KEY_IDX, qpcb->prim_p_key_idx);
-
-	qp_attr->port_num =
-		EHCA_BMASK_GET(MQPCB_PRIM_PHYS_PORT, qpcb->prim_phys_port);
-
+	qp_attr->pkey_index = qpcb->prim_p_key_idx;
+	qp_attr->port_num = qpcb->prim_phys_port;
 	qp_attr->timeout = qpcb->timeout;
 	qp_attr->retry_cnt = qpcb->retry_count;
 	qp_attr->rnr_retry = qpcb->rnr_retry_count;
 
-	qp_attr->alt_pkey_index =
-		EHCA_BMASK_GET(MQPCB_PRIM_P_KEY_IDX, qpcb->alt_p_key_idx);
-
+	qp_attr->alt_pkey_index = qpcb->alt_p_key_idx;
 	qp_attr->alt_port_num = qpcb->alt_phys_port;
 	qp_attr->alt_timeout = qpcb->timeout_al;
 
@@ -2051,8 +2055,7 @@ int ehca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
 		update_mask |=
 			EHCA_BMASK_SET(MQPCB_MASK_CURR_SRQ_LIMIT, 1)
 			| EHCA_BMASK_SET(MQPCB_MASK_QP_AFF_ASYN_EV_LOG_REG, 1);
-		mqpcb->curr_srq_limit =
-			EHCA_BMASK_SET(MQPCB_CURR_SRQ_LIMIT, attr->srq_limit);
+		mqpcb->curr_srq_limit = attr->srq_limit;
 		mqpcb->qp_aff_asyn_ev_log_reg =
 			EHCA_BMASK_SET(QPX_AAELOG_RESET_SRQ_LIMIT, 1);
 	}
@@ -2115,8 +2118,7 @@ int ehca_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr)
 
 	srq_attr->max_wr = qpcb->max_nr_outst_recv_wr - 1;
 	srq_attr->max_sge = 3;
-	srq_attr->srq_limit = EHCA_BMASK_GET(
-		MQPCB_CURR_SRQ_LIMIT, qpcb->curr_srq_limit);
+	srq_attr->srq_limit = qpcb->curr_srq_limit;
 
 	if (ehca_debug_level >= 2)
 		ehca_dmp(qpcb, 4*70, "qp_num=%x", my_qp->real_qp_num);
@@ -2138,10 +2140,12 @@ static int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp,
 	int ret;
 	u64 h_ret;
 	u8 port_num;
+	int is_user = 0;
 	enum ib_qp_type qp_type;
 	unsigned long flags;
 
 	if (uobject) {
+		is_user = 1;
 		if (my_qp->mm_count_galpa ||
 		    my_qp->mm_count_rqueue || my_qp->mm_count_squeue) {
 			ehca_err(dev, "Resources still referenced in "
@@ -2168,10 +2172,10 @@ static int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp,
 	 * SRQs will never get into an error list and do not have a recv_cq,
 	 * so we need to skip them here.
 	 */
-	if (HAS_RQ(my_qp) && !IS_SRQ(my_qp))
+	if (HAS_RQ(my_qp) && !IS_SRQ(my_qp) && !is_user)
 		del_from_err_list(my_qp->recv_cq, &my_qp->rq_err_node);
 
-	if (HAS_SQ(my_qp))
+	if (HAS_SQ(my_qp) && !is_user)
 		del_from_err_list(my_qp->send_cq, &my_qp->sq_err_node);
 
 	/* now wait until all pending events have completed */
@@ -2209,13 +2213,13 @@ static int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp,
 
 	if (HAS_RQ(my_qp)) {
 		ipz_queue_dtor(my_pd, &my_qp->ipz_rqueue);
-
-		vfree(my_qp->rq_map.map);
+		if (!is_user)
+			vfree(my_qp->rq_map.map);
 	}
 	if (HAS_SQ(my_qp)) {
 		ipz_queue_dtor(my_pd, &my_qp->ipz_squeue);
-
-		vfree(my_qp->sq_map.map);
+		if (!is_user)
+			vfree(my_qp->sq_map.map);
 	}
 	kmem_cache_free(qp_cache, my_qp);
 	atomic_dec(&shca->num_qps);