@@ -795,8 +795,8 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
 						  struct ib_udata *udata)
 {
 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
-	struct mlx5_ib_alloc_ucontext_req_v2 req;
-	struct mlx5_ib_alloc_ucontext_resp resp;
+	struct mlx5_ib_alloc_ucontext_req_v2 req = {};
+	struct mlx5_ib_alloc_ucontext_resp resp = {};
 	struct mlx5_ib_ucontext *context;
 	struct mlx5_uuar_info *uuari;
 	struct mlx5_uar *uars;
@@ -811,20 +811,19 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
 	if (!dev->ib_active)
 		return ERR_PTR(-EAGAIN);
 
-	memset(&req, 0, sizeof(req));
 	reqlen = udata->inlen - sizeof(struct ib_uverbs_cmd_hdr);
 	if (reqlen == sizeof(struct mlx5_ib_alloc_ucontext_req))
 		ver = 0;
-	else if (reqlen == sizeof(struct mlx5_ib_alloc_ucontext_req_v2))
+	else if (reqlen >= sizeof(struct mlx5_ib_alloc_ucontext_req_v2))
 		ver = 2;
 	else
 		return ERR_PTR(-EINVAL);
 
-	err = ib_copy_from_udata(&req, udata, reqlen);
+	err = ib_copy_from_udata(&req, udata, min(reqlen, sizeof(req)));
 	if (err)
 		return ERR_PTR(err);
 
-	if (req.flags || req.reserved)
+	if (req.flags)
 		return ERR_PTR(-EINVAL);
 
 	if (req.total_num_uuars > MLX5_MAX_UUARS)
@@ -833,6 +832,14 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
 	if (req.total_num_uuars == 0)
 		return ERR_PTR(-EINVAL);
 
+	if (req.comp_mask)
+		return ERR_PTR(-EOPNOTSUPP);
+
+	if (reqlen > sizeof(req) &&
+	    !ib_is_udata_cleared(udata, sizeof(req),
+				 udata->inlen - sizeof(req)))
+		return ERR_PTR(-EOPNOTSUPP);
+
 	req.total_num_uuars = ALIGN(req.total_num_uuars,
 				    MLX5_NON_FP_BF_REGS_PER_PAGE);
 	if (req.num_low_latency_uuars > req.total_num_uuars - 1)
@@ -848,6 +855,8 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
 	resp.max_send_wqebb = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
 	resp.max_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
 	resp.max_srq_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz);
+	resp.response_length = min(offsetof(typeof(resp), response_length) +
+				   sizeof(resp.response_length), udata->outlen);
 
 	context = kzalloc(sizeof(*context), GFP_KERNEL);
 	if (!context)
@@ -898,8 +907,20 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
 
 	resp.tot_uuars = req.total_num_uuars;
 	resp.num_ports = MLX5_CAP_GEN(dev->mdev, num_ports);
-	err = ib_copy_to_udata(udata, &resp,
-			       sizeof(resp) - sizeof(resp.reserved));
+
+	if (field_avail(typeof(resp), reserved2, udata->outlen))
+		resp.response_length += sizeof(resp.reserved2);
+
+	if (field_avail(typeof(resp), hca_core_clock_offset, udata->outlen)) {
+		resp.comp_mask |=
+			MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_CORE_CLOCK_OFFSET;
+		resp.hca_core_clock_offset =
+			offsetof(struct mlx5_init_seg, internal_timer_h) %
+			PAGE_SIZE;
+		resp.response_length += sizeof(resp.hca_core_clock_offset);
+	}
+
+	err = ib_copy_to_udata(udata, &resp, resp.response_length);
 	if (err)
 		goto out_uars;
 
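The hunks above converge on the usual extensible-ABI handshake for verbs udata: copy in at most sizeof(req) bytes, reject the call if a newer userspace left non-zero bytes beyond what the kernel understands, and copy out only as many response bytes as the caller provided room for, reporting that size back in response_length. The standalone C sketch below illustrates that handshake outside the kernel; the struct layouts and the tail_is_cleared()/alloc_ucontext_model() helpers are invented for the example and merely stand in for ib_is_udata_cleared()/ib_copy_from_udata()/ib_copy_to_udata() and field_avail(), not the real mlx5 uAPI.

#include <stdio.h>
#include <string.h>
#include <stddef.h>

/* Illustrative request/response layouts; NOT the mlx5 uAPI structs. */
struct req_v1 {
	unsigned int total_num_uuars;
	unsigned int num_low_latency_uuars;
};

struct req_v2 {
	unsigned int total_num_uuars;
	unsigned int num_low_latency_uuars;
	unsigned int flags;
	unsigned int comp_mask;
};

struct resp {
	unsigned int tot_uuars;
	unsigned int response_length;
	unsigned long long hca_core_clock_offset;
};

#define MIN(a, b) ((a) < (b) ? (a) : (b))

/* Stand-in for ib_is_udata_cleared(): any request bytes the kernel does not
 * understand must be zero, otherwise it cannot honour what was asked for. */
static int tail_is_cleared(const void *buf, size_t from, size_t len)
{
	const unsigned char *p = (const unsigned char *)buf + from;

	while (len--)
		if (*p++)
			return 0;
	return 1;
}

/* Model of the kernel side: accept any request at least as large as v1,
 * copy a bounded amount in, and write a bounded, self-describing response. */
static int alloc_ucontext_model(const void *inbuf, size_t inlen,
				void *outbuf, size_t outlen)
{
	struct req_v2 req = {};
	struct resp resp = {};

	if (inlen < sizeof(struct req_v1))
		return -1;			/* request too short to parse */

	/* ib_copy_from_udata() analogue: never read past what we understand. */
	memcpy(&req, inbuf, MIN(inlen, sizeof(req)));
	if (inlen > sizeof(req) &&
	    !tail_is_cleared(inbuf, sizeof(req), inlen - sizeof(req)))
		return -2;			/* newer userspace asked for more */

	resp.tot_uuars = req.total_num_uuars;
	/* Tell userspace exactly how many response bytes it will get back. */
	resp.response_length = MIN(sizeof(resp), outlen);

	/* field_avail() analogue: fill optional fields only if the caller
	 * provided room for them. */
	if (offsetof(struct resp, hca_core_clock_offset) +
	    sizeof(resp.hca_core_clock_offset) <= outlen)
		resp.hca_core_clock_offset = 0x1000;

	/* ib_copy_to_udata() analogue: never write past the caller's buffer. */
	memcpy(outbuf, &resp, resp.response_length);
	return 0;
}

int main(void)
{
	struct req_v1 old_req = { .total_num_uuars = 16 };
	struct resp out = {};

	/* An old consumer sends the short request and a short response buffer;
	 * the bounded copies keep both directions safe. */
	if (!alloc_ucontext_model(&old_req, sizeof(old_req), &out,
				  offsetof(struct resp, hca_core_clock_offset)))
		printf("tot_uuars=%u response_length=%u\n",
		       out.tot_uuars, out.response_length);
	return 0;
}

With both copy lengths bounded, an old userspace paired with a new kernel (and the reverse) degrades gracefully instead of overrunning a buffer or failing the call outright.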