@@ -1661,29 +1661,6 @@ static __always_inline void set_raddr_seg(struct mlx5_wqe_raddr_seg *rseg,
 	rseg->reserved = 0;
 }
 
-static void set_atomic_seg(struct mlx5_wqe_atomic_seg *aseg, struct ib_send_wr *wr)
-{
-	if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
-		aseg->swap_add = cpu_to_be64(wr->wr.atomic.swap);
-		aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add);
-	} else if (wr->opcode == IB_WR_MASKED_ATOMIC_FETCH_AND_ADD) {
-		aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add);
-		aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add_mask);
-	} else {
-		aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add);
-		aseg->compare = 0;
-	}
-}
-
-static void set_masked_atomic_seg(struct mlx5_wqe_masked_atomic_seg *aseg,
-				  struct ib_send_wr *wr)
-{
-	aseg->swap_add = cpu_to_be64(wr->wr.atomic.swap);
-	aseg->swap_add_mask = cpu_to_be64(wr->wr.atomic.swap_mask);
-	aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add);
-	aseg->compare_mask = cpu_to_be64(wr->wr.atomic.compare_add_mask);
-}
-
 static void set_datagram_seg(struct mlx5_wqe_datagram_seg *dseg,
 			     struct ib_send_wr *wr)
 {
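(Aside, not part of the commit: the WQE segments the removed helpers filled are small big-endian structs. The field names below match the removed code; the exact layouts are assumed from include/linux/mlx5/qp.h of this era. Their sizes are what the "/ 16" arithmetic in the next hunk divides by, since mlx5 accounts WQE size in 16-byte units.)

	/* Remote-address segment: 16 bytes, i.e. one WQE size unit. */
	struct mlx5_wqe_raddr_seg {
		__be64	raddr;		/* remote VA targeted by the RDMA/atomic op */
		__be32	rkey;		/* remote memory key */
		u32	reserved;	/* cleared by set_raddr_seg() above */
	};

	/* Basic atomic segment: 16 bytes, fetch-and-add or cmp-and-swap. */
	struct mlx5_wqe_atomic_seg {
		__be64	swap_add;	/* swap value or addend, depending on opcode */
		__be64	compare;	/* compare value, cmp-and-swap only */
	};

	/* Masked variant: 32 bytes, one mask per operand. */
	struct mlx5_wqe_masked_atomic_seg {
		__be64	swap_add;
		__be64	swap_add_mask;
		__be64	compare;
		__be64	compare_mask;
	};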
@@ -2073,28 +2050,11 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 
 		case IB_WR_ATOMIC_CMP_AND_SWP:
 		case IB_WR_ATOMIC_FETCH_AND_ADD:
-			set_raddr_seg(seg, wr->wr.atomic.remote_addr,
-				      wr->wr.atomic.rkey);
-			seg += sizeof(struct mlx5_wqe_raddr_seg);
-
-			set_atomic_seg(seg, wr);
-			seg += sizeof(struct mlx5_wqe_atomic_seg);
-
-			size += (sizeof(struct mlx5_wqe_raddr_seg) +
-				 sizeof(struct mlx5_wqe_atomic_seg)) / 16;
-			break;
-
 		case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
-			set_raddr_seg(seg, wr->wr.atomic.remote_addr,
-				      wr->wr.atomic.rkey);
-			seg += sizeof(struct mlx5_wqe_raddr_seg);
-
-			set_masked_atomic_seg(seg, wr);
-			seg += sizeof(struct mlx5_wqe_masked_atomic_seg);
-
-			size += (sizeof(struct mlx5_wqe_raddr_seg) +
-				 sizeof(struct mlx5_wqe_masked_atomic_seg)) / 16;
-			break;
+			mlx5_ib_warn(dev, "Atomic operations are not supported yet\n");
+			err = -ENOSYS;
+			*bad_wr = wr;
+			goto out;
 
 		case IB_WR_LOCAL_INV:
 			next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
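(Aside, not part of the commit: with this change a kernel ULP that posts an atomic work request sees the standard verbs failure convention from mlx5, a non-zero return code with *bad_wr pointing at the first rejected request. A minimal sketch, assuming a connected RC QP qp and hypothetical dma_addr/lkey/remote_addr/rkey values:)

	struct ib_sge sge = {
		.addr   = dma_addr,	/* hypothetical DMA address of an 8-byte local buffer */
		.length = sizeof(u64),
		.lkey   = lkey,		/* hypothetical local memory key */
	};
	struct ib_send_wr wr = {
		.wr_id      = 1,
		.sg_list    = &sge,
		.num_sge    = 1,
		.opcode     = IB_WR_ATOMIC_FETCH_AND_ADD,
		.send_flags = IB_SEND_SIGNALED,
	};
	struct ib_send_wr *bad_wr;
	int err;

	wr.wr.atomic.remote_addr = remote_addr;	/* hypothetical remote VA */
	wr.wr.atomic.rkey        = rkey;	/* hypothetical remote key */
	wr.wr.atomic.compare_add = 1;		/* addend for fetch-and-add */

	err = ib_post_send(qp, &wr, &bad_wr);	/* on mlx5 this now returns -ENOSYS */
	if (err)
		pr_warn("atomic WR rejected: %d (first bad wr_id %llu)\n",
			err, (unsigned long long)bad_wr->wr_id);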