Skip to content

Commit bf23974

Browse files
Ilya Lesokhin authored and davem330 committed
net/mlx5e: TLS, Add Innova TLS TX offload data path
Implement the TLS tx offload data path according to the requirements of the TLS generic NIC offload infrastructure. Special metadata ethertype is used to pass information to the hardware. Signed-off-by: Ilya Lesokhin <[email protected]> Signed-off-by: Boris Pismenny <[email protected]> Acked-by: Saeed Mahameed <[email protected]> Signed-off-by: David S. Miller <[email protected]>
1 parent c83294b commit bf23974

File tree

10 files changed

+455
-16
lines changed

10 files changed

+455
-16
lines changed

drivers/net/ethernet/mellanox/mlx5/core/Makefile

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -28,6 +28,6 @@ mlx5_core-$(CONFIG_MLX5_CORE_IPOIB) += ipoib/ipoib.o ipoib/ethtool.o ipoib/ipoib
2828
mlx5_core-$(CONFIG_MLX5_EN_IPSEC) += en_accel/ipsec.o en_accel/ipsec_rxtx.o \
2929
en_accel/ipsec_stats.o
3030

31-
mlx5_core-$(CONFIG_MLX5_EN_TLS) += en_accel/tls.o
31+
mlx5_core-$(CONFIG_MLX5_EN_TLS) += en_accel/tls.o en_accel/tls_rxtx.o
3232

3333
CFLAGS_tracepoint.o := -I$(src)

drivers/net/ethernet/mellanox/mlx5/core/en.h

Lines changed: 15 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -335,6 +335,7 @@ enum {
335335
MLX5E_SQ_STATE_RECOVERING,
336336
MLX5E_SQ_STATE_IPSEC,
337337
MLX5E_SQ_STATE_AM,
338+
MLX5E_SQ_STATE_TLS,
338339
};
339340

340341
struct mlx5e_sq_wqe_info {
@@ -830,6 +831,8 @@ void mlx5e_build_ptys2ethtool_map(void);
830831
u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
831832
void *accel_priv, select_queue_fallback_t fallback);
832833
netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev);
834+
netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
835+
struct mlx5e_tx_wqe *wqe, u16 pi);
833836

834837
void mlx5e_completion_event(struct mlx5_core_cq *mcq);
835838
void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event);
@@ -945,6 +948,18 @@ static inline bool mlx5e_tunnel_inner_ft_supported(struct mlx5_core_dev *mdev)
945948
MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ft_field_support.inner_ip_version));
946949
}
947950

951+
static inline void mlx5e_sq_fetch_wqe(struct mlx5e_txqsq *sq,
952+
struct mlx5e_tx_wqe **wqe,
953+
u16 *pi)
954+
{
955+
struct mlx5_wq_cyc *wq;
956+
957+
wq = &sq->wq;
958+
*pi = sq->pc & wq->sz_m1;
959+
*wqe = mlx5_wq_cyc_get_wqe(wq, *pi);
960+
memset(*wqe, 0, sizeof(**wqe));
961+
}
962+
948963
static inline
949964
struct mlx5e_tx_wqe *mlx5e_post_nop(struct mlx5_wq_cyc *wq, u32 sqn, u16 *pc)
950965
{
drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h

Lines changed: 72 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,72 @@
1+
/*
2+
* Copyright (c) 2018 Mellanox Technologies. All rights reserved.
3+
*
4+
* This software is available to you under a choice of one of two
5+
* licenses. You may choose to be licensed under the terms of the GNU
6+
* General Public License (GPL) Version 2, available from the file
7+
* COPYING in the main directory of this source tree, or the
8+
* OpenIB.org BSD license below:
9+
*
10+
* Redistribution and use in source and binary forms, with or
11+
* without modification, are permitted provided that the following
12+
* conditions are met:
13+
*
14+
* - Redistributions of source code must retain the above
15+
* copyright notice, this list of conditions and the following
16+
* disclaimer.
17+
*
18+
* - Redistributions in binary form must reproduce the above
19+
* copyright notice, this list of conditions and the following
20+
* disclaimer in the documentation and/or other materials
21+
* provided with the distribution.
22+
*
23+
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24+
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25+
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26+
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27+
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28+
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29+
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30+
* SOFTWARE.
31+
*
32+
*/
33+
34+
#ifndef __MLX5E_EN_ACCEL_H__
35+
#define __MLX5E_EN_ACCEL_H__
36+
37+
#ifdef CONFIG_MLX5_ACCEL
38+
39+
#include <linux/skbuff.h>
40+
#include <linux/netdevice.h>
41+
#include "en_accel/ipsec_rxtx.h"
42+
#include "en_accel/tls_rxtx.h"
43+
#include "en.h"
44+
45+
/* Run the enabled acceleration offloads (TLS, then IPSEC) on a TX skb
 * before it is posted to the send queue.
 *
 * Each handler may replace the skb (e.g. after inserting metadata) or
 * consume it; a NULL return means the skb was dropped/consumed by an
 * offload handler and the caller must not transmit it.  @wqe/@pi may be
 * advanced by the TLS handler if it transmitted an extra resync skb.
 */
static inline struct sk_buff *mlx5e_accel_handle_tx(struct sk_buff *skb,
						    struct mlx5e_txqsq *sq,
						    struct net_device *dev,
						    struct mlx5e_tx_wqe **wqe,
						    u16 *pi)
{
#ifdef CONFIG_MLX5_EN_TLS
	if (sq->state & BIT(MLX5E_SQ_STATE_TLS)) {
		skb = mlx5e_tls_handle_tx_skb(dev, sq, skb, wqe, pi);
		if (unlikely(!skb))
			return NULL;
	}
#endif

#ifdef CONFIG_MLX5_EN_IPSEC
	if (sq->state & BIT(MLX5E_SQ_STATE_IPSEC)) {
		skb = mlx5e_ipsec_handle_tx_skb(dev, *wqe, skb);
		if (unlikely(!skb))
			return NULL;
	}
#endif

	return skb;
}
69+
70+
#endif /* CONFIG_MLX5_ACCEL */
71+
72+
#endif /* __MLX5E_EN_ACCEL_H__ */

drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -169,5 +169,7 @@ void mlx5e_tls_build_netdev(struct mlx5e_priv *priv)
169169
if (!mlx5_accel_is_tls_device(priv->mdev))
170170
return;
171171

172+
netdev->features |= NETIF_F_HW_TLS_TX;
173+
netdev->hw_features |= NETIF_F_HW_TLS_TX;
172174
netdev->tlsdev_ops = &mlx5e_tls_ops;
173175
}
drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c

Lines changed: 272 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,272 @@
1+
/*
2+
* Copyright (c) 2018 Mellanox Technologies. All rights reserved.
3+
*
4+
* This software is available to you under a choice of one of two
5+
* licenses. You may choose to be licensed under the terms of the GNU
6+
* General Public License (GPL) Version 2, available from the file
7+
* COPYING in the main directory of this source tree, or the
8+
* OpenIB.org BSD license below:
9+
*
10+
* Redistribution and use in source and binary forms, with or
11+
* without modification, are permitted provided that the following
12+
* conditions are met:
13+
*
14+
* - Redistributions of source code must retain the above
15+
* copyright notice, this list of conditions and the following
16+
* disclaimer.
17+
*
18+
* - Redistributions in binary form must reproduce the above
19+
* copyright notice, this list of conditions and the following
20+
* disclaimer in the documentation and/or other materials
21+
* provided with the distribution.
22+
*
23+
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24+
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25+
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26+
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27+
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28+
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29+
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30+
* SOFTWARE.
31+
*
32+
*/
33+
34+
#include "en_accel/tls.h"
35+
#include "en_accel/tls_rxtx.h"
36+
37+
#define SYNDROME_OFFLOAD_REQUIRED 32
38+
#define SYNDROME_SYNC 33
39+
40+
/* Resync data collected from the TLS record that contains an
 * out-of-order TCP sequence number (see mlx5e_tls_get_sync_data()).
 */
struct sync_info {
	u64 rcd_sn;			/* record sequence number of the record */
	s32 sync_len;			/* bytes from record start up to tcp_seq;
					 * negative if tcp_seq precedes the record
					 */
	int nr_frags;			/* number of valid entries in frags[] */
	skb_frag_t frags[MAX_SKB_FRAGS];	/* referenced frags covering sync_len bytes */
};
46+
47+
/* Metadata pseudo-header inserted after the ethernet addresses to pass
 * TLS offload information to the hardware; it is tagged with the special
 * MLX5E_METADATA_ETHER_TYPE (see mlx5e_tls_add_metadata()).
 */
struct mlx5e_tls_metadata {
	/* One byte of syndrome followed by 3 bytes of swid */
	__be32 syndrome_swid;
	__be16 first_seq;
	/* packet type ID field */
	__be16 ethertype;
} __packed;
54+
55+
/* Insert a struct mlx5e_tls_metadata between the ethernet addresses and
 * the original ethertype of @skb, tagging the frame with
 * MLX5E_METADATA_ETHER_TYPE and the offload-required syndrome plus @swid.
 *
 * Returns 0 on success, -ENOMEM if headroom could not be expanded.
 */
static int mlx5e_tls_add_metadata(struct sk_buff *skb, __be32 swid)
{
	struct mlx5e_tls_metadata *pet;
	struct ethhdr *eth;

	if (skb_cow_head(skb, sizeof(struct mlx5e_tls_metadata)))
		return -ENOMEM;

	eth = (struct ethhdr *)skb_push(skb, sizeof(struct mlx5e_tls_metadata));
	skb->mac_header -= sizeof(struct mlx5e_tls_metadata);
	pet = (struct mlx5e_tls_metadata *)(eth + 1);

	/* Move dst+src MAC addresses to the new frame start; the gap left
	 * behind (right after the ethhdr) becomes the metadata header.
	 */
	memmove(skb->data, skb->data + sizeof(struct mlx5e_tls_metadata),
		2 * ETH_ALEN);

	eth->h_proto = cpu_to_be16(MLX5E_METADATA_ETHER_TYPE);
	/* swid occupies the low 3 bytes; syndrome the top byte */
	pet->syndrome_swid = htonl(SYNDROME_OFFLOAD_REQUIRED << 24) | swid;

	return 0;
}
75+
76+
/* Look up the TLS record containing @tcp_seq and fill @info with the
 * data needed to resynchronize the hardware: the record sequence number,
 * the number of record bytes preceding @tcp_seq (sync_len), and
 * reference-counted frags covering exactly those bytes.
 *
 * A negative sync_len with ret == 0 means @tcp_seq precedes the offload
 * start marker (nothing to resync).  Returns -EINVAL when no record is
 * found or @tcp_seq falls before a non-marker record.
 *
 * On success with sync_len > 0, the caller owns one reference on each of
 * info->frags[0..nr_frags) and must release them.
 */
static int mlx5e_tls_get_sync_data(struct mlx5e_tls_offload_context *context,
				   u32 tcp_seq, struct sync_info *info)
{
	int remaining, i = 0, ret = -EINVAL;
	struct tls_record_info *record;
	unsigned long flags;
	s32 sync_size;

	/* base.lock protects the record list against concurrent updates
	 * from the TLS stack
	 */
	spin_lock_irqsave(&context->base.lock, flags);
	record = tls_get_record(&context->base, tcp_seq, &info->rcd_sn);

	if (unlikely(!record))
		goto out;

	sync_size = tcp_seq - tls_record_start_seq(record);
	info->sync_len = sync_size;
	if (unlikely(sync_size < 0)) {
		/* tcp_seq is before the record: only legitimate for the
		 * start-of-offload marker record
		 */
		if (tls_record_is_start_marker(record))
			goto done;

		goto out;
	}

	/* Take references on the record frags that hold the sync_size
	 * bytes preceding tcp_seq, trimming the last frag to size.
	 */
	remaining = sync_size;
	while (remaining > 0) {
		info->frags[i] = record->frags[i];
		__skb_frag_ref(&info->frags[i]);
		remaining -= skb_frag_size(&info->frags[i]);

		if (remaining < 0)
			skb_frag_size_add(&info->frags[i], remaining);

		i++;
	}
	info->nr_frags = i;
done:
	ret = 0;
out:
	spin_unlock_irqrestore(&context->base.lock, flags);
	return ret;
}
117+
118+
/* Finish building the resync skb @nskb from the original @skb: copy the
 * L2-L4 headers (@headln bytes), append the record sequence number,
 * rewind the TCP sequence to the record start, set up GSO if the resync
 * payload exceeds a single MTU, and patch the metadata header with the
 * SYNC syndrome and the 16 low bits of the record-start sequence.
 *
 * NOTE(review): assumes IPv4 headers (ip_hdr()->tot_len is rewritten
 * unconditionally) — confirm IPv6 handling is done elsewhere or not
 * supported on this path.
 */
static void mlx5e_tls_complete_sync_skb(struct sk_buff *skb,
					struct sk_buff *nskb, u32 tcp_seq,
					int headln, __be64 rcd_sn)
{
	struct mlx5e_tls_metadata *pet;
	u8 syndrome = SYNDROME_SYNC;
	struct iphdr *iph;
	struct tcphdr *th;
	int data_len, mss;

	nskb->dev = skb->dev;
	skb_reset_mac_header(nskb);
	skb_set_network_header(nskb, skb_network_offset(skb));
	skb_set_transport_header(nskb, skb_transport_offset(skb));
	memcpy(nskb->data, skb->data, headln);
	/* record sequence number goes right after the copied headers */
	memcpy(nskb->data + headln, &rcd_sn, sizeof(rcd_sn));

	iph = ip_hdr(nskb);
	iph->tot_len = htons(nskb->len - skb_network_offset(nskb));
	th = tcp_hdr(nskb);
	data_len = nskb->len - headln;
	/* rewind so nskb's payload ends where the original skb begins */
	tcp_seq -= data_len;
	th->seq = htonl(tcp_seq);

	mss = nskb->dev->mtu - (headln - skb_network_offset(nskb));
	skb_shinfo(nskb)->gso_size = 0;
	if (data_len > mss) {
		skb_shinfo(nskb)->gso_size = mss;
		skb_shinfo(nskb)->gso_segs = DIV_ROUND_UP(data_len, mss);
	}
	skb_shinfo(nskb)->gso_type = skb_shinfo(skb)->gso_type;

	/* overwrite only the syndrome byte of syndrome_swid, then the
	 * 16-bit first_seq field
	 */
	pet = (struct mlx5e_tls_metadata *)(nskb->data + sizeof(struct ethhdr));
	memcpy(pet, &syndrome, sizeof(syndrome));
	pet->first_seq = htons(tcp_seq);

	/* MLX5 devices don't care about the checksum partial start, offset
	 * and pseudo header
	 */
	nskb->ip_summed = CHECKSUM_PARTIAL;

	/* the original skb is transmitted immediately after */
	nskb->xmit_more = 1;
	nskb->queue_mapping = skb->queue_mapping;
}
162+
163+
/* Handle a TX skb whose TCP sequence does not match the expected one
 * (retransmission / out-of-order):
 *  - if the data precedes the offload start marker, send it unmodified;
 *  - otherwise build and transmit a resync skb carrying the data from
 *    the start of the containing TLS record, then return the original
 *    skb (with metadata added) for normal transmission.
 *
 * Returns the skb to transmit, or NULL if it was dropped (sync lookup
 * failed, mid-record offload impossible, metadata insertion or resync
 * skb allocation failed).  On the resync path, @wqe/@pi are re-fetched
 * after posting the resync skb.
 */
static struct sk_buff *
mlx5e_tls_handle_ooo(struct mlx5e_tls_offload_context *context,
		     struct mlx5e_txqsq *sq, struct sk_buff *skb,
		     struct mlx5e_tx_wqe **wqe,
		     u16 *pi)
{
	u32 tcp_seq = ntohl(tcp_hdr(skb)->seq);
	struct sync_info info;
	struct sk_buff *nskb;
	int linear_len = 0;
	int headln;
	int i;

	sq->stats.tls_ooo++;

	if (mlx5e_tls_get_sync_data(context, tcp_seq, &info))
		/* We might get here if a retransmission reaches the driver
		 * after the relevant record is acked.
		 * It should be safe to drop the packet in this case
		 */
		goto err_out;

	if (unlikely(info.sync_len < 0)) {
		u32 payload;

		headln = skb_transport_offset(skb) + tcp_hdrlen(skb);
		payload = skb->len - headln;
		if (likely(payload <= -info.sync_len))
			/* SKB payload doesn't require offload
			 */
			return skb;

		netdev_err(skb->dev,
			   "Can't offload from the middle of an SKB [seq: %X, offload_seq: %X, end_seq: %X]\n",
			   tcp_seq, tcp_seq + payload + info.sync_len,
			   tcp_seq + payload);
		goto err_out;
	}

	if (unlikely(mlx5e_tls_add_metadata(skb, context->swid)))
		goto err_out;

	/* linear part of the resync skb: copied headers + record seq nr;
	 * record payload is attached below as (referenced) frags
	 */
	headln = skb_transport_offset(skb) + tcp_hdrlen(skb);
	linear_len += headln + sizeof(info.rcd_sn);
	nskb = alloc_skb(linear_len, GFP_ATOMIC);
	if (unlikely(!nskb))
		goto err_out;

	context->expected_seq = tcp_seq + skb->len - headln;
	skb_put(nskb, linear_len);
	for (i = 0; i < info.nr_frags; i++)
		skb_shinfo(nskb)->frags[i] = info.frags[i];

	skb_shinfo(nskb)->nr_frags = info.nr_frags;
	nskb->data_len = info.sync_len;
	nskb->len += info.sync_len;
	sq->stats.tls_resync_bytes += nskb->len;
	mlx5e_tls_complete_sync_skb(skb, nskb, tcp_seq, headln,
				    cpu_to_be64(info.rcd_sn));
	/* post the resync skb on the current WQE, then fetch a fresh WQE
	 * for the original skb
	 */
	mlx5e_sq_xmit(sq, nskb, *wqe, *pi);
	mlx5e_sq_fetch_wqe(sq, wqe, pi);
	return skb;

err_out:
	dev_kfree_skb_any(skb);
	return NULL;
}
230+
231+
/* TLS TX offload hook, called from mlx5e_accel_handle_tx() for every skb
 * on a TLS-enabled SQ.
 *
 * Non-offloaded sockets, pure-ACK (no payload) skbs, and sockets whose
 * TLS context belongs to another netdev pass through unchanged.  For
 * in-order offloaded data the metadata header is inserted and the
 * expected sequence advanced; out-of-order data is diverted to
 * mlx5e_tls_handle_ooo().
 *
 * Returns the (possibly modified) skb to transmit, or NULL if it was
 * consumed/dropped.
 */
struct sk_buff *mlx5e_tls_handle_tx_skb(struct net_device *netdev,
					struct mlx5e_txqsq *sq,
					struct sk_buff *skb,
					struct mlx5e_tx_wqe **wqe,
					u16 *pi)
{
	struct mlx5e_tls_offload_context *context;
	struct tls_context *tls_ctx;
	u32 expected_seq;
	int datalen;
	u32 skb_seq;

	if (!skb->sk || !tls_is_sk_tx_device_offloaded(skb->sk))
		goto out;

	datalen = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
	if (!datalen)
		goto out;

	tls_ctx = tls_get_ctx(skb->sk);
	if (unlikely(tls_ctx->netdev != netdev))
		goto out;

	skb_seq = ntohl(tcp_hdr(skb)->seq);
	context = mlx5e_get_tls_tx_context(tls_ctx);
	expected_seq = context->expected_seq;

	if (unlikely(expected_seq != skb_seq)) {
		/* retransmission or reordering: may transmit a resync skb
		 * and/or drop this one (NULL return)
		 */
		skb = mlx5e_tls_handle_ooo(context, sq, skb, wqe, pi);
		goto out;
	}

	if (unlikely(mlx5e_tls_add_metadata(skb, context->swid))) {
		dev_kfree_skb_any(skb);
		skb = NULL;
		goto out;
	}

	context->expected_seq = skb_seq + datalen;
out:
	return skb;
}

0 commit comments

Comments (0)