Skip to content

Commit 2760f5a

Browse files
committed
Merge branch 'aquantia-fixes'
Igor Russkikh says: ==================== aquantia: Atlantic driver bugfixes and improvements This series contains bugfixes for aQuantia Atlantic driver. Changes in v2: Review comments applied: - min_mtu set removed - extra mtu range check is removed - err codes handling improved ==================== Signed-off-by: David S. Miller <[email protected]>
2 parents 62b982e + c754568 commit 2760f5a

File tree

8 files changed

+129
-98
lines changed

8 files changed

+129
-98
lines changed

drivers/net/ethernet/aquantia/atlantic/aq_cfg.h

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -51,6 +51,10 @@
5151

5252
#define AQ_CFG_SKB_FRAGS_MAX 32U
5353

54+
/* Number of descriptors available in one ring to resume this ring queue
55+
*/
56+
#define AQ_CFG_RESTART_DESC_THRES (AQ_CFG_SKB_FRAGS_MAX * 2)
57+
5458
#define AQ_CFG_NAPI_WEIGHT 64U
5559

5660
#define AQ_CFG_MULTICAST_ADDRESS_MAX 32U

drivers/net/ethernet/aquantia/atlantic/aq_nic.c

Lines changed: 68 additions & 77 deletions
Original file line numberDiff line numberDiff line change
@@ -119,6 +119,35 @@ int aq_nic_cfg_start(struct aq_nic_s *self)
119119
return 0;
120120
}
121121

122+
static int aq_nic_update_link_status(struct aq_nic_s *self)
123+
{
124+
int err = self->aq_hw_ops.hw_get_link_status(self->aq_hw);
125+
126+
if (err)
127+
return err;
128+
129+
if (self->link_status.mbps != self->aq_hw->aq_link_status.mbps)
130+
pr_info("%s: link change old %d new %d\n",
131+
AQ_CFG_DRV_NAME, self->link_status.mbps,
132+
self->aq_hw->aq_link_status.mbps);
133+
134+
self->link_status = self->aq_hw->aq_link_status;
135+
if (!netif_carrier_ok(self->ndev) && self->link_status.mbps) {
136+
aq_utils_obj_set(&self->header.flags,
137+
AQ_NIC_FLAG_STARTED);
138+
aq_utils_obj_clear(&self->header.flags,
139+
AQ_NIC_LINK_DOWN);
140+
netif_carrier_on(self->ndev);
141+
netif_tx_wake_all_queues(self->ndev);
142+
}
143+
if (netif_carrier_ok(self->ndev) && !self->link_status.mbps) {
144+
netif_carrier_off(self->ndev);
145+
netif_tx_disable(self->ndev);
146+
aq_utils_obj_set(&self->header.flags, AQ_NIC_LINK_DOWN);
147+
}
148+
return 0;
149+
}
150+
122151
static void aq_nic_service_timer_cb(unsigned long param)
123152
{
124153
struct aq_nic_s *self = (struct aq_nic_s *)param;
@@ -131,26 +160,13 @@ static void aq_nic_service_timer_cb(unsigned long param)
131160
if (aq_utils_obj_test(&self->header.flags, AQ_NIC_FLAGS_IS_NOT_READY))
132161
goto err_exit;
133162

134-
err = self->aq_hw_ops.hw_get_link_status(self->aq_hw);
135-
if (err < 0)
163+
err = aq_nic_update_link_status(self);
164+
if (err)
136165
goto err_exit;
137166

138-
self->link_status = self->aq_hw->aq_link_status;
139-
140167
self->aq_hw_ops.hw_interrupt_moderation_set(self->aq_hw,
141168
self->aq_nic_cfg.is_interrupt_moderation);
142169

143-
if (self->link_status.mbps) {
144-
aq_utils_obj_set(&self->header.flags,
145-
AQ_NIC_FLAG_STARTED);
146-
aq_utils_obj_clear(&self->header.flags,
147-
AQ_NIC_LINK_DOWN);
148-
netif_carrier_on(self->ndev);
149-
} else {
150-
netif_carrier_off(self->ndev);
151-
aq_utils_obj_set(&self->header.flags, AQ_NIC_LINK_DOWN);
152-
}
153-
154170
memset(&stats_rx, 0U, sizeof(struct aq_ring_stats_rx_s));
155171
memset(&stats_tx, 0U, sizeof(struct aq_ring_stats_tx_s));
156172
for (i = AQ_DIMOF(self->aq_vec); i--;) {
@@ -214,7 +230,6 @@ struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops,
214230
SET_NETDEV_DEV(ndev, dev);
215231

216232
ndev->if_port = port;
217-
ndev->min_mtu = ETH_MIN_MTU;
218233
self->ndev = ndev;
219234

220235
self->aq_pci_func = aq_pci_func;
@@ -241,7 +256,6 @@ struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops,
241256
int aq_nic_ndev_register(struct aq_nic_s *self)
242257
{
243258
int err = 0;
244-
unsigned int i = 0U;
245259

246260
if (!self->ndev) {
247261
err = -EINVAL;
@@ -263,8 +277,7 @@ int aq_nic_ndev_register(struct aq_nic_s *self)
263277

264278
netif_carrier_off(self->ndev);
265279

266-
for (i = AQ_CFG_VECS_MAX; i--;)
267-
aq_nic_ndev_queue_stop(self, i);
280+
netif_tx_disable(self->ndev);
268281

269282
err = register_netdev(self->ndev);
270283
if (err < 0)
@@ -283,6 +296,7 @@ int aq_nic_ndev_init(struct aq_nic_s *self)
283296
self->ndev->features = aq_hw_caps->hw_features;
284297
self->ndev->priv_flags = aq_hw_caps->hw_priv_flags;
285298
self->ndev->mtu = aq_nic_cfg->mtu - ETH_HLEN;
299+
self->ndev->max_mtu = self->aq_hw_caps.mtu - ETH_FCS_LEN - ETH_HLEN;
286300

287301
return 0;
288302
}
@@ -318,12 +332,8 @@ struct aq_nic_s *aq_nic_alloc_hot(struct net_device *ndev)
318332
err = -EINVAL;
319333
goto err_exit;
320334
}
321-
if (netif_running(ndev)) {
322-
unsigned int i;
323-
324-
for (i = AQ_CFG_VECS_MAX; i--;)
325-
netif_stop_subqueue(ndev, i);
326-
}
335+
if (netif_running(ndev))
336+
netif_tx_disable(ndev);
327337

328338
for (self->aq_vecs = 0; self->aq_vecs < self->aq_nic_cfg.vecs;
329339
self->aq_vecs++) {
@@ -383,16 +393,6 @@ int aq_nic_init(struct aq_nic_s *self)
383393
return err;
384394
}
385395

386-
void aq_nic_ndev_queue_start(struct aq_nic_s *self, unsigned int idx)
387-
{
388-
netif_start_subqueue(self->ndev, idx);
389-
}
390-
391-
void aq_nic_ndev_queue_stop(struct aq_nic_s *self, unsigned int idx)
392-
{
393-
netif_stop_subqueue(self->ndev, idx);
394-
}
395-
396396
int aq_nic_start(struct aq_nic_s *self)
397397
{
398398
struct aq_vec_s *aq_vec = NULL;
@@ -451,10 +451,6 @@ int aq_nic_start(struct aq_nic_s *self)
451451
goto err_exit;
452452
}
453453

454-
for (i = 0U, aq_vec = self->aq_vec[0];
455-
self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
456-
aq_nic_ndev_queue_start(self, i);
457-
458454
err = netif_set_real_num_tx_queues(self->ndev, self->aq_vecs);
459455
if (err < 0)
460456
goto err_exit;
@@ -463,6 +459,8 @@ int aq_nic_start(struct aq_nic_s *self)
463459
if (err < 0)
464460
goto err_exit;
465461

462+
netif_tx_start_all_queues(self->ndev);
463+
466464
err_exit:
467465
return err;
468466
}
@@ -475,6 +473,7 @@ static unsigned int aq_nic_map_skb(struct aq_nic_s *self,
475473
unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
476474
unsigned int frag_count = 0U;
477475
unsigned int dx = ring->sw_tail;
476+
struct aq_ring_buff_s *first = NULL;
478477
struct aq_ring_buff_s *dx_buff = &ring->buff_ring[dx];
479478

480479
if (unlikely(skb_is_gso(skb))) {
@@ -485,6 +484,7 @@ static unsigned int aq_nic_map_skb(struct aq_nic_s *self,
485484
dx_buff->len_l4 = tcp_hdrlen(skb);
486485
dx_buff->mss = skb_shinfo(skb)->gso_size;
487486
dx_buff->is_txc = 1U;
487+
dx_buff->eop_index = 0xffffU;
488488

489489
dx_buff->is_ipv6 =
490490
(ip_hdr(skb)->version == 6) ? 1U : 0U;
@@ -504,6 +504,7 @@ static unsigned int aq_nic_map_skb(struct aq_nic_s *self,
504504
if (unlikely(dma_mapping_error(aq_nic_get_dev(self), dx_buff->pa)))
505505
goto exit;
506506

507+
first = dx_buff;
507508
dx_buff->len_pkt = skb->len;
508509
dx_buff->is_sop = 1U;
509510
dx_buff->is_mapped = 1U;
@@ -532,40 +533,46 @@ static unsigned int aq_nic_map_skb(struct aq_nic_s *self,
532533

533534
for (; nr_frags--; ++frag_count) {
534535
unsigned int frag_len = 0U;
536+
unsigned int buff_offset = 0U;
537+
unsigned int buff_size = 0U;
535538
dma_addr_t frag_pa;
536539
skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_count];
537540

538541
frag_len = skb_frag_size(frag);
539-
frag_pa = skb_frag_dma_map(aq_nic_get_dev(self), frag, 0,
540-
frag_len, DMA_TO_DEVICE);
541542

542-
if (unlikely(dma_mapping_error(aq_nic_get_dev(self), frag_pa)))
543-
goto mapping_error;
543+
while (frag_len) {
544+
if (frag_len > AQ_CFG_TX_FRAME_MAX)
545+
buff_size = AQ_CFG_TX_FRAME_MAX;
546+
else
547+
buff_size = frag_len;
548+
549+
frag_pa = skb_frag_dma_map(aq_nic_get_dev(self),
550+
frag,
551+
buff_offset,
552+
buff_size,
553+
DMA_TO_DEVICE);
554+
555+
if (unlikely(dma_mapping_error(aq_nic_get_dev(self),
556+
frag_pa)))
557+
goto mapping_error;
544558

545-
while (frag_len > AQ_CFG_TX_FRAME_MAX) {
546559
dx = aq_ring_next_dx(ring, dx);
547560
dx_buff = &ring->buff_ring[dx];
548561

549562
dx_buff->flags = 0U;
550-
dx_buff->len = AQ_CFG_TX_FRAME_MAX;
563+
dx_buff->len = buff_size;
551564
dx_buff->pa = frag_pa;
552565
dx_buff->is_mapped = 1U;
566+
dx_buff->eop_index = 0xffffU;
567+
568+
frag_len -= buff_size;
569+
buff_offset += buff_size;
553570

554-
frag_len -= AQ_CFG_TX_FRAME_MAX;
555-
frag_pa += AQ_CFG_TX_FRAME_MAX;
556571
++ret;
557572
}
558-
559-
dx = aq_ring_next_dx(ring, dx);
560-
dx_buff = &ring->buff_ring[dx];
561-
562-
dx_buff->flags = 0U;
563-
dx_buff->len = frag_len;
564-
dx_buff->pa = frag_pa;
565-
dx_buff->is_mapped = 1U;
566-
++ret;
567573
}
568574

575+
first->eop_index = dx;
569576
dx_buff->is_eop = 1U;
570577
dx_buff->skb = skb;
571578
goto exit;
@@ -602,7 +609,6 @@ int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb)
602609
unsigned int vec = skb->queue_mapping % self->aq_nic_cfg.vecs;
603610
unsigned int tc = 0U;
604611
int err = NETDEV_TX_OK;
605-
bool is_nic_in_bad_state;
606612

607613
frags = skb_shinfo(skb)->nr_frags + 1;
608614

@@ -613,13 +619,10 @@ int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb)
613619
goto err_exit;
614620
}
615621

616-
is_nic_in_bad_state = aq_utils_obj_test(&self->header.flags,
617-
AQ_NIC_FLAGS_IS_NOT_TX_READY) ||
618-
(aq_ring_avail_dx(ring) <
619-
AQ_CFG_SKB_FRAGS_MAX);
622+
aq_ring_update_queue_state(ring);
620623

621-
if (is_nic_in_bad_state) {
622-
aq_nic_ndev_queue_stop(self, ring->idx);
624+
/* Above status update may stop the queue. Check this. */
625+
if (__netif_subqueue_stopped(self->ndev, ring->idx)) {
623626
err = NETDEV_TX_BUSY;
624627
goto err_exit;
625628
}
@@ -631,9 +634,6 @@ int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb)
631634
ring,
632635
frags);
633636
if (err >= 0) {
634-
if (aq_ring_avail_dx(ring) < AQ_CFG_SKB_FRAGS_MAX + 1)
635-
aq_nic_ndev_queue_stop(self, ring->idx);
636-
637637
++ring->stats.tx.packets;
638638
ring->stats.tx.bytes += skb->len;
639639
}
@@ -693,16 +693,9 @@ int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev)
693693

694694
int aq_nic_set_mtu(struct aq_nic_s *self, int new_mtu)
695695
{
696-
int err = 0;
697-
698-
if (new_mtu > self->aq_hw_caps.mtu) {
699-
err = -EINVAL;
700-
goto err_exit;
701-
}
702696
self->aq_nic_cfg.mtu = new_mtu;
703697

704-
err_exit:
705-
return err;
698+
return 0;
706699
}
707700

708701
int aq_nic_set_mac(struct aq_nic_s *self, struct net_device *ndev)
@@ -905,9 +898,7 @@ int aq_nic_stop(struct aq_nic_s *self)
905898
struct aq_vec_s *aq_vec = NULL;
906899
unsigned int i = 0U;
907900

908-
for (i = 0U, aq_vec = self->aq_vec[0];
909-
self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
910-
aq_nic_ndev_queue_stop(self, i);
901+
netif_tx_disable(self->ndev);
911902

912903
del_timer_sync(&self->service_timer);
913904

drivers/net/ethernet/aquantia/atlantic/aq_nic.h

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -83,8 +83,6 @@ struct net_device *aq_nic_get_ndev(struct aq_nic_s *self);
8383
int aq_nic_init(struct aq_nic_s *self);
8484
int aq_nic_cfg_start(struct aq_nic_s *self);
8585
int aq_nic_ndev_register(struct aq_nic_s *self);
86-
void aq_nic_ndev_queue_start(struct aq_nic_s *self, unsigned int idx);
87-
void aq_nic_ndev_queue_stop(struct aq_nic_s *self, unsigned int idx);
8886
void aq_nic_ndev_free(struct aq_nic_s *self);
8987
int aq_nic_start(struct aq_nic_s *self);
9088
int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb);

drivers/net/ethernet/aquantia/atlantic/aq_ring.c

Lines changed: 45 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -104,6 +104,38 @@ int aq_ring_init(struct aq_ring_s *self)
104104
return 0;
105105
}
106106

107+
static inline bool aq_ring_dx_in_range(unsigned int h, unsigned int i,
108+
unsigned int t)
109+
{
110+
return (h < t) ? ((h < i) && (i < t)) : ((h < i) || (i < t));
111+
}
112+
113+
void aq_ring_update_queue_state(struct aq_ring_s *ring)
114+
{
115+
if (aq_ring_avail_dx(ring) <= AQ_CFG_SKB_FRAGS_MAX)
116+
aq_ring_queue_stop(ring);
117+
else if (aq_ring_avail_dx(ring) > AQ_CFG_RESTART_DESC_THRES)
118+
aq_ring_queue_wake(ring);
119+
}
120+
121+
void aq_ring_queue_wake(struct aq_ring_s *ring)
122+
{
123+
struct net_device *ndev = aq_nic_get_ndev(ring->aq_nic);
124+
125+
if (__netif_subqueue_stopped(ndev, ring->idx)) {
126+
netif_wake_subqueue(ndev, ring->idx);
127+
ring->stats.tx.queue_restarts++;
128+
}
129+
}
130+
131+
void aq_ring_queue_stop(struct aq_ring_s *ring)
132+
{
133+
struct net_device *ndev = aq_nic_get_ndev(ring->aq_nic);
134+
135+
if (!__netif_subqueue_stopped(ndev, ring->idx))
136+
netif_stop_subqueue(ndev, ring->idx);
137+
}
138+
107139
void aq_ring_tx_clean(struct aq_ring_s *self)
108140
{
109141
struct device *dev = aq_nic_get_dev(self->aq_nic);
@@ -113,23 +145,28 @@ void aq_ring_tx_clean(struct aq_ring_s *self)
113145
struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head];
114146

115147
if (likely(buff->is_mapped)) {
116-
if (unlikely(buff->is_sop))
148+
if (unlikely(buff->is_sop)) {
149+
if (!buff->is_eop &&
150+
buff->eop_index != 0xffffU &&
151+
(!aq_ring_dx_in_range(self->sw_head,
152+
buff->eop_index,
153+
self->hw_head)))
154+
break;
155+
117156
dma_unmap_single(dev, buff->pa, buff->len,
118157
DMA_TO_DEVICE);
119-
else
158+
} else {
120159
dma_unmap_page(dev, buff->pa, buff->len,
121160
DMA_TO_DEVICE);
161+
}
122162
}
123163

124164
if (unlikely(buff->is_eop))
125165
dev_kfree_skb_any(buff->skb);
126-
}
127-
}
128166

129-
static inline unsigned int aq_ring_dx_in_range(unsigned int h, unsigned int i,
130-
unsigned int t)
131-
{
132-
return (h < t) ? ((h < i) && (i < t)) : ((h < i) || (i < t));
167+
buff->pa = 0U;
168+
buff->eop_index = 0xffffU;
169+
}
133170
}
134171

135172
#define AQ_SKB_ALIGN SKB_DATA_ALIGN(sizeof(struct skb_shared_info))

0 commit comments

Comments
 (0)