@@ -175,7 +175,7 @@ struct r6040_private {
 	struct r6040_descriptor *tx_ring;
 	dma_addr_t rx_ring_dma;
 	dma_addr_t tx_ring_dma;
-	u16 tx_free_desc, rx_free_desc, phy_addr, phy_mode;
+	u16 tx_free_desc, phy_addr, phy_mode;
 	u16 mcr0, mcr1;
 	u16 switch_sig;
 	struct net_device *dev;
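
The rx_free_desc counter leaves struct r6040_private because the reworked RX path below no longer counts buffered RX descriptors; ownership of each descriptor is tracked directly in bit 15 (0x8000) of its status word. A minimal sketch of that convention, with the DSC_OWNER_MAC name and both helpers being illustrative inventions rather than identifiers from this patch:

	/* Sketch: descriptor ownership lives in the status word itself.
	 * DSC_OWNER_MAC is a hypothetical name for the 0x8000 constant
	 * used throughout the patch.
	 */
	#define DSC_OWNER_MAC	0x8000	/* set: descriptor owned by the MAC */

	static inline int cpu_owns_desc(const struct r6040_descriptor *d)
	{
		/* The CPU may process a descriptor only after the MAC has
		 * cleared the ownership bit on frame completion. */
		return !(d->status & DSC_OWNER_MAC);
	}

	static inline void give_desc_to_mac(struct r6040_descriptor *d)
	{
		/* Hand the descriptor back so the MAC can reuse it for RX. */
		d->status = DSC_OWNER_MAC;
	}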
@@ -291,27 +291,6 @@ static void r6040_init_ring_desc(struct r6040_descriptor *desc_ring,
 	desc->vndescp = desc_ring;
 }
 
-/* Allocate skb buffer for rx descriptor */
-static void r6040_rx_buf_alloc(struct r6040_private *lp, struct net_device *dev)
-{
-	struct r6040_descriptor *descptr;
-
-	descptr = lp->rx_insert_ptr;
-	while (lp->rx_free_desc < RX_DCNT) {
-		descptr->skb_ptr = netdev_alloc_skb(dev, MAX_BUF_SIZE);
-
-		if (!descptr->skb_ptr)
-			break;
-		descptr->buf = cpu_to_le32(pci_map_single(lp->pdev,
-			descptr->skb_ptr->data,
-			MAX_BUF_SIZE, PCI_DMA_FROMDEVICE));
-		descptr->status = 0x8000;
-		descptr = descptr->vndescp;
-		lp->rx_free_desc++;
-	}
-	lp->rx_insert_ptr = descptr;
-}
-
 static void r6040_init_txbufs(struct net_device *dev)
 {
 	struct r6040_private *lp = netdev_priv(dev);
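
The dedicated r6040_rx_buf_alloc() refill pass becomes unnecessary because the reworked r6040_rx() below refills each descriptor in place: it allocates a replacement skb before detaching the filled one, so a failed allocation just recycles the old buffer instead of leaving the ring short. A hedged sketch of that allocate-before-detach swap, with rx_swap_buf() a hypothetical helper and the DMA mapping step omitted:

	/* Sketch only: per-descriptor buffer swap. Returns the filled skb
	 * to pass up the stack, or NULL if the frame must be dropped
	 * because no replacement buffer could be allocated.
	 */
	static struct sk_buff *rx_swap_buf(struct r6040_private *priv,
					   struct r6040_descriptor *descptr)
	{
		struct sk_buff *filled, *new_skb;

		new_skb = netdev_alloc_skb(priv->dev, MAX_BUF_SIZE);
		if (!new_skb) {
			/* Keep the old buffer; the descriptor stays usable. */
			priv->dev->stats.rx_dropped++;
			return NULL;
		}
		filled = descptr->skb_ptr;	/* frame written by the MAC */
		descptr->skb_ptr = new_skb;	/* ring keeps a fresh buffer */
		return filled;
	}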
@@ -556,71 +535,72 @@ static int r6040_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 static int r6040_rx(struct net_device *dev, int limit)
 {
 	struct r6040_private *priv = netdev_priv(dev);
-	int count;
-	void __iomem *ioaddr = priv->base;
+	struct r6040_descriptor *descptr = priv->rx_remove_ptr;
+	struct sk_buff *skb_ptr, *new_skb;
+	int count = 0;
 	u16 err;
 
-	for (count = 0; count < limit; ++count) {
-		struct r6040_descriptor *descptr = priv->rx_remove_ptr;
-		struct sk_buff *skb_ptr;
-
-		descptr = priv->rx_remove_ptr;
-
-		/* Check for errors */
-		err = ioread16(ioaddr + MLSR);
-		if (err & 0x0400)
-			dev->stats.rx_errors++;
-		/* RX FIFO over-run */
-		if (err & 0x8000)
-			dev->stats.rx_fifo_errors++;
-		/* RX descriptor unavailable */
-		if (err & 0x0080)
-			dev->stats.rx_frame_errors++;
-		/* Received packet with length over buffer length */
-		if (err & 0x0020)
-			dev->stats.rx_over_errors++;
-		/* Received packet too long or too short */
-		if (err & (0x0010 | 0x0008))
-			dev->stats.rx_length_errors++;
-		/* Received packet with CRC errors */
-		if (err & 0x0004) {
-			spin_lock(&priv->lock);
-			dev->stats.rx_crc_errors++;
-			spin_unlock(&priv->lock);
-		}
-
-		while (priv->rx_free_desc) {
-			/* No RX packet */
-			if (descptr->status & 0x8000)
-				break;
-			skb_ptr = descptr->skb_ptr;
-			if (!skb_ptr) {
-				printk(KERN_ERR "%s: Inconsistent RX"
-					"descriptor chain\n",
-					dev->name);
-				break;
+	/* Limit not reached and the descriptor belongs to the CPU */
+	while (count < limit && !(descptr->status & 0x8000)) {
+		/* Read the descriptor status */
+		err = descptr->status;
+		/* Global error status set */
+		if (err & 0x0800) {
+			/* RX dribble */
+			if (err & 0x0400)
+				dev->stats.rx_frame_errors++;
+			/* Buffer length exceeded */
+			if (err & 0x0200)
+				dev->stats.rx_length_errors++;
+			/* Packet too long */
+			if (err & 0x0100)
+				dev->stats.rx_length_errors++;
+			/* Packet < 64 bytes */
+			if (err & 0x0080)
+				dev->stats.rx_length_errors++;
+			/* CRC error */
+			if (err & 0x0040) {
+				spin_lock(&priv->lock);
+				dev->stats.rx_crc_errors++;
+				spin_unlock(&priv->lock);
 			}
-			descptr->skb_ptr = NULL;
-			skb_ptr->dev = priv->dev;
-			/* Do not count the CRC */
-			skb_put(skb_ptr, descptr->len - 4);
-			pci_unmap_single(priv->pdev, le32_to_cpu(descptr->buf),
-				MAX_BUF_SIZE, PCI_DMA_FROMDEVICE);
-			skb_ptr->protocol = eth_type_trans(skb_ptr, priv->dev);
-			/* Send to upper layer */
-			netif_receive_skb(skb_ptr);
-			dev->last_rx = jiffies;
-			dev->stats.rx_packets++;
-			dev->stats.rx_bytes += descptr->len;
-			/* To next descriptor */
-			descptr = descptr->vndescp;
-			priv->rx_free_desc--;
+			goto next_descr;
+		}
+
+		/* Packet successfully received */
+		new_skb = netdev_alloc_skb(dev, MAX_BUF_SIZE);
+		if (!new_skb) {
+			dev->stats.rx_dropped++;
+			goto next_descr;
 		}
-		priv->rx_remove_ptr = descptr;
+		skb_ptr = descptr->skb_ptr;
+		skb_ptr->dev = priv->dev;
+
+		/* Do not count the CRC */
+		skb_put(skb_ptr, descptr->len - 4);
+		pci_unmap_single(priv->pdev, le32_to_cpu(descptr->buf),
+			MAX_BUF_SIZE, PCI_DMA_FROMDEVICE);
+		skb_ptr->protocol = eth_type_trans(skb_ptr, priv->dev);
+
+		/* Send to upper layer */
+		netif_receive_skb(skb_ptr);
+		dev->last_rx = jiffies;
+		dev->stats.rx_packets++;
+		dev->stats.rx_bytes += descptr->len - 4;
+
+		/* put new skb into descriptor */
+		descptr->skb_ptr = new_skb;
+		descptr->buf = cpu_to_le32(pci_map_single(priv->pdev,
+			descptr->skb_ptr->data,
+			MAX_BUF_SIZE, PCI_DMA_FROMDEVICE));
+
+next_descr:
+		/* put the descriptor back to the MAC */
+		descptr->status = 0x8000;
+		descptr = descptr->vndescp;
+		count++;
 	}
-	/* Allocate new RX buffer */
-	if (priv->rx_free_desc < RX_DCNT)
-		r6040_rx_buf_alloc(priv, priv->dev);
+	priv->rx_remove_ptr = descptr;
 
 	return count;
 }
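
Two points about the reworked loop: errors are now decoded per packet from the descriptor's own status word instead of from the global MLSR register, and every visited descriptor, errored or dropped ones included, counts against the limit, so the return value is an accurate work figure for a poll budget. The sketch below shows how a NAPI poll handler could consume that return value; r6040_poll() here is a hypothetical reconstruction that assumes a napi_struct member in r6040_private and the netif_rx_complete() API of this kernel generation, and is not part of this patch:

	static int r6040_poll(struct napi_struct *napi, int budget)
	{
		struct r6040_private *priv =
			container_of(napi, struct r6040_private, napi);
		struct net_device *dev = priv->dev;
		int work_done;

		/* r6040_rx() visits at most 'budget' descriptors and returns
		 * how many it actually handled, errored ones included. */
		work_done = r6040_rx(dev, budget);

		if (work_done < budget) {
			/* Ring drained: stop polling and re-enable the
			 * MAC's RX interrupt. */
			netif_rx_complete(dev, napi);
		}
		return work_done;
	}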