  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  */
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
 #include "qed_cxt.h"
 #include "qed_hw.h"
+#include "qed_ll2.h"
 #include "qed_rdma.h"
 #include "qed_reg_addr.h"
 #include "qed_sp.h"
@@ -474,12 +477,214 @@ void qed_iwarp_resc_free(struct qed_hwfn *p_hwfn)
 {
 }
 
+static int
+qed_iwarp_ll2_post_rx(struct qed_hwfn *p_hwfn,
+		      struct qed_iwarp_ll2_buff *buf, u8 handle)
+{
+	int rc;
+
+	rc = qed_ll2_post_rx_buffer(p_hwfn, handle, buf->data_phys_addr,
+				    (u16)buf->buff_size, buf, 1);
+	if (rc) {
+		DP_NOTICE(p_hwfn,
+			  "Failed to repost rx buffer to ll2 rc = %d, handle=%d\n",
+			  rc, handle);
+		dma_free_coherent(&p_hwfn->cdev->pdev->dev, buf->buff_size,
+				  buf->data, buf->data_phys_addr);
+		kfree(buf);
+	}
+
+	return rc;
+}
+
+static void
+qed_iwarp_ll2_comp_syn_pkt(void *cxt, struct qed_ll2_comp_rx_data *data)
+{
+	struct qed_iwarp_ll2_buff *buf = data->cookie;
+	struct qed_hwfn *p_hwfn = cxt;
+
+	if (GET_FIELD(data->parse_flags,
+		      PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED) &&
+	    GET_FIELD(data->parse_flags, PARSING_AND_ERR_FLAGS_L4CHKSMERROR)) {
+		DP_NOTICE(p_hwfn, "Syn packet received with checksum error\n");
+		goto err;
+	}
+
+	/* Process SYN packet - added later on in series */
+
+err:
+	qed_iwarp_ll2_post_rx(p_hwfn, buf,
+			      p_hwfn->p_rdma_info->iwarp.ll2_syn_handle);
+}
+
+static void qed_iwarp_ll2_rel_rx_pkt(void *cxt, u8 connection_handle,
+				     void *cookie, dma_addr_t rx_buf_addr,
+				     bool b_last_packet)
+{
+	struct qed_iwarp_ll2_buff *buffer = cookie;
+	struct qed_hwfn *p_hwfn = cxt;
+
+	dma_free_coherent(&p_hwfn->cdev->pdev->dev, buffer->buff_size,
+			  buffer->data, buffer->data_phys_addr);
+	kfree(buffer);
+}
+
+static void qed_iwarp_ll2_comp_tx_pkt(void *cxt, u8 connection_handle,
+				      void *cookie, dma_addr_t first_frag_addr,
+				      bool b_last_fragment, bool b_last_packet)
+{
+	struct qed_iwarp_ll2_buff *buffer = cookie;
+	struct qed_hwfn *p_hwfn = cxt;
+
+	/* this was originally an rx packet, post it back */
+	qed_iwarp_ll2_post_rx(p_hwfn, buffer, connection_handle);
+}
+
+static void qed_iwarp_ll2_rel_tx_pkt(void *cxt, u8 connection_handle,
+				     void *cookie, dma_addr_t first_frag_addr,
+				     bool b_last_fragment, bool b_last_packet)
+{
+	struct qed_iwarp_ll2_buff *buffer = cookie;
+	struct qed_hwfn *p_hwfn = cxt;
+
+	if (!buffer)
+		return;
+
+	dma_free_coherent(&p_hwfn->cdev->pdev->dev, buffer->buff_size,
+			  buffer->data, buffer->data_phys_addr);
+
+	kfree(buffer);
+}
+
+static int qed_iwarp_ll2_stop(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+	struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
+	int rc = 0;
+
+	if (iwarp_info->ll2_syn_handle != QED_IWARP_HANDLE_INVAL) {
+		rc = qed_ll2_terminate_connection(p_hwfn,
+						  iwarp_info->ll2_syn_handle);
+		if (rc)
+			DP_INFO(p_hwfn, "Failed to terminate syn connection\n");
+
+		qed_ll2_release_connection(p_hwfn, iwarp_info->ll2_syn_handle);
+		iwarp_info->ll2_syn_handle = QED_IWARP_HANDLE_INVAL;
+	}
+
+	qed_llh_remove_mac_filter(p_hwfn,
+				  p_ptt, p_hwfn->p_rdma_info->iwarp.mac_addr);
+	return rc;
+}
+
+static int
+qed_iwarp_ll2_alloc_buffers(struct qed_hwfn *p_hwfn,
+			    int num_rx_bufs, int buff_size, u8 ll2_handle)
+{
+	struct qed_iwarp_ll2_buff *buffer;
+	int rc = 0;
+	int i;
+
+	for (i = 0; i < num_rx_bufs; i++) {
+		buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
+		if (!buffer) {
+			rc = -ENOMEM;
+			break;
+		}
+
+		buffer->data = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+						  buff_size,
+						  &buffer->data_phys_addr,
+						  GFP_KERNEL);
+		if (!buffer->data) {
+			kfree(buffer);
+			rc = -ENOMEM;
+			break;
+		}
+
+		buffer->buff_size = buff_size;
+		rc = qed_iwarp_ll2_post_rx(p_hwfn, buffer, ll2_handle);
+		if (rc)
+			/* buffers will be deallocated by qed_ll2 */
+			break;
+	}
+	return rc;
+}
+
+#define QED_IWARP_MAX_BUF_SIZE(mtu)					   \
+	ALIGN((mtu) + ETH_HLEN + 2 * VLAN_HLEN + 2 + ETH_CACHE_LINE_SIZE, \
+	      ETH_CACHE_LINE_SIZE)
+
+static int
+qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn,
+		    struct qed_rdma_start_in_params *params,
+		    struct qed_ptt *p_ptt)
+{
+	struct qed_iwarp_info *iwarp_info;
+	struct qed_ll2_acquire_data data;
+	struct qed_ll2_cbs cbs;
+	int rc = 0;
+
+	iwarp_info = &p_hwfn->p_rdma_info->iwarp;
+	iwarp_info->ll2_syn_handle = QED_IWARP_HANDLE_INVAL;
+
+	iwarp_info->max_mtu = params->max_mtu;
+
+	ether_addr_copy(p_hwfn->p_rdma_info->iwarp.mac_addr, params->mac_addr);
+
+	rc = qed_llh_add_mac_filter(p_hwfn, p_ptt, params->mac_addr);
+	if (rc)
+		return rc;
+
+	/* Start SYN connection */
+	cbs.rx_comp_cb = qed_iwarp_ll2_comp_syn_pkt;
+	cbs.rx_release_cb = qed_iwarp_ll2_rel_rx_pkt;
+	cbs.tx_comp_cb = qed_iwarp_ll2_comp_tx_pkt;
+	cbs.tx_release_cb = qed_iwarp_ll2_rel_tx_pkt;
+	cbs.cookie = p_hwfn;
+
+	memset(&data, 0, sizeof(data));
+	data.input.conn_type = QED_LL2_TYPE_IWARP;
+	data.input.mtu = QED_IWARP_MAX_SYN_PKT_SIZE;
+	data.input.rx_num_desc = QED_IWARP_LL2_SYN_RX_SIZE;
+	data.input.tx_num_desc = QED_IWARP_LL2_SYN_TX_SIZE;
+	data.input.tx_max_bds_per_packet = 1;	/* will never be fragmented */
+	data.input.tx_tc = PKT_LB_TC;
+	data.input.tx_dest = QED_LL2_TX_DEST_LB;
+	data.p_connection_handle = &iwarp_info->ll2_syn_handle;
+	data.cbs = &cbs;
+
+	rc = qed_ll2_acquire_connection(p_hwfn, &data);
+	if (rc) {
+		DP_NOTICE(p_hwfn, "Failed to acquire LL2 connection\n");
+		qed_llh_remove_mac_filter(p_hwfn, p_ptt, params->mac_addr);
+		return rc;
+	}
+
+	rc = qed_ll2_establish_connection(p_hwfn, iwarp_info->ll2_syn_handle);
+	if (rc) {
+		DP_NOTICE(p_hwfn, "Failed to establish LL2 connection\n");
+		goto err;
+	}
+
+	rc = qed_iwarp_ll2_alloc_buffers(p_hwfn,
+					 QED_IWARP_LL2_SYN_RX_SIZE,
+					 QED_IWARP_MAX_SYN_PKT_SIZE,
+					 iwarp_info->ll2_syn_handle);
+	if (rc)
+		goto err;
+
+	return rc;
+err:
+	qed_iwarp_ll2_stop(p_hwfn, p_ptt);
+
+	return rc;
+}
+
 int qed_iwarp_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
 		    struct qed_rdma_start_in_params *params)
 {
 	struct qed_iwarp_info *iwarp_info;
 	u32 rcv_wnd_size;
-	int rc = 0;
 
 	iwarp_info = &p_hwfn->p_rdma_info->iwarp;
 
@@ -499,7 +704,7 @@ int qed_iwarp_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
 	qed_spq_register_async_cb(p_hwfn, PROTOCOLID_IWARP,
 				  qed_iwarp_async_event);
 
-	return rc;
+	return qed_iwarp_ll2_start(p_hwfn, params, p_ptt);
 }
 
 int qed_iwarp_stop(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
@@ -512,7 +717,7 @@ int qed_iwarp_stop(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 
 	qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_IWARP);
 
-	return 0;
+	return qed_iwarp_ll2_stop(p_hwfn, p_ptt);
 }
 
 static int qed_iwarp_async_event(struct qed_hwfn *p_hwfn,
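
For reference, a minimal sketch of the rx buffer descriptor the new LL2 callbacks pass around, inferred from its usage in the hunk above (the actual definition is added to qed_iwarp.h elsewhere in this series; treat the field layout here as an assumption):

struct qed_iwarp_ll2_buff {
	void *data;			/* CPU address from dma_alloc_coherent() */
	dma_addr_t data_phys_addr;	/* DMA address posted via qed_ll2_post_rx_buffer() */
	u32 buff_size;			/* allocation size, e.g. QED_IWARP_MAX_SYN_PKT_SIZE */
};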
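As a sanity check on QED_IWARP_MAX_BUF_SIZE(), here is a standalone calculation of the resulting rx buffer sizes. This is illustration only, not driver code; it assumes ETH_CACHE_LINE_SIZE is 64 as defined in the qed headers and uses the standard ETH_HLEN (14) and VLAN_HLEN (4) values:

/* Standalone sketch: expected rx buffer size per MTU. */
#include <stdio.h>

#define ETH_HLEN		14
#define VLAN_HLEN		4
#define ETH_CACHE_LINE_SIZE	64	/* assumption: matches the qed definition */
#define ALIGN(x, a)		(((x) + (a) - 1) & ~((a) - 1))
#define QED_IWARP_MAX_BUF_SIZE(mtu)					   \
	ALIGN((mtu) + ETH_HLEN + 2 * VLAN_HLEN + 2 + ETH_CACHE_LINE_SIZE, \
	      ETH_CACHE_LINE_SIZE)

int main(void)
{
	/* 1500 + 14 + 8 + 2 + 64 = 1588, rounded up to 1600 */
	printf("mtu 1500 -> %d bytes\n", QED_IWARP_MAX_BUF_SIZE(1500));
	/* 9000 + 14 + 8 + 2 + 64 = 9088, already a multiple of 64 */
	printf("mtu 9000 -> %d bytes\n", QED_IWARP_MAX_BUF_SIZE(9000));
	return 0;
}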