@@ -134,11 +134,9 @@ static int smc_release(struct socket *sock)
 	smc = smc_sk(sk);
 
 	/* cleanup for a dangling non-blocking connect */
-	if (smc->connect_info && sk->sk_state == SMC_INIT)
+	if (smc->connect_nonblock && sk->sk_state == SMC_INIT)
 		tcp_abort(smc->clcsock->sk, ECONNABORTED);
 	flush_work(&smc->connect_work);
-	kfree(smc->connect_info);
-	smc->connect_info = NULL;
 
 	if (sk->sk_state == SMC_LISTEN)
 		/* smc_close_non_accepted() is called and acquires
@@ -452,6 +450,7 @@ static int smc_connect_fallback(struct smc_sock *smc, int reason_code)
 	smc->use_fallback = true;
 	smc->fallback_rsn = reason_code;
 	smc_copy_sock_settings_to_clc(smc);
+	smc->connect_nonblock = 0;
 	if (smc->sk.sk_state == SMC_INIT)
 		smc->sk.sk_state = SMC_ACTIVE;
 	return 0;
@@ -491,6 +490,7 @@ static int smc_connect_abort(struct smc_sock *smc, int reason_code,
 		mutex_unlock(&smc_client_lgr_pending);
 
 	smc_conn_free(&smc->conn);
+	smc->connect_nonblock = 0;
 	return reason_code;
 }
 
@@ -633,6 +633,7 @@ static int smc_connect_rdma(struct smc_sock *smc,
 	mutex_unlock(&smc_client_lgr_pending);
 
 	smc_copy_sock_settings_to_clc(smc);
+	smc->connect_nonblock = 0;
 	if (smc->sk.sk_state == SMC_INIT)
 		smc->sk.sk_state = SMC_ACTIVE;
 
@@ -671,6 +672,7 @@ static int smc_connect_ism(struct smc_sock *smc,
 	mutex_unlock(&smc_server_lgr_pending);
 
 	smc_copy_sock_settings_to_clc(smc);
+	smc->connect_nonblock = 0;
 	if (smc->sk.sk_state == SMC_INIT)
 		smc->sk.sk_state = SMC_ACTIVE;
 
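
Note: the fallback, abort, RDMA and ISM paths above all clear connect_nonblock once the connect attempt reaches a terminal state, so a later connect() on the socket is accepted again. A minimal sketch of that invariant as a helper; smc_connect_done() is a hypothetical name used only for illustration, not something this patch adds:

/* Hypothetical helper (not part of the patch): the pending non-blocking
 * connect has reached a terminal state, so allow a future connect().
 */
static inline void smc_connect_done(struct smc_sock *smc)
{
        smc->connect_nonblock = 0;
}
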
@@ -756,17 +758,30 @@ static void smc_connect_work(struct work_struct *work)
 {
 	struct smc_sock *smc = container_of(work, struct smc_sock,
 					    connect_work);
-	int rc;
+	long timeo = smc->sk.sk_sndtimeo;
+	int rc = 0;
 
-	lock_sock(&smc->sk);
-	rc = kernel_connect(smc->clcsock, &smc->connect_info->addr,
-			    smc->connect_info->alen, smc->connect_info->flags);
+	if (!timeo)
+		timeo = MAX_SCHEDULE_TIMEOUT;
+	lock_sock(smc->clcsock->sk);
 	if (smc->clcsock->sk->sk_err) {
 		smc->sk.sk_err = smc->clcsock->sk->sk_err;
-		goto out;
-	}
-	if (rc < 0) {
-		smc->sk.sk_err = -rc;
+	} else if ((1 << smc->clcsock->sk->sk_state) &
+					(TCPF_SYN_SENT | TCP_SYN_RECV)) {
+		rc = sk_stream_wait_connect(smc->clcsock->sk, &timeo);
+		if ((rc == -EPIPE) &&
+		    ((1 << smc->clcsock->sk->sk_state) &
+					(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)))
+			rc = 0;
+	}
+	release_sock(smc->clcsock->sk);
+	lock_sock(&smc->sk);
+	if (rc != 0 || smc->sk.sk_err) {
+		smc->sk.sk_state = SMC_CLOSED;
+		if (rc == -EPIPE || rc == -EAGAIN)
+			smc->sk.sk_err = EPIPE;
+		else if (signal_pending(current))
+			smc->sk.sk_err = -sock_intr_errno(timeo);
 		goto out;
 	}
 
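
Note: the state test above relies on the kernel convention that each TCPF_* flag is 1 shifted by the corresponding TCP_* state number, so (1 << sk_state) & mask is a set-membership check. A small standalone C program illustrating the idiom; the enum values mirror include/net/tcp_states.h and the program is a demonstration only, not kernel code:

#include <stdio.h>

/* values mirror include/net/tcp_states.h */
enum { TCP_ESTABLISHED = 1, TCP_SYN_SENT, TCP_SYN_RECV, TCP_FIN_WAIT1,
       TCP_FIN_WAIT2, TCP_TIME_WAIT, TCP_CLOSE, TCP_CLOSE_WAIT };

#define TCPF_ESTABLISHED (1 << TCP_ESTABLISHED)
#define TCPF_SYN_SENT    (1 << TCP_SYN_SENT)
#define TCPF_SYN_RECV    (1 << TCP_SYN_RECV)
#define TCPF_CLOSE_WAIT  (1 << TCP_CLOSE_WAIT)

/* same shape as the check in smc_connect_work(): is the TCP handshake
 * still in flight?
 */
static int handshake_in_flight(int sk_state)
{
        return (1 << sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV);
}

int main(void)
{
        printf("SYN_SENT    in flight: %d\n", !!handshake_in_flight(TCP_SYN_SENT));
        printf("ESTABLISHED in flight: %d\n", !!handshake_in_flight(TCP_ESTABLISHED));
        return 0;
}
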
@@ -779,8 +794,6 @@ static void smc_connect_work(struct work_struct *work)
 		smc->sk.sk_state_change(&smc->sk);
 	else
 		smc->sk.sk_write_space(&smc->sk);
-	kfree(smc->connect_info);
-	smc->connect_info = NULL;
 	release_sock(&smc->sk);
 }
 
@@ -813,26 +826,18 @@ static int smc_connect(struct socket *sock, struct sockaddr *addr,
 
 	smc_copy_sock_settings_to_clc(smc);
 	tcp_sk(smc->clcsock->sk)->syn_smc = 1;
+	if (smc->connect_nonblock) {
+		rc = -EALREADY;
+		goto out;
+	}
+	rc = kernel_connect(smc->clcsock, addr, alen, flags);
+	if (rc && rc != -EINPROGRESS)
+		goto out;
 	if (flags & O_NONBLOCK) {
-		if (smc->connect_info) {
-			rc = -EALREADY;
-			goto out;
-		}
-		smc->connect_info = kzalloc(alen + 2 * sizeof(int), GFP_KERNEL);
-		if (!smc->connect_info) {
-			rc = -ENOMEM;
-			goto out;
-		}
-		smc->connect_info->alen = alen;
-		smc->connect_info->flags = flags ^ O_NONBLOCK;
-		memcpy(&smc->connect_info->addr, addr, alen);
-		schedule_work(&smc->connect_work);
+		if (schedule_work(&smc->connect_work))
+			smc->connect_nonblock = 1;
 		rc = -EINPROGRESS;
 	} else {
-		rc = kernel_connect(smc->clcsock, addr, alen, flags);
-		if (rc)
-			goto out;
-
 		rc = __smc_connect(smc);
 		if (rc < 0)
 			goto out;
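
Note: from userspace the reworked path above behaves like an ordinary non-blocking connect: the first connect() returns -1 with EINPROGRESS while connect_work runs, and a second connect() issued while that attempt is still pending hits the new connect_nonblock check and fails with EALREADY. A hedged sketch, assuming AF_SMC and SMCPROTO_SMC as defined in linux/smc.h and a placeholder peer address:

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <sys/socket.h>

#ifndef AF_SMC
#define AF_SMC 43		/* from linux/smc.h */
#endif
#ifndef SMCPROTO_SMC
#define SMCPROTO_SMC 0		/* SMC over IPv4 */
#endif

int main(void)
{
        /* placeholder peer; SMC sockets take an ordinary sockaddr_in */
        struct sockaddr_in peer = { .sin_family = AF_INET,
                                    .sin_port = htons(12345) };
        int fd = socket(AF_SMC, SOCK_STREAM, SMCPROTO_SMC);

        if (fd < 0 || inet_pton(AF_INET, "192.0.2.1", &peer.sin_addr) != 1)
                return 1;
        fcntl(fd, F_SETFL, fcntl(fd, F_GETFL, 0) | O_NONBLOCK);

        if (connect(fd, (struct sockaddr *)&peer, sizeof(peer)) < 0 &&
            errno == EINPROGRESS)
                printf("connect in progress, handled by connect_work\n");

        /* timing permitting, a second connect() while the first is
         * pending reports EALREADY
         */
        if (connect(fd, (struct sockaddr *)&peer, sizeof(peer)) < 0)
                printf("second connect: %s\n", strerror(errno));

        close(fd);
        return 0;
}
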
@@ -1571,8 +1576,8 @@ static __poll_t smc_poll(struct file *file, struct socket *sock,
 			poll_table *wait)
 {
 	struct sock *sk = sock->sk;
-	__poll_t mask = 0;
 	struct smc_sock *smc;
+	__poll_t mask = 0;
 
 	if (!sk)
 		return EPOLLNVAL;
@@ -1582,8 +1587,6 @@ static __poll_t smc_poll(struct file *file, struct socket *sock,
 		/* delegate to CLC child sock */
 		mask = smc->clcsock->ops->poll(file, smc->clcsock, wait);
 		sk->sk_err = smc->clcsock->sk->sk_err;
-		if (sk->sk_err)
-			mask |= EPOLLERR;
 	} else {
 		if (sk->sk_state != SMC_CLOSED)
 			sock_poll_wait(file, sock, wait);
@@ -1594,9 +1597,14 @@ static __poll_t smc_poll(struct file *file, struct socket *sock,
 			mask |= EPOLLHUP;
 		if (sk->sk_state == SMC_LISTEN) {
 			/* woken up by sk_data_ready in smc_listen_work() */
-			mask = smc_accept_poll(sk);
+			mask |= smc_accept_poll(sk);
+		} else if (smc->use_fallback) { /* as result of connect_work()*/
+			mask |= smc->clcsock->ops->poll(file, smc->clcsock,
+							wait);
+			sk->sk_err = smc->clcsock->sk->sk_err;
 		} else {
-			if (atomic_read(&smc->conn.sndbuf_space) ||
+			if ((sk->sk_state != SMC_INIT &&
+			     atomic_read(&smc->conn.sndbuf_space)) ||
 			    sk->sk_shutdown & SEND_SHUTDOWN) {
 				mask |= EPOLLOUT | EPOLLWRNORM;
 			} else {
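
Note: the poll() changes above make a fallback TCP connect visible through the SMC socket (EPOLLOUT, with sk_err propagated from the clcsock), so the usual non-blocking connect completion check works. A sketch of that check to pair with the previous example; fd is assumed to be the non-blocking AF_SMC socket with a connect in flight:

#include <errno.h>
#include <poll.h>
#include <sys/socket.h>

/* Wait for the non-blocking connect to finish, then read SO_ERROR to
 * learn its outcome; smc_poll() has copied clcsock->sk->sk_err into
 * sk->sk_err, which SO_ERROR reports (and clears).
 * Returns 0 on success, a negative errno value on failure.
 */
static int wait_for_connect(int fd, int timeout_ms)
{
        struct pollfd pfd = { .fd = fd, .events = POLLOUT };
        int err = 0;
        socklen_t len = sizeof(err);

        if (poll(&pfd, 1, timeout_ms) <= 0)
                return -ETIMEDOUT;
        if (getsockopt(fd, SOL_SOCKET, SO_ERROR, &err, &len) < 0)
                return -errno;
        return err ? -err : 0;
}
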