@@ -117,12 +117,14 @@
 static int __vsock_bind(struct sock *sk, struct sockaddr_vm *addr);
 static void vsock_sk_destruct(struct sock *sk);
 static int vsock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
+static void vsock_close(struct sock *sk, long timeout);
 
 /* Protocol family. */
 struct proto vsock_proto = {
 	.name = "AF_VSOCK",
 	.owner = THIS_MODULE,
 	.obj_size = sizeof(struct vsock_sock),
+	.close = vsock_close,
 #ifdef CONFIG_BPF_SYSCALL
 	.psock_update_sk_prot = vsock_bpf_update_proto,
 #endif

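The new .close hook matters because of how sockmap takes over a protocol's ops: the psock layer snapshots sk->sk_prot->close, and sock_map_close() later invokes that saved pointer unconditionally (as the comment added in the last hunk below notes), so a proto that leaves .close NULL would oops on close. A self-contained userspace model of that failure mode, using illustrative names rather than the kernel's own:

/* Model of the sockmap close handoff: the close handler is snapshotted
 * and later called without a NULL check, so it must never be left unset.
 * All names here are illustrative; this is not kernel code.
 */
#include <stdio.h>

struct proto_model {
	void (*close)(long timeout);
};

/* Analogue of the dummy vsock_close(): real teardown happens later, in
 * the __vsock_release() path, not here.
 */
static void dummy_close(long timeout)
{
	(void)timeout;
}

int main(void)
{
	struct proto_model vsock = { .close = dummy_close };

	/* sk_psock_init() analogue: save the current close handler. */
	void (*saved_close)(long) = vsock.close;

	/* sock_map_close() analogue: call the saved handler
	 * unconditionally. Had .close been NULL, this would crash.
	 */
	saved_close(0);
	puts("saved_close() returned safely");
	return 0;
}
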
@@ -797,39 +799,37 @@ static bool sock_type_connectible(u16 type)
 
 static void __vsock_release(struct sock *sk, int level)
 {
-	if (sk) {
-		struct sock *pending;
-		struct vsock_sock *vsk;
-
-		vsk = vsock_sk(sk);
-		pending = NULL;	/* Compiler warning. */
+	struct vsock_sock *vsk;
+	struct sock *pending;
 
-		/* When "level" is SINGLE_DEPTH_NESTING, use the nested
-		 * version to avoid the warning "possible recursive locking
-		 * detected". When "level" is 0, lock_sock_nested(sk, level)
-		 * is the same as lock_sock(sk).
-		 */
-		lock_sock_nested(sk, level);
+	vsk = vsock_sk(sk);
+	pending = NULL;	/* Compiler warning. */
 
-		if (vsk->transport)
-			vsk->transport->release(vsk);
-		else if (sock_type_connectible(sk->sk_type))
-			vsock_remove_sock(vsk);
+	/* When "level" is SINGLE_DEPTH_NESTING, use the nested
+	 * version to avoid the warning "possible recursive locking
+	 * detected". When "level" is 0, lock_sock_nested(sk, level)
+	 * is the same as lock_sock(sk).
+	 */
+	lock_sock_nested(sk, level);
 
-		sock_orphan(sk);
-		sk->sk_shutdown = SHUTDOWN_MASK;
+	if (vsk->transport)
+		vsk->transport->release(vsk);
+	else if (sock_type_connectible(sk->sk_type))
+		vsock_remove_sock(vsk);
 
-		skb_queue_purge(&sk->sk_receive_queue);
+	sock_orphan(sk);
+	sk->sk_shutdown = SHUTDOWN_MASK;
 
-		/* Clean up any sockets that never were accepted. */
-		while ((pending = vsock_dequeue_accept(sk)) != NULL) {
-			__vsock_release(pending, SINGLE_DEPTH_NESTING);
-			sock_put(pending);
-		}
+	skb_queue_purge(&sk->sk_receive_queue);
 
-		release_sock(sk);
-		sock_put(sk);
+	/* Clean up any sockets that never were accepted. */
+	while ((pending = vsock_dequeue_accept(sk)) != NULL) {
+		__vsock_release(pending, SINGLE_DEPTH_NESTING);
+		sock_put(pending);
 	}
+
+	release_sock(sk);
+	sock_put(sk);
 }
 
 static void vsock_sk_destruct(struct sock *sk)

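Most of the hunk above is mechanical re-indentation: the if (sk) guard around the whole body is gone, so everything shifts left one level. The NULL check now happens once in vsock_release() (next hunk) before any close logic runs, and the recursive call for pending sockets already guarantees a non-NULL sk via the while condition.
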
@@ -901,9 +901,22 @@ void vsock_data_ready(struct sock *sk)
 }
 EXPORT_SYMBOL_GPL(vsock_data_ready);
 
+/* Dummy callback required by sockmap.
+ * See unconditional call of saved_close() in sock_map_close().
+ */
+static void vsock_close(struct sock *sk, long timeout)
+{
+}
+
 static int vsock_release(struct socket *sock)
 {
-	__vsock_release(sock->sk, 0);
+	struct sock *sk = sock->sk;
+
+	if (!sk)
+		return 0;
+
+	sk->sk_prot->close(sk, 0);
+	__vsock_release(sk, 0);
 	sock->sk = NULL;
 	sock->state = SS_FREE;
 
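Taken together, the three hunks give a coherent close path whether or not the socket ever touched sockmap: a plain vsock socket dispatches sk->sk_prot->close() to the no-op vsock_close() and __vsock_release() does the real work, while a sockmapped socket dispatches to sock_map_close(), which detaches the psock and calls the saved no-op before __vsock_release() runs. A hedged userspace sketch of exercising the sockmap side with libbpf; the CID, port, and the assumption of a reachable vsock listener are illustrative, not from this commit:

/* Hypothetical sketch: put a connected vsock fd into a BPF sockmap and
 * close it, driving the sk_prot->close() path this patch wires up.
 * Assumes libbpf and a vsock peer listening at the example CID/port.
 */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/vm_sockets.h>
#include <bpf/bpf.h>

int main(void)
{
	struct sockaddr_vm addr = {
		.svm_family = AF_VSOCK,
		.svm_cid = 2,		/* example: host CID */
		.svm_port = 1234,	/* example port */
	};
	int map_fd, sock_fd, key = 0;

	map_fd = bpf_map_create(BPF_MAP_TYPE_SOCKMAP, NULL,
				sizeof(int), sizeof(int), 1, NULL);
	if (map_fd < 0)
		return 1;

	sock_fd = socket(AF_VSOCK, SOCK_STREAM, 0);
	if (sock_fd < 0 ||
	    connect(sock_fd, (struct sockaddr *)&addr, sizeof(addr)))
		return 1;

	/* Inserting the socket swaps its proto ops for sockmap's. */
	if (bpf_map_update_elem(map_fd, &key, &sock_fd, BPF_ANY))
		fprintf(stderr, "update: %s\n", strerror(errno));

	/* The interesting part: close() now reaches sock_map_close()
	 * first, then the usual vsock teardown in __vsock_release().
	 */
	close(sock_fd);
	close(map_fd);
	return 0;
}
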