Line data Source code
1 : // SPDX-License-Identifier: GPL-2.0
2 : /*
3 : * SUCS NET3:
4 : *
5 : * Generic stream handling routines. These are generic for most
6 : * protocols. Even IP. Tonight 8-).
7 : * This is used because TCP, LLC (and others too) all have mostly
8 : * identical sendmsg() and recvmsg() code.
9 : * So we (will) share it here.
10 : *
11 : * Authors: Arnaldo Carvalho de Melo <acme@conectiva.com.br>
12 : * (from old tcp.c code)
13 : * Alan Cox <alan@lxorguk.ukuu.org.uk> (Borrowed comments 8-))
14 : */
15 :
16 : #include <linux/module.h>
17 : #include <linux/sched/signal.h>
18 : #include <linux/net.h>
19 : #include <linux/signal.h>
20 : #include <linux/tcp.h>
21 : #include <linux/wait.h>
22 : #include <net/sock.h>
23 :
24 : /**
25 : * sk_stream_write_space - stream socket write_space callback.
26 : * @sk: socket
27 : *
28 : * Called when output buffer space is freed: if the socket is now writeable, clear SOCK_NOSPACE, wake up writers sleeping in poll()/select() and notify fasync waiters unless the send side has been shut down.
29 : */
30 0 : void sk_stream_write_space(struct sock *sk)
31 : {
32 0 : struct socket *sock = sk->sk_socket;
33 0 : struct socket_wq *wq;
34 :
35 0 : if (__sk_stream_is_writeable(sk, 1) && sock) {
36 0 : clear_bit(SOCK_NOSPACE, &sock->flags);
37 :
38 0 : rcu_read_lock();
39 0 : wq = rcu_dereference(sk->sk_wq);
40 0 : if (skwq_has_sleeper(wq))
41 0 : wake_up_interruptible_poll(&wq->wait, EPOLLOUT |
42 : EPOLLWRNORM | EPOLLWRBAND);
43 0 : if (wq && wq->fasync_list && !(sk->sk_shutdown & SEND_SHUTDOWN))
44 0 : sock_wake_async(wq, SOCK_WAKE_SPACE, POLL_OUT);
45 0 : rcu_read_unlock();
46 : }
47 0 : }
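/*
 * Example (illustrative sketch, not part of this file): a stream protocol
 * installs the routine above as its write_space callback when the socket is
 * initialised, so that freed transmit memory wakes up blocked writers. TCP,
 * for instance, hooks it up in its socket init path. The init function below
 * is hypothetical.
 */
static void example_stream_init_sock(struct sock *sk)
{
	/* Invoked by the core whenever transmit memory is released. */
	sk->sk_write_space = sk_stream_write_space;
}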
48 :
49 : /**
50 : * sk_stream_wait_connect - Wait for a socket to get into the connected state
51 : * @sk: sock to wait on
52 : * @timeo_p: for how long to wait
53 : *
54 : * Must be called with the socket locked.
55 : */
56 0 : int sk_stream_wait_connect(struct sock *sk, long *timeo_p)
57 : {
58 0 : DEFINE_WAIT_FUNC(wait, woken_wake_function);
59 0 : struct task_struct *tsk = current;
60 0 : int done;
61 :
62 0 : do {
63 0 : int err = sock_error(sk);
64 0 : if (err)
65 0 : return err;
66 0 : if ((1 << sk->sk_state) & ~(TCPF_SYN_SENT | TCPF_SYN_RECV))
67 : return -EPIPE;
68 0 : if (!*timeo_p)
69 : return -EAGAIN;
70 0 : if (signal_pending(tsk))
71 0 : return sock_intr_errno(*timeo_p);
72 :
73 0 : add_wait_queue(sk_sleep(sk), &wait);
74 0 : sk->sk_write_pending++;
75 0 : done = sk_wait_event(sk, timeo_p,
76 : !sk->sk_err &&
77 : !((1 << sk->sk_state) &
78 : ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)), &wait);
79 0 : remove_wait_queue(sk_sleep(sk), &wait);
80 0 : sk->sk_write_pending--;
81 0 : } while (!done);
82 : return 0;
83 : }
84 : EXPORT_SYMBOL(sk_stream_wait_connect);
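/*
 * Example (illustrative sketch, not part of this file): the typical use of
 * sk_stream_wait_connect() at the top of a stream protocol's sendmsg(),
 * mirroring tcp_sendmsg(). The surrounding function is hypothetical; the
 * socket is assumed to be locked, as the kerneldoc above requires.
 */
static int example_sendmsg_wait_connect(struct sock *sk, int flags)
{
	long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
	int err;

	/* Data can only be queued on an ESTABLISHED or CLOSE_WAIT socket;
	 * otherwise block (up to the send timeout) for the handshake to
	 * finish, returning an error on failure, timeout or signal.
	 */
	if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) {
		err = sk_stream_wait_connect(sk, &timeo);
		if (err)
			return err;
	}
	return 0;
}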
85 :
86 : /**
87 : * sk_stream_closing - Return non-zero if we still have things to send in our buffers.
88 : * @sk: socket to verify
89 : */
90 0 : static inline int sk_stream_closing(struct sock *sk)
91 : {
92 0 : return (1 << sk->sk_state) &
93 : (TCPF_FIN_WAIT1 | TCPF_CLOSING | TCPF_LAST_ACK);
94 : }
95 :
96 3 : void sk_stream_wait_close(struct sock *sk, long timeout)
97 : {
98 3 : if (timeout) {
99 0 : DEFINE_WAIT_FUNC(wait, woken_wake_function);
100 :
101 0 : add_wait_queue(sk_sleep(sk), &wait);
102 :
103 0 : do {
104 0 : if (sk_wait_event(sk, &timeout, !sk_stream_closing(sk), &wait))
105 : break;
106 0 : } while (!signal_pending(current) && timeout);
107 :
108 0 : remove_wait_queue(sk_sleep(sk), &wait);
109 : }
110 3 : }
111 : EXPORT_SYMBOL(sk_stream_wait_close);
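/*
 * Example (illustrative sketch, not part of this file): a protocol's close()
 * handler can call sk_stream_wait_close() so that, with SO_LINGER, the
 * process sleeps until queued data has been transmitted or the linger
 * timeout expires; with a zero timeout the call returns immediately.
 * tcp_close() follows this pattern. The function below is hypothetical.
 */
static void example_stream_close(struct sock *sk, long timeout)
{
	lock_sock(sk);
	/* ... send FIN / move the socket into a closing state ... */
	sk_stream_wait_close(sk, timeout);
	/* ... continue with teardown ... */
	release_sock(sk);
}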
112 :
113 : /**
114 : * sk_stream_wait_memory - Wait for more memory for a socket
115 : * @sk: socket to wait for memory
116 : * @timeo_p: for how long
117 : */
118 0 : int sk_stream_wait_memory(struct sock *sk, long *timeo_p)
119 : {
120 0 : int err = 0;
121 0 : long vm_wait = 0;
122 0 : long current_timeo = *timeo_p;
123 0 : DEFINE_WAIT_FUNC(wait, woken_wake_function);
124 :
125 0 : if (sk_stream_memory_free(sk))
126 0 : current_timeo = vm_wait = (prandom_u32() % (HZ / 5)) + 2;
127 :
128 0 : add_wait_queue(sk_sleep(sk), &wait);
129 :
130 0 : while (1) {
131 0 : sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
132 :
133 0 : if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
134 0 : goto do_error;
135 0 : if (!*timeo_p)
136 0 : goto do_eagain;
137 0 : if (signal_pending(current))
138 0 : goto do_interrupted;
139 0 : sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
140 0 : if (sk_stream_memory_free(sk) && !vm_wait)
141 : break;
142 :
143 0 : set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
144 0 : sk->sk_write_pending++;
145 0 : sk_wait_event(sk, &current_timeo, sk->sk_err ||
146 : (sk->sk_shutdown & SEND_SHUTDOWN) ||
147 : (sk_stream_memory_free(sk) &&
148 : !vm_wait), &wait);
149 0 : sk->sk_write_pending--;
150 :
151 0 : if (vm_wait) {
152 0 : vm_wait -= current_timeo;
153 0 : current_timeo = *timeo_p;
154 0 : if (current_timeo != MAX_SCHEDULE_TIMEOUT &&
155 0 : (current_timeo -= vm_wait) < 0)
156 : current_timeo = 0;
157 : vm_wait = 0;
158 : }
159 0 : *timeo_p = current_timeo;
160 : }
161 0 : out:
162 0 : remove_wait_queue(sk_sleep(sk), &wait);
163 0 : return err;
164 :
165 0 : do_error:
166 0 : err = -EPIPE;
167 0 : goto out;
168 0 : do_eagain:
169 : /* Make sure that whenever EAGAIN is returned, EPOLLOUT event can
170 : * be generated later.
171 : * When TCP receives ACK packets that make room, tcp_check_space()
172 : * only calls tcp_new_space() if SOCK_NOSPACE is set.
173 : */
174 0 : set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
175 0 : err = -EAGAIN;
176 0 : goto out;
177 0 : do_interrupted:
178 0 : err = sock_intr_errno(*timeo_p);
179 0 : goto out;
180 : }
181 : EXPORT_SYMBOL(sk_stream_wait_memory);
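/*
 * Example (illustrative sketch, not part of this file): the usual
 * "wait for send buffer space" step inside a stream sendmsg() loop, as in
 * tcp_sendmsg(). The helper name is hypothetical; sk_stream_memory_free()
 * and sk_stream_wait_memory() are the real API used above.
 */
static int example_wait_for_sndbuf(struct sock *sk, long *timeo)
{
	if (sk_stream_memory_free(sk))
		return 0;

	/* Mark that we want a write-space wakeup, then sleep until memory
	 * is freed, an error occurs, the send side is shut down, the
	 * timeout expires or a signal arrives.
	 */
	set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
	return sk_stream_wait_memory(sk, timeo);
}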
182 :
183 0 : int sk_stream_error(struct sock *sk, int flags, int err)
184 : {
185 0 : if (err == -EPIPE)
186 0 : err = sock_error(sk) ? : -EPIPE;
187 0 : if (err == -EPIPE && !(flags & MSG_NOSIGNAL))
188 0 : send_sig(SIGPIPE, current, 0);
189 0 : return err;
190 : }
191 : EXPORT_SYMBOL(sk_stream_error);
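/*
 * Example (illustrative sketch, not part of this file): a sendmsg() error
 * path usually finishes via sk_stream_error(), which replaces a bare -EPIPE
 * with the pending socket error and raises SIGPIPE unless the caller passed
 * MSG_NOSIGNAL. The helper and the partial-count convention shown are a
 * hypothetical rendering of the pattern used by tcp_sendmsg().
 */
static int example_sendmsg_done(struct sock *sk, int flags, int copied, int err)
{
	/* If some data was queued before the failure, report the partial
	 * byte count; otherwise convert and report the error.
	 */
	if (copied)
		return copied;
	return sk_stream_error(sk, flags, err);
}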
192 :
193 4 : void sk_stream_kill_queues(struct sock *sk)
194 : {
195 : /* First the read buffer. */
196 4 : __skb_queue_purge(&sk->sk_receive_queue);
197 :
198 : /* Next, the error queue. */
199 4 : __skb_queue_purge(&sk->sk_error_queue);
200 :
201 : /* Next, the write queue. */
202 4 : WARN_ON(!skb_queue_empty(&sk->sk_write_queue));
203 :
204 : /* Account for returned memory. */
205 4 : sk_mem_reclaim(sk);
206 :
207 4 : WARN_ON(sk->sk_wmem_queued);
208 4 : WARN_ON(sk->sk_forward_alloc);
209 :
210 : /* It is _impossible_ for the backlog to contain anything
211 : * when we get here. All user references to this socket
212 : * have gone away; only the net layer can touch it.
213 : */
214 4 : }
215 : EXPORT_SYMBOL(sk_stream_kill_queues);
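/*
 * Example (illustrative sketch, not part of this file): sk_stream_kill_queues()
 * runs during final socket destruction, after the protocol's own destroy hook
 * and once no user references remain, which is why the backlog is guaranteed
 * empty above. A connection-oriented protocol's destroy path might reach it
 * roughly as in the hypothetical sketch below.
 */
static void example_stream_destroy(struct sock *sk)
{
	/* Protocol-private teardown first, then purge the generic
	 * receive/error queues and reclaim accounted memory.
	 */
	if (sk->sk_prot->destroy)
		sk->sk_prot->destroy(sk);
	sk_stream_kill_queues(sk);
}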