/*
 *	Generic stream handling routines. These are generic for most
 *	protocols. Even IP. Tonight 8-).
 *	This is used because TCP, LLC (and others too) all have mostly
 *	identical sendmsg() and recvmsg() code.
 *	So we (will) share it here.
 *
 *	Authors:	Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *			(from old tcp.c code)
 *			Alan Cox <alan@redhat.com> (Borrowed comments 8-))
 */
#include <linux/module.h>
#include <linux/net.h>
#include <linux/signal.h>
#include <linux/tcp.h>
#include <linux/wait.h>
#include <net/sock.h>
/**
 * sk_stream_write_space - stream socket write_space callback.
 * @sk: socket
 *
 * Clears the NOSPACE flag and wakes up writers sleeping on @sk once at
 * least sk_stream_min_wspace() bytes of write space are available again.
 */
void sk_stream_write_space(struct sock *sk)
{
	struct socket *sock = sk->sk_socket;

	if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk) && sock) {
		clear_bit(SOCK_NOSPACE, &sock->flags);

		if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
			wake_up_interruptible(sk->sk_sleep);
		if (sock->fasync_list && !(sk->sk_shutdown & SEND_SHUTDOWN))
			sock_wake_async(sock, 2, POLL_OUT);
	}
}

EXPORT_SYMBOL(sk_stream_write_space);
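/*
 * Usage note (background, an assumption about callers rather than code in
 * this file): stream protocols normally install sk_stream_write_space()
 * as their sk->sk_write_space callback (TCP does so when it initializes
 * a socket), so it runs whenever queued output memory is freed and write
 * space opens up again.
 */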
/**
 * sk_stream_wait_connect - Wait for a socket to get into the connected state
 * @sk: sock to wait on
 * @timeo_p: for how long to wait
 *
 * Must be called with the socket locked.
 */
int sk_stream_wait_connect(struct sock *sk, long *timeo_p)
{
	struct task_struct *tsk = current;
	DEFINE_WAIT(wait);
	int done;

	do {
		int err = sock_error(sk);
		if (err)
			return err;
		if ((1 << sk->sk_state) & ~(TCPF_SYN_SENT | TCPF_SYN_RECV))
			return -EPIPE;
		if (!*timeo_p)
			return -EAGAIN;
		if (signal_pending(tsk))
			return sock_intr_errno(*timeo_p);

		prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
		sk->sk_write_pending++;
		done = sk_wait_event(sk, timeo_p,
				     !sk->sk_err &&
				     !((1 << sk->sk_state) &
				       ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)));
		finish_wait(sk->sk_sleep, &wait);
		sk->sk_write_pending--;
	} while (!done);
	return 0;
}

EXPORT_SYMBOL(sk_stream_wait_connect);
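/*
 * Illustrative caller (a sketch of the usual sendmsg() pattern, not code
 * from this file):
 *
 *	if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) {
 *		err = sk_stream_wait_connect(sk, &timeo);
 *		if (err)
 *			goto out_err;
 *	}
 *
 * with timeo typically taken from sock_sndtimeo(sk, flags & MSG_DONTWAIT).
 */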
/**
 * sk_stream_closing - Return 1 if we still have things to send in our buffers.
 * @sk: socket to verify
 */
static inline int sk_stream_closing(struct sock *sk)
{
	return (1 << sk->sk_state) &
	       (TCPF_FIN_WAIT1 | TCPF_CLOSING | TCPF_LAST_ACK);
}
void sk_stream_wait_close(struct sock *sk, long timeout)
{
	if (timeout) {
		DEFINE_WAIT(wait);

		do {
			prepare_to_wait(sk->sk_sleep, &wait,
					TASK_INTERRUPTIBLE);
			if (sk_wait_event(sk, &timeout, !sk_stream_closing(sk)))
				break;
		} while (!signal_pending(current) && timeout);

		finish_wait(sk->sk_sleep, &wait);
	}
}

EXPORT_SYMBOL(sk_stream_wait_close);
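/*
 * Usage note (an assumption about callers, not taken from this file):
 * protocols call sk_stream_wait_close() from their close() path when
 * SO_LINGER is set, so the closing task sleeps until the remaining data
 * has been sent or the linger timeout expires.
 */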
/**
 * sk_stream_wait_memory - Wait for more memory for a socket
 * @sk: socket to wait for memory
 * @timeo_p: for how long
 */
int sk_stream_wait_memory(struct sock *sk, long *timeo_p)
{
	int err = 0;
	long vm_wait = 0;
	long current_timeo = *timeo_p;
	DEFINE_WAIT(wait);

	if (sk_stream_memory_free(sk))
		current_timeo = vm_wait = (net_random() % (HZ / 5)) + 2;

	while (1) {
		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

		prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);

		if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
			goto do_error;
		if (!*timeo_p)
			goto do_nonblock;
		if (signal_pending(current))
			goto do_interrupted;
		clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
		if (sk_stream_memory_free(sk) && !vm_wait)
			break;

		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		sk->sk_write_pending++;
		sk_wait_event(sk, &current_timeo, !sk->sk_err &&
						  !(sk->sk_shutdown & SEND_SHUTDOWN) &&
						  sk_stream_memory_free(sk) &&
						  vm_wait);
		sk->sk_write_pending--;

		if (vm_wait) {
			vm_wait -= current_timeo;
			current_timeo = *timeo_p;
			if (current_timeo != MAX_SCHEDULE_TIMEOUT &&
			    (current_timeo -= vm_wait) < 0)
				current_timeo = 0;
			vm_wait = 0;
		}
		*timeo_p = current_timeo;
	}
out:
	finish_wait(sk->sk_sleep, &wait);
	return err;
do_error:
	err = -EPIPE;
	goto out;
do_nonblock:
	err = -EAGAIN;
	goto out;
do_interrupted:
	err = sock_intr_errno(*timeo_p);
	goto out;
}

EXPORT_SYMBOL(sk_stream_wait_memory);
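/*
 * Note on sk_stream_wait_memory() (editorial summary): sendmsg()
 * implementations typically call it once sk_stream_memory_free() fails,
 * i.e. when the send queue has hit sk_sndbuf or protocol memory limits.
 * The vm_wait path covers the case where the socket itself still has
 * room but global memory pressure is throttling it: instead of waiting
 * indefinitely for a wakeup it sleeps for a short random interval,
 * (net_random() % (HZ / 5)) + 2 jiffies.
 */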
/* skb destructor for stream receive buffers: give the memory back to the
 * owning socket's accounting.
 */
void sk_stream_rfree(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;

	skb_truesize_check(skb);
	atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
	sk->sk_forward_alloc += skb->truesize;
}

EXPORT_SYMBOL(sk_stream_rfree);
int sk_stream_error(struct sock *sk, int flags, int err)
{
	if (err == -EPIPE)
		err = sock_error(sk) ? : -EPIPE;
	if (err == -EPIPE && !(flags & MSG_NOSIGNAL))
		send_sig(SIGPIPE, current, 0);
	return err;
}

EXPORT_SYMBOL(sk_stream_error);
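/*
 * Background for the two helpers below (editorial, not in the original
 * source): sk->sk_forward_alloc holds bytes already charged to the
 * protocol but not yet consumed by queued data; it grows in page-sized
 * (SK_STREAM_MEM_QUANTUM) steps, while sk->sk_prot->memory_allocated
 * tracks the protocol-wide total in those page-sized units.  The
 * sysctl_mem[] array holds the low, pressure and high thresholds
 * (tcp_mem in the case of TCP).
 */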
void __sk_stream_mem_reclaim(struct sock *sk)
{
	/* Return whole unused quanta to the protocol-wide pool and clear
	 * the memory pressure flag once we drop below the low threshold.
	 */
	atomic_sub(sk->sk_forward_alloc / SK_STREAM_MEM_QUANTUM,
		   sk->sk_prot->memory_allocated);
	sk->sk_forward_alloc &= SK_STREAM_MEM_QUANTUM - 1;
	if (*sk->sk_prot->memory_pressure &&
	    (atomic_read(sk->sk_prot->memory_allocated) <
	     sk->sk_prot->sysctl_mem[0]))
		*sk->sk_prot->memory_pressure = 0;
}

EXPORT_SYMBOL(__sk_stream_mem_reclaim);
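/*
 * Note on the function below (editorial summary): @size is the number of
 * bytes the caller wants to queue and @kind selects the per-socket limit
 * to check, non-zero for receive memory (sysctl_rmem[0]) and zero for
 * send memory (sysctl_wmem[0]).  It returns 1 if the allocation may
 * proceed and 0 if it has to be suppressed.
 */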
int sk_stream_mem_schedule(struct sock *sk, int size, int kind)
{
	int amt = sk_stream_pages(size);

	sk->sk_forward_alloc += amt * SK_STREAM_MEM_QUANTUM;
	atomic_add(amt, sk->sk_prot->memory_allocated);

	/* Under limit. */
	if (atomic_read(sk->sk_prot->memory_allocated) < sk->sk_prot->sysctl_mem[0]) {
		if (*sk->sk_prot->memory_pressure)
			*sk->sk_prot->memory_pressure = 0;
		return 1;
	}

	/* Over hard limit. */
	if (atomic_read(sk->sk_prot->memory_allocated) > sk->sk_prot->sysctl_mem[2]) {
		sk->sk_prot->enter_memory_pressure();
		goto suppress_allocation;
	}

	/* Under pressure. */
	if (atomic_read(sk->sk_prot->memory_allocated) > sk->sk_prot->sysctl_mem[1])
		sk->sk_prot->enter_memory_pressure();

	if (kind) {
		if (atomic_read(&sk->sk_rmem_alloc) < sk->sk_prot->sysctl_rmem[0])
			return 1;
	} else if (sk->sk_wmem_queued < sk->sk_prot->sysctl_wmem[0])
		return 1;

	if (!*sk->sk_prot->memory_pressure ||
	    sk->sk_prot->sysctl_mem[2] > atomic_read(sk->sk_prot->sockets_allocated) *
				sk_stream_pages(sk->sk_wmem_queued +
						atomic_read(&sk->sk_rmem_alloc) +
						sk->sk_forward_alloc))
		return 1;

suppress_allocation:
	if (!kind) {
		sk_stream_moderate_sndbuf(sk);

		/* Fail only if socket is _under_ its sndbuf.
		 * In this case we cannot block, so we have to fail.
		 */
		if (sk->sk_wmem_queued + size >= sk->sk_sndbuf)
			return 1;
	}

	/* Alas. Undo changes. */
	sk->sk_forward_alloc -= amt * SK_STREAM_MEM_QUANTUM;
	atomic_sub(amt, sk->sk_prot->memory_allocated);
	return 0;
}

EXPORT_SYMBOL(sk_stream_mem_schedule);
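/*
 * Illustrative caller (a sketch assuming the inline wrappers from
 * include/net/sock.h of this era, not code from this file): callers
 * normally charge memory through wrappers that only fall back to
 * sk_stream_mem_schedule() when sk_forward_alloc cannot cover the
 * request, e.g.
 *
 *	static inline int sk_stream_wmem_schedule(struct sock *sk, int size)
 *	{
 *		return size <= sk->sk_forward_alloc ||
 *		       sk_stream_mem_schedule(sk, size, 0);
 *	}
 */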
void sk_stream_kill_queues(struct sock *sk)
{
	/* First the read buffer. */
	__skb_queue_purge(&sk->sk_receive_queue);

	/* Next, the error queue. */
	__skb_queue_purge(&sk->sk_error_queue);

	/* Next, the write queue. */
	BUG_TRAP(skb_queue_empty(&sk->sk_write_queue));

	/* Account for returned memory. */
	sk_stream_mem_reclaim(sk);

	BUG_TRAP(!sk->sk_wmem_queued);
	BUG_TRAP(!sk->sk_forward_alloc);

	/* It is _impossible_ for the backlog to contain anything
	 * when we get here.  All user references to this socket
	 * have gone away, only the net layer can touch it.
	 */
}

EXPORT_SYMBOL(sk_stream_kill_queues);