/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		The User Datagram Protocol (UDP).
 *
 * Version:	$Id: udp.c,v 1.102 2002/02/01 22:01:04 davem Exp $
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Alan Cox, <Alan.Cox@linux.org>
 *		Hirokazu Takahashi, <taka@valinux.co.jp>
 *
 * Fixes:
 *		Alan Cox	:	verify_area() calls
 *		Alan Cox	:	stopped close while in use off icmp
 *					messages. Not a fix but a botch that
 *					for udp at least is 'valid'.
 *		Alan Cox	:	Fixed icmp handling properly
 *		Alan Cox	:	Correct error for oversized datagrams
 *		Alan Cox	:	Tidied select() semantics.
 *		Alan Cox	:	udp_err() fixed properly, also now
 *					select and read wake correctly on errors
 *		Alan Cox	:	udp_send verify_area moved to avoid mem leak
 *		Alan Cox	:	UDP can count its memory
 *		Alan Cox	:	send to an unknown connection causes
 *					an ECONNREFUSED off the icmp, but
 *					does NOT close.
 *		Alan Cox	:	Switched to new sk_buff handlers. No more backlog!
 *		Alan Cox	:	Using generic datagram code. Even smaller and the PEEK
 *					bug no longer crashes it.
 *		Fred Van Kempen	:	Net2e support for sk->broadcast.
 *		Alan Cox	:	Uses skb_free_datagram
 *		Alan Cox	:	Added get/set sockopt support.
 *		Alan Cox	:	Broadcasting without option set returns EACCES.
 *		Alan Cox	:	No wakeup calls. Instead we now use the callbacks.
 *		Alan Cox	:	Use ip_tos and ip_ttl
 *		Alan Cox	:	SNMP Mibs
 *		Alan Cox	:	MSG_DONTROUTE, and 0.0.0.0 support.
 *		Matt Dillon	:	UDP length checks.
 *		Alan Cox	:	Smarter af_inet used properly.
 *		Alan Cox	:	Use new kernel side addressing.
 *		Alan Cox	:	Incorrect return on truncated datagram receive.
 *	Arnt Gulbrandsen	:	New udp_send and stuff
 *		Alan Cox	:	Cache last socket
 *		Alan Cox	:	Route cache
 *		Jon Peatfield	:	Minor efficiency fix to sendto().
 *		Mike Shaver	:	RFC1122 checks.
 *		Alan Cox	:	Nonblocking error fix.
 *	Willy Konynenberg	:	Transparent proxying support.
 *		Mike McLagan	:	Routing by source
 *		David S. Miller	:	New socket lookup architecture.
 *					Last socket cache retained as it
 *					does have a high hit rate.
 *		Olaf Kirch	:	Don't linearise iovec on sendmsg.
 *		Andi Kleen	:	Some cleanups, cache destination entry
 *					for connect.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year coma.
 *		Melvin Smith	:	Check msg_name not msg_namelen in sendto(),
 *					return ENOTCONN for unconnected sockets (POSIX)
 *		Janos Farkas	:	don't deliver multi/broadcasts to a different
 *					bound-to-device socket
 *	Hirokazu Takahashi	:	HW checksumming for outgoing UDP
 *					datagrams.
 *	Hirokazu Takahashi	:	sendfile() on UDP works now.
 *		Arnaldo C. Melo	:	convert /proc/net/udp to seq_file
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov	:	allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	Derek Atkins <derek@ihtfp.com>: Add Encapsulation Support.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/ioctls.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/igmp.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <net/tcp_states.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <net/icmp.h>
#include <net/route.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include "udp_impl.h"
/*
 *	Snmp MIB for the UDP layer
 */

DEFINE_SNMP_STAT(struct udp_mib, udp_statistics) __read_mostly;

struct hlist_head udp_hash[UDP_HTABLE_SIZE];
DEFINE_RWLOCK(udp_hash_lock);

static int udp_port_rover;

/*
 * Note about this hash function :
 * Typical use is probably daddr = 0, only dport is going to vary hash
 */
static inline unsigned int hash_port_and_addr(__u16 port, __be32 addr)
{
	addr ^= addr >> 16;
	addr ^= addr >> 8;
	return port ^ addr;
}

static inline int __udp_lib_port_inuse(unsigned int hash, int port,
				       __be32 daddr, struct hlist_head udptable[])
{
	struct sock *sk;
	struct hlist_node *node;
	struct inet_sock *inet;

	sk_for_each(sk, node, &udptable[hash & (UDP_HTABLE_SIZE - 1)]) {
		if (sk->sk_hash != hash)
			continue;
		inet = inet_sk(sk);
		if (inet->num != port)
			continue;
		if (inet->rcv_saddr == daddr)
			return 1;
	}
	return 0;
}
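/*
 * Illustrative note on the hash above: with addr == 0 the mixing
 * degenerates to the bare port, so wildcard-bound sockets on consecutive
 * ports land in consecutive chains once folded by & (UDP_HTABLE_SIZE - 1).
 * A socket bound to a specific local address hashes into a different
 * chain than a wildcard-bound one on the same port, which is why the
 * lookup paths below always probe two chains (hash and hashwild).
 */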
/**
 *  __udp_lib_get_port  -  UDP/-Lite port lookup for IPv4 and IPv6
 *
 *  @sk:          socket struct in question
 *  @snum:        port number to look up
 *  @udptable:    hash list table, must be of UDP_HTABLE_SIZE
 *  @port_rover:  pointer to record of last unallocated port
 *  @saddr_comp:  AF-dependent comparison of bound local IP addresses
 */
int __udp_lib_get_port(struct sock *sk, unsigned short snum,
		       struct hlist_head udptable[], int *port_rover,
		       int (*saddr_comp)(const struct sock *sk1,
					 const struct sock *sk2))
{
	struct hlist_node *node;
	struct hlist_head *head;
	struct sock *sk2;
	unsigned int hash;
	int error = 1;

	write_lock_bh(&udp_hash_lock);
	if (snum == 0) {
		int best_size_so_far, best, result, i;

		if (*port_rover > sysctl_local_port_range[1] ||
		    *port_rover < sysctl_local_port_range[0])
			*port_rover = sysctl_local_port_range[0];
		best_size_so_far = 32767;
		best = result = *port_rover;
		for (i = 0; i < UDP_HTABLE_SIZE; i++, result++) {
			int size;

			hash = hash_port_and_addr(result,
						  inet_sk(sk)->rcv_saddr);
			head = &udptable[hash & (UDP_HTABLE_SIZE - 1)];
			if (hlist_empty(head)) {
				if (result > sysctl_local_port_range[1])
					result = sysctl_local_port_range[0] +
						((result - sysctl_local_port_range[0]) &
						 (UDP_HTABLE_SIZE - 1));
				goto gotit;
			}
			size = 0;
			sk_for_each(sk2, node, head) {
				if (++size >= best_size_so_far)
					goto next;
			}
			best_size_so_far = size;
			best = result;
		next:
			;
		}
		result = best;
		for (i = 0; i < (1 << 16) / UDP_HTABLE_SIZE;
		     i++, result += UDP_HTABLE_SIZE) {
			if (result > sysctl_local_port_range[1])
				result = sysctl_local_port_range[0]
					+ ((result - sysctl_local_port_range[0]) &
					   (UDP_HTABLE_SIZE - 1));
			hash = hash_port_and_addr(result,
						  inet_sk(sk)->rcv_saddr);
			if (! __udp_lib_port_inuse(hash, result,
						   inet_sk(sk)->rcv_saddr,
						   udptable))
				break;
		}
		if (i >= (1 << 16) / UDP_HTABLE_SIZE)
			goto fail;
gotit:
		*port_rover = snum = result;
	} else {
		hash = hash_port_and_addr(snum, inet_sk(sk)->rcv_saddr);
		head = &udptable[hash & (UDP_HTABLE_SIZE - 1)];

		sk_for_each(sk2, node, head)
			if (sk2->sk_hash == hash &&
			    sk2 != sk &&
			    inet_sk(sk2)->num == snum &&
			    (!sk2->sk_reuse || !sk->sk_reuse) &&
			    (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if ||
			     sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
			    (*saddr_comp)(sk, sk2))
				goto fail;
	}
	inet_sk(sk)->num = snum;
	sk->sk_hash = hash;
	if (sk_unhashed(sk)) {
		head = &udptable[hash & (UDP_HTABLE_SIZE - 1)];
		sk_add_node(sk, head);
		sock_prot_inc_use(sk->sk_prot);
	}
	error = 0;
fail:
	write_unlock_bh(&udp_hash_lock);
	return error;
}

int udp_get_port(struct sock *sk, unsigned short snum,
		 int (*scmp)(const struct sock *, const struct sock *))
{
	return __udp_lib_get_port(sk, snum, udp_hash, &udp_port_rover, scmp);
}

int ipv4_rcv_saddr_equal(const struct sock *sk1, const struct sock *sk2)
{
	struct inet_sock *inet1 = inet_sk(sk1), *inet2 = inet_sk(sk2);

	return (!ipv6_only_sock(sk2) &&
		(!inet1->rcv_saddr || !inet2->rcv_saddr ||
		 inet1->rcv_saddr == inet2->rcv_saddr));
}
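/*
 * Example of the comparison above: a socket bound to 0.0.0.0:53 and one
 * bound to 127.0.0.1:53 compare as conflicting (either address being a
 * wildcard is enough), so without SO_REUSEADDR the second bind fails.
 * An IPv6-only peer never conflicts with an IPv4 binding.
 */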
static inline int udp_v4_get_port(struct sock *sk, unsigned short snum)
{
	return udp_get_port(sk, snum, ipv4_rcv_saddr_equal);
}
/* UDP is nearly always wildcards out the wazoo, it makes no sense to try
 * harder than this. -DaveM
 */
static struct sock *__udp4_lib_lookup(__be32 saddr, __be16 sport,
				      __be32 daddr, __be16 dport,
				      int dif, struct hlist_head udptable[])
{
	struct sock *sk, *result = NULL;
	struct hlist_node *node;
	unsigned int hash, hashwild;
	int score, best = -1;

	hash = hash_port_and_addr(ntohs(dport), daddr);
	hashwild = hash_port_and_addr(ntohs(dport), 0);

	read_lock(&udp_hash_lock);

lookup:
	sk_for_each(sk, node, &udptable[hash & (UDP_HTABLE_SIZE - 1)]) {
		struct inet_sock *inet = inet_sk(sk);

		if (sk->sk_hash != hash || ipv6_only_sock(sk) ||
		    inet->num != ntohs(dport))
			continue;

		score = (sk->sk_family == PF_INET ? 1 : 0);
		if (inet->rcv_saddr) {
			if (inet->rcv_saddr != daddr)
				continue;
			score += 2;
		}
		if (inet->daddr) {
			if (inet->daddr != saddr)
				continue;
			score += 2;
		}
		if (inet->dport) {
			if (inet->dport != sport)
				continue;
			score += 2;
		}
		if (sk->sk_bound_dev_if) {
			if (sk->sk_bound_dev_if != dif)
				continue;
			score += 2;
		}
		if (score == 9) {
			best = score;
			result = sk;
			break;
		} else if (score > best) {
			result = sk;
			best = score;
		}
	}

	if (hash != hashwild && best != 9) {
		/* no exact match yet: scan the wildcard-bound chain too */
		hash = hashwild;
		goto lookup;
	}

	if (result)
		sock_hold(result);
	read_unlock(&udp_hash_lock);
	return result;
}
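/*
 * Scoring in the lookup above: +1 for an AF_INET socket, +2 for each of
 * rcv_saddr, daddr, dport and bound_dev_if that is set and matches, so 9
 * is a fully specified (connected, device-bound) match and ends the
 * search; anything less keeps the best partial match seen so far.
 */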
static inline struct sock *udp_v4_mcast_next(struct sock *sk,
					     unsigned int hnum,
					     __be16 loc_port, __be32 loc_addr,
					     __be16 rmt_port, __be32 rmt_addr,
					     int dif)
{
	struct hlist_node *node;
	struct sock *s = sk;

	sk_for_each_from(s, node) {
		struct inet_sock *inet = inet_sk(s);

		if (s->sk_hash != hnum ||
		    inet->num != ntohs(loc_port) ||
		    (inet->daddr && inet->daddr != rmt_addr) ||
		    (inet->dport != rmt_port && inet->dport) ||
		    (inet->rcv_saddr && inet->rcv_saddr != loc_addr) ||
		    ipv6_only_sock(s) ||
		    (s->sk_bound_dev_if && s->sk_bound_dev_if != dif))
			continue;
		if (!ip_mc_sf_allow(s, loc_addr, rmt_addr, dif))
			continue;
		goto found;
	}
	s = NULL;
found:
	return s;
}

/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.
 * Header points to the ip header of the error packet. We move
 * on past this. Then (as it used to claim before adjustment)
 * header points to the first 8 bytes of the udp header.  We need
 * to find the appropriate port.
 */
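/*
 * For example, an ICMP_DEST_UNREACH/ICMP_PORT_UNREACH arriving for a
 * connected socket is converted via icmp_err_convert[] into ECONNREFUSED,
 * which the application then sees on its next send or receive on that
 * socket.
 */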
void __udp4_lib_err(struct sk_buff *skb, u32 info, struct hlist_head udptable[])
{
	struct inet_sock *inet;
	struct iphdr *iph = (struct iphdr*)skb->data;
	struct udphdr *uh = (struct udphdr*)(skb->data+(iph->ihl<<2));
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	struct sock *sk;
	int harderr;
	int err;

	sk = __udp4_lib_lookup(iph->daddr, uh->dest, iph->saddr, uh->source,
			       skb->dev->ifindex, udptable);
	if (sk == NULL) {
		ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
		return;	/* No socket for error */
	}

	err = 0;
	harderr = 0;
	inet = inet_sk(sk);

	switch (type) {
	default:
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	case ICMP_SOURCE_QUENCH:
		goto out;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		harderr = 1;
		break;
	case ICMP_DEST_UNREACH:
		if (code == ICMP_FRAG_NEEDED) { /* Path MTU discovery */
			if (inet->pmtudisc != IP_PMTUDISC_DONT) {
				err = EMSGSIZE;
				harderr = 1;
				break;
			}
			goto out;
		}
		err = EHOSTUNREACH;
		if (code <= NR_ICMP_UNREACH) {
			harderr = icmp_err_convert[code].fatal;
			err = icmp_err_convert[code].errno;
		}
		break;
	}

	/*
	 *	RFC1122: OK.  Passes ICMP errors back to application, as per
	 *	4.1.3.3.
	 */
	if (!inet->recverr) {
		if (!harderr || sk->sk_state != TCP_ESTABLISHED)
			goto out;
	} else
		ip_icmp_error(sk, skb, err, uh->dest, info, (u8*)(uh+1));

	sk->sk_err = err;
	sk->sk_error_report(sk);
out:
	sock_put(sk);
}

void udp_err(struct sk_buff *skb, u32 info)
{
	return __udp4_lib_err(skb, info, udp_hash);
}

/*
 * Throw away all pending data and cancel the corking. Socket is locked.
 */
static void udp_flush_pending_frames(struct sock *sk)
{
	struct udp_sock *up = udp_sk(sk);

	if (up->pending) {
		up->len = 0;
		up->pending = 0;
		ip_flush_pending_frames(sk);
	}
}
/**
 *	udp4_hwcsum_outgoing  -  handle outgoing HW checksumming
 *	@sk:	socket we are sending on
 *	@skb:	sk_buff containing the filled-in UDP header
 *		(checksum field must be zeroed out)
 */
static void udp4_hwcsum_outgoing(struct sock *sk, struct sk_buff *skb,
				 __be32 src, __be32 dst, int len)
{
	unsigned int offset;
	struct udphdr *uh = udp_hdr(skb);
	__wsum csum = 0;

	if (skb_queue_len(&sk->sk_write_queue) == 1) {
		/*
		 * Only one fragment on the socket.
		 */
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct udphdr, check);
		uh->check = ~csum_tcpudp_magic(src, dst, len, IPPROTO_UDP, 0);
	} else {
		/*
		 * HW-checksum won't work as there are two or more
		 * fragments on the socket so that all csums of sk_buffs
		 * have to be added in.
		 */
		offset = skb_transport_offset(skb);
		skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);

		skb->ip_summed = CHECKSUM_NONE;

		skb_queue_walk(&sk->sk_write_queue, skb) {
			csum = csum_add(csum, skb->csum);
		}

		uh->check = csum_tcpudp_magic(src, dst, len, IPPROTO_UDP, csum);
		if (uh->check == 0)
			uh->check = CSUM_MANGLED_0;
	}
}
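/*
 * Why CSUM_MANGLED_0 above: RFC 768 defines an all-zero checksum field as
 * "no checksum computed", so a datagram whose sum really is zero must be
 * transmitted as 0xFFFF instead; the two values are equivalent in
 * one's-complement arithmetic.
 */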
/*
 * Push out all pending data as one UDP datagram. Socket is locked.
 */
static int udp_push_pending_frames(struct sock *sk)
{
	struct udp_sock *up = udp_sk(sk);
	struct inet_sock *inet = inet_sk(sk);
	struct flowi *fl = &inet->cork.fl;
	struct sk_buff *skb;
	struct udphdr *uh;
	int err = 0;
	__wsum csum = 0;

	/* Grab the skbuff where UDP header space exists. */
	if ((skb = skb_peek(&sk->sk_write_queue)) == NULL)
		goto out;

	/*
	 * Create a UDP header
	 */
	uh = udp_hdr(skb);
	uh->source = fl->fl_ip_sport;
	uh->dest = fl->fl_ip_dport;
	uh->len = htons(up->len);
	uh->check = 0;

	if (up->pcflag)					 /*     UDP-Lite      */
		csum = udplite_csum_outgoing(sk, skb);

	else if (sk->sk_no_check == UDP_CSUM_NOXMIT) {	 /* UDP csum disabled */

		skb->ip_summed = CHECKSUM_NONE;
		goto send;

	} else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */

		udp4_hwcsum_outgoing(sk, skb, fl->fl4_src, fl->fl4_dst, up->len);
		goto send;

	} else						 /*   `normal' UDP    */
		csum = udp_csum_outgoing(sk, skb);

	/* add protocol-dependent pseudo-header */
	uh->check = csum_tcpudp_magic(fl->fl4_src, fl->fl4_dst, up->len,
				      sk->sk_protocol, csum);
	if (uh->check == 0)
		uh->check = CSUM_MANGLED_0;

send:
	err = ip_push_pending_frames(sk);
out:
	up->len = 0;
	up->pending = 0;
	return err;
}

int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
		size_t len)
{
	struct inet_sock *inet = inet_sk(sk);
	struct udp_sock *up = udp_sk(sk);
	int ulen = len;
	struct ipcm_cookie ipc;
	struct rtable *rt = NULL;
	int free = 0;
	int connected = 0;
	__be32 daddr, faddr, saddr;
	__be16 dport;
	u8 tos;
	int err, is_udplite = up->pcflag;
	int corkreq = up->corkflag || msg->msg_flags&MSG_MORE;
	int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);

	if (len > 0xFFFF)
		return -EMSGSIZE;

	/*
	 *	Check the flags.
	 */

	if (msg->msg_flags&MSG_OOB)	/* Mirror BSD error message compatibility */
		return -EOPNOTSUPP;

	ipc.opt = NULL;

	if (up->pending) {
		/*
		 * There are pending frames.
		 * The socket lock must be held while it's corked.
		 */
		lock_sock(sk);
		if (likely(up->pending)) {
			if (unlikely(up->pending != AF_INET)) {
				release_sock(sk);
				return -EINVAL;
			}
			goto do_append_data;
		}
		release_sock(sk);
	}
	ulen += sizeof(struct udphdr);

	/*
	 *	Get and verify the address.
	 */
	if (msg->msg_name) {
		struct sockaddr_in * usin = (struct sockaddr_in*)msg->msg_name;
		if (msg->msg_namelen < sizeof(*usin))
			return -EINVAL;
		if (usin->sin_family != AF_INET) {
			if (usin->sin_family != AF_UNSPEC)
				return -EAFNOSUPPORT;
		}

		daddr = usin->sin_addr.s_addr;
		dport = usin->sin_port;
		if (dport == 0)
			return -EINVAL;
	} else {
		if (sk->sk_state != TCP_ESTABLISHED)
			return -EDESTADDRREQ;
		daddr = inet->daddr;
		dport = inet->dport;
		/* Open fast path for connected socket.
		   Route will not be used, if at least one option is set.
		 */
		connected = 1;
	}
	ipc.addr = inet->saddr;

	ipc.oif = sk->sk_bound_dev_if;
	if (msg->msg_controllen) {
		err = ip_cmsg_send(msg, &ipc);
		if (err)
			return err;
		if (ipc.opt)
			free = 1;
		connected = 0;
	}
	if (!ipc.opt)
		ipc.opt = inet->opt;

	saddr = ipc.addr;
	ipc.addr = faddr = daddr;

	if (ipc.opt && ipc.opt->srr) {
		if (!daddr)
			return -EINVAL;
		faddr = ipc.opt->faddr;
		connected = 0;
	}
	tos = RT_TOS(inet->tos);
	if (sock_flag(sk, SOCK_LOCALROUTE) ||
	    (msg->msg_flags & MSG_DONTROUTE) ||
	    (ipc.opt && ipc.opt->is_strictroute)) {
		tos |= RTO_ONLINK;
		connected = 0;
	}

	if (MULTICAST(daddr)) {
		if (!ipc.oif)
			ipc.oif = inet->mc_index;
		if (!saddr)
			saddr = inet->mc_addr;
		connected = 0;
	}

	if (connected)
		rt = (struct rtable*)sk_dst_check(sk, 0);

	if (rt == NULL) {
		struct flowi fl = { .oif = ipc.oif,
				    .nl_u = { .ip4_u =
					      { .daddr = faddr,
						.saddr = saddr,
						.tos = tos } },
				    .proto = sk->sk_protocol,
				    .uli_u = { .ports =
					       { .sport = inet->sport,
						 .dport = dport } } };
		security_sk_classify_flow(sk, &fl);
		err = ip_route_output_flow(&rt, &fl, sk, 1);
		if (err)
			goto out;

		err = -EACCES;
		if ((rt->rt_flags & RTCF_BROADCAST) &&
		    !sock_flag(sk, SOCK_BROADCAST))
			goto out;
		if (connected)
			sk_dst_set(sk, dst_clone(&rt->u.dst));
	}

	if (msg->msg_flags&MSG_CONFIRM)
		goto do_confirm;
back_from_confirm:

	saddr = rt->rt_src;
	if (!ipc.addr)
		daddr = ipc.addr = rt->rt_dst;

	lock_sock(sk);
	if (unlikely(up->pending)) {
		/* The socket is already corked while preparing it. */
		/* ... which is an evident application bug. --ANK */
		release_sock(sk);

		LIMIT_NETDEBUG(KERN_DEBUG "udp cork app bug 2\n");
		err = -EINVAL;
		goto out;
	}
	/*
	 *	Now cork the socket to pend data.
	 */
	inet->cork.fl.fl4_dst = daddr;
	inet->cork.fl.fl_ip_dport = dport;
	inet->cork.fl.fl4_src = saddr;
	inet->cork.fl.fl_ip_sport = inet->sport;
	up->pending = AF_INET;

do_append_data:
	up->len += ulen;
	getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag;
	err = ip_append_data(sk, getfrag, msg->msg_iov, ulen,
			     sizeof(struct udphdr), &ipc, rt,
			     corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags);
	if (err)
		udp_flush_pending_frames(sk);
	else if (!corkreq)
		err = udp_push_pending_frames(sk);
	else if (unlikely(skb_queue_empty(&sk->sk_write_queue)))
		up->pending = 0;
	release_sock(sk);

out:
	ip_rt_put(rt);
	if (free)
		kfree(ipc.opt);
	if (!err) {
		UDP_INC_STATS_USER(UDP_MIB_OUTDATAGRAMS, is_udplite);
		return len;
	}
	/*
	 * ENOBUFS = no kernel mem, SOCK_NOSPACE = no sndbuf space.  Reporting
	 * ENOBUFS might not be good (it's not tunable per se), but otherwise
	 * we don't have a good statistic (IpOutDiscards but it can be too many
	 * things).  We could add another new stat but at least for now that
	 * seems like overkill.
	 */
	if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
		UDP_INC_STATS_USER(UDP_MIB_SNDBUFERRORS, is_udplite);
	}
	return err;

do_confirm:
	dst_confirm(&rt->u.dst);
	if (!(msg->msg_flags&MSG_PROBE) || len)
		goto back_from_confirm;
	err = 0;
	goto out;
}
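/*
 * Userspace view of the corking above (illustrative): with MSG_MORE set,
 * successive sends accumulate into one pending datagram, e.g.
 *
 *	send(fd, part1, len1, MSG_MORE);
 *	send(fd, part2, len2, 0);	(clears the cork: one UDP datagram)
 *
 * which is equivalent to bracketing the calls with the UDP_CORK option.
 */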
int udp_sendpage(struct sock *sk, struct page *page, int offset,
		 size_t size, int flags)
{
	struct udp_sock *up = udp_sk(sk);
	int ret;

	if (!up->pending) {
		struct msghdr msg = { .msg_flags = flags|MSG_MORE };

		/* Call udp_sendmsg to specify destination address which
		 * sendpage interface can't pass.
		 * This will succeed only when the socket is connected.
		 */
		ret = udp_sendmsg(NULL, sk, &msg, 0);
		if (ret < 0)
			return ret;
	}

	lock_sock(sk);

	if (unlikely(!up->pending)) {
		release_sock(sk);

		LIMIT_NETDEBUG(KERN_DEBUG "udp cork app bug 3\n");
		return -EINVAL;
	}

	ret = ip_append_page(sk, page, offset, size, flags);
	if (ret == -EOPNOTSUPP) {
		release_sock(sk);
		return sock_no_sendpage(sk->sk_socket, page, offset,
					size, flags);
	}
	if (ret < 0) {
		udp_flush_pending_frames(sk);
		goto out;
	}

	up->len += size;
	if (!(up->corkflag || (flags&MSG_MORE)))
		ret = udp_push_pending_frames(sk);
	if (!ret)
		ret = size;
out:
	release_sock(sk);
	return ret;
}

/*
 *	IOCTL requests applicable to the UDP protocol
 */

int udp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
	switch (cmd) {
	case SIOCOUTQ:
	{
		int amount = atomic_read(&sk->sk_wmem_alloc);
		return put_user(amount, (int __user *)arg);
	}

	case SIOCINQ:
	{
		struct sk_buff *skb;
		unsigned long amount;

		amount = 0;
		spin_lock_bh(&sk->sk_receive_queue.lock);
		skb = skb_peek(&sk->sk_receive_queue);
		if (skb != NULL) {
			/*
			 * We will only return the amount
			 * of this packet since that is all
			 * that will be read.
			 */
			amount = skb->len - sizeof(struct udphdr);
		}
		spin_unlock_bh(&sk->sk_receive_queue.lock);
		return put_user(amount, (int __user *)arg);
	}

	default:
		return -ENOIOCTLCMD;
	}

	return 0;
}
/*
 *	This should be easy, if there is something there we
 *	return it, otherwise we block.
 */

int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
		size_t len, int noblock, int flags, int *addr_len)
{
	struct inet_sock *inet = inet_sk(sk);
	struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name;
	struct sk_buff *skb;
	unsigned int ulen, copied;
	int err;
	int is_udplite = IS_UDPLITE(sk);

	/*
	 *	Check any passed addresses
	 */
	if (addr_len)
		*addr_len = sizeof(*sin);

	if (flags & MSG_ERRQUEUE)
		return ip_recv_error(sk, msg, len);

try_again:
	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		goto out;

	ulen = skb->len - sizeof(struct udphdr);
	copied = len;
	if (copied > ulen)
		copied = ulen;
	else if (copied < ulen)
		msg->msg_flags |= MSG_TRUNC;

	/*
	 * If checksum is needed at all, try to do it while copying the
	 * data.  If the data is truncated, or if we only want a partial
	 * coverage checksum (UDP-Lite), do it before the copy.
	 */

	if (copied < ulen || UDP_SKB_CB(skb)->partial_cov) {
		if (udp_lib_checksum_complete(skb))
			goto csum_copy_err;
	}

	if (skb_csum_unnecessary(skb))
		err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr),
					      msg->msg_iov, copied);
	else {
		err = skb_copy_and_csum_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov);

		if (err == -EINVAL)
			goto csum_copy_err;
	}

	if (err)
		goto out_free;

	sock_recv_timestamp(msg, sk, skb);

	/* Copy the address. */
	if (sin) {
		sin->sin_family = AF_INET;
		sin->sin_port = udp_hdr(skb)->source;
		sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
		memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
	}
	if (inet->cmsg_flags)
		ip_cmsg_recv(msg, skb);

	err = copied;
	if (flags & MSG_TRUNC)
		err = ulen;

out_free:
	skb_free_datagram(sk, skb);
out:
	return err;

csum_copy_err:
	UDP_INC_STATS_BH(UDP_MIB_INERRORS, is_udplite);

	skb_kill_datagram(sk, skb, flags);

	if (noblock)
		return -EAGAIN;
	goto try_again;
}
int udp_disconnect(struct sock *sk, int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	/*
	 *	1003.1g - break association.
	 */

	sk->sk_state = TCP_CLOSE;
	inet->daddr = 0;
	inet->dport = 0;
	sk->sk_bound_dev_if = 0;
	if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
		inet_reset_saddr(sk);

	if (!(sk->sk_userlocks & SOCK_BINDPORT_LOCK)) {
		sk->sk_prot->unhash(sk);
		inet->sport = 0;
	}
	sk_dst_reset(sk);
	return 0;
}
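/*
 * Note: udp_disconnect() is reached from connect() with an AF_UNSPEC
 * address, the POSIX way of dissolving a datagram association (hence the
 * 1003.1g reference above).
 */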
/* return:
 *	1  if the UDP system should process it
 *	0  if we should drop this packet
 *	-1 if it should get processed by xfrm4_rcv_encap
 */
static int udp_encap_rcv(struct sock * sk, struct sk_buff *skb)
{
#ifndef CONFIG_XFRM
	return 1;
#else
	struct udp_sock *up = udp_sk(sk);
	struct udphdr *uh;
	struct iphdr *iph;
	int iphlen, len;
	__u8 *udpdata;
	__be32 *udpdata32;
	__u16 encap_type = up->encap_type;

	/* if we're overly short, let UDP handle it */
	len = skb->len - sizeof(struct udphdr);
	if (len <= 0)
		return 1;

	/* if this is not an encapsulation socket, then just return now */
	if (!encap_type)
		return 1;

	/* If this is a paged skb, make sure we pull up
	 * whatever data we need to look at. */
	if (!pskb_may_pull(skb, sizeof(struct udphdr) + min(len, 8)))
		return 1;

	/* Now we can get the pointers */
	uh = udp_hdr(skb);
	udpdata = (__u8 *)uh + sizeof(struct udphdr);
	udpdata32 = (__be32 *)udpdata;

	switch (encap_type) {
	default:
	case UDP_ENCAP_ESPINUDP:
		/* Check if this is a keepalive packet.  If so, eat it. */
		if (len == 1 && udpdata[0] == 0xff) {
			return 0;
		} else if (len > sizeof(struct ip_esp_hdr) && udpdata32[0] != 0) {
			/* ESP Packet without Non-ESP header */
			len = sizeof(struct udphdr);
		} else
			/* Must be an IKE packet.. pass it through */
			return 1;
		break;
	case UDP_ENCAP_ESPINUDP_NON_IKE:
		/* Check if this is a keepalive packet.  If so, eat it. */
		if (len == 1 && udpdata[0] == 0xff) {
			return 0;
		} else if (len > 2 * sizeof(u32) + sizeof(struct ip_esp_hdr) &&
			   udpdata32[0] == 0 && udpdata32[1] == 0) {
			/* ESP Packet with Non-IKE marker */
			len = sizeof(struct udphdr) + 2 * sizeof(u32);
		} else
			/* Must be an IKE packet.. pass it through */
			return 1;
		break;
	}

	/* At this point we are sure that this is an ESPinUDP packet,
	 * so we need to remove 'len' bytes from the packet (the UDP
	 * header and optional ESP marker bytes) and then modify the
	 * protocol to ESP, and then call into the transform receiver.
	 */
	if (skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
		return 0;

	/* Now we can update and verify the packet length... */
	iph = ip_hdr(skb);
	iphlen = iph->ihl << 2;
	iph->tot_len = htons(ntohs(iph->tot_len) - len);
	if (skb->len < iphlen + len) {
		/* packet is too small!?! */
		return 0;
	}

	/* pull the data buffer up to the ESP header and set the
	 * transport header to point to ESP.  Keep UDP on the stack
	 * for later.
	 */
	__skb_pull(skb, len);
	skb_reset_transport_header(skb);

	/* modify the protocol (it's ESP!) */
	iph->protocol = IPPROTO_ESP;

	/* and let the caller know to send this into the ESP processor... */
	return -1;
#endif
}
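/*
 * Wire layouts distinguished above (illustrative):
 *
 *   UDP_ENCAP_ESPINUDP:          | IP | UDP | ESP hdr ... |
 *   UDP_ENCAP_ESPINUDP_NON_IKE:  | IP | UDP | eight zero bytes | ESP hdr ... |
 *   NAT keepalive (eaten here):  | IP | UDP | 0xff |
 *
 * IKE traffic shares the same port but starts with a zero SPI field, so
 * it is passed up to the socket unchanged.
 */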
/* returns:
 *	-1: error
 *	 0: success
 *	>0: "udp encap" protocol resubmission
 *
 * Note that in the success and error cases, the skb is assumed to
 * have either been requeued or freed.
 */
int udp_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
{
	struct udp_sock *up = udp_sk(sk);
	int rc;

	/*
	 *	Charge it to the socket, dropping if the queue is full.
	 */
	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
		goto drop;
	nf_reset(skb);

	if (up->encap_type) {
		/*
		 * This is an encapsulation socket, so let's see if this is
		 * an encapsulated packet.
		 * If it's a keepalive packet, then just eat it.
		 * If it's an encapsulated packet, then pass it to the
		 * IPsec xfrm input and return the response
		 * appropriately.  Otherwise, just fall through and
		 * pass this up the UDP socket.
		 */
		int ret;

		ret = udp_encap_rcv(sk, skb);
		if (ret == 0) {
			/* Eat the packet .. */
			kfree_skb(skb);
			return 0;
		}
		if (ret < 0) {
			/* process the ESP packet */
			ret = xfrm4_rcv_encap(skb, up->encap_type);
			UDP_INC_STATS_BH(UDP_MIB_INDATAGRAMS, up->pcflag);
			return -ret;
		}
		/* FALLTHROUGH -- it's a UDP Packet */
	}

	/*
	 *	UDP-Lite specific tests, ignored on UDP sockets
	 */
	if ((up->pcflag & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) {

		/*
		 * MIB statistics other than incrementing the error count are
		 * disabled for the following two types of errors: these depend
		 * on the application settings, not on the functioning of the
		 * protocol stack as such.
		 *
		 * RFC 3828 here recommends (sec 3.3): "There should also be a
		 * way ... to ... at least let the receiving application block
		 * delivery of packets with coverage values less than a value
		 * provided by the application."
		 */
		if (up->pcrlen == 0) {		/* full coverage was set */
			LIMIT_NETDEBUG(KERN_WARNING "UDPLITE: partial coverage "
				"%d while full coverage %d requested\n",
				UDP_SKB_CB(skb)->cscov, skb->len);
			goto drop;
		}
		/* The next case involves violating the min. coverage requested
		 * by the receiver. This is subtle: if receiver wants x and x is
		 * greater than the buffersize/MTU then receiver will complain
		 * that it wants x while sender emits packets of smaller size y.
		 * Therefore the above ...()->partial_cov statement is essential.
		 */
		if (UDP_SKB_CB(skb)->cscov < up->pcrlen) {
			LIMIT_NETDEBUG(KERN_WARNING
				"UDPLITE: coverage %d too small, need min %d\n",
				UDP_SKB_CB(skb)->cscov, up->pcrlen);
			goto drop;
		}
	}

	if (sk->sk_filter) {
		if (udp_lib_checksum_complete(skb))
			goto drop;
	}

	if ((rc = sock_queue_rcv_skb(sk, skb)) < 0) {
		/* Note that an ENOMEM error is charged twice */
		if (rc == -ENOMEM)
			UDP_INC_STATS_BH(UDP_MIB_RCVBUFERRORS, up->pcflag);
		goto drop;
	}

	UDP_INC_STATS_BH(UDP_MIB_INDATAGRAMS, up->pcflag);
	return 0;

drop:
	UDP_INC_STATS_BH(UDP_MIB_INERRORS, up->pcflag);
	kfree_skb(skb);
	return -1;
}
/*
 *	Multicasts and broadcasts go to each listener.
 *
 *	Note: called only from the BH handler context,
 *	so we don't need to lock the hashes.
 */
static int __udp4_lib_mcast_deliver(struct sk_buff *skb, struct udphdr *uh,
				    __be32 saddr, __be32 daddr,
				    struct hlist_head udptable[])
{
	struct sock *sk, *skw, *sknext;
	int dif;
	unsigned int hash = hash_port_and_addr(ntohs(uh->dest), daddr);
	unsigned int hashwild = hash_port_and_addr(ntohs(uh->dest), 0);

	dif = skb->dev->ifindex;

	read_lock(&udp_hash_lock);

	sk = sk_head(&udptable[hash & (UDP_HTABLE_SIZE - 1)]);
	skw = sk_head(&udptable[hashwild & (UDP_HTABLE_SIZE - 1)]);

	sk = udp_v4_mcast_next(sk, hash, uh->dest, daddr, uh->source, saddr, dif);
	if (!sk) {
		hash = hashwild;
		sk = udp_v4_mcast_next(skw, hash, uh->dest, daddr, uh->source,
				       saddr, dif);
	}
	if (sk) {
		do {
			struct sk_buff *skb1 = skb;
			sknext = udp_v4_mcast_next(sk_next(sk), hash, uh->dest,
						   daddr, uh->source, saddr, dif);
			if (!sknext && hash != hashwild) {
				hash = hashwild;
				sknext = udp_v4_mcast_next(skw, hash, uh->dest,
						daddr, uh->source, saddr, dif);
			}
			if (sknext)
				skb1 = skb_clone(skb, GFP_ATOMIC);

			if (skb1) {
				int ret = udp_queue_rcv_skb(sk, skb1);
				if (ret > 0)
					/*
					 * we should probably re-process
					 * instead of dropping packets here.
					 */
					kfree_skb(skb1);
			}
			sk = sknext;
		} while (sk);
	} else
		kfree_skb(skb);

	read_unlock(&udp_hash_lock);
	return 0;
}
/* Initialize UDP checksum. If exited with zero value (success),
 * CHECKSUM_UNNECESSARY means, that no more checks are required.
 * Otherwise, csum completion requires checksumming packet body,
 * including udp header and folding it to skb->csum.
 */
static inline int udp4_csum_init(struct sk_buff *skb, struct udphdr *uh,
				 int proto)
{
	const struct iphdr *iph;
	int err;

	UDP_SKB_CB(skb)->partial_cov = 0;
	UDP_SKB_CB(skb)->cscov = skb->len;

	if (proto == IPPROTO_UDPLITE) {
		err = udplite_checksum_init(skb, uh);
		if (err)
			return err;
	}

	iph = ip_hdr(skb);
	if (uh->check == 0) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else if (skb->ip_summed == CHECKSUM_COMPLETE) {
		if (!csum_tcpudp_magic(iph->saddr, iph->daddr, skb->len,
				       proto, skb->csum))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}
	if (!skb_csum_unnecessary(skb))
		skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
					       skb->len, proto, 0);
	/* Probably, we should checksum udp header (it should be in cache
	 * in any case) and data in tiny packets (< rx copybreak).
	 */

	return 0;
}
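/*
 * The CHECKSUM_COMPLETE test above works because csum_tcpudp_magic() folds
 * the pseudo-header into the device-computed sum: a valid packet yields
 * zero.  In the remaining cases only the pseudo-header sum is seeded here;
 * the payload is checksummed later, and then only if the datagram is
 * actually delivered to a socket.
 */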
/*
 *	All we need to do is get the socket, and then do a checksum.
 */

int __udp4_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[],
		   int proto)
{
	struct sock *sk;
	struct udphdr *uh = udp_hdr(skb);
	unsigned short ulen;
	struct rtable *rt = (struct rtable*)skb->dst;
	__be32 saddr = ip_hdr(skb)->saddr;
	__be32 daddr = ip_hdr(skb)->daddr;

	/*
	 *	Validate the packet.
	 */
	if (!pskb_may_pull(skb, sizeof(struct udphdr)))
		goto drop;		/* No space for header. */

	ulen = ntohs(uh->len);
	if (ulen > skb->len)
		goto short_packet;

	if (proto == IPPROTO_UDP) {
		/* UDP validates ulen. */
		if (ulen < sizeof(*uh) || pskb_trim_rcsum(skb, ulen))
			goto short_packet;
		uh = udp_hdr(skb);
	}

	if (udp4_csum_init(skb, uh, proto))
		goto csum_error;

	if (rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST))
		return __udp4_lib_mcast_deliver(skb, uh, saddr, daddr, udptable);

	sk = __udp4_lib_lookup(saddr, uh->source, daddr, uh->dest,
			       skb->dev->ifindex, udptable);

	if (sk != NULL) {
		int ret = udp_queue_rcv_skb(sk, skb);
		sock_put(sk);

		/* a return value > 0 means to resubmit the input, but
		 * it wants the return to be -protocol, or 0
		 */
		if (ret > 0)
			return -ret;
		return 0;
	}

	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto drop;
	nf_reset(skb);

	/* No socket. Drop packet silently, if checksum is wrong */
	if (udp_lib_checksum_complete(skb))
		goto csum_error;

	UDP_INC_STATS_BH(UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);

	/*
	 * Hmm.  We got a UDP packet to a port to which we
	 * don't wanna listen.  Ignore it.
	 */
	kfree_skb(skb);
	return 0;

short_packet:
	LIMIT_NETDEBUG(KERN_DEBUG "UDP%s: short packet: From %u.%u.%u.%u:%u %d/%d to %u.%u.%u.%u:%u\n",
		       proto == IPPROTO_UDPLITE ? "-Lite" : "",
		       NIPQUAD(saddr),
		       ntohs(uh->source),
		       ulen,
		       skb->len,
		       NIPQUAD(daddr),
		       ntohs(uh->dest));
	goto drop;

csum_error:
	/*
	 * RFC1122: OK.  Discards the bad packet silently (as far as
	 * the network is concerned, anyway) as per 4.1.3.4 (MUST).
	 */
	LIMIT_NETDEBUG(KERN_DEBUG "UDP%s: bad checksum. From %d.%d.%d.%d:%d to %d.%d.%d.%d:%d ulen %d\n",
		       proto == IPPROTO_UDPLITE ? "-Lite" : "",
		       NIPQUAD(saddr),
		       ntohs(uh->source),
		       NIPQUAD(daddr),
		       ntohs(uh->dest),
		       ulen);
drop:
	UDP_INC_STATS_BH(UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
	kfree_skb(skb);
	return 0;
}

int udp_rcv(struct sk_buff *skb)
{
	return __udp4_lib_rcv(skb, udp_hash, IPPROTO_UDP);
}

int udp_destroy_sock(struct sock *sk)
{
	lock_sock(sk);
	udp_flush_pending_frames(sk);
	release_sock(sk);
	return 0;
}
/*
 *	Socket option code for UDP
 */
int udp_lib_setsockopt(struct sock *sk, int level, int optname,
		       char __user *optval, int optlen,
		       int (*push_pending_frames)(struct sock *))
{
	struct udp_sock *up = udp_sk(sk);
	int val;
	int err = 0;

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	switch (optname) {
	case UDP_CORK:
		if (val != 0) {
			up->corkflag = 1;
		} else {
			up->corkflag = 0;
			lock_sock(sk);
			(*push_pending_frames)(sk);
			release_sock(sk);
		}
		break;

	case UDP_ENCAP:
		switch (val) {
		case 0:
		case UDP_ENCAP_ESPINUDP:
		case UDP_ENCAP_ESPINUDP_NON_IKE:
			up->encap_type = val;
			break;
		default:
			err = -ENOPROTOOPT;
			break;
		}
		break;

	/*
	 *	UDP-Lite's partial checksum coverage (RFC 3828).
	 */
	/* The sender sets actual checksum coverage length via this option.
	 * The case coverage > packet length is handled by send module. */
	case UDPLITE_SEND_CSCOV:
		if (!up->pcflag)	 /* Disable the option on UDP sockets */
			return -ENOPROTOOPT;
		if (val != 0 && val < 8) /* Illegal coverage: use default (8) */
			val = 8;
		up->pcslen = val;
		up->pcflag |= UDPLITE_SEND_CC;
		break;

	/* The receiver specifies a minimum checksum coverage value. To make
	 * sense, this should be set to at least 8 (as done below). If zero is
	 * used, this again means full checksum coverage. */
	case UDPLITE_RECV_CSCOV:
		if (!up->pcflag)	 /* Disable the option on UDP sockets */
			return -ENOPROTOOPT;
		if (val != 0 && val < 8) /* Avoid silly minimal values. */
			val = 8;
		up->pcrlen = val;
		up->pcflag |= UDPLITE_RECV_CC;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	return err;
}
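/*
 * Userspace sketch for the UDP-Lite coverage options above (illustrative):
 *
 *	int cov = 20;	(checksum the first 20 octets, header included)
 *	setsockopt(fd, SOL_UDPLITE, UDPLITE_SEND_CSCOV, &cov, sizeof(cov));
 *	setsockopt(fd, SOL_UDPLITE, UDPLITE_RECV_CSCOV, &cov, sizeof(cov));
 *
 * Values 1..7 are rounded up to the minimum legal coverage of 8.
 */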
int udp_setsockopt(struct sock *sk, int level, int optname,
		   char __user *optval, int optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE)
		return udp_lib_setsockopt(sk, level, optname, optval, optlen,
					  udp_push_pending_frames);
	return ip_setsockopt(sk, level, optname, optval, optlen);
}

#ifdef CONFIG_COMPAT
int compat_udp_setsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, int optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE)
		return udp_lib_setsockopt(sk, level, optname, optval, optlen,
					  udp_push_pending_frames);
	return compat_ip_setsockopt(sk, level, optname, optval, optlen);
}
#endif

int udp_lib_getsockopt(struct sock *sk, int level, int optname,
		       char __user *optval, int __user *optlen)
{
	struct udp_sock *up = udp_sk(sk);
	int val, len;

	if (get_user(len, optlen))
		return -EFAULT;

	len = min_t(unsigned int, len, sizeof(int));

	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case UDP_CORK:
		val = up->corkflag;
		break;

	case UDP_ENCAP:
		val = up->encap_type;
		break;

	/* The following two cannot be changed on UDP sockets, the return is
	 * always 0 (which corresponds to the full checksum coverage of UDP). */
	case UDPLITE_SEND_CSCOV:
		val = up->pcslen;
		break;

	case UDPLITE_RECV_CSCOV:
		val = up->pcrlen;
		break;

	default:
		return -ENOPROTOOPT;
	}

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;
	return 0;
}

int udp_getsockopt(struct sock *sk, int level, int optname,
		   char __user *optval, int __user *optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE)
		return udp_lib_getsockopt(sk, level, optname, optval, optlen);
	return ip_getsockopt(sk, level, optname, optval, optlen);
}

#ifdef CONFIG_COMPAT
int compat_udp_getsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE)
		return udp_lib_getsockopt(sk, level, optname, optval, optlen);
	return compat_ip_getsockopt(sk, level, optname, optval, optlen);
}
#endif
/**
 *	udp_poll - wait for a UDP event.
 *	@file - file struct
 *	@sock - socket
 *	@wait - poll table
 *
 *	This is the same as datagram poll, except for the special case of
 *	blocking sockets. If application is using a blocking fd
 *	and a packet with checksum error is in the queue;
 *	then it could get return from select indicating data available
 *	but then block when reading it. Add special case code
 *	to work around these arguably broken applications.
 */
unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait)
{
	unsigned int mask = datagram_poll(file, sock, wait);
	struct sock *sk = sock->sk;
	int is_lite = IS_UDPLITE(sk);

	/* Check for false positives due to checksum errors */
	if ((mask & POLLRDNORM) &&
	    !(file->f_flags & O_NONBLOCK) &&
	    !(sk->sk_shutdown & RCV_SHUTDOWN)) {
		struct sk_buff_head *rcvq = &sk->sk_receive_queue;
		struct sk_buff *skb;

		spin_lock_bh(&rcvq->lock);
		while ((skb = skb_peek(rcvq)) != NULL &&
		       udp_lib_checksum_complete(skb)) {
			UDP_INC_STATS_BH(UDP_MIB_INERRORS, is_lite);
			__skb_unlink(skb, rcvq);
			kfree_skb(skb);
		}
		spin_unlock_bh(&rcvq->lock);

		/* nothing to see, move along */
		if (skb == NULL)
			mask &= ~(POLLIN | POLLRDNORM);
	}

	return mask;
}
struct proto udp_prot = {
	.name		   = "UDP",
	.owner		   = THIS_MODULE,
	.close		   = udp_lib_close,
	.connect	   = ip4_datagram_connect,
	.disconnect	   = udp_disconnect,
	.ioctl		   = udp_ioctl,
	.destroy	   = udp_destroy_sock,
	.setsockopt	   = udp_setsockopt,
	.getsockopt	   = udp_getsockopt,
	.sendmsg	   = udp_sendmsg,
	.recvmsg	   = udp_recvmsg,
	.sendpage	   = udp_sendpage,
	.backlog_rcv	   = udp_queue_rcv_skb,
	.hash		   = udp_lib_hash,
	.unhash		   = udp_lib_unhash,
	.get_port	   = udp_v4_get_port,
	.obj_size	   = sizeof(struct udp_sock),
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_udp_setsockopt,
	.compat_getsockopt = compat_udp_getsockopt,
#endif
};
/* ------------------------------------------------------------------------ */
#ifdef CONFIG_PROC_FS

static struct sock *udp_get_first(struct seq_file *seq)
{
	struct sock *sk;
	struct udp_iter_state *state = seq->private;

	for (state->bucket = 0; state->bucket < UDP_HTABLE_SIZE; ++state->bucket) {
		struct hlist_node *node;
		sk_for_each(sk, node, state->hashtable + state->bucket) {
			if (sk->sk_family == state->family)
				goto found;
		}
	}
	sk = NULL;
found:
	return sk;
}

static struct sock *udp_get_next(struct seq_file *seq, struct sock *sk)
{
	struct udp_iter_state *state = seq->private;

	do {
		sk = sk_next(sk);
try_again:
		;
	} while (sk && sk->sk_family != state->family);

	if (!sk && ++state->bucket < UDP_HTABLE_SIZE) {
		sk = sk_head(state->hashtable + state->bucket);
		goto try_again;
	}
	return sk;
}

static struct sock *udp_get_idx(struct seq_file *seq, loff_t pos)
{
	struct sock *sk = udp_get_first(seq);

	if (sk)
		while (pos && (sk = udp_get_next(seq, sk)) != NULL)
			--pos;
	return pos ? NULL : sk;
}

static void *udp_seq_start(struct seq_file *seq, loff_t *pos)
{
	read_lock(&udp_hash_lock);
	return *pos ? udp_get_idx(seq, *pos-1) : (void *)1;
}

static void *udp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct sock *sk;

	if (v == (void *)1)
		sk = udp_get_idx(seq, 0);
	else
		sk = udp_get_next(seq, v);

	++*pos;
	return sk;
}

static void udp_seq_stop(struct seq_file *seq, void *v)
{
	read_unlock(&udp_hash_lock);
}

static int udp_seq_open(struct inode *inode, struct file *file)
{
	struct udp_seq_afinfo *afinfo = PDE(inode)->data;
	struct seq_file *seq;
	int rc = -ENOMEM;
	struct udp_iter_state *s = kzalloc(sizeof(*s), GFP_KERNEL);

	if (!s)
		goto out;
	s->family	 = afinfo->family;
	s->hashtable	 = afinfo->hashtable;
	s->seq_ops.start = udp_seq_start;
	s->seq_ops.next	 = udp_seq_next;
	s->seq_ops.show	 = afinfo->seq_show;
	s->seq_ops.stop	 = udp_seq_stop;

	rc = seq_open(file, &s->seq_ops);
	if (rc)
		goto out_kfree;

	seq = file->private_data;
	seq->private = s;
out:
	return rc;
out_kfree:
	kfree(s);
	goto out;
}
/* ------------------------------------------------------------------------ */
int udp_proc_register(struct udp_seq_afinfo *afinfo)
{
	struct proc_dir_entry *p;
	int rc = 0;

	if (!afinfo)
		return -EINVAL;
	afinfo->seq_fops->owner		= afinfo->owner;
	afinfo->seq_fops->open		= udp_seq_open;
	afinfo->seq_fops->read		= seq_read;
	afinfo->seq_fops->llseek	= seq_lseek;
	afinfo->seq_fops->release	= seq_release_private;

	p = proc_net_fops_create(afinfo->name, S_IRUGO, afinfo->seq_fops);
	if (p)
		p->data = afinfo;
	else
		rc = -ENOMEM;
	return rc;
}

void udp_proc_unregister(struct udp_seq_afinfo *afinfo)
{
	if (!afinfo)
		return;
	proc_net_remove(afinfo->name);
	memset(afinfo->seq_fops, 0, sizeof(*afinfo->seq_fops));
}
/* ------------------------------------------------------------------------ */
static void udp4_format_sock(struct sock *sp, char *tmpbuf, int bucket)
{
	struct inet_sock *inet = inet_sk(sp);
	__be32 dest = inet->daddr;
	__be32 src = inet->rcv_saddr;
	__u16 destp = ntohs(inet->dport);
	__u16 srcp = ntohs(inet->sport);

	sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p",
		bucket, src, srcp, dest, destp, sp->sk_state,
		atomic_read(&sp->sk_wmem_alloc),
		atomic_read(&sp->sk_rmem_alloc),
		0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
		atomic_read(&sp->sk_refcnt), sp);
}

int udp4_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_printf(seq, "%-127s\n",
			   "  sl  local_address rem_address   st tx_queue "
			   "rx_queue tr tm->when retrnsmt   uid  timeout "
			   "inode");
	else {
		char tmpbuf[129];
		struct udp_iter_state *state = seq->private;

		udp4_format_sock(v, tmpbuf, state->bucket);
		seq_printf(seq, "%-127s\n", tmpbuf);
	}
	return 0;
}
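/*
 * Sample /proc/net/udp line produced by the format above (illustrative):
 *
 *   sl  local_address rem_address   st tx_queue rx_queue ...   uid ... inode
 *    0: 00000000:0044 00000000:0000 07 00000000:00000000 ...     0 ...  1234
 *
 * Addresses and ports are printed in hex; "st" reuses the TCP state enum,
 * so 07 (TCP_CLOSE) is the normal state for an unconnected UDP socket.
 */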
/* ------------------------------------------------------------------------ */
static struct file_operations udp4_seq_fops;
static struct udp_seq_afinfo udp4_seq_afinfo = {
	.owner		= THIS_MODULE,
	.name		= "udp",
	.family		= AF_INET,
	.hashtable	= udp_hash,
	.seq_show	= udp4_seq_show,
	.seq_fops	= &udp4_seq_fops,
};

int __init udp4_proc_init(void)
{
	return udp_proc_register(&udp4_seq_afinfo);
}

void udp4_proc_exit(void)
{
	udp_proc_unregister(&udp4_seq_afinfo);
}
#endif /* CONFIG_PROC_FS */
EXPORT_SYMBOL(udp_disconnect);
EXPORT_SYMBOL(udp_hash);
EXPORT_SYMBOL(udp_hash_lock);
EXPORT_SYMBOL(udp_ioctl);
EXPORT_SYMBOL(udp_get_port);
EXPORT_SYMBOL(udp_prot);
EXPORT_SYMBOL(udp_sendmsg);
EXPORT_SYMBOL(udp_lib_getsockopt);
EXPORT_SYMBOL(udp_lib_setsockopt);
EXPORT_SYMBOL(udp_poll);

#ifdef CONFIG_PROC_FS
EXPORT_SYMBOL(udp_proc_register);
EXPORT_SYMBOL(udp_proc_unregister);
#endif