2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
6 * The User Datagram Protocol (UDP).
8 * Version: $Id: udp.c,v 1.102 2002/02/01 22:01:04 davem Exp $
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Arnt Gulbrandsen, <agulbra@nvg.unit.no>
13 * Alan Cox, <Alan.Cox@linux.org>
14 * Hirokazu Takahashi, <taka@valinux.co.jp>
17 * Alan Cox : verify_area() calls
18 * Alan Cox : stopped close while in use off icmp
19 * messages. Not a fix but a botch that
20 * for udp at least is 'valid'.
21 * Alan Cox : Fixed icmp handling properly
22 * Alan Cox : Correct error for oversized datagrams
23 * Alan Cox : Tidied select() semantics.
24 * Alan Cox : udp_err() fixed properly, also now
25 * select and read wake correctly on errors
26 * Alan Cox : udp_send verify_area moved to avoid mem leak
27 * Alan Cox : UDP can count its memory
28 * Alan Cox : send to an unknown connection causes
29 * an ECONNREFUSED off the icmp, but does NOT close.
31 * Alan Cox : Switched to new sk_buff handlers. No more backlog!
32 * Alan Cox : Using generic datagram code. Even smaller and the PEEK
33 * bug no longer crashes it.
34 * Fred Van Kempen : Net2e support for sk->broadcast.
35 * Alan Cox : Uses skb_free_datagram
36 * Alan Cox : Added get/set sockopt support.
37 * Alan Cox : Broadcasting without option set returns EACCES.
38 * Alan Cox : No wakeup calls. Instead we now use the callbacks.
39 * Alan Cox : Use ip_tos and ip_ttl
40 * Alan Cox : SNMP Mibs
41 * Alan Cox : MSG_DONTROUTE, and 0.0.0.0 support.
42 * Matt Dillon : UDP length checks.
43 * Alan Cox : Smarter af_inet used properly.
44 * Alan Cox : Use new kernel side addressing.
45 * Alan Cox : Incorrect return on truncated datagram receive.
46 * Arnt Gulbrandsen : New udp_send and stuff
47 * Alan Cox : Cache last socket
48 * Alan Cox : Route cache
49 * Jon Peatfield : Minor efficiency fix to sendto().
50 * Mike Shaver : RFC1122 checks.
51 * Alan Cox : Nonblocking error fix.
52 * Willy Konynenberg : Transparent proxying support.
53 * Mike McLagan : Routing by source
54 * David S. Miller : New socket lookup architecture.
55 * Last socket cache retained as it
56 * does have a high hit rate.
57 * Olaf Kirch : Don't linearise iovec on sendmsg.
58 * Andi Kleen : Some cleanups, cache destination entry
60 * Vitaly E. Lavrov : Transparent proxy revived after year coma.
61 * Melvin Smith : Check msg_name not msg_namelen in sendto(),
62 * return ENOTCONN for unconnected sockets (POSIX)
63 * Janos Farkas : don't deliver multi/broadcasts to a different
64 * bound-to-device socket
65 * Hirokazu Takahashi : HW checksumming for outgoing UDP
67 * Hirokazu Takahashi : sendfile() on UDP works now.
68 * Arnaldo C. Melo : convert /proc/net/udp to seq_file
69 * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which
70 * Alexey Kuznetsov: allow both IPv4 and IPv6 sockets to bind
71 * a single port at the same time.
72 * Derek Atkins <derek@ihtfp.com>: Add Encapsulation Support
73 * James Chapman : Add L2TP encapsulation type.
76 * This program is free software; you can redistribute it and/or
77 * modify it under the terms of the GNU General Public License
78 * as published by the Free Software Foundation; either version
79 * 2 of the License, or (at your option) any later version.
82 #include <asm/system.h>
83 #include <asm/uaccess.h>
84 #include <asm/ioctls.h>
85 #include <linux/types.h>
86 #include <linux/fcntl.h>
87 #include <linux/module.h>
88 #include <linux/socket.h>
89 #include <linux/sockios.h>
90 #include <linux/igmp.h>
92 #include <linux/errno.h>
93 #include <linux/timer.h>
95 #include <linux/inet.h>
96 #include <linux/netdevice.h>
97 #include <net/tcp_states.h>
98 #include <linux/skbuff.h>
99 #include <linux/proc_fs.h>
100 #include <linux/seq_file.h>
101 #include <net/icmp.h>
102 #include <net/route.h>
103 #include <net/checksum.h>
104 #include <net/xfrm.h>
105 #include "udp_impl.h"
108 * Snmp MIB for the UDP layer
111 DEFINE_SNMP_STAT(struct udp_mib, udp_statistics) __read_mostly;
113 struct hlist_head udp_hash[UDP_HTABLE_SIZE];
114 DEFINE_RWLOCK(udp_hash_lock);
116 static int udp_port_rover;
118 static inline int __udp_lib_lport_inuse(__u16 num, struct hlist_head udptable[])
121 struct hlist_node *node;
123 sk_for_each(sk, node, &udptable[num & (UDP_HTABLE_SIZE - 1)])
124 if (sk->sk_hash == num)
130 * __udp_lib_get_port - UDP/-Lite port lookup for IPv4 and IPv6
132 * @sk: socket struct in question
133 * @snum: port number to look up
134 * @udptable: hash list table, must be of UDP_HTABLE_SIZE
135 * @port_rover: pointer to record of last unallocated port
136 * @saddr_comp: AF-dependent comparison of bound local IP addresses
138 int __udp_lib_get_port(struct sock *sk, unsigned short snum,
139 struct hlist_head udptable[], int *port_rover,
140 int (*saddr_comp)(const struct sock *sk1,
141 const struct sock *sk2 ) )
143 struct hlist_node *node;
144 struct hlist_head *head;
148 write_lock_bh(&udp_hash_lock);
150 int best_size_so_far, best, result, i;
152 if (*port_rover > sysctl_local_port_range[1] ||
153 *port_rover < sysctl_local_port_range[0])
154 *port_rover = sysctl_local_port_range[0];
155 best_size_so_far = 32767;
156 best = result = *port_rover;
157 for (i = 0; i < UDP_HTABLE_SIZE; i++, result++) {
160 head = &udptable[result & (UDP_HTABLE_SIZE - 1)];
161 if (hlist_empty(head)) {
162 if (result > sysctl_local_port_range[1])
163 result = sysctl_local_port_range[0] +
164 ((result - sysctl_local_port_range[0]) &
165 (UDP_HTABLE_SIZE - 1));
169 sk_for_each(sk2, node, head) {
170 if (++size >= best_size_so_far)
173 best_size_so_far = size;
179 for (i = 0; i < (1 << 16) / UDP_HTABLE_SIZE;
180 i++, result += UDP_HTABLE_SIZE) {
181 if (result > sysctl_local_port_range[1])
182 result = sysctl_local_port_range[0]
183 + ((result - sysctl_local_port_range[0]) &
184 (UDP_HTABLE_SIZE - 1));
185 if (! __udp_lib_lport_inuse(result, udptable))
188 if (i >= (1 << 16) / UDP_HTABLE_SIZE)
191 *port_rover = snum = result;
193 head = &udptable[snum & (UDP_HTABLE_SIZE - 1)];
195 sk_for_each(sk2, node, head)
196 if (sk2->sk_hash == snum &&
198 (!sk2->sk_reuse || !sk->sk_reuse) &&
199 (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if
200 || sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
201 (*saddr_comp)(sk, sk2) )
204 inet_sk(sk)->num = snum;
206 if (sk_unhashed(sk)) {
207 head = &udptable[snum & (UDP_HTABLE_SIZE - 1)];
208 sk_add_node(sk, head);
209 sock_prot_inc_use(sk->sk_prot);
213 write_unlock_bh(&udp_hash_lock);
217 int udp_get_port(struct sock *sk, unsigned short snum,
218 int (*scmp)(const struct sock *, const struct sock *))
220 return __udp_lib_get_port(sk, snum, udp_hash, &udp_port_rover, scmp);
223 int ipv4_rcv_saddr_equal(const struct sock *sk1, const struct sock *sk2)
225 struct inet_sock *inet1 = inet_sk(sk1), *inet2 = inet_sk(sk2);
227 return ( !ipv6_only_sock(sk2) &&
228 (!inet1->rcv_saddr || !inet2->rcv_saddr ||
229 inet1->rcv_saddr == inet2->rcv_saddr ));
232 static inline int udp_v4_get_port(struct sock *sk, unsigned short snum)
234 return udp_get_port(sk, snum, ipv4_rcv_saddr_equal);
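/*
 * Illustrative userspace sketch (kept under "#if 0", never compiled here):
 * two sockets may bind the same UDP port only when both set SO_REUSEADDR or
 * when their bound local addresses differ, mirroring the sk_reuse and
 * saddr_comp checks in __udp_lib_get_port() above.  The address and port
 * below are arbitrary example values.
 */
#if 0
#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

static int bound_udp_socket(const char *ip, unsigned short port)
{
	struct sockaddr_in addr;
	int one = 1;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return -1;
	setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one));
	memset(&addr, 0, sizeof(addr));
	addr.sin_family = AF_INET;
	addr.sin_port = htons(port);
	addr.sin_addr.s_addr = inet_addr(ip);
	if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
		close(fd);
		return -1;
	}
	return fd;
}

int main(void)
{
	/* Both binds succeed: same port, SO_REUSEADDR set on both sockets. */
	int a = bound_udp_socket("127.0.0.1", 5555);
	int b = bound_udp_socket("127.0.0.1", 5555);

	printf("first bind %s, second bind %s\n",
	       a >= 0 ? "ok" : "failed", b >= 0 ? "ok" : "failed");
	if (a >= 0)
		close(a);
	if (b >= 0)
		close(b);
	return 0;
}
#endif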
237 /* UDP lookups are nearly always wildcards out the wazoo; it makes no sense
238 * to try harder than this. -DaveM
240 static struct sock *__udp4_lib_lookup(__be32 saddr, __be16 sport,
241 __be32 daddr, __be16 dport,
242 int dif, struct hlist_head udptable[])
244 struct sock *sk, *result = NULL;
245 struct hlist_node *node;
246 unsigned short hnum = ntohs(dport);
249 read_lock(&udp_hash_lock);
250 sk_for_each(sk, node, &udptable[hnum & (UDP_HTABLE_SIZE - 1)]) {
251 struct inet_sock *inet = inet_sk(sk);
253 if (sk->sk_hash == hnum && !ipv6_only_sock(sk)) {
254 int score = (sk->sk_family == PF_INET ? 1 : 0);
255 if (inet->rcv_saddr) {
256 if (inet->rcv_saddr != daddr)
261 if (inet->daddr != saddr)
266 if (inet->dport != sport)
270 if (sk->sk_bound_dev_if) {
271 if (sk->sk_bound_dev_if != dif)
278 } else if (score > badness) {
286 read_unlock(&udp_hash_lock);
290 static inline struct sock *udp_v4_mcast_next(struct sock *sk,
291 __be16 loc_port, __be32 loc_addr,
292 __be16 rmt_port, __be32 rmt_addr,
295 struct hlist_node *node;
297 unsigned short hnum = ntohs(loc_port);
299 sk_for_each_from(s, node) {
300 struct inet_sock *inet = inet_sk(s);
302 if (s->sk_hash != hnum ||
303 (inet->daddr && inet->daddr != rmt_addr) ||
304 (inet->dport != rmt_port && inet->dport) ||
305 (inet->rcv_saddr && inet->rcv_saddr != loc_addr) ||
307 (s->sk_bound_dev_if && s->sk_bound_dev_if != dif))
309 if (!ip_mc_sf_allow(s, loc_addr, rmt_addr, dif))
319 * This routine is called by the ICMP module when it gets some
320 * sort of error condition. If err < 0 then the socket should
321 * be closed and the error returned to the user. If err > 0
322 * it's just the icmp type << 8 | icmp code.
323 * Header points to the ip header of the error packet. We move
324 * on past this. Then (as it used to claim before adjustment)
325 * header points to the first 8 bytes of the udp header. We need
326 * to find the appropriate port.
329 void __udp4_lib_err(struct sk_buff *skb, u32 info, struct hlist_head udptable[])
331 struct inet_sock *inet;
332 struct iphdr *iph = (struct iphdr*)skb->data;
333 struct udphdr *uh = (struct udphdr*)(skb->data+(iph->ihl<<2));
334 const int type = icmp_hdr(skb)->type;
335 const int code = icmp_hdr(skb)->code;
340 sk = __udp4_lib_lookup(iph->daddr, uh->dest, iph->saddr, uh->source,
341 skb->dev->ifindex, udptable );
343 ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
344 return; /* No socket for error */
353 case ICMP_TIME_EXCEEDED:
356 case ICMP_SOURCE_QUENCH:
358 case ICMP_PARAMETERPROB:
362 case ICMP_DEST_UNREACH:
363 if (code == ICMP_FRAG_NEEDED) { /* Path MTU discovery */
364 if (inet->pmtudisc != IP_PMTUDISC_DONT) {
372 if (code <= NR_ICMP_UNREACH) {
373 harderr = icmp_err_convert[code].fatal;
374 err = icmp_err_convert[code].errno;
380 * RFC1122: OK. Passes ICMP errors back to application, as per 4.1.3.3.
383 if (!inet->recverr) {
384 if (!harderr || sk->sk_state != TCP_ESTABLISHED)
387 ip_icmp_error(sk, skb, err, uh->dest, info, (u8*)(uh+1));
390 sk->sk_error_report(sk);
395 void udp_err(struct sk_buff *skb, u32 info)
397 return __udp4_lib_err(skb, info, udp_hash);
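/*
 * Illustrative userspace sketch (kept under "#if 0", never compiled here):
 * once IP_RECVERR is enabled, the ICMP errors queued by ip_icmp_error()
 * above can be drained from the socket error queue with MSG_ERRQUEUE
 * instead of only surfacing through the next send/recv call.
 */
#if 0
#include <linux/errqueue.h>
#include <netinet/in.h>
#include <stdio.h>
#include <sys/socket.h>
#include <sys/uio.h>

static void drain_error_queue(int fd)	/* fd: UDP socket with IP_RECVERR on */
{
	char cbuf[512], dbuf[512];
	struct iovec iov = { .iov_base = dbuf, .iov_len = sizeof(dbuf) };
	struct msghdr msg = {
		.msg_iov = &iov,
		.msg_iovlen = 1,
		.msg_control = cbuf,
		.msg_controllen = sizeof(cbuf),
	};
	struct cmsghdr *cm;

	if (recvmsg(fd, &msg, MSG_ERRQUEUE | MSG_DONTWAIT) < 0)
		return;

	for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
		struct sock_extended_err *ee;

		if (cm->cmsg_level != IPPROTO_IP || cm->cmsg_type != IP_RECVERR)
			continue;
		ee = (struct sock_extended_err *)CMSG_DATA(cm);
		printf("queued error %u (icmp type %u, code %u)\n",
		       ee->ee_errno, ee->ee_type, ee->ee_code);
	}
}

/*
 * Enable with:
 *	int on = 1;
 *	setsockopt(fd, IPPROTO_IP, IP_RECVERR, &on, sizeof(on));
 */
#endif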
401 * Throw away all pending data and cancel the corking. Socket is locked.
403 static void udp_flush_pending_frames(struct sock *sk)
405 struct udp_sock *up = udp_sk(sk);
410 ip_flush_pending_frames(sk);
415 * udp4_hwcsum_outgoing - handle outgoing HW checksumming
416 * @sk: socket we are sending on
417 * @skb: sk_buff containing the filled-in UDP header
418 * (checksum field must be zeroed out)
420 static void udp4_hwcsum_outgoing(struct sock *sk, struct sk_buff *skb,
421 __be32 src, __be32 dst, int len )
424 struct udphdr *uh = udp_hdr(skb);
427 if (skb_queue_len(&sk->sk_write_queue) == 1) {
429 * Only one fragment on the socket.
431 skb->csum_start = skb_transport_header(skb) - skb->head;
432 skb->csum_offset = offsetof(struct udphdr, check);
433 uh->check = ~csum_tcpudp_magic(src, dst, len, IPPROTO_UDP, 0);
436 * HW-checksum won't work, as there are two or more fragments
437 * on the socket, so the csums of all sk_buffs must be added together.
440 offset = skb_transport_offset(skb);
441 skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
443 skb->ip_summed = CHECKSUM_NONE;
445 skb_queue_walk(&sk->sk_write_queue, skb) {
446 csum = csum_add(csum, skb->csum);
449 uh->check = csum_tcpudp_magic(src, dst, len, IPPROTO_UDP, csum);
451 uh->check = CSUM_MANGLED_0;
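/*
 * Minimal, unoptimized sketch of the RFC 768 checksum that
 * csum_tcpudp_magic()/csum_partial() compute above: sum the IPv4
 * pseudo-header and the UDP header plus payload as 16-bit words, fold the
 * carries and invert.  Illustration only (the checksum field in the header
 * is assumed to be zero, as the comment above requires); the kernel uses
 * the arch-optimized helpers, never this code.
 */
#if 0
#include <netinet/in.h>
#include <stddef.h>
#include <stdint.h>

/* saddr, daddr and the result are in host byte order; udp[] is raw wire data. */
static uint16_t udp_checksum(uint32_t saddr, uint32_t daddr,
			     const uint8_t *udp, size_t len)
{
	uint32_t sum = 0;
	size_t i;

	/* Pseudo-header: source, destination, zero+protocol, UDP length. */
	sum += (saddr >> 16) + (saddr & 0xffff);
	sum += (daddr >> 16) + (daddr & 0xffff);
	sum += IPPROTO_UDP;
	sum += len;

	/* UDP header and payload, padded with a zero byte if len is odd. */
	for (i = 0; i + 1 < len; i += 2)
		sum += (udp[i] << 8) | udp[i + 1];
	if (len & 1)
		sum += udp[len - 1] << 8;

	/* Fold the carries back into 16 bits. */
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);

	sum = ~sum & 0xffff;
	/* An all-zero result is transmitted as all ones (CSUM_MANGLED_0). */
	return sum ? (uint16_t)sum : 0xffff;
}
#endif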
456 * Push out all pending data as one UDP datagram. Socket is locked.
458 static int udp_push_pending_frames(struct sock *sk)
460 struct udp_sock *up = udp_sk(sk);
461 struct inet_sock *inet = inet_sk(sk);
462 struct flowi *fl = &inet->cork.fl;
468 /* Grab the skbuff where UDP header space exists. */
469 if ((skb = skb_peek(&sk->sk_write_queue)) == NULL)
473 * Create a UDP header
476 uh->source = fl->fl_ip_sport;
477 uh->dest = fl->fl_ip_dport;
478 uh->len = htons(up->len);
481 if (up->pcflag) /* UDP-Lite */
482 csum = udplite_csum_outgoing(sk, skb);
484 else if (sk->sk_no_check == UDP_CSUM_NOXMIT) { /* UDP csum disabled */
486 skb->ip_summed = CHECKSUM_NONE;
489 } else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */
491 udp4_hwcsum_outgoing(sk, skb, fl->fl4_src,fl->fl4_dst, up->len);
494 } else /* `normal' UDP */
495 csum = udp_csum_outgoing(sk, skb);
497 /* add protocol-dependent pseudo-header */
498 uh->check = csum_tcpudp_magic(fl->fl4_src, fl->fl4_dst, up->len,
499 sk->sk_protocol, csum );
501 uh->check = CSUM_MANGLED_0;
504 err = ip_push_pending_frames(sk);
509 UDP_INC_STATS_USER(UDP_MIB_OUTDATAGRAMS, up->pcflag);
513 int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
516 struct inet_sock *inet = inet_sk(sk);
517 struct udp_sock *up = udp_sk(sk);
519 struct ipcm_cookie ipc;
520 struct rtable *rt = NULL;
523 __be32 daddr, faddr, saddr;
526 int err, is_udplite = up->pcflag;
527 int corkreq = up->corkflag || msg->msg_flags&MSG_MORE;
528 int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);
537 if (msg->msg_flags&MSG_OOB) /* Mirror BSD error message compatibility */
544 * There are pending frames.
545 * The socket lock must be held while it's corked.
548 if (likely(up->pending)) {
549 if (unlikely(up->pending != AF_INET)) {
557 ulen += sizeof(struct udphdr);
560 * Get and verify the address.
563 struct sockaddr_in * usin = (struct sockaddr_in*)msg->msg_name;
564 if (msg->msg_namelen < sizeof(*usin))
566 if (usin->sin_family != AF_INET) {
567 if (usin->sin_family != AF_UNSPEC)
568 return -EAFNOSUPPORT;
571 daddr = usin->sin_addr.s_addr;
572 dport = usin->sin_port;
576 if (sk->sk_state != TCP_ESTABLISHED)
577 return -EDESTADDRREQ;
580 /* Open fast path for connected socket.
581 Route will not be used if at least one option is set.
585 ipc.addr = inet->saddr;
587 ipc.oif = sk->sk_bound_dev_if;
588 if (msg->msg_controllen) {
589 err = ip_cmsg_send(msg, &ipc);
600 ipc.addr = faddr = daddr;
602 if (ipc.opt && ipc.opt->srr) {
605 faddr = ipc.opt->faddr;
608 tos = RT_TOS(inet->tos);
609 if (sock_flag(sk, SOCK_LOCALROUTE) ||
610 (msg->msg_flags & MSG_DONTROUTE) ||
611 (ipc.opt && ipc.opt->is_strictroute)) {
616 if (MULTICAST(daddr)) {
618 ipc.oif = inet->mc_index;
620 saddr = inet->mc_addr;
625 rt = (struct rtable*)sk_dst_check(sk, 0);
628 struct flowi fl = { .oif = ipc.oif,
633 .proto = sk->sk_protocol,
635 { .sport = inet->sport,
636 .dport = dport } } };
637 security_sk_classify_flow(sk, &fl);
638 err = ip_route_output_flow(&rt, &fl, sk, 1);
640 if (err == -ENETUNREACH)
641 IP_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES);
646 if ((rt->rt_flags & RTCF_BROADCAST) &&
647 !sock_flag(sk, SOCK_BROADCAST))
650 sk_dst_set(sk, dst_clone(&rt->u.dst));
653 if (msg->msg_flags&MSG_CONFIRM)
659 daddr = ipc.addr = rt->rt_dst;
662 if (unlikely(up->pending)) {
663 /* The socket is already corked while preparing it. */
664 /* ... which is an evident application bug. --ANK */
667 LIMIT_NETDEBUG(KERN_DEBUG "udp cork app bug 2\n");
672 * Now cork the socket to pend data.
674 inet->cork.fl.fl4_dst = daddr;
675 inet->cork.fl.fl_ip_dport = dport;
676 inet->cork.fl.fl4_src = saddr;
677 inet->cork.fl.fl_ip_sport = inet->sport;
678 up->pending = AF_INET;
682 getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag;
683 err = ip_append_data(sk, getfrag, msg->msg_iov, ulen,
684 sizeof(struct udphdr), &ipc, rt,
685 corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags);
687 udp_flush_pending_frames(sk);
689 err = udp_push_pending_frames(sk);
690 else if (unlikely(skb_queue_empty(&sk->sk_write_queue)))
701 * ENOBUFS = no kernel mem, SOCK_NOSPACE = no sndbuf space. Reporting
702 * ENOBUFS might not be good (it's not tunable per se), but otherwise
703 * we don't have a good statistic (IpOutDiscards but it can be too many
704 * things). We could add another new stat but at least for now that
705 * seems like overkill.
707 if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
708 UDP_INC_STATS_USER(UDP_MIB_SNDBUFERRORS, is_udplite);
713 dst_confirm(&rt->u.dst);
714 if (!(msg->msg_flags&MSG_PROBE) || len)
715 goto back_from_confirm;
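/*
 * Illustrative userspace sketch (kept under "#if 0", never compiled here):
 * UDP_CORK (or MSG_MORE on each send) keeps up->pending set, so successive
 * writes are appended to one pending datagram that udp_push_pending_frames()
 * transmits when the cork is removed.  UDP_CORK's value is duplicated below
 * in case the libc headers do not define it.
 */
#if 0
#include <netinet/in.h>
#include <netinet/udp.h>
#include <sys/socket.h>

#ifndef UDP_CORK
#define UDP_CORK 1		/* from linux/udp.h */
#endif

static int send_in_two_pieces(int fd)	/* fd: a connected UDP socket */
{
	int on = 1, off = 0;

	if (setsockopt(fd, IPPROTO_UDP, UDP_CORK, &on, sizeof(on)) < 0)
		return -1;
	send(fd, "hello ", 6, 0);
	send(fd, "world", 5, 0);
	/* Uncorking pushes both pieces out as a single 11-byte datagram. */
	return setsockopt(fd, IPPROTO_UDP, UDP_CORK, &off, sizeof(off));
}
#endif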
720 int udp_sendpage(struct sock *sk, struct page *page, int offset,
721 size_t size, int flags)
723 struct udp_sock *up = udp_sk(sk);
727 struct msghdr msg = { .msg_flags = flags|MSG_MORE };
729 /* Call udp_sendmsg to specify the destination address, which the
730 * sendpage interface can't pass.
731 * This will succeed only when the socket is connected.
733 ret = udp_sendmsg(NULL, sk, &msg, 0);
740 if (unlikely(!up->pending)) {
743 LIMIT_NETDEBUG(KERN_DEBUG "udp cork app bug 3\n");
747 ret = ip_append_page(sk, page, offset, size, flags);
748 if (ret == -EOPNOTSUPP) {
750 return sock_no_sendpage(sk->sk_socket, page, offset,
754 udp_flush_pending_frames(sk);
759 if (!(up->corkflag || (flags&MSG_MORE)))
760 ret = udp_push_pending_frames(sk);
769 * IOCTL requests applicable to the UDP protocol
772 int udp_ioctl(struct sock *sk, int cmd, unsigned long arg)
777 int amount = atomic_read(&sk->sk_wmem_alloc);
778 return put_user(amount, (int __user *)arg);
784 unsigned long amount;
787 spin_lock_bh(&sk->sk_receive_queue.lock);
788 skb = skb_peek(&sk->sk_receive_queue);
791 * We will only return the amount of this
792 * packet since that is all that will be read.
795 amount = skb->len - sizeof(struct udphdr);
797 spin_unlock_bh(&sk->sk_receive_queue.lock);
798 return put_user(amount, (int __user *)arg);
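/*
 * Illustrative userspace sketch (kept under "#if 0", never compiled here):
 * SIOCINQ reports the payload length of the next queued datagram, as
 * computed above, and SIOCOUTQ reports the bytes still charged to
 * sk_wmem_alloc.
 */
#if 0
#include <linux/sockios.h>
#include <stdio.h>
#include <sys/ioctl.h>

static void show_queue_sizes(int fd)	/* fd: a UDP socket */
{
	int inq = 0, outq = 0;

	if (ioctl(fd, SIOCINQ, &inq) == 0)
		printf("next datagram payload: %d bytes\n", inq);
	if (ioctl(fd, SIOCOUTQ, &outq) == 0)
		printf("unsent bytes: %d\n", outq);
}
#endif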
809 * This should be easy: if there is something there, we
810 * return it; otherwise we block.
813 int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
814 size_t len, int noblock, int flags, int *addr_len)
816 struct inet_sock *inet = inet_sk(sk);
817 struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name;
819 unsigned int ulen, copied;
821 int is_udplite = IS_UDPLITE(sk);
824 * Check any passed addresses
827 *addr_len=sizeof(*sin);
829 if (flags & MSG_ERRQUEUE)
830 return ip_recv_error(sk, msg, len);
833 skb = skb_recv_datagram(sk, flags, noblock, &err);
837 ulen = skb->len - sizeof(struct udphdr);
841 else if (copied < ulen)
842 msg->msg_flags |= MSG_TRUNC;
845 * If checksum is needed at all, try to do it while copying the
846 * data. If the data is truncated, or if we only want a partial
847 * coverage checksum (UDP-Lite), do it before the copy.
850 if (copied < ulen || UDP_SKB_CB(skb)->partial_cov) {
851 if (udp_lib_checksum_complete(skb))
855 if (skb_csum_unnecessary(skb))
856 err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr),
857 msg->msg_iov, copied );
859 err = skb_copy_and_csum_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov);
868 sock_recv_timestamp(msg, sk, skb);
870 /* Copy the address. */
873 sin->sin_family = AF_INET;
874 sin->sin_port = udp_hdr(skb)->source;
875 sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
876 memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
878 if (inet->cmsg_flags)
879 ip_cmsg_recv(msg, skb);
882 if (flags & MSG_TRUNC)
886 skb_free_datagram(sk, skb);
891 UDP_INC_STATS_BH(UDP_MIB_INERRORS, is_udplite);
893 skb_kill_datagram(sk, skb, flags);
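/*
 * Illustrative userspace sketch (kept under "#if 0", never compiled here):
 * the MSG_TRUNC handling above means a peek with MSG_TRUNC returns the full
 * datagram length even when the supplied buffer is smaller, so a caller can
 * size its buffer before the real read.
 */
#if 0
#include <stdlib.h>
#include <sys/socket.h>
#include <sys/types.h>

static ssize_t recv_whole_datagram(int fd, char **out)
{
	char probe;
	ssize_t full, got;
	char *buf;

	/* Peek to learn the size of the next datagram without consuming it. */
	full = recv(fd, &probe, 1, MSG_PEEK | MSG_TRUNC);
	if (full < 0)
		return -1;
	buf = malloc(full ? full : 1);
	if (!buf)
		return -1;
	got = recv(fd, buf, full, 0);
	if (got < 0) {
		free(buf);
		return -1;
	}
	*out = buf;
	return got;
}
#endif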
901 int udp_disconnect(struct sock *sk, int flags)
903 struct inet_sock *inet = inet_sk(sk);
905 * 1003.1g - break association.
908 sk->sk_state = TCP_CLOSE;
911 sk->sk_bound_dev_if = 0;
912 if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
913 inet_reset_saddr(sk);
915 if (!(sk->sk_userlocks & SOCK_BINDPORT_LOCK)) {
916 sk->sk_prot->unhash(sk);
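/*
 * Illustrative userspace sketch (kept under "#if 0", never compiled here):
 * per 1003.1g, a connected UDP socket is returned to the unconnected state
 * by calling connect() with an AF_UNSPEC address, which lands in
 * udp_disconnect() above.
 */
#if 0
#include <string.h>
#include <sys/socket.h>

static int udp_unconnect(int fd)
{
	struct sockaddr sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_family = AF_UNSPEC;
	return connect(fd, &sa, sizeof(sa));
}
#endif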
926 * >0: "udp encap" protocol resubmission
928 * Note that in the success and error cases, the skb is assumed to
929 * have either been requeued or freed.
931 int udp_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
933 struct udp_sock *up = udp_sk(sk);
937 * Charge it to the socket, dropping if the queue is full.
939 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
943 if (up->encap_type) {
945 * This is an encapsulation socket so pass the skb to
946 * the socket's udp_encap_rcv() hook. Otherwise, just
947 * fall through and pass this up the UDP socket.
948 * up->encap_rcv() returns the following value:
949 * =0 if skb was successfully passed to the encap
950 * handler or was discarded by it.
951 * >0 if skb should be passed on to UDP.
952 * <0 if skb should be resubmitted as proto -N
955 /* if we're overly short, let UDP handle it */
956 if (skb->len > sizeof(struct udphdr) &&
957 up->encap_rcv != NULL) {
960 ret = (*up->encap_rcv)(sk, skb);
962 UDP_INC_STATS_BH(UDP_MIB_INDATAGRAMS, up->pcflag);
967 /* FALLTHROUGH -- it's a UDP Packet */
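/*
 * Minimal sketch (under "#if 0", not built) of a hypothetical encap_rcv hook
 * honouring the return-value contract described above; real users such as
 * the ESP-in-UDP and L2TP code install their handler via
 * udp_sk(sk)->encap_rcv.  The function name and the 0xfe marker byte are
 * invented for illustration.
 */
#if 0
static int example_encap_rcv(struct sock *sk, struct sk_buff *skb)
{
	__u8 *data;

	/* Need at least one byte of payload beyond the UDP header. */
	if (!pskb_may_pull(skb, sizeof(struct udphdr) + 1))
		return 1;			/* let UDP deliver it */

	data = skb->data + sizeof(struct udphdr);
	if (data[0] != 0xfe)			/* not our encapsulation */
		return 1;			/* pass on to UDP */

	/* Consume the packet: returning 0 means we freed or requeued it. */
	kfree_skb(skb);
	return 0;
}
#endif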
971 * UDP-Lite specific tests, ignored on UDP sockets
973 if ((up->pcflag & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) {
976 * MIB statistics other than incrementing the error count are
977 * disabled for the following two types of errors: these depend
978 * on the application settings, not on the functioning of the
979 * protocol stack as such.
981 * RFC 3828 here recommends (sec 3.3): "There should also be a
982 * way ... to ... at least let the receiving application block
983 * delivery of packets with coverage values less than a value
984 * provided by the application."
986 if (up->pcrlen == 0) { /* full coverage was set */
987 LIMIT_NETDEBUG(KERN_WARNING "UDPLITE: partial coverage "
988 "%d while full coverage %d requested\n",
989 UDP_SKB_CB(skb)->cscov, skb->len);
992 /* The next case involves violating the min. coverage requested
993 * by the receiver. This is subtle: if receiver wants x and x is
994 * greater than the buffersize/MTU then receiver will complain
995 * that it wants x while sender emits packets of smaller size y.
996 * Therefore the above ...()->partial_cov statement is essential.
998 if (UDP_SKB_CB(skb)->cscov < up->pcrlen) {
999 LIMIT_NETDEBUG(KERN_WARNING
1000 "UDPLITE: coverage %d too small, need min %d\n",
1001 UDP_SKB_CB(skb)->cscov, up->pcrlen);
1006 if (sk->sk_filter) {
1007 if (udp_lib_checksum_complete(skb))
1011 if ((rc = sock_queue_rcv_skb(sk,skb)) < 0) {
1012 /* Note that an ENOMEM error is charged twice */
1014 UDP_INC_STATS_BH(UDP_MIB_RCVBUFERRORS, up->pcflag);
1018 UDP_INC_STATS_BH(UDP_MIB_INDATAGRAMS, up->pcflag);
1022 UDP_INC_STATS_BH(UDP_MIB_INERRORS, up->pcflag);
1028 * Multicasts and broadcasts go to each listener.
1030 * Note: called only from the BH handler context,
1031 * so we don't need to lock the hashes.
1033 static int __udp4_lib_mcast_deliver(struct sk_buff *skb,
1035 __be32 saddr, __be32 daddr,
1036 struct hlist_head udptable[])
1041 read_lock(&udp_hash_lock);
1042 sk = sk_head(&udptable[ntohs(uh->dest) & (UDP_HTABLE_SIZE - 1)]);
1043 dif = skb->dev->ifindex;
1044 sk = udp_v4_mcast_next(sk, uh->dest, daddr, uh->source, saddr, dif);
1046 struct sock *sknext = NULL;
1049 struct sk_buff *skb1 = skb;
1051 sknext = udp_v4_mcast_next(sk_next(sk), uh->dest, daddr,
1052 uh->source, saddr, dif);
1054 skb1 = skb_clone(skb, GFP_ATOMIC);
1057 int ret = udp_queue_rcv_skb(sk, skb1);
1059 /* we should probably re-process instead
1060 * of dropping packets here. */
1067 read_unlock(&udp_hash_lock);
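/*
 * Illustrative userspace sketch (kept under "#if 0", never compiled here):
 * joining a group with IP_ADD_MEMBERSHIP is what makes the delivery loop
 * above (via ip_mc_sf_allow()) consider a socket for copies of matching
 * datagrams.  The group address is an arbitrary example value.
 */
#if 0
#include <arpa/inet.h>
#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>

static int join_group(int fd, const char *group)	/* fd: bound UDP socket */
{
	struct ip_mreq mreq;

	memset(&mreq, 0, sizeof(mreq));
	mreq.imr_multiaddr.s_addr = inet_addr(group);	/* e.g. "239.1.1.1" */
	mreq.imr_interface.s_addr = htonl(INADDR_ANY);
	return setsockopt(fd, IPPROTO_IP, IP_ADD_MEMBERSHIP,
			  &mreq, sizeof(mreq));
}
#endif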
1071 /* Initialize the UDP checksum state. If this returns zero (success) and
1072 * ip_summed is CHECKSUM_UNNECESSARY, no further checks are required.
1073 * Otherwise, checksum completion requires checksumming the packet body,
1074 * including the UDP header, and folding the result into skb->csum.
1076 static inline int udp4_csum_init(struct sk_buff *skb, struct udphdr *uh,
1079 const struct iphdr *iph;
1082 UDP_SKB_CB(skb)->partial_cov = 0;
1083 UDP_SKB_CB(skb)->cscov = skb->len;
1085 if (proto == IPPROTO_UDPLITE) {
1086 err = udplite_checksum_init(skb, uh);
1092 if (uh->check == 0) {
1093 skb->ip_summed = CHECKSUM_UNNECESSARY;
1094 } else if (skb->ip_summed == CHECKSUM_COMPLETE) {
1095 if (!csum_tcpudp_magic(iph->saddr, iph->daddr, skb->len,
1097 skb->ip_summed = CHECKSUM_UNNECESSARY;
1099 if (!skb_csum_unnecessary(skb))
1100 skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
1101 skb->len, proto, 0);
1102 /* Probably, we should checksum udp header (it should be in cache
1103 * in any case) and data in tiny packets (< rx copybreak).
1110 * All we need to do is get the socket, and then do a checksum.
1113 int __udp4_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[],
1117 struct udphdr *uh = udp_hdr(skb);
1118 unsigned short ulen;
1119 struct rtable *rt = (struct rtable*)skb->dst;
1120 __be32 saddr = ip_hdr(skb)->saddr;
1121 __be32 daddr = ip_hdr(skb)->daddr;
1124 * Validate the packet.
1126 if (!pskb_may_pull(skb, sizeof(struct udphdr)))
1127 goto drop; /* No space for header. */
1129 ulen = ntohs(uh->len);
1130 if (ulen > skb->len)
1133 if (proto == IPPROTO_UDP) {
1134 /* UDP validates ulen. */
1135 if (ulen < sizeof(*uh) || pskb_trim_rcsum(skb, ulen))
1140 if (udp4_csum_init(skb, uh, proto))
1143 if (rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST))
1144 return __udp4_lib_mcast_deliver(skb, uh, saddr, daddr, udptable);
1146 sk = __udp4_lib_lookup(saddr, uh->source, daddr, uh->dest,
1147 skb->dev->ifindex, udptable );
1150 int ret = udp_queue_rcv_skb(sk, skb);
1153 /* a return value > 0 means to resubmit the input, but
1154 * it wants the return to be -protocol, or 0
1161 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
1165 /* No socket. Drop the packet silently if the checksum is wrong */
1166 if (udp_lib_checksum_complete(skb))
1169 UDP_INC_STATS_BH(UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
1170 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
1173 * Hmm. We got a UDP packet to a port on which we
1174 * aren't listening. Ignore it.
1180 LIMIT_NETDEBUG(KERN_DEBUG "UDP%s: short packet: From %u.%u.%u.%u:%u %d/%d to %u.%u.%u.%u:%u\n",
1181 proto == IPPROTO_UDPLITE ? "-Lite" : "",
1192 * RFC1122: OK. Discards the bad packet silently (as far as
1193 * the network is concerned, anyway) as per 4.1.3.4 (MUST).
1195 LIMIT_NETDEBUG(KERN_DEBUG "UDP%s: bad checksum. From %d.%d.%d.%d:%d to %d.%d.%d.%d:%d ulen %d\n",
1196 proto == IPPROTO_UDPLITE ? "-Lite" : "",
1203 UDP_INC_STATS_BH(UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
1208 int udp_rcv(struct sk_buff *skb)
1210 return __udp4_lib_rcv(skb, udp_hash, IPPROTO_UDP);
1213 int udp_destroy_sock(struct sock *sk)
1216 udp_flush_pending_frames(sk);
1222 * Socket option code for UDP
1224 int udp_lib_setsockopt(struct sock *sk, int level, int optname,
1225 char __user *optval, int optlen,
1226 int (*push_pending_frames)(struct sock *))
1228 struct udp_sock *up = udp_sk(sk);
1232 if (optlen<sizeof(int))
1235 if (get_user(val, (int __user *)optval))
1245 (*push_pending_frames)(sk);
1253 case UDP_ENCAP_ESPINUDP:
1254 case UDP_ENCAP_ESPINUDP_NON_IKE:
1255 up->encap_rcv = xfrm4_udp_encap_rcv;
1257 case UDP_ENCAP_L2TPINUDP:
1258 up->encap_type = val;
1267 * UDP-Lite's partial checksum coverage (RFC 3828).
1269 /* The sender sets actual checksum coverage length via this option.
1270 * The case coverage > packet length is handled by send module. */
1271 case UDPLITE_SEND_CSCOV:
1272 if (!up->pcflag) /* Disable the option on UDP sockets */
1273 return -ENOPROTOOPT;
1274 if (val != 0 && val < 8) /* Illegal coverage: use default (8) */
1277 up->pcflag |= UDPLITE_SEND_CC;
1280 /* The receiver specifies a minimum checksum coverage value. To make
1281 * sense, this should be set to at least 8 (as done below). If zero is
1282 * used, this again means full checksum coverage. */
1283 case UDPLITE_RECV_CSCOV:
1284 if (!up->pcflag) /* Disable the option on UDP sockets */
1285 return -ENOPROTOOPT;
1286 if (val != 0 && val < 8) /* Avoid silly minimal values. */
1289 up->pcflag |= UDPLITE_RECV_CC;
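/*
 * Illustrative userspace sketch (kept under "#if 0", never compiled here):
 * a UDP-Lite application choosing partial checksum coverage through the two
 * options handled above.  Values below 8 are rounded up as in the code and
 * 0 means full coverage.  The constants are duplicated in case the libc
 * headers do not provide them.
 */
#if 0
#include <netinet/in.h>
#include <sys/socket.h>

#ifndef IPPROTO_UDPLITE
#define IPPROTO_UDPLITE		136
#endif
#ifndef UDPLITE_SEND_CSCOV
#define UDPLITE_SEND_CSCOV	10	/* from linux/udp.h */
#define UDPLITE_RECV_CSCOV	11
#endif

static int udplite_socket_with_coverage(int send_cov, int recv_cov)
{
	int fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDPLITE);

	if (fd < 0)
		return -1;
	/* Checksum only the first send_cov bytes of what we transmit ... */
	setsockopt(fd, IPPROTO_UDPLITE, UDPLITE_SEND_CSCOV,
		   &send_cov, sizeof(send_cov));
	/* ... and refuse datagrams covering fewer than recv_cov bytes. */
	setsockopt(fd, IPPROTO_UDPLITE, UDPLITE_RECV_CSCOV,
		   &recv_cov, sizeof(recv_cov));
	return fd;
}
#endif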
1300 int udp_setsockopt(struct sock *sk, int level, int optname,
1301 char __user *optval, int optlen)
1303 if (level == SOL_UDP || level == SOL_UDPLITE)
1304 return udp_lib_setsockopt(sk, level, optname, optval, optlen,
1305 udp_push_pending_frames);
1306 return ip_setsockopt(sk, level, optname, optval, optlen);
1309 #ifdef CONFIG_COMPAT
1310 int compat_udp_setsockopt(struct sock *sk, int level, int optname,
1311 char __user *optval, int optlen)
1313 if (level == SOL_UDP || level == SOL_UDPLITE)
1314 return udp_lib_setsockopt(sk, level, optname, optval, optlen,
1315 udp_push_pending_frames);
1316 return compat_ip_setsockopt(sk, level, optname, optval, optlen);
1320 int udp_lib_getsockopt(struct sock *sk, int level, int optname,
1321 char __user *optval, int __user *optlen)
1323 struct udp_sock *up = udp_sk(sk);
1326 if (get_user(len,optlen))
1329 len = min_t(unsigned int, len, sizeof(int));
1340 val = up->encap_type;
1343 /* The following two cannot be changed on UDP sockets; the return is
1344 * always 0 (which corresponds to the full checksum coverage of UDP). */
1345 case UDPLITE_SEND_CSCOV:
1349 case UDPLITE_RECV_CSCOV:
1354 return -ENOPROTOOPT;
1357 if (put_user(len, optlen))
1359 if (copy_to_user(optval, &val,len))
1364 int udp_getsockopt(struct sock *sk, int level, int optname,
1365 char __user *optval, int __user *optlen)
1367 if (level == SOL_UDP || level == SOL_UDPLITE)
1368 return udp_lib_getsockopt(sk, level, optname, optval, optlen);
1369 return ip_getsockopt(sk, level, optname, optval, optlen);
1372 #ifdef CONFIG_COMPAT
1373 int compat_udp_getsockopt(struct sock *sk, int level, int optname,
1374 char __user *optval, int __user *optlen)
1376 if (level == SOL_UDP || level == SOL_UDPLITE)
1377 return udp_lib_getsockopt(sk, level, optname, optval, optlen);
1378 return compat_ip_getsockopt(sk, level, optname, optval, optlen);
1382 * udp_poll - wait for a UDP event.
1383 * @file - file struct
 * @sock - socket
1385 * @wait - poll table
1387 * This is the same as datagram_poll(), except for the special case of
1388 * blocking sockets. If an application is using a blocking fd
1389 * and a packet with a checksum error is in the queue,
1390 * then select could indicate data available,
1391 * but the subsequent read would block. Add special-case code
1392 * to work around these arguably broken applications.
1394 unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait)
1396 unsigned int mask = datagram_poll(file, sock, wait);
1397 struct sock *sk = sock->sk;
1398 int is_lite = IS_UDPLITE(sk);
1400 /* Check for false positives due to checksum errors */
1401 if ( (mask & POLLRDNORM) &&
1402 !(file->f_flags & O_NONBLOCK) &&
1403 !(sk->sk_shutdown & RCV_SHUTDOWN)){
1404 struct sk_buff_head *rcvq = &sk->sk_receive_queue;
1405 struct sk_buff *skb;
1407 spin_lock_bh(&rcvq->lock);
1408 while ((skb = skb_peek(rcvq)) != NULL &&
1409 udp_lib_checksum_complete(skb)) {
1410 UDP_INC_STATS_BH(UDP_MIB_INERRORS, is_lite);
1411 __skb_unlink(skb, rcvq);
1414 spin_unlock_bh(&rcvq->lock);
1416 /* nothing to see, move along */
1418 mask &= ~(POLLIN | POLLRDNORM);
1425 struct proto udp_prot = {
1427 .owner = THIS_MODULE,
1428 .close = udp_lib_close,
1429 .connect = ip4_datagram_connect,
1430 .disconnect = udp_disconnect,
1432 .destroy = udp_destroy_sock,
1433 .setsockopt = udp_setsockopt,
1434 .getsockopt = udp_getsockopt,
1435 .sendmsg = udp_sendmsg,
1436 .recvmsg = udp_recvmsg,
1437 .sendpage = udp_sendpage,
1438 .backlog_rcv = udp_queue_rcv_skb,
1439 .hash = udp_lib_hash,
1440 .unhash = udp_lib_unhash,
1441 .get_port = udp_v4_get_port,
1442 .obj_size = sizeof(struct udp_sock),
1443 #ifdef CONFIG_COMPAT
1444 .compat_setsockopt = compat_udp_setsockopt,
1445 .compat_getsockopt = compat_udp_getsockopt,
1449 /* ------------------------------------------------------------------------ */
1450 #ifdef CONFIG_PROC_FS
1452 static struct sock *udp_get_first(struct seq_file *seq)
1455 struct udp_iter_state *state = seq->private;
1457 for (state->bucket = 0; state->bucket < UDP_HTABLE_SIZE; ++state->bucket) {
1458 struct hlist_node *node;
1459 sk_for_each(sk, node, state->hashtable + state->bucket) {
1460 if (sk->sk_family == state->family)
1469 static struct sock *udp_get_next(struct seq_file *seq, struct sock *sk)
1471 struct udp_iter_state *state = seq->private;
1477 } while (sk && sk->sk_family != state->family);
1479 if (!sk && ++state->bucket < UDP_HTABLE_SIZE) {
1480 sk = sk_head(state->hashtable + state->bucket);
1486 static struct sock *udp_get_idx(struct seq_file *seq, loff_t pos)
1488 struct sock *sk = udp_get_first(seq);
1491 while (pos && (sk = udp_get_next(seq, sk)) != NULL)
1493 return pos ? NULL : sk;
1496 static void *udp_seq_start(struct seq_file *seq, loff_t *pos)
1498 read_lock(&udp_hash_lock);
1499 return *pos ? udp_get_idx(seq, *pos-1) : (void *)1;
1502 static void *udp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1507 sk = udp_get_idx(seq, 0);
1509 sk = udp_get_next(seq, v);
1515 static void udp_seq_stop(struct seq_file *seq, void *v)
1517 read_unlock(&udp_hash_lock);
1520 static int udp_seq_open(struct inode *inode, struct file *file)
1522 struct udp_seq_afinfo *afinfo = PDE(inode)->data;
1523 struct seq_file *seq;
1525 struct udp_iter_state *s = kzalloc(sizeof(*s), GFP_KERNEL);
1529 s->family = afinfo->family;
1530 s->hashtable = afinfo->hashtable;
1531 s->seq_ops.start = udp_seq_start;
1532 s->seq_ops.next = udp_seq_next;
1533 s->seq_ops.show = afinfo->seq_show;
1534 s->seq_ops.stop = udp_seq_stop;
1536 rc = seq_open(file, &s->seq_ops);
1540 seq = file->private_data;
1549 /* ------------------------------------------------------------------------ */
1550 int udp_proc_register(struct udp_seq_afinfo *afinfo)
1552 struct proc_dir_entry *p;
1557 afinfo->seq_fops->owner = afinfo->owner;
1558 afinfo->seq_fops->open = udp_seq_open;
1559 afinfo->seq_fops->read = seq_read;
1560 afinfo->seq_fops->llseek = seq_lseek;
1561 afinfo->seq_fops->release = seq_release_private;
1563 p = proc_net_fops_create(afinfo->name, S_IRUGO, afinfo->seq_fops);
1571 void udp_proc_unregister(struct udp_seq_afinfo *afinfo)
1575 proc_net_remove(afinfo->name);
1576 memset(afinfo->seq_fops, 0, sizeof(*afinfo->seq_fops));
1579 /* ------------------------------------------------------------------------ */
1580 static void udp4_format_sock(struct sock *sp, char *tmpbuf, int bucket)
1582 struct inet_sock *inet = inet_sk(sp);
1583 __be32 dest = inet->daddr;
1584 __be32 src = inet->rcv_saddr;
1585 __u16 destp = ntohs(inet->dport);
1586 __u16 srcp = ntohs(inet->sport);
1588 sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X"
1589 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p",
1590 bucket, src, srcp, dest, destp, sp->sk_state,
1591 atomic_read(&sp->sk_wmem_alloc),
1592 atomic_read(&sp->sk_rmem_alloc),
1593 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
1594 atomic_read(&sp->sk_refcnt), sp);
1597 int udp4_seq_show(struct seq_file *seq, void *v)
1599 if (v == SEQ_START_TOKEN)
1600 seq_printf(seq, "%-127s\n",
1601 " sl local_address rem_address st tx_queue "
1602 "rx_queue tr tm->when retrnsmt uid timeout "
1606 struct udp_iter_state *state = seq->private;
1608 udp4_format_sock(v, tmpbuf, state->bucket);
1609 seq_printf(seq, "%-127s\n", tmpbuf);
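/*
 * Illustrative userspace sketch (kept under "#if 0", never compiled here):
 * pulling the bucket, local/remote address, state and inode back out of one
 * /proc/net/udp record written by the format string above; the remaining
 * fields are skipped with suppressed conversions.
 */
#if 0
#include <stdio.h>

static int parse_proc_net_udp_line(const char *line)
{
	unsigned int bucket, lip, lport, rip, rport, state;
	unsigned long inode;

	if (sscanf(line, "%u: %x:%x %x:%x %x %*x:%*x %*x:%*lx %*x %*d %*d %lu",
		   &bucket, &lip, &lport, &rip, &rport, &state, &inode) != 7)
		return -1;
	printf("bucket %u local %08x:%u remote %08x:%u state %02x inode %lu\n",
	       bucket, lip, lport, rip, rport, state, inode);
	return 0;
}
#endif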
1614 /* ------------------------------------------------------------------------ */
1615 static struct file_operations udp4_seq_fops;
1616 static struct udp_seq_afinfo udp4_seq_afinfo = {
1617 .owner = THIS_MODULE,
1620 .hashtable = udp_hash,
1621 .seq_show = udp4_seq_show,
1622 .seq_fops = &udp4_seq_fops,
1625 int __init udp4_proc_init(void)
1627 return udp_proc_register(&udp4_seq_afinfo);
1630 void udp4_proc_exit(void)
1632 udp_proc_unregister(&udp4_seq_afinfo);
1634 #endif /* CONFIG_PROC_FS */
1636 EXPORT_SYMBOL(udp_disconnect);
1637 EXPORT_SYMBOL(udp_hash);
1638 EXPORT_SYMBOL(udp_hash_lock);
1639 EXPORT_SYMBOL(udp_ioctl);
1640 EXPORT_SYMBOL(udp_get_port);
1641 EXPORT_SYMBOL(udp_prot);
1642 EXPORT_SYMBOL(udp_sendmsg);
1643 EXPORT_SYMBOL(udp_lib_getsockopt);
1644 EXPORT_SYMBOL(udp_lib_setsockopt);
1645 EXPORT_SYMBOL(udp_poll);
1647 #ifdef CONFIG_PROC_FS
1648 EXPORT_SYMBOL(udp_proc_register);
1649 EXPORT_SYMBOL(udp_proc_unregister);