#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/init.h>
+#include <linux/module.h>
#include <linux/skbuff.h>
#include <asm/uaccess.h>
#include <linux/seq_file.h>
#include "udp_impl.h"
-DEFINE_SNMP_STAT(struct udp_mib, udp_stats_in6) __read_mostly;
-
-static inline int udp_v6_get_port(struct sock *sk, unsigned short snum)
+int udp_v6_get_port(struct sock *sk, unsigned short snum)
{
- return udp_get_port(sk, snum, ipv6_rcv_saddr_equal);
+ return udp_lib_get_port(sk, snum, ipv6_rcv_saddr_equal);
}
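
udp_v6_get_port() can collapse to a single call here because the shared allocator takes an address-comparison callback; only the comparison of two bound source addresses differs between the IPv4 and IPv6 paths. As a rough userspace sketch of that callback pattern (generic_get_port, bind_entry and saddr_equal_t are made-up names, not kernel APIs):

	#include <stddef.h>

	struct bind_entry { unsigned short port; const void *addr; };

	/* Comparison callback: returns nonzero if two bound addresses clash. */
	typedef int (*saddr_equal_t)(const void *a, const void *b);

	/* Scan existing bindings; reject the request when another entry already
	 * holds the same port on an address the callback considers equal. */
	static int generic_get_port(const struct bind_entry *table, size_t n,
				    const struct bind_entry *want,
				    saddr_equal_t saddr_equal)
	{
		size_t i;

		for (i = 0; i < n; i++)
			if (table[i].port == want->port &&
			    saddr_equal(table[i].addr, want->addr))
				return -1;	/* conflict */
		return 0;			/* port usable for this address */
	}
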
-static struct sock *__udp6_lib_lookup(struct in6_addr *saddr, __be16 sport,
+static struct sock *__udp6_lib_lookup(struct net *net,
+ struct in6_addr *saddr, __be16 sport,
struct in6_addr *daddr, __be16 dport,
int dif, struct hlist_head udptable[])
{
sk_for_each(sk, node, &udptable[hnum & (UDP_HTABLE_SIZE - 1)]) {
struct inet_sock *inet = inet_sk(sk);
- if (sk->sk_hash == hnum && sk->sk_family == PF_INET6) {
+ if (net_eq(sock_net(sk), net) && sk->sk_hash == hnum &&
+ sk->sk_family == PF_INET6) {
struct ipv6_pinfo *np = inet6_sk(sk);
int score = 0;
if (inet->dport) {
struct inet_sock *inet = inet_sk(sk);
struct sk_buff *skb;
unsigned int ulen, copied;
+ int peeked;
int err;
int is_udplite = IS_UDPLITE(sk);
return ipv6_recv_error(sk, msg, len);
try_again:
- skb = skb_recv_datagram(sk, flags, noblock, &err);
+ skb = __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0),
+ &peeked, &err);
if (!skb)
goto out;
if (err)
goto out_free;
+ if (!peeked)
+ UDP6_INC_STATS_USER(UDP_MIB_INDATAGRAMS, is_udplite);
+
sock_recv_timestamp(msg, sk, skb);
/* Copy the address. */
err = ulen;
out_free:
+ lock_sock(sk);
skb_free_datagram(sk, skb);
+ release_sock(sk);
out:
return err;
csum_copy_err:
- skb_kill_datagram(sk, skb, flags);
-
- if (flags & MSG_DONTWAIT) {
+ lock_sock(sk);
+ if (!skb_kill_datagram(sk, skb, flags))
UDP6_INC_STATS_USER(UDP_MIB_INERRORS, is_udplite);
+ release_sock(sk);
+
+ if (flags & MSG_DONTWAIT)
return -EAGAIN;
- }
goto try_again;
}
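
The receive path above leans on two ideas: __skb_recv_datagram() reports through 'peeked' whether the datagram was left on the queue (MSG_PEEK), so UDP_MIB_INDATAGRAMS is charged only the first time a packet is actually handed to userspace; and skb_free_datagram()/skb_kill_datagram() now run under lock_sock() so receive-buffer accounting stays consistent. A rough userspace analogue of the peek-aware counting only (rxq, rxq_recvmsg and friends are hypothetical names, and the locking side is omitted):

	#include <stdbool.h>
	#include <stddef.h>
	#include <string.h>

	struct dgram { const char *data; size_t len; };

	struct rxq {
		struct dgram *head;		/* one-slot queue, enough for the idea */
		unsigned long in_datagrams;	/* stands in for UDP_MIB_INDATAGRAMS */
	};

	/* Like __skb_recv_datagram(): hand back the datagram and report via
	 * *peeked whether it was left on the queue. */
	static struct dgram *rxq_recv(struct rxq *q, bool peek, bool *peeked)
	{
		struct dgram *d = q->head;

		if (!d)
			return NULL;
		*peeked = peek;
		if (!peek)
			q->head = NULL;		/* actually dequeue it */
		return d;
	}

	static size_t rxq_recvmsg(struct rxq *q, char *buf, size_t len, bool peek)
	{
		bool peeked;
		struct dgram *d = rxq_recv(q, peek, &peeked);

		if (!d)
			return 0;
		if (!peeked)
			q->in_datagrams++;	/* charge the counter only once */
		if (len > d->len)
			len = d->len;
		memcpy(buf, d->data, len);
		return len;
	}
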
struct sock *sk;
int err;
- sk = __udp6_lib_lookup(daddr, uh->dest,
+ sk = __udp6_lib_lookup(dev_net(skb->dev), daddr, uh->dest,
saddr, uh->source, inet6_iif(skb), udptable);
if (sk == NULL)
return;
struct inet6_skb_parm *opt, int type,
int code, int offset, __be32 info )
{
- return __udp6_lib_err(skb, opt, type, code, offset, info, udp_hash);
+ __udp6_lib_err(skb, opt, type, code, offset, info, udp_hash);
}
int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
{
struct udp_sock *up = udp_sk(sk);
int rc;
+ int is_udplite = IS_UDPLITE(sk);
if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
goto drop;
/*
* UDP-Lite specific tests, ignored on UDP sockets (see net/ipv4/udp.c).
*/
- if ((up->pcflag & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) {
+ if ((is_udplite & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) {
if (up->pcrlen == 0) { /* full coverage was set */
LIMIT_NETDEBUG(KERN_WARNING "UDPLITE6: partial coverage"
if ((rc = sock_queue_rcv_skb(sk,skb)) < 0) {
/* Note that an ENOMEM error is charged twice */
if (rc == -ENOMEM)
- UDP6_INC_STATS_BH(UDP_MIB_RCVBUFERRORS, up->pcflag);
+ UDP6_INC_STATS_BH(UDP_MIB_RCVBUFERRORS, is_udplite);
goto drop;
}
- UDP6_INC_STATS_BH(UDP_MIB_INDATAGRAMS, up->pcflag);
+
return 0;
drop:
- UDP6_INC_STATS_BH(UDP_MIB_INERRORS, up->pcflag);
+ UDP6_INC_STATS_BH(UDP_MIB_INERRORS, is_udplite);
kfree_skb(skb);
return -1;
}
sk_for_each_from(s, node) {
struct inet_sock *inet = inet_sk(s);
+	if (!net_eq(sock_net(s), sock_net(sk)))
+ continue;
+
if (s->sk_hash == num && s->sk_family == PF_INET6) {
struct ipv6_pinfo *np = inet6_sk(s);
if (inet->dport) {
while ((sk2 = udp_v6_mcast_next(sk_next(sk2), uh->dest, daddr,
uh->source, saddr, dif))) {
struct sk_buff *buff = skb_clone(skb, GFP_ATOMIC);
- if (buff)
- udpv6_queue_rcv_skb(sk2, buff);
+ if (buff) {
+ bh_lock_sock_nested(sk2);
+ if (!sock_owned_by_user(sk2))
+ udpv6_queue_rcv_skb(sk2, buff);
+ else
+ sk_add_backlog(sk2, buff);
+ bh_unlock_sock(sk2);
+ }
}
- udpv6_queue_rcv_skb(sk, skb);
+ bh_lock_sock_nested(sk);
+ if (!sock_owned_by_user(sk))
+ udpv6_queue_rcv_skb(sk, skb);
+ else
+ sk_add_backlog(sk, skb);
+ bh_unlock_sock(sk);
out:
read_unlock(&udp_hash_lock);
return 0;
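
Both the multicast loop and the unicast path now deliver through the same pattern: take the bottom-half socket lock, process the packet immediately if no process currently owns the socket, otherwise append it to the socket backlog, which the owner drains when it releases the lock. A simplified userspace analogue of that idea, using a pthread mutex and a hand-rolled list in place of bh_lock_sock_nested()/sk_add_backlog() (fake_sock, rx and release_fake_sock are invented names, not how the kernel lock works internally):

	#include <pthread.h>
	#include <stdlib.h>

	struct pkt { struct pkt *next; /* payload omitted */ };

	struct fake_sock {
		pthread_mutex_t lock;		/* stands in for the bh socket lock */
		int owned_by_user;		/* mirrors sock_owned_by_user() */
		struct pkt *backlog_head;
		struct pkt *backlog_tail;
	};

	static void deliver(struct fake_sock *sk, struct pkt *p)
	{
		(void)sk;
		free(p);			/* real protocol processing goes here */
	}

	/* Receive path: process in place when nobody owns the socket, otherwise
	 * park the packet on the backlog for the owner to handle later. */
	static void rx(struct fake_sock *sk, struct pkt *p)
	{
		pthread_mutex_lock(&sk->lock);
		if (!sk->owned_by_user) {
			deliver(sk, p);
		} else {
			p->next = NULL;
			if (sk->backlog_tail)
				sk->backlog_tail->next = p;
			else
				sk->backlog_head = p;
			sk->backlog_tail = p;
		}
		pthread_mutex_unlock(&sk->lock);
	}

	/* The owner drains whatever arrived while it held the socket. */
	static void release_fake_sock(struct fake_sock *sk)
	{
		pthread_mutex_lock(&sk->lock);
		while (sk->backlog_head) {
			struct pkt *p = sk->backlog_head;

			sk->backlog_head = p->next;
			deliver(sk, p);
		}
		sk->backlog_tail = NULL;
		sk->owned_by_user = 0;
		pthread_mutex_unlock(&sk->lock);
	}
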
* check socket cache ... must talk to Alan about his plans
* for sock caches... i'll skip this for now.
*/
- sk = __udp6_lib_lookup(saddr, uh->source,
+ sk = __udp6_lib_lookup(dev_net(skb->dev), saddr, uh->source,
daddr, uh->dest, inet6_iif(skb), udptable);
if (sk == NULL) {
/* deliver */
- udpv6_queue_rcv_skb(sk, skb);
+ bh_lock_sock_nested(sk);
+ if (!sock_owned_by_user(sk))
+ udpv6_queue_rcv_skb(sk, skb);
+ else
+ sk_add_backlog(sk, skb);
+ bh_unlock_sock(sk);
sock_put(sk);
return 0;
struct inet_sock *inet = inet_sk(sk);
struct flowi *fl = &inet->cork.fl;
int err = 0;
+ int is_udplite = IS_UDPLITE(sk);
__wsum csum = 0;
/* Grab the skbuff where UDP header space exists. */
uh->len = htons(up->len);
uh->check = 0;
- if (up->pcflag)
+ if (is_udplite)
csum = udplite_csum_outgoing(sk, skb);
else
csum = udp_csum_outgoing(sk, skb);
up->len = 0;
up->pending = 0;
if (!err)
- UDP6_INC_STATS_USER(UDP_MIB_OUTDATAGRAMS, up->pcflag);
+ UDP6_INC_STATS_USER(UDP_MIB_OUTDATAGRAMS, is_udplite);
return err;
}
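
udp_v6_push_pending_frames() picks udplite_csum_outgoing() or udp_csum_outgoing() depending on IS_UDPLITE(sk); the UDP-Lite variant may checksum only part of the payload (the RFC 3828 "coverage"), whereas plain UDP always covers the whole datagram. A toy illustration of partial coverage (the IPv6 pseudo-header, which the real checksum also covers, is left out for brevity; csum16 and udplite_csum are not kernel functions):

	#include <stdint.h>
	#include <stddef.h>

	/* Plain 16-bit ones'-complement sum over big-endian words. */
	static uint16_t csum16(const uint8_t *data, size_t len)
	{
		uint32_t sum = 0;

		while (len > 1) {
			sum += ((uint32_t)data[0] << 8) | data[1];
			data += 2;
			len -= 2;
		}
		if (len)			/* odd trailing byte */
			sum += (uint32_t)data[0] << 8;
		while (sum >> 16)		/* fold carries back in */
			sum = (sum & 0xffff) + (sum >> 16);
		return (uint16_t)~sum;
	}

	/* coverage == 0 means "checksum the whole datagram", as plain UDP does. */
	static uint16_t udplite_csum(const uint8_t *dgram, size_t len, size_t coverage)
	{
		if (coverage == 0 || coverage > len)
			coverage = len;
		return csum16(dgram, coverage);
	}
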
int corkreq = up->corkflag || msg->msg_flags&MSG_MORE;
int err;
int connected = 0;
- int is_udplite = up->pcflag;
+ int is_udplite = IS_UDPLITE(sk);
int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);
/* destination address check */
if (final_p)
ipv6_addr_copy(&fl.fl6_dst, final_p);
- if ((err = __xfrm_lookup(&dst, &fl, sk, 1)) < 0) {
+ if ((err = __xfrm_lookup(&dst, &fl, sk, XFRM_LOOKUP_WAIT)) < 0) {
if (err == -EREMOTE)
err = ip6_dst_blackhole(sk, &dst, &fl);
if (err < 0)
else
hlimit = np->hop_limit;
if (hlimit < 0)
- hlimit = dst_metric(dst, RTAX_HOPLIMIT);
- if (hlimit < 0)
- hlimit = ipv6_get_hoplimit(dst->dev);
+ hlimit = ip6_dst_hoplimit(dst);
}
if (tclass < 0) {
.seq_fops = &udp6_seq_fops,
};
-int __init udp6_proc_init(void)
+int udp6_proc_init(struct net *net)
{
- return udp_proc_register(&udp6_seq_afinfo);
+ return udp_proc_register(net, &udp6_seq_afinfo);
}
-void udp6_proc_exit(void) {
- udp_proc_unregister(&udp6_seq_afinfo);
+void udp6_proc_exit(struct net *net)
+{
+	udp_proc_unregister(net, &udp6_seq_afinfo);
}
#endif /* CONFIG_PROC_FS */
/* ------------------------------------------------------------------------ */
+DEFINE_PROTO_INUSE(udpv6)
+
struct proto udpv6_prot = {
.name = "UDPv6",
.owner = THIS_MODULE,
.hash = udp_lib_hash,
.unhash = udp_lib_unhash,
.get_port = udp_v6_get_port,
+ .memory_allocated = &udp_memory_allocated,
+ .sysctl_mem = sysctl_udp_mem,
+ .sysctl_wmem = &sysctl_udp_wmem_min,
+ .sysctl_rmem = &sysctl_udp_rmem_min,
.obj_size = sizeof(struct udp6_sock),
+ .h.udp_hash = udp_hash,
#ifdef CONFIG_COMPAT
.compat_setsockopt = compat_udpv6_setsockopt,
.compat_getsockopt = compat_udpv6_getsockopt,
#endif
+ REF_PROTO_INUSE(udpv6)
};
static struct inet_protosw udpv6_protosw = {
};
-void __init udpv6_init(void)
+int __init udpv6_init(void)
+{
+ int ret;
+
+ ret = inet6_add_protocol(&udpv6_protocol, IPPROTO_UDP);
+ if (ret)
+ goto out;
+
+ ret = inet6_register_protosw(&udpv6_protosw);
+ if (ret)
+ goto out_udpv6_protocol;
+out:
+ return ret;
+
+out_udpv6_protocol:
+ inet6_del_protocol(&udpv6_protocol, IPPROTO_UDP);
+ goto out;
+}
+
+void udpv6_exit(void)
{
- if (inet6_add_protocol(&udpv6_protocol, IPPROTO_UDP) < 0)
- printk(KERN_ERR "udpv6_init: Could not register protocol\n");
- inet6_register_protosw(&udpv6_protosw);
+ inet6_unregister_protosw(&udpv6_protosw);
+ inet6_del_protocol(&udpv6_protocol, IPPROTO_UDP);
}
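
udpv6_init() now propagates errors instead of only printk()-ing them, using the usual register-then-unwind pattern: if the second registration fails, the first one is rolled back before the error is returned. A self-contained sketch of that pattern (subsys_init and the register_* stand-ins are hypothetical; register_protosw() is hard-wired to fail so the unwind path runs):

	#include <stdio.h>

	static int register_protocol(void) { return 0; }	/* stand-in: succeeds */
	static void unregister_protocol(void) { }
	static int register_protosw(void) { return -1; }	/* stand-in: fails */

	static int subsys_init(void)
	{
		int ret;

		ret = register_protocol();
		if (ret)
			goto out;

		ret = register_protosw();
		if (ret)
			goto out_protocol;
	out:
		return ret;

	out_protocol:
		unregister_protocol();		/* undo step 1 before failing */
		goto out;
	}

	int main(void)
	{
		printf("subsys_init() = %d\n", subsys_init());
		return 0;
	}
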