2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
6 * PACKET - implements raw packet sockets.
9 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
10 * Alan Cox, <gw4pts@gw4pts.ampr.org>
13 * Alan Cox : verify_area() now used correctly
14 * Alan Cox : new skbuff lists, look ma no backlogs!
15 * Alan Cox : tidied skbuff lists.
16 * Alan Cox : Now uses generic datagram routines I
17 * added. Also fixed the peek/read crash
18 * from all old Linux datagram code.
19 * Alan Cox : Uses the improved datagram code.
20 * Alan Cox : Added NULL's for socket options.
21 * Alan Cox : Re-commented the code.
22 * Alan Cox : Use new kernel side addressing
23 * Rob Janssen : Correct MTU usage.
24 * Dave Platt : Counter leaks caused by incorrect
25 * interrupt locking and some slightly
26 * dubious gcc output. Can you read
27 * compiler: it said _VOLATILE_
28 * Richard Kooijman : Timestamp fixes.
29 * Alan Cox : New buffers. Use sk->mac.raw.
30 * Alan Cox : sendmsg/recvmsg support.
31 * Alan Cox : Protocol setting support
32 * Alexey Kuznetsov : Untied from IPv4 stack.
33 * Cyrus Durgin : Fixed kerneld for kmod.
34 * Michal Ostrowski : Module initialization cleanup.
35 * Ulises Alonso : Frame number limit removal and
36 * packet_set_ring memory leak.
37 * Eric Biederman : Allow for > 8 byte hardware addresses.
38 * The convention is that longer addresses
39 * will simply extend the hardware address
40 * byte arrays at the end of sockaddr_ll
43 * This program is free software; you can redistribute it and/or
44 * modify it under the terms of the GNU General Public License
45 * as published by the Free Software Foundation; either version
46 * 2 of the License, or (at your option) any later version.
50 #include <linux/types.h>
52 #include <linux/capability.h>
53 #include <linux/fcntl.h>
54 #include <linux/socket.h>
56 #include <linux/inet.h>
57 #include <linux/netdevice.h>
58 #include <linux/if_packet.h>
59 #include <linux/wireless.h>
60 #include <linux/kernel.h>
61 #include <linux/kmod.h>
62 #include <net/net_namespace.h>
64 #include <net/protocol.h>
65 #include <linux/skbuff.h>
67 #include <linux/errno.h>
68 #include <linux/timer.h>
69 #include <asm/system.h>
70 #include <asm/uaccess.h>
71 #include <asm/ioctls.h>
73 #include <asm/cacheflush.h>
75 #include <linux/proc_fs.h>
76 #include <linux/seq_file.h>
77 #include <linux/poll.h>
78 #include <linux/module.h>
79 #include <linux/init.h>
82 #include <net/inet_common.h>
87 - if a device has no dev->hard_header routine, it adds and removes the ll header
88 inside itself. In this case the ll header is invisible outside the device,
89 but higher levels should still reserve dev->hard_header_len.
90 Some devices are clever enough to reallocate the skb when the header
91 does not fit into the reserved space (tunnels); sillier ones are not
93 - the packet socket receives packets with the ll header already pulled,
94 so SOCK_RAW must push it back.
99 Incoming, dev->hard_header!=NULL
100 mac_header -> ll header
103 Outgoing, dev->hard_header!=NULL
104 mac_header -> ll header
107 Incoming, dev->hard_header==NULL
108 mac_header -> UNKNOWN position. It is very likely that it points to the ll
109 header. PPP does this, which is wrong, because it introduces
110 asymmetry between the rx and tx paths.
113 Outgoing, dev->hard_header==NULL
114 mac_header -> data. ll header is still not built!
118 If dev->hard_header==NULL we are unlikely to restore a sensible ll header.
124 dev->hard_header != NULL
125 mac_header -> ll header
128 dev->hard_header == NULL (ll header is added by device, we cannot control it)
132 We should set nh.raw on output to the correct position;
133 the packet classifier depends on it.
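/*
 * Illustrative userspace sketch (not part of this file): what the notes
 * above mean for an application.  With SOCK_RAW the ll header is included
 * in the bytes returned by recvfrom(); with SOCK_DGRAM it is stripped and
 * only described by the returned sockaddr_ll.  ETH_P_ALL, the buffer size
 * and the omitted error handling are assumptions of this sketch.
 *
 *	#include <sys/socket.h>
 *	#include <arpa/inet.h>		// htons()
 *	#include <linux/if_packet.h>	// struct sockaddr_ll
 *	#include <linux/if_ether.h>	// ETH_P_ALL
 *
 *	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
 *	unsigned char buf[2048];
 *	struct sockaddr_ll from;
 *	socklen_t fromlen = sizeof(from);
 *	ssize_t n = recvfrom(fd, buf, sizeof(buf), 0,
 *			     (struct sockaddr *)&from, &fromlen);
 *	// SOCK_RAW: buf starts at the link-level header (14 bytes on Ethernet).
 *	// SOCK_DGRAM: buf would start at the network header; the link-level
 *	// source address is reported in from.sll_addr / from.sll_halen instead.
 */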
136 /* Private packet socket structures. */
140 struct packet_mclist *next;
145 unsigned char addr[MAX_ADDR_LEN];
147 /* identical to struct packet_mreq except it has
148 * a longer address field.
150 struct packet_mreq_max
153 unsigned short mr_type;
154 unsigned short mr_alen;
155 unsigned char mr_address[MAX_ADDR_LEN];
158 #ifdef CONFIG_PACKET_MMAP
159 static int packet_set_ring(struct sock *sk, struct tpacket_req *req, int closing);
162 static void packet_flush_mclist(struct sock *sk);
165 /* struct sock has to be the first member of packet_sock */
167 struct tpacket_stats stats;
168 #ifdef CONFIG_PACKET_MMAP
171 unsigned int frames_per_block;
172 unsigned int frame_size;
173 unsigned int frame_max;
176 struct packet_type prot_hook;
177 spinlock_t bind_lock;
178 unsigned int running:1, /* prot_hook is attached*/
181 int ifindex; /* bound device */
183 struct packet_mclist *mclist;
184 #ifdef CONFIG_PACKET_MMAP
186 unsigned int pg_vec_order;
187 unsigned int pg_vec_pages;
188 unsigned int pg_vec_len;
192 struct packet_skb_cb {
193 unsigned int origlen;
195 struct sockaddr_pkt pkt;
196 struct sockaddr_ll ll;
200 #define PACKET_SKB_CB(__skb) ((struct packet_skb_cb *)((__skb)->cb))
202 #ifdef CONFIG_PACKET_MMAP
204 static inline struct tpacket_hdr *packet_lookup_frame(struct packet_sock *po, unsigned int position)
206 unsigned int pg_vec_pos, frame_offset;
208 pg_vec_pos = position / po->frames_per_block;
209 frame_offset = position % po->frames_per_block;
211 return (struct tpacket_hdr *)(po->pg_vec[pg_vec_pos] + (frame_offset * po->frame_size));
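/*
 * Worked example (illustrative, the numbers are assumptions): with
 * tp_block_size 4096 and tp_frame_size 2048 each block holds
 * frames_per_block = 2 frames, so frame number 5 resolves to
 * pg_vec[5 / 2] + (5 % 2) * 2048, i.e. 2048 bytes into the third block.
 * The real values come from the ring geometry set up in packet_set_ring().
 */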
215 static inline struct packet_sock *pkt_sk(struct sock *sk)
217 return (struct packet_sock *)sk;
220 static void packet_sock_destruct(struct sock *sk)
222 BUG_TRAP(!atomic_read(&sk->sk_rmem_alloc));
223 BUG_TRAP(!atomic_read(&sk->sk_wmem_alloc));
225 if (!sock_flag(sk, SOCK_DEAD)) {
226 printk("Attempt to release alive packet socket: %p\n", sk);
230 sk_refcnt_debug_dec(sk);
234 static const struct proto_ops packet_ops;
236 static const struct proto_ops packet_ops_spkt;
238 static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev)
241 struct sockaddr_pkt *spkt;
244 * When we registered the protocol we saved the socket in the data
245 * field for just this event.
248 sk = pt->af_packet_priv;
251 * Yank back the headers [hope the device set this
252 * right or kerboom...]
254 * Incoming packets have ll header pulled,
257 * For outgoing ones skb->data == skb_mac_header(skb),
258 * so this procedure is a no-op.
261 if (skb->pkt_type == PACKET_LOOPBACK)
264 if (dev_net(dev) != sock_net(sk))
267 if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
270 /* drop any routing info */
271 dst_release(skb->dst);
274 /* drop conntrack reference */
277 spkt = &PACKET_SKB_CB(skb)->sa.pkt;
279 skb_push(skb, skb->data - skb_mac_header(skb));
282 * The SOCK_PACKET socket receives _all_ frames.
285 spkt->spkt_family = dev->type;
286 strlcpy(spkt->spkt_device, dev->name, sizeof(spkt->spkt_device));
287 spkt->spkt_protocol = skb->protocol;
290 * Charge the memory to the socket. This is done specifically
291 * to prevent a socket from using up all of the memory.
294 if (sock_queue_rcv_skb(sk,skb) == 0)
305 * Output a raw packet to a device layer. This bypasses all the other
306 * protocol layers and you must therefore supply it with a complete frame
309 static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock,
310 struct msghdr *msg, size_t len)
312 struct sock *sk = sock->sk;
313 struct sockaddr_pkt *saddr=(struct sockaddr_pkt *)msg->msg_name;
315 struct net_device *dev;
320 * Get and verify the address.
325 if (msg->msg_namelen < sizeof(struct sockaddr))
327 if (msg->msg_namelen==sizeof(struct sockaddr_pkt))
328 proto=saddr->spkt_protocol;
331 return(-ENOTCONN); /* SOCK_PACKET must be sent giving an address */
334 * Find the device first to size check it
337 saddr->spkt_device[13] = 0;
338 dev = dev_get_by_name(sock_net(sk), saddr->spkt_device);
344 if (!(dev->flags & IFF_UP))
348 * You may not queue a frame bigger than the mtu. This is the lowest level
349 * raw protocol and you must do your own fragmentation at this level.
353 if (len > dev->mtu + dev->hard_header_len)
357 skb = sock_wmalloc(sk, len + LL_RESERVED_SPACE(dev), 0, GFP_KERNEL);
360 * If the write buffer is full, then tough. At this level the user gets to
361 * deal with the problem - do your own algorithmic backoffs. That's far
372 /* FIXME: Save some space for broken drivers that write a
373 * hard header at transmission time by themselves. PPP is the
374 * notable one here. This should really be fixed at the driver level.
376 skb_reserve(skb, LL_RESERVED_SPACE(dev));
377 skb_reset_network_header(skb);
379 /* Try to align data part correctly */
380 if (dev->header_ops) {
381 skb->data -= dev->hard_header_len;
382 skb->tail -= dev->hard_header_len;
383 if (len < dev->hard_header_len)
384 skb_reset_network_header(skb);
387 /* Returns -EFAULT on error */
388 err = memcpy_fromiovec(skb_put(skb,len), msg->msg_iov, len);
389 skb->protocol = proto;
391 skb->priority = sk->sk_priority;
411 static inline unsigned int run_filter(struct sk_buff *skb, struct sock *sk,
414 struct sk_filter *filter;
417 filter = rcu_dereference(sk->sk_filter);
419 res = sk_run_filter(skb, filter->insns, filter->len);
420 rcu_read_unlock_bh();
426 This function does lazy skb cloning in the hope that most of the packets
427 are discarded by BPF.
429 Note the tricky part: we DO mangle the shared skb! skb->data, skb->len
430 and skb->cb are mangled. It works because (and until) packets
431 arriving here are owned by the current CPU. Output packets are cloned
432 by dev_queue_xmit_nit(), input packets are processed by net_bh
433 sequentially, so if we return the skb to its original state on exit,
434 we will not harm anyone.
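/*
 * Illustrative userspace sketch (not part of this file): the BPF filter the
 * comment above refers to is attached by the application with
 * SO_ATTACH_FILTER.  This particular program (accept only ARP frames,
 * EtherType 0x0806, on a SOCK_RAW socket; drop everything else) is an
 * assumption of the example; fd is an AF_PACKET socket as in the earlier
 * sketch.
 *
 *	#include <sys/socket.h>
 *	#include <linux/filter.h>
 *
 *	struct sock_filter code[] = {
 *		BPF_STMT(BPF_LD  | BPF_H   | BPF_ABS, 12),	// load EtherType
 *		BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x0806, 0, 1),
 *		BPF_STMT(BPF_RET | BPF_K, 0xffffffff),		// accept whole frame
 *		BPF_STMT(BPF_RET | BPF_K, 0),			// drop
 *	};
 *	struct sock_fprog prog = {
 *		.len	= sizeof(code) / sizeof(code[0]),
 *		.filter	= code,
 *	};
 *	setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &prog, sizeof(prog));
 */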
437 static int packet_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev)
440 struct sockaddr_ll *sll;
441 struct packet_sock *po;
442 u8 * skb_head = skb->data;
443 int skb_len = skb->len;
444 unsigned int snaplen, res;
446 if (skb->pkt_type == PACKET_LOOPBACK)
449 sk = pt->af_packet_priv;
452 if (dev_net(dev) != sock_net(sk))
457 if (dev->header_ops) {
458 /* The device has an explicit notion of ll header,
459 exported to higher levels.
461 Otherwise, the device hides the details of its frame
462 structure, so the corresponding packet head is
463 never delivered to the user.
465 if (sk->sk_type != SOCK_DGRAM)
466 skb_push(skb, skb->data - skb_mac_header(skb));
467 else if (skb->pkt_type == PACKET_OUTGOING) {
468 /* Special case: outgoing packets have ll header at head */
469 skb_pull(skb, skb_network_offset(skb));
475 res = run_filter(skb, sk, snaplen);
481 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
482 (unsigned)sk->sk_rcvbuf)
485 if (skb_shared(skb)) {
486 struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
490 if (skb_head != skb->data) {
491 skb->data = skb_head;
498 BUILD_BUG_ON(sizeof(*PACKET_SKB_CB(skb)) + MAX_ADDR_LEN - 8 >
501 sll = &PACKET_SKB_CB(skb)->sa.ll;
502 sll->sll_family = AF_PACKET;
503 sll->sll_hatype = dev->type;
504 sll->sll_protocol = skb->protocol;
505 sll->sll_pkttype = skb->pkt_type;
506 if (unlikely(po->origdev))
507 sll->sll_ifindex = orig_dev->ifindex;
509 sll->sll_ifindex = dev->ifindex;
511 sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
513 PACKET_SKB_CB(skb)->origlen = skb->len;
515 if (pskb_trim(skb, snaplen))
518 skb_set_owner_r(skb, sk);
520 dst_release(skb->dst);
523 /* drop conntrack reference */
526 spin_lock(&sk->sk_receive_queue.lock);
527 po->stats.tp_packets++;
528 __skb_queue_tail(&sk->sk_receive_queue, skb);
529 spin_unlock(&sk->sk_receive_queue.lock);
530 sk->sk_data_ready(sk, skb->len);
534 spin_lock(&sk->sk_receive_queue.lock);
535 po->stats.tp_drops++;
536 spin_unlock(&sk->sk_receive_queue.lock);
539 if (skb_head != skb->data && skb_shared(skb)) {
540 skb->data = skb_head;
548 #ifdef CONFIG_PACKET_MMAP
549 static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev)
552 struct packet_sock *po;
553 struct sockaddr_ll *sll;
554 struct tpacket_hdr *h;
555 u8 * skb_head = skb->data;
556 int skb_len = skb->len;
557 unsigned int snaplen, res;
558 unsigned long status = TP_STATUS_LOSING|TP_STATUS_USER;
559 unsigned short macoff, netoff;
560 struct sk_buff *copy_skb = NULL;
563 if (skb->pkt_type == PACKET_LOOPBACK)
566 sk = pt->af_packet_priv;
569 if (dev_net(dev) != sock_net(sk))
572 if (dev->header_ops) {
573 if (sk->sk_type != SOCK_DGRAM)
574 skb_push(skb, skb->data - skb_mac_header(skb));
575 else if (skb->pkt_type == PACKET_OUTGOING) {
576 /* Special case: outgoing packets have ll header at head */
577 skb_pull(skb, skb_network_offset(skb));
581 if (skb->ip_summed == CHECKSUM_PARTIAL)
582 status |= TP_STATUS_CSUMNOTREADY;
586 res = run_filter(skb, sk, snaplen);
592 if (sk->sk_type == SOCK_DGRAM) {
593 macoff = netoff = TPACKET_ALIGN(TPACKET_HDRLEN) + 16;
595 unsigned maclen = skb_network_offset(skb);
596 netoff = TPACKET_ALIGN(TPACKET_HDRLEN + (maclen < 16 ? 16 : maclen));
597 macoff = netoff - maclen;
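	/*
	 * Illustrative note: for a SOCK_RAW socket on Ethernet, maclen is 14,
	 * so netoff = TPACKET_ALIGN(TPACKET_HDRLEN + 14) and macoff =
	 * netoff - 14; the MAC header therefore lands macoff bytes into the
	 * ring slot and the network header at netoff.  For SOCK_DGRAM both
	 * offsets point just past the tpacket header plus the 16-byte gap
	 * reserved above.
	 */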
600 if (macoff + snaplen > po->frame_size) {
601 if (po->copy_thresh &&
602 atomic_read(&sk->sk_rmem_alloc) + skb->truesize <
603 (unsigned)sk->sk_rcvbuf) {
604 if (skb_shared(skb)) {
605 copy_skb = skb_clone(skb, GFP_ATOMIC);
607 copy_skb = skb_get(skb);
608 skb_head = skb->data;
611 skb_set_owner_r(copy_skb, sk);
613 snaplen = po->frame_size - macoff;
614 if ((int)snaplen < 0)
618 spin_lock(&sk->sk_receive_queue.lock);
619 h = packet_lookup_frame(po, po->head);
623 po->head = po->head != po->frame_max ? po->head+1 : 0;
624 po->stats.tp_packets++;
626 status |= TP_STATUS_COPY;
627 __skb_queue_tail(&sk->sk_receive_queue, copy_skb);
629 if (!po->stats.tp_drops)
630 status &= ~TP_STATUS_LOSING;
631 spin_unlock(&sk->sk_receive_queue.lock);
633 skb_copy_bits(skb, 0, (u8*)h + macoff, snaplen);
635 h->tp_len = skb->len;
636 h->tp_snaplen = snaplen;
639 if (skb->tstamp.tv64)
640 tv = ktime_to_timeval(skb->tstamp);
642 do_gettimeofday(&tv);
643 h->tp_sec = tv.tv_sec;
644 h->tp_usec = tv.tv_usec;
646 sll = (struct sockaddr_ll*)((u8*)h + TPACKET_ALIGN(sizeof(*h)));
647 sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
648 sll->sll_family = AF_PACKET;
649 sll->sll_hatype = dev->type;
650 sll->sll_protocol = skb->protocol;
651 sll->sll_pkttype = skb->pkt_type;
652 if (unlikely(po->origdev))
653 sll->sll_ifindex = orig_dev->ifindex;
655 sll->sll_ifindex = dev->ifindex;
657 h->tp_status = status;
661 struct page *p_start, *p_end;
662 u8 *h_end = (u8 *)h + macoff + snaplen - 1;
664 p_start = virt_to_page(h);
665 p_end = virt_to_page(h_end);
666 while (p_start <= p_end) {
667 flush_dcache_page(p_start);
672 sk->sk_data_ready(sk, 0);
675 if (skb_head != skb->data && skb_shared(skb)) {
676 skb->data = skb_head;
684 po->stats.tp_drops++;
685 spin_unlock(&sk->sk_receive_queue.lock);
687 sk->sk_data_ready(sk, 0);
696 static int packet_sendmsg(struct kiocb *iocb, struct socket *sock,
697 struct msghdr *msg, size_t len)
699 struct sock *sk = sock->sk;
700 struct sockaddr_ll *saddr=(struct sockaddr_ll *)msg->msg_name;
702 struct net_device *dev;
705 int ifindex, err, reserve = 0;
708 * Get and verify the address.
712 struct packet_sock *po = pkt_sk(sk);
714 ifindex = po->ifindex;
719 if (msg->msg_namelen < sizeof(struct sockaddr_ll))
721 if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr)))
723 ifindex = saddr->sll_ifindex;
724 proto = saddr->sll_protocol;
725 addr = saddr->sll_addr;
729 dev = dev_get_by_index(sock_net(sk), ifindex);
733 if (sock->type == SOCK_RAW)
734 reserve = dev->hard_header_len;
737 if (!(dev->flags & IFF_UP))
741 if (len > dev->mtu+reserve)
744 skb = sock_alloc_send_skb(sk, len + LL_ALLOCATED_SPACE(dev),
745 msg->msg_flags & MSG_DONTWAIT, &err);
749 skb_reserve(skb, LL_RESERVED_SPACE(dev));
750 skb_reset_network_header(skb);
753 if (sock->type == SOCK_DGRAM &&
754 dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len) < 0)
757 /* Returns -EFAULT on error */
758 err = memcpy_fromiovec(skb_put(skb,len), msg->msg_iov, len);
762 skb->protocol = proto;
764 skb->priority = sk->sk_priority;
770 err = dev_queue_xmit(skb);
771 if (err > 0 && (err = net_xmit_errno(err)) != 0)
788 * Close a PACKET socket. This is fairly simple. We immediately go
789 * to 'closed' state and remove our protocol entry in the device list.
792 static int packet_release(struct socket *sock)
794 struct sock *sk = sock->sk;
795 struct packet_sock *po;
804 write_lock_bh(&net->packet.sklist_lock);
805 sk_del_node_init(sk);
806 write_unlock_bh(&net->packet.sklist_lock);
809 * Unhook packet receive handler.
814 * Remove the protocol hook
816 dev_remove_pack(&po->prot_hook);
822 packet_flush_mclist(sk);
824 #ifdef CONFIG_PACKET_MMAP
826 struct tpacket_req req;
827 memset(&req, 0, sizeof(req));
828 packet_set_ring(sk, &req, 1);
833 * Now the socket is dead. No more input will appear.
841 skb_queue_purge(&sk->sk_receive_queue);
842 sk_refcnt_debug_release(sk);
849 * Attach a packet hook.
852 static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 protocol)
854 struct packet_sock *po = pkt_sk(sk);
856 * Detach an existing hook if present.
861 spin_lock(&po->bind_lock);
866 spin_unlock(&po->bind_lock);
867 dev_remove_pack(&po->prot_hook);
868 spin_lock(&po->bind_lock);
872 po->prot_hook.type = protocol;
873 po->prot_hook.dev = dev;
875 po->ifindex = dev ? dev->ifindex : 0;
880 if (!dev || (dev->flags & IFF_UP)) {
881 dev_add_pack(&po->prot_hook);
885 sk->sk_err = ENETDOWN;
886 if (!sock_flag(sk, SOCK_DEAD))
887 sk->sk_error_report(sk);
891 spin_unlock(&po->bind_lock);
897 * Bind a packet socket to a device
900 static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr, int addr_len)
902 struct sock *sk=sock->sk;
904 struct net_device *dev;
911 if (addr_len != sizeof(struct sockaddr))
913 strlcpy(name,uaddr->sa_data,sizeof(name));
915 dev = dev_get_by_name(sock_net(sk), name);
917 err = packet_do_bind(sk, dev, pkt_sk(sk)->num);
923 static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
925 struct sockaddr_ll *sll = (struct sockaddr_ll*)uaddr;
926 struct sock *sk=sock->sk;
927 struct net_device *dev = NULL;
935 if (addr_len < sizeof(struct sockaddr_ll))
937 if (sll->sll_family != AF_PACKET)
940 if (sll->sll_ifindex) {
942 dev = dev_get_by_index(sock_net(sk), sll->sll_ifindex);
946 err = packet_do_bind(sk, dev, sll->sll_protocol ? : pkt_sk(sk)->num);
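/*
 * Illustrative userspace sketch (not part of this file): binding a packet
 * socket to one interface with a struct sockaddr_ll, which is what arrives
 * in packet_bind() above.  The interface name and protocol are assumptions
 * of the example; fd is an AF_PACKET socket as in the earlier sketch.
 *
 *	#include <string.h>
 *	#include <sys/socket.h>
 *	#include <net/if.h>		// if_nametoindex()
 *	#include <arpa/inet.h>		// htons()
 *	#include <linux/if_packet.h>
 *	#include <linux/if_ether.h>
 *
 *	struct sockaddr_ll sll;
 *	memset(&sll, 0, sizeof(sll));
 *	sll.sll_family   = AF_PACKET;
 *	sll.sll_protocol = htons(ETH_P_IP);
 *	sll.sll_ifindex  = if_nametoindex("eth0");
 *	bind(fd, (struct sockaddr *)&sll, sizeof(sll));
 */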
954 static struct proto packet_proto = {
956 .owner = THIS_MODULE,
957 .obj_size = sizeof(struct packet_sock),
961 * Create a packet socket of type SOCK_RAW, SOCK_DGRAM or SOCK_PACKET.
964 static int packet_create(struct net *net, struct socket *sock, int protocol)
967 struct packet_sock *po;
968 __be16 proto = (__force __be16)protocol; /* weird, but documented */
971 if (!capable(CAP_NET_RAW))
973 if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW &&
974 sock->type != SOCK_PACKET)
975 return -ESOCKTNOSUPPORT;
977 sock->state = SS_UNCONNECTED;
980 sk = sk_alloc(net, PF_PACKET, GFP_KERNEL, &packet_proto);
984 sock->ops = &packet_ops;
985 if (sock->type == SOCK_PACKET)
986 sock->ops = &packet_ops_spkt;
988 sock_init_data(sock, sk);
991 sk->sk_family = PF_PACKET;
994 sk->sk_destruct = packet_sock_destruct;
995 sk_refcnt_debug_inc(sk);
998 * Attach a protocol block
1001 spin_lock_init(&po->bind_lock);
1002 po->prot_hook.func = packet_rcv;
1004 if (sock->type == SOCK_PACKET)
1005 po->prot_hook.func = packet_rcv_spkt;
1007 po->prot_hook.af_packet_priv = sk;
1010 po->prot_hook.type = proto;
1011 dev_add_pack(&po->prot_hook);
1016 write_lock_bh(&net->packet.sklist_lock);
1017 sk_add_node(sk, &net->packet.sklist);
1018 write_unlock_bh(&net->packet.sklist_lock);
1025 * Pull a packet from our receive queue and hand it to the user.
1026 * If necessary we block.
1029 static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
1030 struct msghdr *msg, size_t len, int flags)
1032 struct sock *sk = sock->sk;
1033 struct sk_buff *skb;
1035 struct sockaddr_ll *sll;
1038 if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT))
1042 /* What error should we return now? EUNATTACH? */
1043 if (pkt_sk(sk)->ifindex < 0)
1048 * Call the generic datagram receiver. This handles all sorts
1049 * of horrible races and re-entrancy so we can forget about it
1050 * in the protocol layers.
1052 * Now it will return ENETDOWN if the device has just gone down,
1053 * but then it will block.
1056 skb=skb_recv_datagram(sk,flags,flags&MSG_DONTWAIT,&err);
1059 * An error occurred so return it. Because skb_recv_datagram()
1060 * handles the blocking we don't see and worry about blocking
1068 * If the address length field is there to be filled in, we fill
1072 sll = &PACKET_SKB_CB(skb)->sa.ll;
1073 if (sock->type == SOCK_PACKET)
1074 msg->msg_namelen = sizeof(struct sockaddr_pkt);
1076 msg->msg_namelen = sll->sll_halen + offsetof(struct sockaddr_ll, sll_addr);
1079 * You lose any data beyond the buffer you gave. If this worries a
1080 * user program, it can ask the device for its MTU anyway.
1087 msg->msg_flags|=MSG_TRUNC;
1090 err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
1094 sock_recv_timestamp(msg, sk, skb);
1097 memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa,
1100 if (pkt_sk(sk)->auxdata) {
1101 struct tpacket_auxdata aux;
1103 aux.tp_status = TP_STATUS_USER;
1104 if (skb->ip_summed == CHECKSUM_PARTIAL)
1105 aux.tp_status |= TP_STATUS_CSUMNOTREADY;
1106 aux.tp_len = PACKET_SKB_CB(skb)->origlen;
1107 aux.tp_snaplen = skb->len;
1109 aux.tp_net = skb_network_offset(skb);
1111 put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux);
1115 * Free or return the buffer as appropriate. Again this
1116 * hides all the races and re-entrancy issues from us.
1118 err = (flags&MSG_TRUNC) ? skb->len : copied;
1121 skb_free_datagram(sk, skb);
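/*
 * Illustrative userspace sketch (not part of this file): reading the
 * tpacket_auxdata control message produced above.  The socket must have
 * PACKET_AUXDATA enabled first; buffer sizes and the omitted error handling
 * are assumptions of the example, and fd is an AF_PACKET socket as in the
 * earlier sketches.
 *
 *	#include <sys/socket.h>
 *	#include <linux/if_packet.h>
 *
 *	int one = 1;
 *	setsockopt(fd, SOL_PACKET, PACKET_AUXDATA, &one, sizeof(one));
 *
 *	unsigned char buf[2048], cbuf[CMSG_SPACE(sizeof(struct tpacket_auxdata))];
 *	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
 *	struct msghdr msg = {
 *		.msg_iov = &iov, .msg_iovlen = 1,
 *		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
 *	};
 *	recvmsg(fd, &msg, 0);
 *	for (struct cmsghdr *c = CMSG_FIRSTHDR(&msg); c; c = CMSG_NXTHDR(&msg, c))
 *		if (c->cmsg_level == SOL_PACKET && c->cmsg_type == PACKET_AUXDATA) {
 *			struct tpacket_auxdata *aux = (void *)CMSG_DATA(c);
 *			// aux->tp_len is the original length, aux->tp_snaplen
 *			// how much was actually captured.
 *		}
 */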
1126 static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr,
1127 int *uaddr_len, int peer)
1129 struct net_device *dev;
1130 struct sock *sk = sock->sk;
1135 uaddr->sa_family = AF_PACKET;
1136 dev = dev_get_by_index(sock_net(sk), pkt_sk(sk)->ifindex);
1138 strlcpy(uaddr->sa_data, dev->name, 15);
1141 memset(uaddr->sa_data, 0, 14);
1142 *uaddr_len = sizeof(*uaddr);
1147 static int packet_getname(struct socket *sock, struct sockaddr *uaddr,
1148 int *uaddr_len, int peer)
1150 struct net_device *dev;
1151 struct sock *sk = sock->sk;
1152 struct packet_sock *po = pkt_sk(sk);
1153 struct sockaddr_ll *sll = (struct sockaddr_ll*)uaddr;
1158 sll->sll_family = AF_PACKET;
1159 sll->sll_ifindex = po->ifindex;
1160 sll->sll_protocol = po->num;
1161 dev = dev_get_by_index(sock_net(sk), po->ifindex);
1163 sll->sll_hatype = dev->type;
1164 sll->sll_halen = dev->addr_len;
1165 memcpy(sll->sll_addr, dev->dev_addr, dev->addr_len);
1168 sll->sll_hatype = 0; /* Bad: we have no ARPHRD_UNSPEC */
1171 *uaddr_len = offsetof(struct sockaddr_ll, sll_addr) + sll->sll_halen;
1176 static void packet_dev_mc(struct net_device *dev, struct packet_mclist *i, int what)
1179 case PACKET_MR_MULTICAST:
1181 dev_mc_add(dev, i->addr, i->alen, 0);
1183 dev_mc_delete(dev, i->addr, i->alen, 0);
1185 case PACKET_MR_PROMISC:
1186 dev_set_promiscuity(dev, what);
1188 case PACKET_MR_ALLMULTI:
1189 dev_set_allmulti(dev, what);
1195 static void packet_dev_mclist(struct net_device *dev, struct packet_mclist *i, int what)
1197 for ( ; i; i=i->next) {
1198 if (i->ifindex == dev->ifindex)
1199 packet_dev_mc(dev, i, what);
1203 static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq)
1205 struct packet_sock *po = pkt_sk(sk);
1206 struct packet_mclist *ml, *i;
1207 struct net_device *dev;
1213 dev = __dev_get_by_index(sock_net(sk), mreq->mr_ifindex);
1218 if (mreq->mr_alen > dev->addr_len)
1222 i = kmalloc(sizeof(*i), GFP_KERNEL);
1227 for (ml = po->mclist; ml; ml = ml->next) {
1228 if (ml->ifindex == mreq->mr_ifindex &&
1229 ml->type == mreq->mr_type &&
1230 ml->alen == mreq->mr_alen &&
1231 memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
1233 /* Free the new element ... */
1239 i->type = mreq->mr_type;
1240 i->ifindex = mreq->mr_ifindex;
1241 i->alen = mreq->mr_alen;
1242 memcpy(i->addr, mreq->mr_address, i->alen);
1244 i->next = po->mclist;
1246 packet_dev_mc(dev, i, +1);
1253 static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq)
1255 struct packet_mclist *ml, **mlp;
1259 for (mlp = &pkt_sk(sk)->mclist; (ml = *mlp) != NULL; mlp = &ml->next) {
1260 if (ml->ifindex == mreq->mr_ifindex &&
1261 ml->type == mreq->mr_type &&
1262 ml->alen == mreq->mr_alen &&
1263 memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
1264 if (--ml->count == 0) {
1265 struct net_device *dev;
1267 dev = dev_get_by_index(sock_net(sk), ml->ifindex);
1269 packet_dev_mc(dev, ml, -1);
1279 return -EADDRNOTAVAIL;
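/*
 * Illustrative userspace sketch (not part of this file): how the membership
 * requests handled above are issued.  PACKET_MR_PROMISC is used here; a
 * PACKET_MR_MULTICAST request would additionally fill mr_alen/mr_address.
 * The interface name is an assumption of the example, and fd is an
 * AF_PACKET socket as in the earlier sketches.
 *
 *	#include <string.h>
 *	#include <sys/socket.h>
 *	#include <net/if.h>
 *	#include <linux/if_packet.h>
 *
 *	struct packet_mreq mreq;
 *	memset(&mreq, 0, sizeof(mreq));
 *	mreq.mr_ifindex = if_nametoindex("eth0");
 *	mreq.mr_type    = PACKET_MR_PROMISC;
 *	setsockopt(fd, SOL_PACKET, PACKET_ADD_MEMBERSHIP, &mreq, sizeof(mreq));
 *	// ... and PACKET_DROP_MEMBERSHIP with the same mreq undoes it.
 */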
1282 static void packet_flush_mclist(struct sock *sk)
1284 struct packet_sock *po = pkt_sk(sk);
1285 struct packet_mclist *ml;
1291 while ((ml = po->mclist) != NULL) {
1292 struct net_device *dev;
1294 po->mclist = ml->next;
1295 if ((dev = dev_get_by_index(sock_net(sk), ml->ifindex)) != NULL) {
1296 packet_dev_mc(dev, ml, -1);
1305 packet_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen)
1307 struct sock *sk = sock->sk;
1308 struct packet_sock *po = pkt_sk(sk);
1311 if (level != SOL_PACKET)
1312 return -ENOPROTOOPT;
1315 case PACKET_ADD_MEMBERSHIP:
1316 case PACKET_DROP_MEMBERSHIP:
1318 struct packet_mreq_max mreq;
1320 memset(&mreq, 0, sizeof(mreq));
1321 if (len < sizeof(struct packet_mreq))
1323 if (len > sizeof(mreq))
1325 if (copy_from_user(&mreq,optval,len))
1327 if (len < (mreq.mr_alen + offsetof(struct packet_mreq, mr_address)))
1329 if (optname == PACKET_ADD_MEMBERSHIP)
1330 ret = packet_mc_add(sk, &mreq);
1332 ret = packet_mc_drop(sk, &mreq);
1336 #ifdef CONFIG_PACKET_MMAP
1337 case PACKET_RX_RING:
1339 struct tpacket_req req;
1341 if (optlen<sizeof(req))
1343 if (copy_from_user(&req,optval,sizeof(req)))
1345 return packet_set_ring(sk, &req, 0);
1347 case PACKET_COPY_THRESH:
1351 if (optlen!=sizeof(val))
1353 if (copy_from_user(&val,optval,sizeof(val)))
1356 pkt_sk(sk)->copy_thresh = val;
1360 case PACKET_AUXDATA:
1364 if (optlen < sizeof(val))
1366 if (copy_from_user(&val, optval, sizeof(val)))
1369 po->auxdata = !!val;
1372 case PACKET_ORIGDEV:
1376 if (optlen < sizeof(val))
1378 if (copy_from_user(&val, optval, sizeof(val)))
1381 po->origdev = !!val;
1385 return -ENOPROTOOPT;
1389 static int packet_getsockopt(struct socket *sock, int level, int optname,
1390 char __user *optval, int __user *optlen)
1394 struct sock *sk = sock->sk;
1395 struct packet_sock *po = pkt_sk(sk);
1397 struct tpacket_stats st;
1399 if (level != SOL_PACKET)
1400 return -ENOPROTOOPT;
1402 if (get_user(len, optlen))
1409 case PACKET_STATISTICS:
1410 if (len > sizeof(struct tpacket_stats))
1411 len = sizeof(struct tpacket_stats);
1412 spin_lock_bh(&sk->sk_receive_queue.lock);
1414 memset(&po->stats, 0, sizeof(st));
1415 spin_unlock_bh(&sk->sk_receive_queue.lock);
1416 st.tp_packets += st.tp_drops;
1420 case PACKET_AUXDATA:
1421 if (len > sizeof(int))
1427 case PACKET_ORIGDEV:
1428 if (len > sizeof(int))
1435 return -ENOPROTOOPT;
1438 if (put_user(len, optlen))
1440 if (copy_to_user(optval, data, len))
1446 static int packet_notifier(struct notifier_block *this, unsigned long msg, void *data)
1449 struct hlist_node *node;
1450 struct net_device *dev = data;
1451 struct net *net = dev_net(dev);
1453 read_lock(&net->packet.sklist_lock);
1454 sk_for_each(sk, node, &net->packet.sklist) {
1455 struct packet_sock *po = pkt_sk(sk);
1458 case NETDEV_UNREGISTER:
1460 packet_dev_mclist(dev, po->mclist, -1);
1464 if (dev->ifindex == po->ifindex) {
1465 spin_lock(&po->bind_lock);
1467 __dev_remove_pack(&po->prot_hook);
1470 sk->sk_err = ENETDOWN;
1471 if (!sock_flag(sk, SOCK_DEAD))
1472 sk->sk_error_report(sk);
1474 if (msg == NETDEV_UNREGISTER) {
1476 po->prot_hook.dev = NULL;
1478 spin_unlock(&po->bind_lock);
1482 spin_lock(&po->bind_lock);
1483 if (dev->ifindex == po->ifindex && po->num &&
1485 dev_add_pack(&po->prot_hook);
1489 spin_unlock(&po->bind_lock);
1493 read_unlock(&net->packet.sklist_lock);
1498 static int packet_ioctl(struct socket *sock, unsigned int cmd,
1501 struct sock *sk = sock->sk;
1506 int amount = atomic_read(&sk->sk_wmem_alloc);
1507 return put_user(amount, (int __user *)arg);
1511 struct sk_buff *skb;
1514 spin_lock_bh(&sk->sk_receive_queue.lock);
1515 skb = skb_peek(&sk->sk_receive_queue);
1518 spin_unlock_bh(&sk->sk_receive_queue.lock);
1519 return put_user(amount, (int __user *)arg);
1522 return sock_get_timestamp(sk, (struct timeval __user *)arg);
1524 return sock_get_timestampns(sk, (struct timespec __user *)arg);
1534 case SIOCGIFBRDADDR:
1535 case SIOCSIFBRDADDR:
1536 case SIOCGIFNETMASK:
1537 case SIOCSIFNETMASK:
1538 case SIOCGIFDSTADDR:
1539 case SIOCSIFDSTADDR:
1541 if (sock_net(sk) != &init_net)
1542 return -ENOIOCTLCMD;
1543 return inet_dgram_ops.ioctl(sock, cmd, arg);
1547 return -ENOIOCTLCMD;
1552 #ifndef CONFIG_PACKET_MMAP
1553 #define packet_mmap sock_no_mmap
1554 #define packet_poll datagram_poll
1557 static unsigned int packet_poll(struct file * file, struct socket *sock,
1560 struct sock *sk = sock->sk;
1561 struct packet_sock *po = pkt_sk(sk);
1562 unsigned int mask = datagram_poll(file, sock, wait);
1564 spin_lock_bh(&sk->sk_receive_queue.lock);
1566 unsigned last = po->head ? po->head-1 : po->frame_max;
1567 struct tpacket_hdr *h;
1569 h = packet_lookup_frame(po, last);
1572 mask |= POLLIN | POLLRDNORM;
1574 spin_unlock_bh(&sk->sk_receive_queue.lock);
1579 /* Dirty? Well, I still have not learned a better way to account
1583 static void packet_mm_open(struct vm_area_struct *vma)
1585 struct file *file = vma->vm_file;
1586 struct socket * sock = file->private_data;
1587 struct sock *sk = sock->sk;
1590 atomic_inc(&pkt_sk(sk)->mapped);
1593 static void packet_mm_close(struct vm_area_struct *vma)
1595 struct file *file = vma->vm_file;
1596 struct socket * sock = file->private_data;
1597 struct sock *sk = sock->sk;
1600 atomic_dec(&pkt_sk(sk)->mapped);
1603 static struct vm_operations_struct packet_mmap_ops = {
1604 .open = packet_mm_open,
1605 .close = packet_mm_close,
1608 static void free_pg_vec(char **pg_vec, unsigned int order, unsigned int len)
1612 for (i = 0; i < len; i++) {
1613 if (likely(pg_vec[i]))
1614 free_pages((unsigned long) pg_vec[i], order);
1619 static inline char *alloc_one_pg_vec_page(unsigned long order)
1621 return (char *) __get_free_pages(GFP_KERNEL | __GFP_COMP | __GFP_ZERO,
1625 static char **alloc_pg_vec(struct tpacket_req *req, int order)
1627 unsigned int block_nr = req->tp_block_nr;
1631 pg_vec = kzalloc(block_nr * sizeof(char *), GFP_KERNEL);
1632 if (unlikely(!pg_vec))
1635 for (i = 0; i < block_nr; i++) {
1636 pg_vec[i] = alloc_one_pg_vec_page(order);
1637 if (unlikely(!pg_vec[i]))
1638 goto out_free_pgvec;
1645 free_pg_vec(pg_vec, order, block_nr);
1650 static int packet_set_ring(struct sock *sk, struct tpacket_req *req, int closing)
1652 char **pg_vec = NULL;
1653 struct packet_sock *po = pkt_sk(sk);
1654 int was_running, order = 0;
1658 if (req->tp_block_nr) {
1661 /* Sanity tests and some calculations */
1663 if (unlikely(po->pg_vec))
1666 if (unlikely((int)req->tp_block_size <= 0))
1668 if (unlikely(req->tp_block_size & (PAGE_SIZE - 1)))
1670 if (unlikely(req->tp_frame_size < TPACKET_HDRLEN))
1672 if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1)))
1675 po->frames_per_block = req->tp_block_size/req->tp_frame_size;
1676 if (unlikely(po->frames_per_block <= 0))
1678 if (unlikely((po->frames_per_block * req->tp_block_nr) !=
1683 order = get_order(req->tp_block_size);
1684 pg_vec = alloc_pg_vec(req, order);
1685 if (unlikely(!pg_vec))
1688 for (i = 0; i < req->tp_block_nr; i++) {
1689 char *ptr = pg_vec[i];
1690 struct tpacket_hdr *header;
1693 for (k = 0; k < po->frames_per_block; k++) {
1694 header = (struct tpacket_hdr *) ptr;
1695 header->tp_status = TP_STATUS_KERNEL;
1696 ptr += req->tp_frame_size;
1701 if (unlikely(req->tp_frame_nr))
1707 /* Detach socket from network */
1708 spin_lock(&po->bind_lock);
1709 was_running = po->running;
1712 __dev_remove_pack(&po->prot_hook);
1717 spin_unlock(&po->bind_lock);
1722 if (closing || atomic_read(&po->mapped) == 0) {
1724 #define XC(a, b) ({ __typeof__ ((a)) __t; __t = (a); (a) = (b); __t; })
1726 spin_lock_bh(&sk->sk_receive_queue.lock);
1727 pg_vec = XC(po->pg_vec, pg_vec);
1728 po->frame_max = (req->tp_frame_nr - 1);
1730 po->frame_size = req->tp_frame_size;
1731 spin_unlock_bh(&sk->sk_receive_queue.lock);
1733 order = XC(po->pg_vec_order, order);
1734 req->tp_block_nr = XC(po->pg_vec_len, req->tp_block_nr);
1736 po->pg_vec_pages = req->tp_block_size/PAGE_SIZE;
1737 po->prot_hook.func = po->pg_vec ? tpacket_rcv : packet_rcv;
1738 skb_queue_purge(&sk->sk_receive_queue);
1740 if (atomic_read(&po->mapped))
1741 printk(KERN_DEBUG "packet_mmap: vma is busy: %d\n", atomic_read(&po->mapped));
1744 spin_lock(&po->bind_lock);
1745 if (was_running && !po->running) {
1749 dev_add_pack(&po->prot_hook);
1751 spin_unlock(&po->bind_lock);
1756 free_pg_vec(pg_vec, order, req->tp_block_nr);
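/*
 * Illustrative userspace sketch (not part of this file): a tpacket_req that
 * satisfies the sanity checks in packet_set_ring() above on a system with
 * 4 KiB pages.  Each 4 KiB block holds two 2 KiB frames, so tp_frame_nr
 * must equal tp_block_nr * 2.  The sizes and fd are assumptions of the
 * example.
 *
 *	#include <sys/socket.h>
 *	#include <linux/if_packet.h>
 *
 *	struct tpacket_req req = {
 *		.tp_block_size = 4096,	// multiple of PAGE_SIZE
 *		.tp_block_nr   = 4,
 *		.tp_frame_size = 2048,	// multiple of TPACKET_ALIGNMENT
 *		.tp_frame_nr   = 4 * (4096 / 2048),
 *	};
 *	setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
 */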
1761 static int packet_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma)
1763 struct sock *sk = sock->sk;
1764 struct packet_sock *po = pkt_sk(sk);
1766 unsigned long start;
1773 size = vma->vm_end - vma->vm_start;
1776 if (po->pg_vec == NULL)
1778 if (size != po->pg_vec_len*po->pg_vec_pages*PAGE_SIZE)
1781 start = vma->vm_start;
1782 for (i = 0; i < po->pg_vec_len; i++) {
1783 struct page *page = virt_to_page(po->pg_vec[i]);
1786 for (pg_num = 0; pg_num < po->pg_vec_pages; pg_num++, page++) {
1787 err = vm_insert_page(vma, start, page);
1793 atomic_inc(&po->mapped);
1794 vma->vm_ops = &packet_mmap_ops;
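/*
 * Illustrative userspace sketch (not part of this file): mapping the ring
 * configured in the previous sketch and consuming frames.  tpacket_rcv()
 * flips tp_status to TP_STATUS_USER; the reader hands the slot back by
 * writing TP_STATUS_KERNEL.  Because each block is exactly two frames in
 * that geometry, frame i lives at ring + i * tp_frame_size; req and fd are
 * carried over from the previous sketch, and error handling is omitted.
 *
 *	#include <poll.h>
 *	#include <sys/mman.h>
 *	#include <linux/if_packet.h>
 *
 *	size_t ring_size = req.tp_block_size * req.tp_block_nr;
 *	char *ring = mmap(NULL, ring_size, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, fd, 0);
 *	unsigned int i = 0;
 *	for (;;) {
 *		struct tpacket_hdr *h =
 *			(void *)(ring + i * req.tp_frame_size);
 *		if (!(h->tp_status & TP_STATUS_USER)) {
 *			struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *			poll(&pfd, 1, -1);
 *			continue;
 *		}
 *		// frame data starts at (char *)h + h->tp_mac, tp_snaplen bytes
 *		h->tp_status = TP_STATUS_KERNEL;	// hand the slot back
 *		i = (i + 1) % req.tp_frame_nr;
 *	}
 */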
1804 static const struct proto_ops packet_ops_spkt = {
1805 .family = PF_PACKET,
1806 .owner = THIS_MODULE,
1807 .release = packet_release,
1808 .bind = packet_bind_spkt,
1809 .connect = sock_no_connect,
1810 .socketpair = sock_no_socketpair,
1811 .accept = sock_no_accept,
1812 .getname = packet_getname_spkt,
1813 .poll = datagram_poll,
1814 .ioctl = packet_ioctl,
1815 .listen = sock_no_listen,
1816 .shutdown = sock_no_shutdown,
1817 .setsockopt = sock_no_setsockopt,
1818 .getsockopt = sock_no_getsockopt,
1819 .sendmsg = packet_sendmsg_spkt,
1820 .recvmsg = packet_recvmsg,
1821 .mmap = sock_no_mmap,
1822 .sendpage = sock_no_sendpage,
1825 static const struct proto_ops packet_ops = {
1826 .family = PF_PACKET,
1827 .owner = THIS_MODULE,
1828 .release = packet_release,
1829 .bind = packet_bind,
1830 .connect = sock_no_connect,
1831 .socketpair = sock_no_socketpair,
1832 .accept = sock_no_accept,
1833 .getname = packet_getname,
1834 .poll = packet_poll,
1835 .ioctl = packet_ioctl,
1836 .listen = sock_no_listen,
1837 .shutdown = sock_no_shutdown,
1838 .setsockopt = packet_setsockopt,
1839 .getsockopt = packet_getsockopt,
1840 .sendmsg = packet_sendmsg,
1841 .recvmsg = packet_recvmsg,
1842 .mmap = packet_mmap,
1843 .sendpage = sock_no_sendpage,
1846 static struct net_proto_family packet_family_ops = {
1847 .family = PF_PACKET,
1848 .create = packet_create,
1849 .owner = THIS_MODULE,
1852 static struct notifier_block packet_netdev_notifier = {
1853 .notifier_call = packet_notifier,
1856 #ifdef CONFIG_PROC_FS
1857 static inline struct sock *packet_seq_idx(struct net *net, loff_t off)
1860 struct hlist_node *node;
1862 sk_for_each(s, node, &net->packet.sklist) {
1869 static void *packet_seq_start(struct seq_file *seq, loff_t *pos)
1870 __acquires(seq_file_net(seq)->packet.sklist_lock)
1872 struct net *net = seq_file_net(seq);
1873 read_lock(&net->packet.sklist_lock);
1874 return *pos ? packet_seq_idx(net, *pos - 1) : SEQ_START_TOKEN;
1877 static void *packet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1879 struct net *net = seq_file_net(seq);
1881 return (v == SEQ_START_TOKEN)
1882 ? sk_head(&net->packet.sklist)
1883 : sk_next((struct sock*)v) ;
1886 static void packet_seq_stop(struct seq_file *seq, void *v)
1887 __releases(seq_file_net(seq)->packet.sklist_lock)
1889 struct net *net = seq_file_net(seq);
1890 read_unlock(&net->packet.sklist_lock);
1893 static int packet_seq_show(struct seq_file *seq, void *v)
1895 if (v == SEQ_START_TOKEN)
1896 seq_puts(seq, "sk RefCnt Type Proto Iface R Rmem User Inode\n");
1899 const struct packet_sock *po = pkt_sk(s);
1902 "%p %-6d %-4d %04x %-5d %1d %-6u %-6u %-6lu\n",
1904 atomic_read(&s->sk_refcnt),
1909 atomic_read(&s->sk_rmem_alloc),
1917 static const struct seq_operations packet_seq_ops = {
1918 .start = packet_seq_start,
1919 .next = packet_seq_next,
1920 .stop = packet_seq_stop,
1921 .show = packet_seq_show,
1924 static int packet_seq_open(struct inode *inode, struct file *file)
1926 return seq_open_net(inode, file, &packet_seq_ops,
1927 sizeof(struct seq_net_private));
1930 static const struct file_operations packet_seq_fops = {
1931 .owner = THIS_MODULE,
1932 .open = packet_seq_open,
1934 .llseek = seq_lseek,
1935 .release = seq_release_net,
1940 static int packet_net_init(struct net *net)
1942 rwlock_init(&net->packet.sklist_lock);
1943 INIT_HLIST_HEAD(&net->packet.sklist);
1945 if (!proc_net_fops_create(net, "packet", 0, &packet_seq_fops))
1951 static void packet_net_exit(struct net *net)
1953 proc_net_remove(net, "packet");
1956 static struct pernet_operations packet_net_ops = {
1957 .init = packet_net_init,
1958 .exit = packet_net_exit,
1962 static void __exit packet_exit(void)
1964 unregister_netdevice_notifier(&packet_netdev_notifier);
1965 unregister_pernet_subsys(&packet_net_ops);
1966 sock_unregister(PF_PACKET);
1967 proto_unregister(&packet_proto);
1970 static int __init packet_init(void)
1972 int rc = proto_register(&packet_proto, 0);
1977 sock_register(&packet_family_ops);
1978 register_pernet_subsys(&packet_net_ops);
1979 register_netdevice_notifier(&packet_netdev_notifier);
1984 module_init(packet_init);
1985 module_exit(packet_exit);
1986 MODULE_LICENSE("GPL");
1987 MODULE_ALIAS_NETPROTO(PF_PACKET);