/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		PACKET - implements raw packet sockets.
 *
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *
 * Fixes:
 *		Alan Cox	:	verify_area() now used correctly
 *		Alan Cox	:	new skbuff lists, look ma no backlogs!
 *		Alan Cox	:	tidied skbuff lists.
 *		Alan Cox	:	Now uses generic datagram routines I
 *					added. Also fixed the peek/read crash
 *					from all old Linux datagram code.
 *		Alan Cox	:	Uses the improved datagram code.
 *		Alan Cox	:	Added NULL's for socket options.
 *		Alan Cox	:	Re-commented the code.
 *		Alan Cox	:	Use new kernel side addressing
 *		Rob Janssen	:	Correct MTU usage.
 *		Dave Platt	:	Counter leaks caused by incorrect
 *					interrupt locking and some slightly
 *					dubious gcc output. Can you read
 *					compiler: it said _VOLATILE_
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	New buffers. Use sk->mac.raw.
 *		Alan Cox	:	sendmsg/recvmsg support.
 *		Alan Cox	:	Protocol setting support
 *		Alexey Kuznetsov:	Untied from IPv4 stack.
 *		Cyrus Durgin	:	Fixed kerneld for kmod.
 *		Michal Ostrowski:	Module initialization cleanup.
 *		Ulises Alonso	:	Frame number limit removal and
 *					packet_set_ring memory leak.
 *		Eric Biederman	:	Allow for > 8 byte hardware addresses.
 *					The convention is that longer addresses
 *					will simply extend the hardware address
 *					byte arrays at the end of sockaddr_ll
 *					and packet_mreq.
 *		Johann Baudy	:	Added TX RING.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#include <linux/types.h>
#include <linux/capability.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>
#include <linux/wireless.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <net/net_namespace.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/ioctls.h>
#include <asm/cacheflush.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/poll.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <net/inet_common.h>
/*
   Assumptions:
   - if a device has no dev->hard_header routine, it adds and removes the
     ll header inside itself. In this case the ll header is invisible
     outside of the device, but higher levels still should reserve
     dev->hard_header_len. Some devices are clever enough to reallocate
     the skb when the header does not fit into the reserved space
     (tunnels); others are not.
   - a packet socket receives packets with the ll header pulled,
     so SOCK_RAW should push it back.

On receive:
-----------

Incoming, dev->hard_header != NULL
   mac_header -> ll header

Outgoing, dev->hard_header != NULL
   mac_header -> ll header

Incoming, dev->hard_header == NULL
   mac_header -> UNKNOWN position. It is very likely that it points to the
		 ll header. PPP does this, which is wrong, because it
		 introduces asymmetry between the rx and tx paths.

Outgoing, dev->hard_header == NULL
   mac_header -> data. The ll header is still not built!

If dev->hard_header == NULL we are unlikely to restore a sensible ll header.

On transmit:
------------

dev->hard_header != NULL
   mac_header -> ll header

dev->hard_header == NULL (ll header is added by device, we cannot control it)
   mac_header -> data

   We should set nh.raw on output to the correct position;
   the packet classifier depends on it.
 */
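/* Usage sketch (userspace, illustrative -- not part of this file): the
 * SOCK_RAW vs. SOCK_DGRAM distinction described above. SOCK_RAW delivers
 * frames with the link-level header in place, SOCK_DGRAM with it pulled.
 * Assumes <sys/socket.h>, <linux/if_packet.h>, <linux/if_ether.h>; error
 * handling omitted.
 *
 *	int raw   = socket(AF_PACKET, SOCK_RAW,   htons(ETH_P_ALL));
 *	int dgram = socket(AF_PACKET, SOCK_DGRAM, htons(ETH_P_ALL));
 *	char buf[2048];
 *	ssize_t n;
 *	n = recv(raw, buf, sizeof(buf), 0);	-- buf[0] starts the ll header
 *	n = recv(dgram, buf, sizeof(buf), 0);	-- buf[0] starts past it
 */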
/* Private packet socket structures. */

struct packet_mclist {
	struct packet_mclist	*next;
	int			ifindex;
	int			count;
	unsigned short		type;
	unsigned short		alen;
	unsigned char		addr[MAX_ADDR_LEN];
};
/* identical to struct packet_mreq except it has
 * a longer address field. */
struct packet_mreq_max {
	int			mr_ifindex;
	unsigned short		mr_type;
	unsigned short		mr_alen;
	unsigned char		mr_address[MAX_ADDR_LEN];
};
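/* Usage sketch (userspace, illustrative): callers normally pass the shorter
 * struct packet_mreq from <linux/if_packet.h>; e.g. to put the interface
 * into promiscuous mode (ifindex is assumed to come from if_nametoindex()):
 *
 *	struct packet_mreq mr;
 *	memset(&mr, 0, sizeof(mr));
 *	mr.mr_ifindex = ifindex;
 *	mr.mr_type = PACKET_MR_PROMISC;
 *	setsockopt(fd, SOL_PACKET, PACKET_ADD_MEMBERSHIP, &mr, sizeof(mr));
 */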
#ifdef CONFIG_PACKET_MMAP
static int packet_set_ring(struct sock *sk, struct tpacket_req *req,
		int closing, int tx_ring);

struct packet_ring_buffer {
	char			**pg_vec;
	unsigned int		head;
	unsigned int		frames_per_block;
	unsigned int		frame_size;
	unsigned int		frame_max;
	unsigned int		pg_vec_order;
	unsigned int		pg_vec_pages;
	unsigned int		pg_vec_len;
	atomic_t		pending;
};

struct packet_sock;
static int tpacket_snd(struct packet_sock *po, struct msghdr *msg);
#endif

static void packet_flush_mclist(struct sock *sk);
struct packet_sock {
	/* struct sock has to be the first member of packet_sock */
	struct sock		sk;
	struct tpacket_stats	stats;
#ifdef CONFIG_PACKET_MMAP
	struct packet_ring_buffer	rx_ring;
	struct packet_ring_buffer	tx_ring;
	int			copy_thresh;
#endif
	struct packet_type	prot_hook;
	spinlock_t		bind_lock;
	struct mutex		pg_vec_lock;
	unsigned int		running:1,	/* prot_hook is attached */
				auxdata:1,
				origdev:1;
	int			ifindex;	/* bound device */
	__be16			num;
	struct packet_mclist	*mclist;
#ifdef CONFIG_PACKET_MMAP
	atomic_t		mapped;
	enum tpacket_versions	tp_version;
	unsigned int		tp_hdrlen;
	unsigned int		tp_reserve;
	unsigned int		tp_loss:1;
#endif
};
struct packet_skb_cb {
	unsigned int origlen;
	union {
		struct sockaddr_pkt pkt;
		struct sockaddr_ll ll;
	} sa;
};

#define PACKET_SKB_CB(__skb)	((struct packet_skb_cb *)((__skb)->cb))
#ifdef CONFIG_PACKET_MMAP

static void __packet_set_status(struct packet_sock *po, void *frame, int status)
{
	union { struct tpacket_hdr *h1; struct tpacket2_hdr *h2; void *raw; } h;

	h.raw = frame;
	switch (po->tp_version) {
	case TPACKET_V1:
		h.h1->tp_status = status;
		flush_dcache_page(virt_to_page(&h.h1->tp_status));
		break;
	case TPACKET_V2:
		h.h2->tp_status = status;
		flush_dcache_page(virt_to_page(&h.h2->tp_status));
		break;
	default:
		printk(KERN_ERR "TPACKET version not supported\n");
	}
}

static int __packet_get_status(struct packet_sock *po, void *frame)
{
	union { struct tpacket_hdr *h1; struct tpacket2_hdr *h2; void *raw; } h;

	h.raw = frame;
	switch (po->tp_version) {
	case TPACKET_V1:
		flush_dcache_page(virt_to_page(&h.h1->tp_status));
		return h.h1->tp_status;
	case TPACKET_V2:
		flush_dcache_page(virt_to_page(&h.h2->tp_status));
		return h.h2->tp_status;
	default:
		printk(KERN_ERR "TPACKET version not supported\n");
		return 0;
	}
}
static void *packet_lookup_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		unsigned int position,
		int status)
{
	unsigned int pg_vec_pos, frame_offset;
	union { struct tpacket_hdr *h1; struct tpacket2_hdr *h2; void *raw; } h;

	pg_vec_pos = position / rb->frames_per_block;
	frame_offset = position % rb->frames_per_block;

	h.raw = rb->pg_vec[pg_vec_pos] + (frame_offset * rb->frame_size);

	if (status != __packet_get_status(po, h.raw))
		return NULL;

	return h.raw;
}

static inline void *packet_current_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		int status)
{
	return packet_lookup_frame(po, rb, rb->head, status);
}

static inline void *packet_previous_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		int status)
{
	unsigned int previous = rb->head ? rb->head - 1 : rb->frame_max;
	return packet_lookup_frame(po, rb, previous, status);
}

static inline void packet_increment_head(struct packet_ring_buffer *buff)
{
	buff->head = buff->head != buff->frame_max ? buff->head + 1 : 0;
}
static inline struct packet_sock *pkt_sk(struct sock *sk)
{
	return (struct packet_sock *)sk;
}

static void packet_sock_destruct(struct sock *sk)
{
	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
	WARN_ON(atomic_read(&sk->sk_wmem_alloc));

	if (!sock_flag(sk, SOCK_DEAD)) {
		printk("Attempt to release alive packet socket: %p\n", sk);
		return;
	}

	sk_refcnt_debug_dec(sk);
}

static const struct proto_ops packet_ops;
static const struct proto_ops packet_ops_spkt;
static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev,
			   struct packet_type *pt, struct net_device *orig_dev)
{
	struct sock *sk;
	struct sockaddr_pkt *spkt;

	/*
	 *	When we registered the protocol we saved the socket in the data
	 *	field for just this event.
	 */
	sk = pt->af_packet_priv;

	/*
	 *	Yank back the headers [hope the device set this
	 *	right or kerboom...]
	 *
	 *	Incoming packets have the ll header pulled; push it back.
	 *
	 *	For outgoing ones skb->data == skb_mac_header(skb),
	 *	so this procedure is a noop.
	 */
	if (skb->pkt_type == PACKET_LOOPBACK)
		goto out;

	if (dev_net(dev) != sock_net(sk))
		goto out;

	if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
		goto oom;

	/* drop any routing info */
	dst_release(skb->dst);

	/* drop conntrack reference */
	nf_reset(skb);

	spkt = &PACKET_SKB_CB(skb)->sa.pkt;

	skb_push(skb, skb->data - skb_mac_header(skb));

	/*
	 *	The SOCK_PACKET socket receives _all_ frames.
	 */
	spkt->spkt_family = dev->type;
	strlcpy(spkt->spkt_device, dev->name, sizeof(spkt->spkt_device));
	spkt->spkt_protocol = skb->protocol;

	/*
	 *	Charge the memory to the socket. This is done specifically
	 *	to prevent sockets using all the memory up.
	 */
	if (sock_queue_rcv_skb(sk, skb) == 0)
		return 0;
/*
 *	Output a raw packet to a device layer. This bypasses all the other
 *	protocol layers and you must therefore supply it with a complete frame.
 */

static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock,
			       struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct sockaddr_pkt *saddr = (struct sockaddr_pkt *)msg->msg_name;
	struct sk_buff *skb;
	struct net_device *dev;
	__be16 proto = 0;
	int err;

	/*
	 *	Get and verify the address.
	 */
	if (saddr) {
		if (msg->msg_namelen < sizeof(struct sockaddr))
			return -EINVAL;
		if (msg->msg_namelen == sizeof(struct sockaddr_pkt))
			proto = saddr->spkt_protocol;
	} else
		return -ENOTCONN;	/* SOCK_PACKET must be sent giving an address */

	/*
	 *	Find the device first to size check it
	 */
	saddr->spkt_device[13] = 0;
	dev = dev_get_by_name(sock_net(sk), saddr->spkt_device);
	err = -ENODEV;
	if (dev == NULL)
		goto out_unlock;

	err = -ENETDOWN;
	if (!(dev->flags & IFF_UP))
		goto out_unlock;

	/*
	 *	You may not queue a frame bigger than the mtu. This is the lowest level
	 *	raw protocol and you must do your own fragmentation at this level.
	 */
	err = -EMSGSIZE;
	if (len > dev->mtu + dev->hard_header_len)
		goto out_unlock;

	err = -ENOBUFS;
	skb = sock_wmalloc(sk, len + LL_RESERVED_SPACE(dev), 0, GFP_KERNEL);

	/*
	 *	If the write buffer is full, then tough. At this level the user gets to
	 *	deal with the problem - do your own algorithmic backoffs. That's far
	 *	more flexible.
	 */
	if (skb == NULL)
		goto out_unlock;

	/* FIXME: Save some space for broken drivers that write a
	 * hard header at transmission time by themselves. PPP is the
	 * notable one here. This should really be fixed at the driver level.
	 */
	skb_reserve(skb, LL_RESERVED_SPACE(dev));
	skb_reset_network_header(skb);

	/* Try to align data part correctly */
	if (dev->header_ops) {
		skb->data -= dev->hard_header_len;
		skb->tail -= dev->hard_header_len;
		if (len < dev->hard_header_len)
			skb_reset_network_header(skb);
	}

	/* Returns -EFAULT on error */
	err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
	skb->protocol = proto;
	skb->dev = dev;
	skb->priority = sk->sk_priority;
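/* Usage sketch (userspace, illustrative): the obsolete SOCK_PACKET interface
 * served here takes the device name in a struct sockaddr_pkt and a complete
 * frame (including the ll header) as payload. "eth0" and the protocol are
 * placeholder values:
 *
 *	struct sockaddr_pkt sp;
 *	memset(&sp, 0, sizeof(sp));
 *	strncpy((char *)sp.spkt_device, "eth0", sizeof(sp.spkt_device));
 *	sp.spkt_protocol = htons(ETH_P_ALL);
 *	sendto(fd, frame, frame_len, 0, (struct sockaddr *)&sp, sizeof(sp));
 */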
static inline unsigned int run_filter(struct sk_buff *skb, struct sock *sk,
				      unsigned int res)
{
	struct sk_filter *filter;

	rcu_read_lock_bh();
	filter = rcu_dereference(sk->sk_filter);
	if (filter != NULL)
		res = sk_run_filter(skb, filter->insns, filter->len);
	rcu_read_unlock_bh();

	return res;
}

/*
   This function makes lazy skb cloning in the hope that most of the packets
   are discarded by BPF.

   Note the tricky part: we DO mangle shared skbs! skb->data, skb->len
   and skb->cb are mangled. It works because (and until) packets
   falling here are owned by the current CPU. Output packets are cloned
   by dev_queue_xmit_nit(), input packets are processed by net_bh
   sequentially, so that if we return the skb to its original state on exit,
   we will not harm anyone.
 */
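/* Usage sketch (userspace, illustrative): the filter consulted by
 * run_filter() is installed with SO_ATTACH_FILTER; e.g. a trivial classic
 * BPF program that accepts up to 64k of every packet:
 *
 *	struct sock_filter code[] = {
 *		{ 0x06, 0, 0, 0x0000ffff },	-- BPF_RET|BPF_K 0xffff
 *	};
 *	struct sock_fprog prog = { .len = 1, .filter = code };
 *	setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &prog, sizeof(prog));
 *
 * run_filter() then returns the accepted snapshot length; 0 drops the
 * packet.
 */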
static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
		      struct packet_type *pt, struct net_device *orig_dev)
{
	struct sock *sk;
	struct sockaddr_ll *sll;
	struct packet_sock *po;
	u8 *skb_head = skb->data;
	int skb_len = skb->len;
	unsigned int snaplen, res;

	if (skb->pkt_type == PACKET_LOOPBACK)
		goto drop;

	sk = pt->af_packet_priv;
	po = pkt_sk(sk);

	if (dev_net(dev) != sock_net(sk))
		goto drop;

	if (dev->header_ops) {
		/* The device has an explicit notion of ll header,
		   exported to higher levels.

		   Otherwise, the device hides the details of its frame
		   structure, so that the corresponding packet head is
		   never delivered to the user.
		 */
		if (sk->sk_type != SOCK_DGRAM)
			skb_push(skb, skb->data - skb_mac_header(skb));
		else if (skb->pkt_type == PACKET_OUTGOING) {
			/* Special case: outgoing packets have ll header at head */
			skb_pull(skb, skb_network_offset(skb));
		}
	}

	snaplen = skb->len;

	res = run_filter(skb, sk, snaplen);
	if (!res)
		goto drop_n_restore;
	if (snaplen > res)
		snaplen = res;

	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
	    (unsigned)sk->sk_rcvbuf)
		goto drop_n_acct;

	if (skb_shared(skb)) {
		struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
		if (nskb == NULL)
			goto drop_n_acct;

		if (skb_head != skb->data) {
			skb->data = skb_head;
			skb->len = skb_len;
		}
		kfree_skb(skb);
		skb = nskb;
	}

	BUILD_BUG_ON(sizeof(*PACKET_SKB_CB(skb)) + MAX_ADDR_LEN - 8 >
		     sizeof(skb->cb));

	sll = &PACKET_SKB_CB(skb)->sa.ll;
	sll->sll_family = AF_PACKET;
	sll->sll_hatype = dev->type;
	sll->sll_protocol = skb->protocol;
	sll->sll_pkttype = skb->pkt_type;
	if (unlikely(po->origdev))
		sll->sll_ifindex = orig_dev->ifindex;
	else
		sll->sll_ifindex = dev->ifindex;

	sll->sll_halen = dev_parse_header(skb, sll->sll_addr);

	PACKET_SKB_CB(skb)->origlen = skb->len;

	if (pskb_trim(skb, snaplen))
		goto drop_n_acct;

	skb_set_owner_r(skb, sk);
	skb->dev = NULL;
	dst_release(skb->dst);
	skb->dst = NULL;

	/* drop conntrack reference */
	nf_reset(skb);

	spin_lock(&sk->sk_receive_queue.lock);
	po->stats.tp_packets++;
	__skb_queue_tail(&sk->sk_receive_queue, skb);
	spin_unlock(&sk->sk_receive_queue.lock);
	sk->sk_data_ready(sk, skb->len);
	return 0;

drop_n_acct:
	spin_lock(&sk->sk_receive_queue.lock);
	po->stats.tp_drops++;
	spin_unlock(&sk->sk_receive_queue.lock);

drop_n_restore:
	if (skb_head != skb->data && skb_shared(skb)) {
		skb->data = skb_head;
		skb->len = skb_len;
	}
drop:
	kfree_skb(skb);
	return 0;
}
#ifdef CONFIG_PACKET_MMAP
static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
		       struct packet_type *pt, struct net_device *orig_dev)
{
	struct sock *sk;
	struct packet_sock *po;
	struct sockaddr_ll *sll;
	union { struct tpacket_hdr *h1; struct tpacket2_hdr *h2; void *raw; } h;
	u8 *skb_head = skb->data;
	int skb_len = skb->len;
	unsigned int snaplen, res;
	unsigned long status = TP_STATUS_LOSING|TP_STATUS_USER;
	unsigned short macoff, netoff, hdrlen;
	struct sk_buff *copy_skb = NULL;
	struct timeval tv;
	struct timespec ts;

	if (skb->pkt_type == PACKET_LOOPBACK)
		goto drop;

	sk = pt->af_packet_priv;
	po = pkt_sk(sk);

	if (dev_net(dev) != sock_net(sk))
		goto drop;

	if (dev->header_ops) {
		if (sk->sk_type != SOCK_DGRAM)
			skb_push(skb, skb->data - skb_mac_header(skb));
		else if (skb->pkt_type == PACKET_OUTGOING) {
			/* Special case: outgoing packets have ll header at head */
			skb_pull(skb, skb_network_offset(skb));
		}
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		status |= TP_STATUS_CSUMNOTREADY;

	snaplen = skb->len;

	res = run_filter(skb, sk, snaplen);
	if (!res)
		goto drop_n_restore;
	if (snaplen > res)
		snaplen = res;

	if (sk->sk_type == SOCK_DGRAM) {
		macoff = netoff = TPACKET_ALIGN(po->tp_hdrlen) + 16 +
				  po->tp_reserve;
	} else {
		unsigned maclen = skb_network_offset(skb);
		netoff = TPACKET_ALIGN(po->tp_hdrlen +
				       (maclen < 16 ? 16 : maclen)) +
			po->tp_reserve;
		macoff = netoff - maclen;
	}

	if (macoff + snaplen > po->rx_ring.frame_size) {
		if (po->copy_thresh &&
		    atomic_read(&sk->sk_rmem_alloc) + skb->truesize <
		    (unsigned)sk->sk_rcvbuf) {
			if (skb_shared(skb)) {
				copy_skb = skb_clone(skb, GFP_ATOMIC);
			} else {
				copy_skb = skb_get(skb);
				skb_head = skb->data;
			}
			if (copy_skb)
				skb_set_owner_r(copy_skb, sk);
		}
		snaplen = po->rx_ring.frame_size - macoff;
		if ((int)snaplen < 0)
			snaplen = 0;
	}

	spin_lock(&sk->sk_receive_queue.lock);
	h.raw = packet_current_frame(po, &po->rx_ring, TP_STATUS_KERNEL);
	if (!h.raw)
		goto ring_is_full;
	packet_increment_head(&po->rx_ring);
	po->stats.tp_packets++;
	if (copy_skb) {
		status |= TP_STATUS_COPY;
		__skb_queue_tail(&sk->sk_receive_queue, copy_skb);
	}
	if (!po->stats.tp_drops)
		status &= ~TP_STATUS_LOSING;
	spin_unlock(&sk->sk_receive_queue.lock);

	skb_copy_bits(skb, 0, h.raw + macoff, snaplen);

	switch (po->tp_version) {
	case TPACKET_V1:
		h.h1->tp_len = skb->len;
		h.h1->tp_snaplen = snaplen;
		h.h1->tp_mac = macoff;
		h.h1->tp_net = netoff;
		if (skb->tstamp.tv64)
			tv = ktime_to_timeval(skb->tstamp);
		else
			do_gettimeofday(&tv);
		h.h1->tp_sec = tv.tv_sec;
		h.h1->tp_usec = tv.tv_usec;
		hdrlen = sizeof(*h.h1);
		break;
	case TPACKET_V2:
		h.h2->tp_len = skb->len;
		h.h2->tp_snaplen = snaplen;
		h.h2->tp_mac = macoff;
		h.h2->tp_net = netoff;
		if (skb->tstamp.tv64)
			ts = ktime_to_timespec(skb->tstamp);
		else
			getnstimeofday(&ts);
		h.h2->tp_sec = ts.tv_sec;
		h.h2->tp_nsec = ts.tv_nsec;
		h.h2->tp_vlan_tci = skb->vlan_tci;
		hdrlen = sizeof(*h.h2);
		break;
	default:
		BUG();
	}

	sll = h.raw + TPACKET_ALIGN(hdrlen);
	sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
	sll->sll_family = AF_PACKET;
	sll->sll_hatype = dev->type;
	sll->sll_protocol = skb->protocol;
	sll->sll_pkttype = skb->pkt_type;
	if (unlikely(po->origdev))
		sll->sll_ifindex = orig_dev->ifindex;
	else
		sll->sll_ifindex = dev->ifindex;

	__packet_set_status(po, h.raw, status);
	smp_mb();

	{
		struct page *p_start, *p_end;
		u8 *h_end = h.raw + macoff + snaplen - 1;

		p_start = virt_to_page(h.raw);
		p_end = virt_to_page(h_end);
		while (p_start <= p_end) {
			flush_dcache_page(p_start);
			p_start++;
		}
	}

	sk->sk_data_ready(sk, 0);

drop_n_restore:
	if (skb_head != skb->data && skb_shared(skb)) {
		skb->data = skb_head;
		skb->len = skb_len;
	}
drop:
	kfree_skb(skb);
	return 0;

ring_is_full:
	po->stats.tp_drops++;
	spin_unlock(&sk->sk_receive_queue.lock);

	sk->sk_data_ready(sk, 0);
	kfree_skb(copy_skb);
	goto drop_n_restore;
}
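/* Usage sketch (userspace, illustrative): consuming the mmap'ed RX ring
 * filled by tpacket_rcv(), for TPACKET_V2. Each frame starts with a
 * struct tpacket2_hdr; the kernel hands a frame over by setting
 * TP_STATUS_USER, userspace hands it back with TP_STATUS_KERNEL.
 * ring, frame_size, frame_nr, i and pfd are assumed set up elsewhere:
 *
 *	struct tpacket2_hdr *hdr = (void *)(ring + i * frame_size);
 *	while (!(hdr->tp_status & TP_STATUS_USER))
 *		poll(&pfd, 1, -1);		-- pfd.events = POLLIN
 *	unsigned char *mac = (unsigned char *)hdr + hdr->tp_mac;
 *	process(mac, hdr->tp_snaplen);		-- hypothetical consumer
 *	hdr->tp_status = TP_STATUS_KERNEL;	-- give the frame back
 *	i = (i + 1) % frame_nr;
 */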
static void tpacket_destruct_skb(struct sk_buff *skb)
{
	struct packet_sock *po = pkt_sk(skb->sk);
	void *ph;

	if (likely(po->tx_ring.pg_vec)) {
		ph = skb_shinfo(skb)->destructor_arg;
		BUG_ON(__packet_get_status(po, ph) != TP_STATUS_SENDING);
		BUG_ON(atomic_read(&po->tx_ring.pending) == 0);
		atomic_dec(&po->tx_ring.pending);
		__packet_set_status(po, ph, TP_STATUS_AVAILABLE);
	}

	sock_wfree(skb);
}
static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
		void *frame, struct net_device *dev, int size_max,
		__be16 proto, unsigned char *addr)
{
	union { struct tpacket_hdr *h1; struct tpacket2_hdr *h2; void *raw; } ph;
	int to_write, offset, len, tp_len, nr_frags, len_max;
	struct socket *sock = po->sk.sk_socket;
	struct page *page;
	void *data;
	int err;

	ph.raw = frame;

	skb->protocol = proto;
	skb->dev = dev;
	skb->priority = po->sk.sk_priority;
	skb_shinfo(skb)->destructor_arg = ph.raw;

	switch (po->tp_version) {
	case TPACKET_V2:
		tp_len = ph.h2->tp_len;
		break;
	default:
		tp_len = ph.h1->tp_len;
		break;
	}
	if (unlikely(tp_len > size_max)) {
		printk(KERN_ERR "packet size is too long (%d > %d)\n",
		       tp_len, size_max);
		return -EMSGSIZE;
	}

	skb_reserve(skb, LL_RESERVED_SPACE(dev));
	skb_reset_network_header(skb);

	data = ph.raw + po->tp_hdrlen - sizeof(struct sockaddr_ll);
	to_write = tp_len;

	if (sock->type == SOCK_DGRAM) {
		err = dev_hard_header(skb, dev, ntohs(proto), addr,
				NULL, tp_len);
		if (unlikely(err < 0))
			return -EINVAL;
	} else if (dev->hard_header_len) {
		/* net device doesn't like empty head */
		if (unlikely(tp_len <= dev->hard_header_len)) {
			printk(KERN_ERR "packet size is too short "
			       "(%d < %d)\n", tp_len,
			       dev->hard_header_len);
			return -EINVAL;
		}

		skb_push(skb, dev->hard_header_len);
		err = skb_store_bits(skb, 0, data,
				dev->hard_header_len);
		if (unlikely(err))
			return err;

		data += dev->hard_header_len;
		to_write -= dev->hard_header_len;
	}

	page = virt_to_page(data);
	offset = offset_in_page(data);
	len_max = PAGE_SIZE - offset;
	len = ((to_write > len_max) ? len_max : to_write);

	skb->data_len = to_write;
	skb->len += to_write;
	skb->truesize += to_write;
	atomic_add(to_write, &po->sk.sk_wmem_alloc);

	while (likely(to_write)) {
		nr_frags = skb_shinfo(skb)->nr_frags;

		if (unlikely(nr_frags >= MAX_SKB_FRAGS)) {
			printk(KERN_ERR "Packet exceeds the number "
			       "of skb frags (%lu)\n",
			       MAX_SKB_FRAGS);
			return -EFAULT;
		}

		flush_dcache_page(page);
		get_page(page);
		skb_fill_page_desc(skb,
				nr_frags,
				page++, offset, len);
		to_write -= len;
		offset = 0;
		len_max = PAGE_SIZE;
		len = ((to_write > len_max) ? len_max : to_write);
	}

	return tp_len;
}
static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
{
	struct socket *sock;
	struct sk_buff *skb;
	struct net_device *dev;
	__be16 proto;
	int ifindex, err, reserve = 0;
	void *ph;
	struct sockaddr_ll *saddr = (struct sockaddr_ll *)msg->msg_name;
	int tp_len, size_max;
	unsigned char *addr;
	int len_sum = 0;
	int status = 0;

	sock = po->sk.sk_socket;

	mutex_lock(&po->pg_vec_lock);

	if (saddr == NULL) {
		ifindex = po->ifindex;
		proto = po->num;
		addr = NULL;
	} else {
		err = -EINVAL;
		if (msg->msg_namelen < sizeof(struct sockaddr_ll))
			goto out;
		if (msg->msg_namelen < (saddr->sll_halen
					+ offsetof(struct sockaddr_ll,
						sll_addr)))
			goto out;
		ifindex = saddr->sll_ifindex;
		proto = saddr->sll_protocol;
		addr = saddr->sll_addr;
	}

	dev = dev_get_by_index(sock_net(&po->sk), ifindex);
	err = -ENXIO;
	if (unlikely(dev == NULL))
		goto out;

	reserve = dev->hard_header_len;

	err = -ENETDOWN;
	if (unlikely(!(dev->flags & IFF_UP)))
		goto out_put;

	size_max = po->tx_ring.frame_size
		- sizeof(struct skb_shared_info)
		- po->tp_hdrlen
		- LL_ALLOCATED_SPACE(dev)
		- sizeof(struct sockaddr_ll);

	if (size_max > dev->mtu + reserve)
		size_max = dev->mtu + reserve;

	do {
		ph = packet_current_frame(po, &po->tx_ring,
				TP_STATUS_SEND_REQUEST);

		if (unlikely(ph == NULL)) {
			schedule();
			continue;
		}

		status = TP_STATUS_SEND_REQUEST;
		skb = sock_alloc_send_skb(&po->sk,
				LL_ALLOCATED_SPACE(dev)
				+ sizeof(struct sockaddr_ll),
				0, &err);

		if (unlikely(skb == NULL))
			goto out_status;

		tp_len = tpacket_fill_skb(po, skb, ph, dev, size_max, proto,
				addr);

		if (unlikely(tp_len < 0)) {
			if (po->tp_loss) {
				__packet_set_status(po, ph,
						TP_STATUS_AVAILABLE);
				packet_increment_head(&po->tx_ring);
				kfree_skb(skb);
				continue;
			} else {
				status = TP_STATUS_WRONG_FORMAT;
				err = tp_len;
				goto out_status;
			}
		}

		skb->destructor = tpacket_destruct_skb;
		__packet_set_status(po, ph, TP_STATUS_SENDING);
		atomic_inc(&po->tx_ring.pending);

		status = TP_STATUS_SEND_REQUEST;
		err = dev_queue_xmit(skb);
		if (unlikely(err > 0 && (err = net_xmit_errno(err)) != 0))
			goto out_xmit;
		packet_increment_head(&po->tx_ring);
		len_sum += tp_len;
	} while (likely((ph != NULL) || ((!(msg->msg_flags & MSG_DONTWAIT))
					&& (atomic_read(&po->tx_ring.pending)))));

	err = len_sum;
	goto out_put;

out_xmit:
	skb->destructor = sock_wfree;
	atomic_dec(&po->tx_ring.pending);
out_status:
	__packet_set_status(po, ph, status);
	kfree_skb(skb);
out_put:
	dev_put(dev);
out:
	mutex_unlock(&po->pg_vec_lock);
	return err;
}
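/* Usage sketch (userspace, illustrative): driving the TX ring consumed by
 * tpacket_snd(), for TPACKET_V2. Userspace marks frames
 * TP_STATUS_SEND_REQUEST and kicks the kernel with send(); completed frames
 * return to TP_STATUS_AVAILABLE via tpacket_destruct_skb(). The data offset
 * mirrors tpacket_fill_skb()'s tp_hdrlen - sizeof(struct sockaddr_ll):
 *
 *	struct tpacket2_hdr *hdr = (void *)(ring + i * frame_size);
 *	if (hdr->tp_status == TP_STATUS_AVAILABLE) {
 *		memcpy((char *)hdr + TPACKET2_HDRLEN -
 *		       sizeof(struct sockaddr_ll), frame, frame_len);
 *		hdr->tp_len = frame_len;
 *		hdr->tp_status = TP_STATUS_SEND_REQUEST;
 *		send(fd, NULL, 0, 0);
 *	}
 */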
static int packet_snd(struct socket *sock,
		      struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct sockaddr_ll *saddr = (struct sockaddr_ll *)msg->msg_name;
	struct sk_buff *skb;
	struct net_device *dev;
	__be16 proto;
	unsigned char *addr;
	int ifindex, err, reserve = 0;

	/*
	 *	Get and verify the address.
	 */
	if (saddr == NULL) {
		struct packet_sock *po = pkt_sk(sk);

		ifindex = po->ifindex;
		proto = po->num;
		addr = NULL;
	} else {
		err = -EINVAL;
		if (msg->msg_namelen < sizeof(struct sockaddr_ll))
			goto out;
		if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr)))
			goto out;
		ifindex = saddr->sll_ifindex;
		proto = saddr->sll_protocol;
		addr = saddr->sll_addr;
	}

	dev = dev_get_by_index(sock_net(sk), ifindex);
	err = -ENXIO;
	if (dev == NULL)
		goto out_unlock;
	if (sock->type == SOCK_RAW)
		reserve = dev->hard_header_len;

	err = -ENETDOWN;
	if (!(dev->flags & IFF_UP))
		goto out_unlock;

	err = -EMSGSIZE;
	if (len > dev->mtu + reserve)
		goto out_unlock;

	skb = sock_alloc_send_skb(sk, len + LL_ALLOCATED_SPACE(dev),
				  msg->msg_flags & MSG_DONTWAIT, &err);
	if (skb == NULL)
		goto out_unlock;

	skb_reserve(skb, LL_RESERVED_SPACE(dev));
	skb_reset_network_header(skb);

	err = -EINVAL;
	if (sock->type == SOCK_DGRAM &&
	    dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len) < 0)
		goto out_free;

	/* Returns -EFAULT on error */
	err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
	if (err)
		goto out_free;

	skb->protocol = proto;
	skb->dev = dev;
	skb->priority = sk->sk_priority;

	/*
	 *	Now send it
	 */
	err = dev_queue_xmit(skb);
	if (err > 0 && (err = net_xmit_errno(err)) != 0)
		goto out_unlock;
static int packet_sendmsg(struct kiocb *iocb, struct socket *sock,
			  struct msghdr *msg, size_t len)
{
#ifdef CONFIG_PACKET_MMAP
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);

	if (po->tx_ring.pg_vec)
		return tpacket_snd(po, msg);
	else
#endif
		return packet_snd(sock, msg, len);
}
/*
 *	Close a PACKET socket. This is fairly simple. We immediately go
 *	to 'closed' state and remove our protocol entry in the device list.
 */

static int packet_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct packet_sock *po;
	struct net *net;
#ifdef CONFIG_PACKET_MMAP
	struct tpacket_req req;
#endif

	if (!sk)
		return 0;

	net = sock_net(sk);
	po = pkt_sk(sk);

	write_lock_bh(&net->packet.sklist_lock);
	sk_del_node_init(sk);
	sock_prot_inuse_add(net, sk->sk_prot, -1);
	write_unlock_bh(&net->packet.sklist_lock);

	/*
	 *	Unhook packet receive handler.
	 */
	if (po->running) {
		/*
		 *	Remove the protocol hook
		 */
		dev_remove_pack(&po->prot_hook);
		po->running = 0;
		po->num = 0;
		__sock_put(sk);
	}

	packet_flush_mclist(sk);

#ifdef CONFIG_PACKET_MMAP
	memset(&req, 0, sizeof(req));

	if (po->rx_ring.pg_vec)
		packet_set_ring(sk, &req, 1, 0);

	if (po->tx_ring.pg_vec)
		packet_set_ring(sk, &req, 1, 1);
#endif

	/*
	 *	Now the socket is dead. No more input will appear.
	 */
	sock_orphan(sk);
	sock->sk = NULL;

	/* Purge queues */
	skb_queue_purge(&sk->sk_receive_queue);
	sk_refcnt_debug_release(sk);

	sock_put(sk);
	return 0;
}
/*
 *	Attach a packet hook.
 */

static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 protocol)
{
	struct packet_sock *po = pkt_sk(sk);

	/*
	 *	Detach an existing hook if present.
	 */
	lock_sock(sk);

	spin_lock(&po->bind_lock);
	if (po->running) {
		__sock_put(sk);
		po->running = 0;
		po->num = 0;
		spin_unlock(&po->bind_lock);
		dev_remove_pack(&po->prot_hook);
		spin_lock(&po->bind_lock);
	}

	po->num = protocol;
	po->prot_hook.type = protocol;
	po->prot_hook.dev = dev;

	po->ifindex = dev ? dev->ifindex : 0;

	if (protocol == 0)
		goto out_unlock;

	if (!dev || (dev->flags & IFF_UP)) {
		dev_add_pack(&po->prot_hook);
		sock_hold(sk);
		po->running = 1;
	} else {
		sk->sk_err = ENETDOWN;
		if (!sock_flag(sk, SOCK_DEAD))
			sk->sk_error_report(sk);
	}

out_unlock:
	spin_unlock(&po->bind_lock);
	release_sock(sk);
	return 0;
}
/*
 *	Bind a packet socket to a device
 */

static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
	struct sock *sk = sock->sk;
	char name[15];
	struct net_device *dev;
	int err = -ENODEV;

	if (addr_len != sizeof(struct sockaddr))
		return -EINVAL;
	strlcpy(name, uaddr->sa_data, sizeof(name));

	dev = dev_get_by_name(sock_net(sk), name);
	if (dev) {
		err = packet_do_bind(sk, dev, pkt_sk(sk)->num);
		dev_put(dev);
	}
	return err;
}

static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
	struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr;
	struct sock *sk = sock->sk;
	struct net_device *dev = NULL;
	int err;

	if (addr_len < sizeof(struct sockaddr_ll))
		return -EINVAL;
	if (sll->sll_family != AF_PACKET)
		return -EINVAL;

	if (sll->sll_ifindex) {
		err = -ENODEV;
		dev = dev_get_by_index(sock_net(sk), sll->sll_ifindex);
		if (dev == NULL)
			goto out;
	}
	err = packet_do_bind(sk, dev, sll->sll_protocol ? : pkt_sk(sk)->num);
	if (dev)
		dev_put(dev);
out:
	return err;
}
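/* Usage sketch (userspace, illustrative): binding as handled above -- only
 * sll_protocol and sll_ifindex matter; the rest may stay zeroed. "eth0" is
 * a placeholder:
 *
 *	struct sockaddr_ll sll;
 *	memset(&sll, 0, sizeof(sll));
 *	sll.sll_family = AF_PACKET;
 *	sll.sll_protocol = htons(ETH_P_ALL);
 *	sll.sll_ifindex = if_nametoindex("eth0");
 *	bind(fd, (struct sockaddr *)&sll, sizeof(sll));
 */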
static struct proto packet_proto = {
	.name	  = "PACKET",
	.owner	  = THIS_MODULE,
	.obj_size = sizeof(struct packet_sock),
};

/*
 *	Create a packet of type SOCK_PACKET.
 */

static int packet_create(struct net *net, struct socket *sock, int protocol)
{
	struct sock *sk;
	struct packet_sock *po;
	__be16 proto = (__force __be16)protocol; /* weird, but documented */
	int err;

	if (!capable(CAP_NET_RAW))
		return -EPERM;
	if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW &&
	    sock->type != SOCK_PACKET)
		return -ESOCKTNOSUPPORT;

	sock->state = SS_UNCONNECTED;

	err = -ENOBUFS;
	sk = sk_alloc(net, PF_PACKET, GFP_KERNEL, &packet_proto);
	if (sk == NULL)
		goto out;

	sock->ops = &packet_ops;
	if (sock->type == SOCK_PACKET)
		sock->ops = &packet_ops_spkt;

	sock_init_data(sock, sk);

	po = pkt_sk(sk);
	sk->sk_family = PF_PACKET;
	po->num = proto;

	sk->sk_destruct = packet_sock_destruct;
	sk_refcnt_debug_inc(sk);

	/*
	 *	Attach a protocol block
	 */
	spin_lock_init(&po->bind_lock);
	mutex_init(&po->pg_vec_lock);
	po->prot_hook.func = packet_rcv;

	if (sock->type == SOCK_PACKET)
		po->prot_hook.func = packet_rcv_spkt;

	po->prot_hook.af_packet_priv = sk;

	if (proto) {
		po->prot_hook.type = proto;
		dev_add_pack(&po->prot_hook);
		sock_hold(sk);
		po->running = 1;
	}

	write_lock_bh(&net->packet.sklist_lock);
	sk_add_node(sk, &net->packet.sklist);
	sock_prot_inuse_add(net, &packet_proto, 1);
	write_unlock_bh(&net->packet.sklist_lock);
	return 0;
out:
	return err;
}
/*
 *	Pull a packet from our receive queue and hand it to the user.
 *	If necessary we block.
 */

static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
			  struct msghdr *msg, size_t len, int flags)
{
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int copied, err;
	struct sockaddr_ll *sll;

	err = -EINVAL;
	if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT))
		goto out;

#if 0
	/* What error should we return now? EUNATTACH? */
	if (pkt_sk(sk)->ifindex < 0)
		return -ENODEV;
#endif

	/*
	 *	Call the generic datagram receiver. This handles all sorts
	 *	of horrible races and re-entrancy so we can forget about it
	 *	in the protocol layers.
	 *
	 *	Now it will return ENETDOWN if the device has just gone down,
	 *	but then it will block.
	 */
	skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);

	/*
	 *	An error occurred so return it. Because skb_recv_datagram()
	 *	handles the blocking we don't see and worry about blocking
	 *	retries.
	 */
	if (skb == NULL)
		goto out;

	/*
	 *	If the address length field is there to be filled in, we fill
	 *	it in now.
	 */
	sll = &PACKET_SKB_CB(skb)->sa.ll;
	if (sock->type == SOCK_PACKET)
		msg->msg_namelen = sizeof(struct sockaddr_pkt);
	else
		msg->msg_namelen = sll->sll_halen + offsetof(struct sockaddr_ll, sll_addr);

	/*
	 *	You lose any data beyond the buffer you gave. If it worries a
	 *	user program they can ask the device for its MTU anyway.
	 */
	copied = skb->len;
	if (copied > len) {
		copied = len;
		msg->msg_flags |= MSG_TRUNC;
	}

	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
	if (err)
		goto out_free;

	sock_recv_timestamp(msg, sk, skb);

	if (msg->msg_name)
		memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa,
		       msg->msg_namelen);

	if (pkt_sk(sk)->auxdata) {
		struct tpacket_auxdata aux;

		aux.tp_status = TP_STATUS_USER;
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			aux.tp_status |= TP_STATUS_CSUMNOTREADY;
		aux.tp_len = PACKET_SKB_CB(skb)->origlen;
		aux.tp_snaplen = skb->len;
		aux.tp_mac = 0;
		aux.tp_net = skb_network_offset(skb);
		aux.tp_vlan_tci = skb->vlan_tci;

		put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux);
	}

	/*
	 *	Free or return the buffer as appropriate. Again this
	 *	hides all the races and re-entrancy issues from us.
	 */
	err = (flags & MSG_TRUNC) ? skb->len : copied;

out_free:
	skb_free_datagram(sk, skb);
out:
	return err;
}
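/* Usage sketch (userspace, illustrative): picking up the PACKET_AUXDATA
 * control message emitted above, after enabling it with setsockopt():
 *
 *	char cbuf[CMSG_SPACE(sizeof(struct tpacket_auxdata))];
 *	struct msghdr m;
 *	memset(&m, 0, sizeof(m));	-- msg_iov setup omitted
 *	m.msg_control = cbuf;
 *	m.msg_controllen = sizeof(cbuf);
 *	recvmsg(fd, &m, 0);
 *	struct cmsghdr *c;
 *	for (c = CMSG_FIRSTHDR(&m); c; c = CMSG_NXTHDR(&m, c))
 *		if (c->cmsg_level == SOL_PACKET &&
 *		    c->cmsg_type == PACKET_AUXDATA) {
 *			struct tpacket_auxdata *aux = (void *)CMSG_DATA(c);
 *			-- aux->tp_len is the original length,
 *			-- aux->tp_snaplen what was actually delivered
 *		}
 */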
static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr,
			       int *uaddr_len, int peer)
{
	struct net_device *dev;
	struct sock *sk = sock->sk;

	if (peer)
		return -EOPNOTSUPP;

	uaddr->sa_family = AF_PACKET;
	dev = dev_get_by_index(sock_net(sk), pkt_sk(sk)->ifindex);
	if (dev) {
		strlcpy(uaddr->sa_data, dev->name, 15);
		dev_put(dev);
	} else
		memset(uaddr->sa_data, 0, 14);
	*uaddr_len = sizeof(*uaddr);

	return 0;
}

static int packet_getname(struct socket *sock, struct sockaddr *uaddr,
			  int *uaddr_len, int peer)
{
	struct net_device *dev;
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);
	struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr;

	if (peer)
		return -EOPNOTSUPP;

	sll->sll_family = AF_PACKET;
	sll->sll_ifindex = po->ifindex;
	sll->sll_protocol = po->num;
	dev = dev_get_by_index(sock_net(sk), po->ifindex);
	if (dev) {
		sll->sll_hatype = dev->type;
		sll->sll_halen = dev->addr_len;
		memcpy(sll->sll_addr, dev->dev_addr, dev->addr_len);
		dev_put(dev);
	} else {
		sll->sll_hatype = 0;	/* Bad: we have no ARPHRD_UNSPEC */
		sll->sll_halen = 0;
	}
	*uaddr_len = offsetof(struct sockaddr_ll, sll_addr) + sll->sll_halen;

	return 0;
}
static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i,
			 int what)
{
	switch (i->type) {
	case PACKET_MR_MULTICAST:
		if (what > 0)
			return dev_mc_add(dev, i->addr, i->alen, 0);
		else
			return dev_mc_delete(dev, i->addr, i->alen, 0);
	case PACKET_MR_PROMISC:
		return dev_set_promiscuity(dev, what);
	case PACKET_MR_ALLMULTI:
		return dev_set_allmulti(dev, what);
	case PACKET_MR_UNICAST:
		if (what > 0)
			return dev_unicast_add(dev, i->addr, i->alen);
		else
			return dev_unicast_delete(dev, i->addr, i->alen);
	default:
		break;
	}
	return 0;
}

static void packet_dev_mclist(struct net_device *dev, struct packet_mclist *i, int what)
{
	for ( ; i; i = i->next) {
		if (i->ifindex == dev->ifindex)
			packet_dev_mc(dev, i, what);
	}
}
static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq)
{
	struct packet_sock *po = pkt_sk(sk);
	struct packet_mclist *ml, *i;
	struct net_device *dev;
	int err;

	rtnl_lock();

	err = -ENODEV;
	dev = __dev_get_by_index(sock_net(sk), mreq->mr_ifindex);
	if (!dev)
		goto done;

	err = -EINVAL;
	if (mreq->mr_alen > dev->addr_len)
		goto done;

	err = -ENOBUFS;
	i = kmalloc(sizeof(*i), GFP_KERNEL);
	if (i == NULL)
		goto done;

	err = 0;
	for (ml = po->mclist; ml; ml = ml->next) {
		if (ml->ifindex == mreq->mr_ifindex &&
		    ml->type == mreq->mr_type &&
		    ml->alen == mreq->mr_alen &&
		    memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
			ml->count++;
			/* Free the new element ... */
			kfree(i);
			goto done;
		}
	}

	i->type = mreq->mr_type;
	i->ifindex = mreq->mr_ifindex;
	i->alen = mreq->mr_alen;
	memcpy(i->addr, mreq->mr_address, i->alen);
	i->count = 1;
	i->next = po->mclist;
	po->mclist = i;
	err = packet_dev_mc(dev, i, 1);
	if (err) {
		po->mclist = i->next;
		kfree(i);
	}
done:
	rtnl_unlock();
	return err;
}

static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq)
{
	struct packet_mclist *ml, **mlp;

	rtnl_lock();
	for (mlp = &pkt_sk(sk)->mclist; (ml = *mlp) != NULL; mlp = &ml->next) {
		if (ml->ifindex == mreq->mr_ifindex &&
		    ml->type == mreq->mr_type &&
		    ml->alen == mreq->mr_alen &&
		    memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
			if (--ml->count == 0) {
				struct net_device *dev;
				*mlp = ml->next;
				dev = dev_get_by_index(sock_net(sk), ml->ifindex);
				if (dev) {
					packet_dev_mc(dev, ml, -1);
					dev_put(dev);
				}
				kfree(ml);
			}
			rtnl_unlock();
			return 0;
		}
	}
	rtnl_unlock();
	return -EADDRNOTAVAIL;
}

static void packet_flush_mclist(struct sock *sk)
{
	struct packet_sock *po = pkt_sk(sk);
	struct packet_mclist *ml;

	if (!po->mclist)
		return;

	rtnl_lock();
	while ((ml = po->mclist) != NULL) {
		struct net_device *dev;

		po->mclist = ml->next;
		if ((dev = dev_get_by_index(sock_net(sk), ml->ifindex)) != NULL) {
			packet_dev_mc(dev, ml, -1);
			dev_put(dev);
		}
		kfree(ml);
	}
	rtnl_unlock();
}
static int
packet_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen)
{
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);
	int ret;

	if (level != SOL_PACKET)
		return -ENOPROTOOPT;

	switch (optname) {
	case PACKET_ADD_MEMBERSHIP:
	case PACKET_DROP_MEMBERSHIP:
	{
		struct packet_mreq_max mreq;
		int len = optlen;
		memset(&mreq, 0, sizeof(mreq));
		if (len < sizeof(struct packet_mreq))
			return -EINVAL;
		if (len > sizeof(mreq))
			len = sizeof(mreq);
		if (copy_from_user(&mreq, optval, len))
			return -EFAULT;
		if (len < (mreq.mr_alen + offsetof(struct packet_mreq, mr_address)))
			return -EINVAL;
		if (optname == PACKET_ADD_MEMBERSHIP)
			ret = packet_mc_add(sk, &mreq);
		else
			ret = packet_mc_drop(sk, &mreq);
		return ret;
	}

#ifdef CONFIG_PACKET_MMAP
	case PACKET_RX_RING:
	case PACKET_TX_RING:
	{
		struct tpacket_req req;

		if (optlen < sizeof(req))
			return -EINVAL;
		if (copy_from_user(&req, optval, sizeof(req)))
			return -EFAULT;
		return packet_set_ring(sk, &req, 0, optname == PACKET_TX_RING);
	}
	case PACKET_COPY_THRESH:
	{
		int val;

		if (optlen != sizeof(val))
			return -EINVAL;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;
		pkt_sk(sk)->copy_thresh = val;
		return 0;
	}
	case PACKET_VERSION:
	{
		int val;

		if (optlen != sizeof(val))
			return -EINVAL;
		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
			return -EBUSY;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;
		switch (val) {
		case TPACKET_V1:
		case TPACKET_V2:
			po->tp_version = val;
			return 0;
		default:
			return -EINVAL;
		}
	}
	case PACKET_RESERVE:
	{
		unsigned int val;

		if (optlen != sizeof(val))
			return -EINVAL;
		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
			return -EBUSY;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;
		po->tp_reserve = val;
		return 0;
	}
	case PACKET_LOSS:
	{
		unsigned int val;

		if (optlen != sizeof(val))
			return -EINVAL;
		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
			return -EBUSY;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;
		po->tp_loss = !!val;
		return 0;
	}
#endif
	case PACKET_AUXDATA:
	{
		int val;

		if (optlen < sizeof(val))
			return -EINVAL;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;
		po->auxdata = !!val;
		return 0;
	}
	case PACKET_ORIGDEV:
	{
		int val;

		if (optlen < sizeof(val))
			return -EINVAL;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;
		po->origdev = !!val;
		return 0;
	}
	default:
		return -ENOPROTOOPT;
	}
}
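/* Usage sketch (userspace, illustrative): the ordering the checks above
 * enforce -- PACKET_VERSION and PACKET_RESERVE must be set before the ring
 * exists; req is assumed to be a filled-in struct tpacket_req:
 *
 *	int ver = TPACKET_V2;
 *	unsigned int rsv = 16;
 *	setsockopt(fd, SOL_PACKET, PACKET_VERSION, &ver, sizeof(ver));
 *	setsockopt(fd, SOL_PACKET, PACKET_RESERVE, &rsv, sizeof(rsv));
 *	setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
 *
 * Reversing the order gets -EBUSY from the version/reserve cases above.
 */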
static int packet_getsockopt(struct socket *sock, int level, int optname,
			     char __user *optval, int __user *optlen)
{
	int len;
	int val;
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);
	void *data = &val;
	struct tpacket_stats st;

	if (level != SOL_PACKET)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case PACKET_STATISTICS:
		if (len > sizeof(struct tpacket_stats))
			len = sizeof(struct tpacket_stats);
		spin_lock_bh(&sk->sk_receive_queue.lock);
		st = po->stats;
		memset(&po->stats, 0, sizeof(st));
		spin_unlock_bh(&sk->sk_receive_queue.lock);
		st.tp_packets += st.tp_drops;
		data = &st;
		break;
	case PACKET_AUXDATA:
		if (len > sizeof(int))
			len = sizeof(int);
		val = po->auxdata;
		data = &val;
		break;
	case PACKET_ORIGDEV:
		if (len > sizeof(int))
			len = sizeof(int);
		val = po->origdev;
		data = &val;
		break;
#ifdef CONFIG_PACKET_MMAP
	case PACKET_VERSION:
		if (len > sizeof(int))
			len = sizeof(int);
		val = po->tp_version;
		data = &val;
		break;
	case PACKET_HDRLEN:
		if (len > sizeof(int))
			len = sizeof(int);
		if (copy_from_user(&val, optval, len))
			return -EFAULT;
		switch (val) {
		case TPACKET_V1:
			val = sizeof(struct tpacket_hdr);
			break;
		case TPACKET_V2:
			val = sizeof(struct tpacket2_hdr);
			break;
		default:
			return -EINVAL;
		}
		data = &val;
		break;
	case PACKET_RESERVE:
		if (len > sizeof(unsigned int))
			len = sizeof(unsigned int);
		val = po->tp_reserve;
		data = &val;
		break;
	case PACKET_LOSS:
		if (len > sizeof(unsigned int))
			len = sizeof(unsigned int);
		val = po->tp_loss;
		data = &val;
		break;
#endif
	default:
		return -ENOPROTOOPT;
	}

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, data, len))
		return -EFAULT;
	return 0;
}
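/* Usage sketch (userspace, illustrative): reading the (self-clearing)
 * statistics returned by the PACKET_STATISTICS case above:
 *
 *	struct tpacket_stats st;
 *	socklen_t slen = sizeof(st);
 *	getsockopt(fd, SOL_PACKET, PACKET_STATISTICS, &st, &slen);
 *	-- st.tp_packets includes st.tp_drops; both reset to 0 by this call
 */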
static int packet_notifier(struct notifier_block *this, unsigned long msg, void *data)
{
	struct sock *sk;
	struct hlist_node *node;
	struct net_device *dev = data;
	struct net *net = dev_net(dev);

	read_lock(&net->packet.sklist_lock);
	sk_for_each(sk, node, &net->packet.sklist) {
		struct packet_sock *po = pkt_sk(sk);

		switch (msg) {
		case NETDEV_UNREGISTER:
			if (po->mclist)
				packet_dev_mclist(dev, po->mclist, -1);
			/* fallthrough */

		case NETDEV_DOWN:
			if (dev->ifindex == po->ifindex) {
				spin_lock(&po->bind_lock);
				if (po->running) {
					__dev_remove_pack(&po->prot_hook);
					__sock_put(sk);
					po->running = 0;
					sk->sk_err = ENETDOWN;
					if (!sock_flag(sk, SOCK_DEAD))
						sk->sk_error_report(sk);
				}
				if (msg == NETDEV_UNREGISTER) {
					po->ifindex = -1;
					po->prot_hook.dev = NULL;
				}
				spin_unlock(&po->bind_lock);
			}
			break;
		case NETDEV_UP:
			spin_lock(&po->bind_lock);
			if (dev->ifindex == po->ifindex && po->num &&
			    !po->running) {
				dev_add_pack(&po->prot_hook);
				sock_hold(sk);
				po->running = 1;
			}
			spin_unlock(&po->bind_lock);
			break;
		}
	}
	read_unlock(&net->packet.sklist_lock);
	return NOTIFY_DONE;
}
static int packet_ioctl(struct socket *sock, unsigned int cmd,
			unsigned long arg)
{
	struct sock *sk = sock->sk;

	switch (cmd) {
	case SIOCOUTQ:
	{
		int amount = atomic_read(&sk->sk_wmem_alloc);
		return put_user(amount, (int __user *)arg);
	}
	case SIOCINQ:
	{
		struct sk_buff *skb;
		int amount = 0;

		spin_lock_bh(&sk->sk_receive_queue.lock);
		skb = skb_peek(&sk->sk_receive_queue);
		if (skb)
			amount = skb->len;
		spin_unlock_bh(&sk->sk_receive_queue.lock);
		return put_user(amount, (int __user *)arg);
	}
	case SIOCGSTAMP:
		return sock_get_timestamp(sk, (struct timeval __user *)arg);
	case SIOCGSTAMPNS:
		return sock_get_timestampns(sk, (struct timespec __user *)arg);

#ifdef CONFIG_INET
	case SIOCGIFBRDADDR:
	case SIOCSIFBRDADDR:
	case SIOCGIFNETMASK:
	case SIOCSIFNETMASK:
	case SIOCGIFDSTADDR:
	case SIOCSIFDSTADDR:
	case SIOCSIFFLAGS:
		if (!net_eq(sock_net(sk), &init_net))
			return -ENOIOCTLCMD;
		return inet_dgram_ops.ioctl(sock, cmd, arg);
#endif

	default:
		return -ENOIOCTLCMD;
	}
	return 0;
}
#ifndef CONFIG_PACKET_MMAP
#define packet_mmap sock_no_mmap
#define packet_poll datagram_poll
#else

static unsigned int packet_poll(struct file *file, struct socket *sock,
				poll_table *wait)
{
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);
	unsigned int mask = datagram_poll(file, sock, wait);

	spin_lock_bh(&sk->sk_receive_queue.lock);
	if (po->rx_ring.pg_vec) {
		if (!packet_previous_frame(po, &po->rx_ring, TP_STATUS_KERNEL))
			mask |= POLLIN | POLLRDNORM;
	}
	spin_unlock_bh(&sk->sk_receive_queue.lock);
	spin_lock_bh(&sk->sk_write_queue.lock);
	if (po->tx_ring.pg_vec) {
		if (packet_current_frame(po, &po->tx_ring, TP_STATUS_AVAILABLE))
			mask |= POLLOUT | POLLWRNORM;
	}
	spin_unlock_bh(&sk->sk_write_queue.lock);
	return mask;
}
/* Dirty? Well, I still have not learned a better way to account
 * for all the pages.
 */

static void packet_mm_open(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct socket *sock = file->private_data;
	struct sock *sk = sock->sk;

	if (sk)
		atomic_inc(&pkt_sk(sk)->mapped);
}

static void packet_mm_close(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct socket *sock = file->private_data;
	struct sock *sk = sock->sk;

	if (sk)
		atomic_dec(&pkt_sk(sk)->mapped);
}

static struct vm_operations_struct packet_mmap_ops = {
	.open	= packet_mm_open,
	.close	= packet_mm_close,
};
static void free_pg_vec(char **pg_vec, unsigned int order, unsigned int len)
{
	int i;

	for (i = 0; i < len; i++) {
		if (likely(pg_vec[i]))
			free_pages((unsigned long) pg_vec[i], order);
	}
	kfree(pg_vec);
}

static inline char *alloc_one_pg_vec_page(unsigned long order)
{
	gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP | __GFP_ZERO | __GFP_NOWARN;

	return (char *) __get_free_pages(gfp_flags, order);
}

static char **alloc_pg_vec(struct tpacket_req *req, int order)
{
	unsigned int block_nr = req->tp_block_nr;
	char **pg_vec;
	int i;

	pg_vec = kzalloc(block_nr * sizeof(char *), GFP_KERNEL);
	if (unlikely(!pg_vec))
		goto out;

	for (i = 0; i < block_nr; i++) {
		pg_vec[i] = alloc_one_pg_vec_page(order);
		if (unlikely(!pg_vec[i]))
			goto out_free_pgvec;
	}
out:
	return pg_vec;

out_free_pgvec:
	free_pg_vec(pg_vec, order, block_nr);
	pg_vec = NULL;
	goto out;
}
static int packet_set_ring(struct sock *sk, struct tpacket_req *req,
		int closing, int tx_ring)
{
	char **pg_vec = NULL;
	struct packet_sock *po = pkt_sk(sk);
	int was_running, order = 0;
	struct packet_ring_buffer *rb;
	struct sk_buff_head *rb_queue;
	__be16 num;
	int err;

	rb = tx_ring ? &po->tx_ring : &po->rx_ring;
	rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;

	err = -EBUSY;
	if (!closing) {
		if (atomic_read(&po->mapped))
			goto out;
		if (atomic_read(&rb->pending))
			goto out;
	}

	if (req->tp_block_nr) {
		/* Sanity tests and some calculations */
		err = -EBUSY;
		if (unlikely(rb->pg_vec))
			goto out;

		switch (po->tp_version) {
		case TPACKET_V1:
			po->tp_hdrlen = TPACKET_HDRLEN;
			break;
		case TPACKET_V2:
			po->tp_hdrlen = TPACKET2_HDRLEN;
			break;
		}

		err = -EINVAL;
		if (unlikely((int)req->tp_block_size <= 0))
			goto out;
		if (unlikely(req->tp_block_size & (PAGE_SIZE - 1)))
			goto out;
		if (unlikely(req->tp_frame_size < po->tp_hdrlen +
					po->tp_reserve))
			goto out;
		if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1)))
			goto out;

		rb->frames_per_block = req->tp_block_size/req->tp_frame_size;
		if (unlikely(rb->frames_per_block <= 0))
			goto out;
		if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
					req->tp_frame_nr))
			goto out;

		err = -ENOMEM;
		order = get_order(req->tp_block_size);
		pg_vec = alloc_pg_vec(req, order);
		if (unlikely(!pg_vec))
			goto out;
	} else {
		err = -EINVAL;
		if (unlikely(req->tp_frame_nr))
			goto out;
	}

	lock_sock(sk);

	/* Detach socket from network */
	spin_lock(&po->bind_lock);
	was_running = po->running;
	num = po->num;
	if (was_running) {
		__dev_remove_pack(&po->prot_hook);
		po->num = 0;
		po->running = 0;
		__sock_put(sk);
	}
	spin_unlock(&po->bind_lock);

	synchronize_net();

	err = -EBUSY;
	mutex_lock(&po->pg_vec_lock);
	if (closing || atomic_read(&po->mapped) == 0) {
		err = 0;
#define XC(a, b) ({ __typeof__ ((a)) __t; __t = (a); (a) = (b); __t; })
		spin_lock_bh(&rb_queue->lock);
		pg_vec = XC(rb->pg_vec, pg_vec);
		rb->frame_max = (req->tp_frame_nr - 1);
		rb->head = 0;
		rb->frame_size = req->tp_frame_size;
		spin_unlock_bh(&rb_queue->lock);

		order = XC(rb->pg_vec_order, order);
		req->tp_block_nr = XC(rb->pg_vec_len, req->tp_block_nr);

		rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE;
		po->prot_hook.func = (po->rx_ring.pg_vec) ?
						tpacket_rcv : packet_rcv;
		skb_queue_purge(rb_queue);
#undef XC
		if (atomic_read(&po->mapped))
			printk(KERN_DEBUG "packet_mmap: vma is busy: %d\n",
			       atomic_read(&po->mapped));
	}
	mutex_unlock(&po->pg_vec_lock);

	spin_lock(&po->bind_lock);
	if (was_running && !po->running) {
		sock_hold(sk);
		po->running = 1;
		po->num = num;
		dev_add_pack(&po->prot_hook);
	}
	spin_unlock(&po->bind_lock);

	release_sock(sk);

	if (pg_vec)
		free_pg_vec(pg_vec, order, req->tp_block_nr);
out:
	return err;
}
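/* Usage sketch (userspace, illustrative): a tpacket_req that satisfies the
 * sanity checks above (block size a multiple of PAGE_SIZE, frame size
 * TPACKET_ALIGNMENT-aligned, frames dividing blocks exactly), followed by
 * the mmap() served by packet_mmap() below. Sizes assume PAGE_SIZE == 4096:
 *
 *	struct tpacket_req req;
 *	req.tp_block_size = 4096;	-- one page per block
 *	req.tp_frame_size = 2048;	-- 2 frames per block
 *	req.tp_block_nr   = 64;
 *	req.tp_frame_nr   = 128;	-- frames_per_block * block_nr
 *	setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
 *	char *ring = mmap(NULL, req.tp_block_size * req.tp_block_nr,
 *			  PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 */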
static int packet_mmap(struct file *file, struct socket *sock,
		struct vm_area_struct *vma)
{
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);
	unsigned long size, expected_size;
	struct packet_ring_buffer *rb;
	unsigned long start;
	int err = -EINVAL;
	int i;

	if (vma->vm_pgoff)
		return -EINVAL;

	mutex_lock(&po->pg_vec_lock);

	expected_size = 0;
	for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
		if (rb->pg_vec) {
			expected_size += rb->pg_vec_len
						* rb->pg_vec_pages
						* PAGE_SIZE;
		}
	}

	if (expected_size == 0)
		goto out;

	size = vma->vm_end - vma->vm_start;
	if (size != expected_size)
		goto out;

	start = vma->vm_start;
	for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
		if (rb->pg_vec == NULL)
			continue;

		for (i = 0; i < rb->pg_vec_len; i++) {
			struct page *page = virt_to_page(rb->pg_vec[i]);
			int pg_num;

			for (pg_num = 0; pg_num < rb->pg_vec_pages;
					pg_num++, page++) {
				err = vm_insert_page(vma, start, page);
				if (unlikely(err))
					goto out;
				start += PAGE_SIZE;
			}
		}
	}

	atomic_inc(&po->mapped);
	vma->vm_ops = &packet_mmap_ops;
	err = 0;

out:
	mutex_unlock(&po->pg_vec_lock);
	return err;
}
static const struct proto_ops packet_ops_spkt = {
	.family =	PF_PACKET,
	.owner =	THIS_MODULE,
	.release =	packet_release,
	.bind =		packet_bind_spkt,
	.connect =	sock_no_connect,
	.socketpair =	sock_no_socketpair,
	.accept =	sock_no_accept,
	.getname =	packet_getname_spkt,
	.poll =		datagram_poll,
	.ioctl =	packet_ioctl,
	.listen =	sock_no_listen,
	.shutdown =	sock_no_shutdown,
	.setsockopt =	sock_no_setsockopt,
	.getsockopt =	sock_no_getsockopt,
	.sendmsg =	packet_sendmsg_spkt,
	.recvmsg =	packet_recvmsg,
	.mmap =		sock_no_mmap,
	.sendpage =	sock_no_sendpage,
};

static const struct proto_ops packet_ops = {
	.family =	PF_PACKET,
	.owner =	THIS_MODULE,
	.release =	packet_release,
	.bind =		packet_bind,
	.connect =	sock_no_connect,
	.socketpair =	sock_no_socketpair,
	.accept =	sock_no_accept,
	.getname =	packet_getname,
	.poll =		packet_poll,
	.ioctl =	packet_ioctl,
	.listen =	sock_no_listen,
	.shutdown =	sock_no_shutdown,
	.setsockopt =	packet_setsockopt,
	.getsockopt =	packet_getsockopt,
	.sendmsg =	packet_sendmsg,
	.recvmsg =	packet_recvmsg,
	.mmap =		packet_mmap,
	.sendpage =	sock_no_sendpage,
};

static struct net_proto_family packet_family_ops = {
	.family =	PF_PACKET,
	.create =	packet_create,
	.owner =	THIS_MODULE,
};

static struct notifier_block packet_netdev_notifier = {
	.notifier_call = packet_notifier,
};
#ifdef CONFIG_PROC_FS
static inline struct sock *packet_seq_idx(struct net *net, loff_t off)
{
	struct sock *s;
	struct hlist_node *node;

	sk_for_each(s, node, &net->packet.sklist) {
		if (!off--)
			return s;
	}
	return NULL;
}

static void *packet_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(seq_file_net(seq)->packet.sklist_lock)
{
	struct net *net = seq_file_net(seq);
	read_lock(&net->packet.sklist_lock);
	return *pos ? packet_seq_idx(net, *pos - 1) : SEQ_START_TOKEN;
}

static void *packet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct net *net = seq_file_net(seq);
	++*pos;
	return (v == SEQ_START_TOKEN)
		? sk_head(&net->packet.sklist)
		: sk_next((struct sock *)v);
}

static void packet_seq_stop(struct seq_file *seq, void *v)
	__releases(seq_file_net(seq)->packet.sklist_lock)
{
	struct net *net = seq_file_net(seq);
	read_unlock(&net->packet.sklist_lock);
}

static int packet_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "sk       RefCnt Type Proto  Iface R Rmem   User   Inode\n");
	else {
		struct sock *s = v;
		const struct packet_sock *po = pkt_sk(s);

		seq_printf(seq,
			   "%p %-6d %-4d %04x   %-5d %1d %-6u %-6u %-6lu\n",
			   s,
			   atomic_read(&s->sk_refcnt),
			   s->sk_type,
			   ntohs(po->num),
			   po->ifindex,
			   po->running,
			   atomic_read(&s->sk_rmem_alloc),
			   sock_i_uid(s),
			   sock_i_ino(s));
	}

	return 0;
}

static const struct seq_operations packet_seq_ops = {
	.start	= packet_seq_start,
	.next	= packet_seq_next,
	.stop	= packet_seq_stop,
	.show	= packet_seq_show,
};

static int packet_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &packet_seq_ops,
			    sizeof(struct seq_net_private));
}

static const struct file_operations packet_seq_fops = {
	.owner		= THIS_MODULE,
	.open		= packet_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_net,
};

#endif
static int packet_net_init(struct net *net)
{
	rwlock_init(&net->packet.sklist_lock);
	INIT_HLIST_HEAD(&net->packet.sklist);

	if (!proc_net_fops_create(net, "packet", 0, &packet_seq_fops))
		return -ENOMEM;

	return 0;
}

static void packet_net_exit(struct net *net)
{
	proc_net_remove(net, "packet");
}

static struct pernet_operations packet_net_ops = {
	.init = packet_net_init,
	.exit = packet_net_exit,
};

static void __exit packet_exit(void)
{
	unregister_netdevice_notifier(&packet_netdev_notifier);
	unregister_pernet_subsys(&packet_net_ops);
	sock_unregister(PF_PACKET);
	proto_unregister(&packet_proto);
}

static int __init packet_init(void)
{
	int rc = proto_register(&packet_proto, 0);

	if (rc != 0)
		goto out;

	sock_register(&packet_family_ops);
	register_pernet_subsys(&packet_net_ops);
	register_netdevice_notifier(&packet_netdev_notifier);
out:
	return rc;
}

module_init(packet_init);
module_exit(packet_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_PACKET);