/*
 * Common framework for low-level network console, dump, and debugger code
 *
 * Sep 8 2003  Matt Mackall <mpm@selenic.com>
 *
 * based on the netconsole code from:
 *
 * Copyright (C) 2001  Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2002  Red Hat, Inc.
 */
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/string.h>
#include <linux/if_arp.h>
#include <linux/inetdevice.h>
#include <linux/inet.h>
#include <linux/interrupt.h>
#include <linux/netpoll.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <asm/unaligned.h>
/*
 * We maintain a small pool of fully-sized skbs, to make sure the
 * message gets out even in extreme OOM situations.
 */
#define MAX_UDP_CHUNK 1460
#define MAX_SKBS 32
#define MAX_QUEUE_DEPTH (MAX_SKBS / 2)

static struct sk_buff_head skb_pool;

static atomic_t trapped;

#define USEC_PER_POLL	50
#define NETPOLL_RX_ENABLED  1
#define NETPOLL_RX_DROP     2

#define MAX_SKB_SIZE \
		(MAX_UDP_CHUNK + sizeof(struct udphdr) + \
				sizeof(struct iphdr) + sizeof(struct ethhdr))
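
/* For reference, with the sizes above a pool skb works out to
 * 1460 (MAX_UDP_CHUNK) + 8 (udphdr) + 20 (iphdr) + 14 (ethhdr) = 1502 bytes,
 * i.e. room for one full UDP console chunk plus all headers.
 */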
static void zap_completion_queue(void);
static void arp_reply(struct sk_buff *skb);
static void queue_process(struct work_struct *work)
{
	struct netpoll_info *npinfo =
		container_of(work, struct netpoll_info, tx_work.work);
	struct sk_buff *skb;
	unsigned long flags;

	while ((skb = skb_dequeue(&npinfo->txq))) {
		struct net_device *dev = skb->dev;

		if (!netif_device_present(dev) || !netif_running(dev)) {
			__kfree_skb(skb);
			continue;
		}

		local_irq_save(flags);
		netif_tx_lock(dev);
		if ((netif_queue_stopped(dev) ||
		     netif_subqueue_stopped(dev, skb)) ||
		    dev->hard_start_xmit(skb, dev) != NETDEV_TX_OK) {
			/* requeue at the head and retry a bit later */
			skb_queue_head(&npinfo->txq, skb);
			netif_tx_unlock(dev);
			local_irq_restore(flags);

			schedule_delayed_work(&npinfo->tx_work, HZ/10);
			return;
		}
		netif_tx_unlock(dev);
		local_irq_restore(flags);
	}
}
static __sum16 checksum_udp(struct sk_buff *skb, struct udphdr *uh,
			    unsigned short ulen, __be32 saddr, __be32 daddr)
{
	__wsum psum;

	if (uh->check == 0 || skb_csum_unnecessary(skb))
		return 0;

	psum = csum_tcpudp_nofold(saddr, daddr, ulen, IPPROTO_UDP, 0);

	if (skb->ip_summed == CHECKSUM_COMPLETE &&
	    !csum_fold(csum_add(psum, skb->csum)))
		return 0;

	skb->csum = psum;
	return __skb_checksum_complete(skb);
}
/*
 * Check whether delayed processing was scheduled for our NIC. If so,
 * we attempt to grab the poll lock and use ->poll() to pump the card.
 * If this fails, either we've recursed in ->poll() or it's already
 * running on another CPU.
 *
 * Note: we don't mask interrupts with this lock because we're using
 * trylock here and interrupts are already disabled in the softirq
 * case. Further, we test the poll_owner to avoid recursion on UP
 * systems where the lock doesn't exist.
 *
 * In cases where there is bidirectional communication, reading only
 * one message at a time can lead to packets being dropped by the
 * network adapter, forcing superfluous retries and possibly timeouts.
 * Thus, we set our budget to greater than 1.
 */
static int poll_one_napi(struct netpoll_info *npinfo,
			 struct napi_struct *napi, int budget)
{
	int work;

	/* net_rx_action's ->poll() invocations and ours are
	 * synchronized by this test, which is only made while
	 * holding the napi->poll_lock.
	 */
	if (!test_bit(NAPI_STATE_SCHED, &napi->state))
		return budget;

	npinfo->rx_flags |= NETPOLL_RX_DROP;
	atomic_inc(&trapped);

	work = napi->poll(napi, budget);

	atomic_dec(&trapped);
	npinfo->rx_flags &= ~NETPOLL_RX_DROP;

	return budget - work;
}
static void poll_napi(struct net_device *dev)
{
	struct napi_struct *napi;
	int budget = 16;

	list_for_each_entry(napi, &dev->napi_list, dev_list) {
		if (napi->poll_owner != smp_processor_id() &&
		    spin_trylock(&napi->poll_lock)) {
			budget = poll_one_napi(dev->npinfo, napi, budget);
			spin_unlock(&napi->poll_lock);
			if (!budget)
				break;
		}
	}
}
static void service_arp_queue(struct netpoll_info *npi)
{
	if (npi) {
		struct sk_buff *skb;

		while ((skb = skb_dequeue(&npi->arp_tx)))
			arp_reply(skb);
	}
}
void netpoll_poll(struct netpoll *np)
{
	struct net_device *dev = np->dev;

	if (!dev || !netif_running(dev) || !dev->poll_controller)
		return;

	/* Process pending work on NIC */
	dev->poll_controller(dev);

	poll_napi(dev);

	service_arp_queue(dev->npinfo);

	zap_completion_queue();
}
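
/*
 * A minimal sketch (not part of this file; names hypothetical) of the
 * ->poll_controller hook a driver must provide for netpoll_poll() to work.
 * The common pattern is simply to run the interrupt handler with the
 * device's IRQ fenced off:
 *
 *	static void mydrv_netpoll(struct net_device *dev)
 *	{
 *		disable_irq(dev->irq);
 *		mydrv_interrupt(dev->irq, dev);
 *		enable_irq(dev->irq);
 *	}
 */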
static void refill_skbs(void)
{
	struct sk_buff *skb;
	unsigned long flags;

	spin_lock_irqsave(&skb_pool.lock, flags);
	while (skb_pool.qlen < MAX_SKBS) {
		skb = alloc_skb(MAX_SKB_SIZE, GFP_ATOMIC);
		if (!skb)
			break;
		__skb_queue_tail(&skb_pool, skb);
	}
	spin_unlock_irqrestore(&skb_pool.lock, flags);
}
static void zap_completion_queue(void)
{
	unsigned long flags;
	struct softnet_data *sd = &get_cpu_var(softnet_data);

	if (sd->completion_queue) {
		struct sk_buff *clist;

		local_irq_save(flags);
		clist = sd->completion_queue;
		sd->completion_queue = NULL;
		local_irq_restore(flags);

		while (clist != NULL) {
			struct sk_buff *skb = clist;
			clist = clist->next;
			if (skb->destructor) {
				atomic_inc(&skb->users);
				dev_kfree_skb_any(skb); /* put this one back */
			} else {
				__kfree_skb(skb);
			}
		}
	}

	put_cpu_var(softnet_data);
}
static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve)
{
	int count = 0;
	struct sk_buff *skb;

	zap_completion_queue();
	refill_skbs();
repeat:
	skb = alloc_skb(len, GFP_ATOMIC);
	if (!skb)
		skb = skb_dequeue(&skb_pool);
	if (!skb) {
		if (++count < 10) {
			netpoll_poll(np);
			goto repeat;
		}
		return NULL;
	}

	atomic_set(&skb->users, 1);
	skb_reserve(skb, reserve);
	return skb;
}
static int netpoll_owner_active(struct net_device *dev)
{
	struct napi_struct *napi;

	list_for_each_entry(napi, &dev->napi_list, dev_list) {
		if (napi->poll_owner == smp_processor_id())
			return 1;
	}
	return 0;
}
static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
{
	int status = NETDEV_TX_BUSY;
	unsigned long tries;
	struct net_device *dev = np->dev;
	struct netpoll_info *npinfo = np->dev->npinfo;

	if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
		__kfree_skb(skb);
		return;
	}

	/* don't get messages out of order, and no recursion */
	if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) {
		unsigned long flags;

		local_irq_save(flags);
		/* try until next clock tick */
		for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
		     tries > 0; --tries) {
			if (netif_tx_trylock(dev)) {
				if (!netif_queue_stopped(dev) &&
				    !netif_subqueue_stopped(dev, skb))
					status = dev->hard_start_xmit(skb, dev);
				netif_tx_unlock(dev);

				if (status == NETDEV_TX_OK)
					break;
			}

			/* tickle device: maybe there is some cleanup to do */
			netpoll_poll(np);

			udelay(USEC_PER_POLL);
		}
		local_irq_restore(flags);
	}

	if (status != NETDEV_TX_OK) {
		skb_queue_tail(&npinfo->txq, skb);
		schedule_delayed_work(&npinfo->tx_work, 0);
	}
}
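
/* For reference: the retry loop above spins for at most one clock tick.
 * With HZ=1000 that is jiffies_to_usecs(1)/USEC_PER_POLL = 1000/50 = 20
 * attempts; with HZ=100 it is 10000/50 = 200. Whatever remains unsent is
 * handed to queue_process() through npinfo->txq.
 */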
void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
{
	int total_len, eth_len, ip_len, udp_len;
	struct sk_buff *skb;
	struct udphdr *udph;
	struct iphdr *iph;
	struct ethhdr *eth;

	udp_len = len + sizeof(*udph);
	ip_len = eth_len = udp_len + sizeof(*iph);
	total_len = eth_len + ETH_HLEN + NET_IP_ALIGN;

	skb = find_skb(np, total_len, total_len - len);
	if (!skb)
		return;

	skb_copy_to_linear_data(skb, msg, len);
	skb->len += len;

	skb_push(skb, sizeof(*udph));
	skb_reset_transport_header(skb);
	udph = udp_hdr(skb);
	udph->source = htons(np->local_port);
	udph->dest = htons(np->remote_port);
	udph->len = htons(udp_len);
	udph->check = 0;
	udph->check = csum_tcpudp_magic(htonl(np->local_ip),
					htonl(np->remote_ip),
					udp_len, IPPROTO_UDP,
					csum_partial((unsigned char *)udph, udp_len, 0));
	if (udph->check == 0)
		udph->check = CSUM_MANGLED_0;

	skb_push(skb, sizeof(*iph));
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);

	/* iph->version = 4; iph->ihl = 5; */
	put_unaligned(0x45, (unsigned char *)iph);
	iph->tos      = 0;
	put_unaligned(htons(ip_len), &(iph->tot_len));
	iph->id       = 0;
	iph->frag_off = 0;
	iph->ttl      = 64;
	iph->protocol = IPPROTO_UDP;
	iph->check    = 0;
	put_unaligned(htonl(np->local_ip), &(iph->saddr));
	put_unaligned(htonl(np->remote_ip), &(iph->daddr));
	iph->check    = ip_fast_csum((unsigned char *)iph, iph->ihl);

	eth = (struct ethhdr *) skb_push(skb, ETH_HLEN);
	skb_reset_mac_header(skb);
	skb->protocol = eth->h_proto = htons(ETH_P_IP);
	memcpy(eth->h_source, np->dev->dev_addr, ETH_ALEN);
	memcpy(eth->h_dest, np->remote_mac, ETH_ALEN);

	skb->dev = np->dev;

	netpoll_send_skb(np, skb);
}
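
/*
 * Sketch of a typical caller (netconsole-style; write_msg is hypothetical):
 * messages are chunked so that each call fits within one pool skb.
 *
 *	static void write_msg(struct netpoll *np, const char *msg, int len)
 *	{
 *		int frag;
 *
 *		while (len > 0) {
 *			frag = min(len, MAX_UDP_CHUNK);
 *			netpoll_send_udp(np, msg, frag);
 *			msg += frag;
 *			len -= frag;
 *		}
 *	}
 */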
static void arp_reply(struct sk_buff *skb)
{
	struct netpoll_info *npinfo = skb->dev->npinfo;
	struct arphdr *arp;
	unsigned char *arp_ptr;
	int size, type = ARPOP_REPLY, ptype = ETH_P_ARP;
	__be32 sip, tip;
	unsigned char *sha;
	struct sk_buff *send_skb;
	struct netpoll *np = NULL;

	if (npinfo->rx_np && npinfo->rx_np->dev == skb->dev)
		np = npinfo->rx_np;
	if (!np)
		return;

	/* No arp on this interface */
	if (skb->dev->flags & IFF_NOARP)
		return;

	if (!pskb_may_pull(skb, arp_hdr_len(skb->dev)))
		return;

	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);
	arp = arp_hdr(skb);

	if ((arp->ar_hrd != htons(ARPHRD_ETHER) &&
	     arp->ar_hrd != htons(ARPHRD_IEEE802)) ||
	    arp->ar_pro != htons(ETH_P_IP) ||
	    arp->ar_op != htons(ARPOP_REQUEST))
		return;

	/* payload layout: sender hw, sender IP, target hw, target IP */
	arp_ptr = (unsigned char *)(arp+1);
	/* save the location of the src hw addr */
	sha = arp_ptr;
	arp_ptr += skb->dev->addr_len;
	memcpy(&sip, arp_ptr, 4);
	arp_ptr += 4;
	/* if we actually cared about dst hw addr, it would get copied here */
	arp_ptr += skb->dev->addr_len;
	memcpy(&tip, arp_ptr, 4);

	/* Should we ignore arp? */
	if (tip != htonl(np->local_ip) ||
	    ipv4_is_loopback(tip) || ipv4_is_multicast(tip))
		return;

	size = arp_hdr_len(skb->dev);
	send_skb = find_skb(np, size + LL_RESERVED_SPACE(np->dev),
			    LL_RESERVED_SPACE(np->dev));
	if (!send_skb)
		return;

	skb_reset_network_header(send_skb);
	arp = (struct arphdr *) skb_put(send_skb, size);
	send_skb->dev = skb->dev;
	send_skb->protocol = htons(ETH_P_ARP);

	/* Fill the device header for the ARP frame */
	if (dev_hard_header(send_skb, skb->dev, ptype,
			    sha, np->dev->dev_addr,
			    send_skb->len) < 0) {
		kfree_skb(send_skb);
		return;
	}

	/*
	 * Fill out the arp protocol part.
	 *
	 * we only support ethernet device type,
	 * which (according to RFC 1390) should always equal 1 (Ethernet).
	 */

	arp->ar_hrd = htons(np->dev->type);
	arp->ar_pro = htons(ETH_P_IP);
	arp->ar_hln = np->dev->addr_len;
	arp->ar_pln = 4;
	arp->ar_op = htons(type);

	/* reply payload: our hw addr, requested IP, requester's hw addr/IP */
	arp_ptr = (unsigned char *)(arp + 1);
	memcpy(arp_ptr, np->dev->dev_addr, np->dev->addr_len);
	arp_ptr += np->dev->addr_len;
	memcpy(arp_ptr, &tip, 4);
	arp_ptr += 4;
	memcpy(arp_ptr, sha, np->dev->addr_len);
	arp_ptr += np->dev->addr_len;
	memcpy(arp_ptr, &sip, 4);

	netpoll_send_skb(np, send_skb);
}
int __netpoll_rx(struct sk_buff *skb)
{
	int proto, len, ulen;
	struct iphdr *iph;
	struct udphdr *uh;
	struct netpoll_info *npi = skb->dev->npinfo;
	struct netpoll *np = npi->rx_np;

	if (!np)
		goto out;
	if (skb->dev->type != ARPHRD_ETHER)
		goto out;

	/* check if netpoll clients need ARP */
	if (skb->protocol == htons(ETH_P_ARP) &&
	    atomic_read(&trapped)) {
		skb_queue_tail(&npi->arp_tx, skb);
		return 1;
	}

	proto = ntohs(eth_hdr(skb)->h_proto);
	if (proto != ETH_P_IP)
		goto out;
	if (skb->pkt_type == PACKET_OTHERHOST)
		goto out;
	if (skb_shared(skb))
		goto out;

	iph = (struct iphdr *)skb->data;
	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
		goto out;
	if (iph->ihl < 5 || iph->version != 4)
		goto out;
	if (!pskb_may_pull(skb, iph->ihl*4))
		goto out;
	if (ip_fast_csum((u8 *)iph, iph->ihl) != 0)
		goto out;

	len = ntohs(iph->tot_len);
	if (skb->len < len || len < iph->ihl*4)
		goto out;

	/*
	 * Our transport medium may have padded the buffer out.
	 * Now we trim to the true length of the frame.
	 */
	if (pskb_trim_rcsum(skb, len))
		goto out;

	if (iph->protocol != IPPROTO_UDP)
		goto out;

	len -= iph->ihl*4;
	uh = (struct udphdr *)(((char *)iph) + iph->ihl*4);
	ulen = ntohs(uh->len);

	if (ulen != len)
		goto out;
	if (checksum_udp(skb, uh, ulen, iph->saddr, iph->daddr))
		goto out;
	if (np->local_ip && np->local_ip != ntohl(iph->daddr))
		goto out;
	if (np->remote_ip && np->remote_ip != ntohl(iph->saddr))
		goto out;
	if (np->local_port && np->local_port != ntohs(uh->dest))
		goto out;

	np->rx_hook(np, ntohs(uh->source),
		    (char *)(uh+1),
		    ulen - sizeof(struct udphdr));

	kfree_skb(skb);
	return 1;

out:
	if (atomic_read(&trapped)) {
		kfree_skb(skb);
		return 1;
	}

	return 0;
}
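
/*
 * Sketch of a client rx_hook (hypothetical name): this is the callback
 * invoked above once a UDP packet has passed every filter. It runs in
 * netpoll context with interrupts potentially disabled, so it must not
 * sleep.
 *
 *	static void my_rx_hook(struct netpoll *np, int port, char *msg, int len)
 *	{
 *		// consume len bytes of payload sent to our local port
 *	}
 */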
void netpoll_print_options(struct netpoll *np)
{
	DECLARE_MAC_BUF(mac);

	printk(KERN_INFO "%s: local port %d\n",
	       np->name, np->local_port);
	printk(KERN_INFO "%s: local IP %d.%d.%d.%d\n",
	       np->name, HIPQUAD(np->local_ip));
	printk(KERN_INFO "%s: interface %s\n",
	       np->name, np->dev_name);
	printk(KERN_INFO "%s: remote port %d\n",
	       np->name, np->remote_port);
	printk(KERN_INFO "%s: remote IP %d.%d.%d.%d\n",
	       np->name, HIPQUAD(np->remote_ip));
	printk(KERN_INFO "%s: remote ethernet address %s\n",
	       np->name, print_mac(mac, np->remote_mac));
}
int netpoll_parse_options(struct netpoll *np, char *opt)
{
	char *cur = opt, *delim;
	int i;

	if (*cur != '@') {
		if ((delim = strchr(cur, '@')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->local_port = simple_strtol(cur, NULL, 10);
		cur = delim;
	}
	cur++;

	if (*cur != '/') {
		if ((delim = strchr(cur, '/')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->local_ip = ntohl(in_aton(cur));
		cur = delim;
	}
	cur++;

	if (*cur != ',') {
		/* parse out dev name */
		if ((delim = strchr(cur, ',')) == NULL)
			goto parse_failed;
		*delim = 0;
		strlcpy(np->dev_name, cur, sizeof(np->dev_name));
		cur = delim;
	}
	cur++;

	if (*cur != '@') {
		/* dst port */
		if ((delim = strchr(cur, '@')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->remote_port = simple_strtol(cur, NULL, 10);
		cur = delim;
	}
	cur++;

	/* dst ip */
	if ((delim = strchr(cur, '/')) == NULL)
		goto parse_failed;
	*delim = 0;
	np->remote_ip = ntohl(in_aton(cur));
	cur = delim + 1;

	if (*cur != 0) {
		/* MAC address: colon-separated hex bytes */
		for (i = 0; i < 5; i++) {
			if ((delim = strchr(cur, ':')) == NULL)
				goto parse_failed;
			*delim = 0;
			np->remote_mac[i] = simple_strtol(cur, NULL, 16);
			cur = delim + 1;
		}
		np->remote_mac[5] = simple_strtol(cur, NULL, 16);
	}

	netpoll_print_options(np);

	return 0;

 parse_failed:
	printk(KERN_INFO "%s: couldn't parse config at %s!\n",
	       np->name, cur);
	return -1;
}
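
/*
 * Example of the config string this parser accepts (addresses purely
 * illustrative), in the usual netconsole format
 * local_port@local_ip/dev_name,remote_port@remote_ip/remote_mac:
 *
 *	4444@10.0.0.1/eth0,9353@10.0.0.2/12:34:56:78:9a:bc
 *
 * A field may be left empty (keeping its delimiter) to retain the
 * caller's default for that value.
 */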
int netpoll_setup(struct netpoll *np)
{
	struct net_device *ndev = NULL;
	struct in_device *in_dev;
	struct netpoll_info *npinfo;
	unsigned long flags;
	int err;

	if (np->dev_name)
		ndev = dev_get_by_name(&init_net, np->dev_name);
	if (!ndev) {
		printk(KERN_ERR "%s: %s doesn't exist, aborting.\n",
		       np->name, np->dev_name);
		return -ENODEV;
	}

	np->dev = ndev;
	if (!ndev->npinfo) {
		npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL);
		if (!npinfo) {
			err = -ENOMEM;
			goto release;
		}

		npinfo->rx_flags = 0;
		npinfo->rx_np = NULL;

		spin_lock_init(&npinfo->rx_lock);
		skb_queue_head_init(&npinfo->arp_tx);
		skb_queue_head_init(&npinfo->txq);
		INIT_DELAYED_WORK(&npinfo->tx_work, queue_process);

		atomic_set(&npinfo->refcnt, 1);
	} else {
		npinfo = ndev->npinfo;
		atomic_inc(&npinfo->refcnt);
	}

	if (!ndev->poll_controller) {
		printk(KERN_ERR "%s: %s doesn't support polling, aborting.\n",
		       np->name, np->dev_name);
		err = -ENOTSUPP;
		goto release;
	}

	if (!netif_running(ndev)) {
		unsigned long atmost, atleast;

		printk(KERN_INFO "%s: device %s not up yet, forcing it\n",
		       np->name, np->dev_name);

		rtnl_lock();
		err = dev_open(ndev);
		rtnl_unlock();

		if (err) {
			printk(KERN_ERR "%s: failed to open %s\n",
			       np->name, ndev->name);
			goto release;
		}

		atleast = jiffies + HZ/10;
		atmost = jiffies + 4*HZ;
		while (!netif_carrier_ok(ndev)) {
			if (time_after(jiffies, atmost)) {
				printk(KERN_NOTICE
				       "%s: timeout waiting for carrier\n",
				       np->name);
				break;
			}
			cond_resched();
		}

		/* If carrier appears to come up instantly, we don't
		 * trust it and pause so that we don't pump all our
		 * queued console messages into the bitbucket.
		 */
		if (time_before(jiffies, atleast)) {
			printk(KERN_NOTICE "%s: carrier detect appears"
			       " untrustworthy, waiting 4 seconds\n",
			       np->name);
			msleep(4000);
		}
	}

	if (!np->local_ip) {
		rcu_read_lock();
		in_dev = __in_dev_get_rcu(ndev);

		if (!in_dev || !in_dev->ifa_list) {
			rcu_read_unlock();
			printk(KERN_ERR "%s: no IP address for %s, aborting\n",
			       np->name, np->dev_name);
			err = -EDESTADDRREQ;
			goto release;
		}

		np->local_ip = ntohl(in_dev->ifa_list->ifa_local);
		rcu_read_unlock();
		printk(KERN_INFO "%s: local IP %d.%d.%d.%d\n",
		       np->name, HIPQUAD(np->local_ip));
	}

	if (np->rx_hook) {
		spin_lock_irqsave(&npinfo->rx_lock, flags);
		npinfo->rx_flags |= NETPOLL_RX_ENABLED;
		npinfo->rx_np = np;
		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
	}

	/* fill up the skb queue */
	refill_skbs();

	/* last thing to do is link it to the net device structure */
	ndev->npinfo = npinfo;

	/* avoid racing with NAPI reading npinfo */
	msleep(1);

	return 0;

 release:
	if (!ndev->npinfo)
		kfree(npinfo);
	np->dev = NULL;
	dev_put(ndev);
	return err;
}
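
/*
 * Sketch of client bring-up (names and values hypothetical, in the style
 * of netconsole):
 *
 *	static struct netpoll np = {
 *		.name		= "myclient",
 *		.local_port	= 6665,
 *		.remote_port	= 6666,
 *		.remote_mac	= {0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
 *	};
 *
 *	if (netpoll_parse_options(&np, config) || netpoll_setup(&np))
 *		return -EINVAL;
 */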
static int __init netpoll_init(void)
{
	skb_queue_head_init(&skb_pool);
	return 0;
}
core_initcall(netpoll_init);
void netpoll_cleanup(struct netpoll *np)
{
	struct netpoll_info *npinfo;
	unsigned long flags;

	if (np->dev) {
		npinfo = np->dev->npinfo;
		if (npinfo) {
			if (npinfo->rx_np == np) {
				spin_lock_irqsave(&npinfo->rx_lock, flags);
				npinfo->rx_np = NULL;
				npinfo->rx_flags &= ~NETPOLL_RX_ENABLED;
				spin_unlock_irqrestore(&npinfo->rx_lock, flags);
			}

			if (atomic_dec_and_test(&npinfo->refcnt)) {
				skb_queue_purge(&npinfo->arp_tx);
				skb_queue_purge(&npinfo->txq);
				cancel_rearming_delayed_work(&npinfo->tx_work);

				/* clean after last, unfinished work */
				__skb_queue_purge(&npinfo->txq);

				kfree(npinfo);
				np->dev->npinfo = NULL;
			}
		}

		dev_put(np->dev);
	}

	np->dev = NULL;
}
int netpoll_trap(void)
{
	return atomic_read(&trapped);
}

void netpoll_set_trap(int trap)
{
	if (trap)
		atomic_inc(&trapped);
	else
		atomic_dec(&trapped);
}
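
/*
 * A trapping client such as a network debugger is expected to bracket its
 * exclusive use of the interface roughly like this (sketch):
 *
 *	netpoll_set_trap(1);	// divert rx traffic to netpoll
 *	// ... exchange packets via netpoll_poll()/netpoll_send_udp() ...
 *	netpoll_set_trap(0);	// hand the device back to the stack
 */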
EXPORT_SYMBOL(netpoll_set_trap);
EXPORT_SYMBOL(netpoll_trap);
EXPORT_SYMBOL(netpoll_print_options);
EXPORT_SYMBOL(netpoll_parse_options);
EXPORT_SYMBOL(netpoll_setup);
EXPORT_SYMBOL(netpoll_cleanup);
EXPORT_SYMBOL(netpoll_send_udp);
EXPORT_SYMBOL(netpoll_poll);